def show_corners(points, calib, corners_anchors, ctr_points=None, sv_img_path=None):
    """
    Input:
        points: [N, 3]
        calib: a calib object
        corners_anchors: [N, 8, 3]
    """
    if 'mlab' not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    # First project the points and the anchor corners back to the velo coord
    points = calib.project_rect_to_velo(points)
    corners_anchors = np.reshape(corners_anchors, [-1, 3])
    corners_anchors = calib.project_rect_to_velo(corners_anchors)
    corners_anchors = np.reshape(corners_anchors, [-1, 8, 3])

    # Now that we have the corners_anchors, we only have to draw them
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(points, fig=fig)
    if ctr_points is not None:
        draw_lidar(ctr_points, fig=fig, pts_scale=0.10, pts_color=(1.0, 0.0, 0.0))
    draw_gt_boxes3d(corners_anchors, fig=fig)
    if sv_img_path is not None:
        mlab.savefig(sv_img_path, figure=fig)
    else:
        mlab.show(1)
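# A minimal invocation sketch for show_corners (hypothetical: the `dataset`
# accessors follow the kitti_object interface used later in this file, and
# the anchor tensor is a zero-filled placeholder, not real data).
def _show_corners_example(dataset):
    calib = dataset.get_calibration(0)
    pc_velo = dataset.get_lidar(0)[:, 0:3]
    points_rect = calib.project_velo_to_rect(pc_velo)  # [N, 3] in rect coord
    anchors_rect = np.zeros((5, 8, 3))                 # [5, 8, 3] corners in rect coord
    show_corners(points_rect, calib, anchors_rect, sv_img_path='corners.png')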
def show_lidar_with_boxes(pc_velo, objects, calib, sensor, img_fov=False,
                          img_width=None, img_height=None):
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    if 'mlab' not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple
    #from viz_util import draw_lidar
    from viz_util import draw_gt_boxes3d

    view = getattr(calib, sensor)
    print(('All point num: ', pc_velo.shape[0]))
    #fig = mlab.figure(figure=None, bgcolor=(0,0,0),
    #                  fgcolor=None, engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, sensor, 0, 0,
                                         img_width, img_height)
        print(('FOV point num: ', pc_velo.shape[0]))
    #draw_lidar(pc_velo, fig=fig)
    #fig = draw_nusc_lidar(pc_velo, pts_scale=0.1, pts_mode='sphere')
    fig = utils.draw_nusc_lidar(pc_velo)

    obj_mean = np.array([0.0, 0.0, 0.0])
    obj_count = 0
    for obj in objects:
        if obj.type == 'DontCare':
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, np.eye(4))  # (8,2), (8,3)
        box3d_pts_3d_global = calib.project_cam_to_global(box3d_pts_3d.T, sensor)  # (3,8)
        box3d_pts_3d_velo = calib.project_global_to_lidar(box3d_pts_3d_global).T  # (8,3)
        # box3d_pts_3d_velo = box3d_pts_3d
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, np.eye(4))  # (2,2), (2,3)
        ori3d_pts_3d_global = calib.project_cam_to_global(ori3d_pts_3d.T, sensor)  # (3,2)
        ori3d_pts_3d_velo = calib.project_global_to_lidar(ori3d_pts_3d_global).T  # (2,3)
        # ori3d_pts_3d_velo = ori3d_pts_3d
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
                    tube_radius=None, line_width=1, figure=fig)
        obj_mean += np.sum(box3d_pts_3d_velo, axis=0)
        obj_count += 1
    obj_mean = obj_mean / obj_count
    print("mean:", obj_mean)
    mlab.show(1)
def visualize_one_sample(self, old_points, expand_points, gt_box_3d, prop_box_3d,
                         box2d_center_rect):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    # fig = draw_lidar(pc_in_prop_box, pts_color=(1,1,1))
    fig = draw_lidar(expand_points[:, :3], pts_color=(1, 1, 1))
    fig = draw_lidar(old_points[:, :3], pts_color=(0, 1, 0), fig=fig)
    fig = draw_gt_boxes3d([gt_box_3d], fig, color=(1, 0, 0))
    fig = draw_gt_boxes3d([prop_box_3d], fig, draw_text=False, color=(1, 1, 1))
    # roi_feature_map
    # roi_features_size = 7 * 7 * 32
    # img_roi_features = prop.roi_features[0:roi_features_size].reshape((7, 7, -1))
    # bev_roi_features = prop.roi_features[roi_features_size:].reshape((7, 7, -1))
    # img_roi_features = np.amax(img_roi_features, axis=-1)
    # bev_roi_features = np.amax(bev_roi_features, axis=-1)
    # fig1 = mlab.figure(figure=None, bgcolor=(0,0,0),
    #                    fgcolor=None, engine=None, size=(500, 500))
    # fig2 = mlab.figure(figure=None, bgcolor=(0,0,0),
    #                    fgcolor=None, engine=None, size=(500, 500))
    # mlab.imshow(img_roi_features, colormap='gist_earth', name='img_roi_features', figure=fig1)
    # mlab.imshow(bev_roi_features, colormap='gist_earth', name='bev_roi_features', figure=fig2)
    mlab.plot3d([0, box2d_center_rect[0][0]], [0, box2d_center_rect[0][1]],
                [0, box2d_center_rect[0][2]], color=(1, 1, 1), tube_radius=None,
                figure=fig)
    raw_input()
def show_lidar_with_boxes(pc_velo, objects, calib, img_fov=False,
                          img_width=None, img_height=None):
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    if 'mlab' not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(('All point num: ', pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0,
                                         img_width, img_height)
        print(('FOV point num: ', pc_velo.shape[0]))
    draw_lidar(pc_velo, fig=fig)

    for obj in objects:
        if obj.type == 'DontCare':
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
                    tube_radius=None, line_width=1, figure=fig)
    mlab.show(1)
def visualize_proposals(self, pc_rect, prop_boxes, neg_boxes, gt_boxes):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_gt_boxes3d

    fig = draw_lidar(pc_rect)
    fig = draw_gt_boxes3d(prop_boxes, fig, draw_text=False, color=(1, 0, 0))
    fig = draw_gt_boxes3d(neg_boxes, fig, draw_text=False, color=(0, 1, 0))
    fig = draw_gt_boxes3d(gt_boxes, fig, draw_text=False, color=(1, 1, 1))
    raw_input()
def viz_frame(self, pc_rect, mask, gt_boxes, proposals):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    fig = draw_lidar(pc_rect)
    fig = draw_lidar(pc_rect[mask == 1], fig=fig, pts_color=(1, 1, 1))
    fig = draw_gt_boxes3d(gt_boxes, fig, draw_text=False, color=(1, 1, 1))
    fig = draw_gt_boxes3d(proposals, fig, draw_text=False, color=(1, 0, 0))
    raw_input()
def tf_main_points_in_hull():
    import os
    import sys
    import numpy as np
    import tensorflow as tf  # TF1-style graph/session API is used below

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    print(BASE_DIR)
    sys.path.append(os.path.join(BASE_DIR, '../utils'))
    import utils

    def get_2048_points(depth):
        num_point = depth.shape[0]
        pc_in_box_fov = depth[:, :3]
        if num_point > 2048:
            choice = np.random.choice(depth.shape[0], 2048, replace=False)
            pc_in_box_fov = depth[choice, :3]
        return pc_in_box_fov

    pc_in_box_fov = get_2048_points(
        np.loadtxt('/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt'))
    calib = utils.SUNRGBD_Calibration(
        "/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt.calib")
    objects = utils.read_sunrgbd_label(
        "/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt.label")
    box_list = []
    for obj_idx in range(len(objects)):
        obj = objects[obj_idx]
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
        box_list.append(box3d_pts_3d)

    tf_points1 = tf.Variable(pc_in_box_fov, dtype=tf.float32)
    # print(box_list)
    e = np.array(box_list)
    tf_boxes1 = tf.Variable(np.array(box_list), dtype=tf.float32)
    tf_points2 = tf.expand_dims(tf_points1, axis=0)
    tf_boxes2 = tf.expand_dims(tf_boxes1, axis=0)
    tf_idx_in_hull = tf_points_in_hull(tf_points2, tf_boxes2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf_idx_in_hull_1 = sess.run(tf_idx_in_hull)
        print(tf_idx_in_hull_1.shape)
        tf_idx_in_hull_2 = tf_idx_in_hull_1.sum(axis=2)

    import mayavi.mlab as mlab
    fig = mlab.figure(figure=None, bgcolor=(0.4, 0.4, 0.4), fgcolor=None,
                      engine=None, size=(1000, 500))
    mlab.points3d(pc_in_box_fov[:, 0], pc_in_box_fov[:, 1], pc_in_box_fov[:, 2],
                  tf_idx_in_hull_2[0], mode='point', colormap='gnuplot',
                  scale_factor=1, figure=fig)
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2,
                  figure=fig)
    sys.path.append(os.path.join(BASE_DIR, '../mayavi'))
    from viz_util import draw_gt_boxes3d
    draw_gt_boxes3d(box_list, fig, color=(1, 0, 0))
    mlab.orientation_axes()
    mlab.show()
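# A NumPy cross-check for the TF points-in-hull op above: a sketch using the
# scipy Delaunay membership test that the extract_pc_in_box3d helpers in this
# codebase rely on (assumption: tf_points_in_hull should agree with this
# per-box mask, up to dtype).
def np_points_in_hull(points, corners_3d):
    """points: (N,3); corners_3d: (8,3) box corners. Returns bool mask (N,)."""
    from scipy.spatial import Delaunay
    hull = Delaunay(corners_3d)
    return hull.find_simplex(points) >= 0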
def visualize(self, img_id, box2d, ps, segp, box3d, corners_3d_pred, center):
    img_filename = os.path.join(glb.IMG_DIR, '%06d.jpg' % (img_id))
    img = cv2.imread(img_filename)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.rectangle(img, (int(box2d[0]), int(box2d[1])),
                  (int(box2d[2]), int(box2d[3])), (0, 255, 0), 3)
    Image.fromarray(img).show()

    # Draw figures
    fig = mlab.figure(figure=None, bgcolor=(0.6, 0.6, 0.6), fgcolor=None,
                      engine=None, size=(1000, 500))
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2,
                  figure=fig)
    mlab.points3d(ps[:, 0], ps[:, 1], ps[:, 2], segp, mode='point',
                  colormap='gnuplot', scale_factor=1, figure=fig)
    draw_gt_boxes3d([box3d], fig, color=(0, 0, 1), draw_text=False)
    draw_gt_boxes3d([corners_3d_pred], fig, color=(0, 1, 0), draw_text=False)
    mlab.points3d(center[0], center[1], center[2], color=(0, 1, 0),
                  mode='sphere', scale_factor=0.4, figure=fig)
    mlab.orientation_axes()
    input()
def demo():
    if 'mlab' not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_gt_boxes3d

    dataset = kitti_object_infer(
        '/media/vdc/backup/database_backup/Chris/f-pointnet/2011_09_26_drive_0001_sync')
    calibs = dataset.get_calibration()
    #calibs = calib_infer('/media/vdc/backup/database_backup/Chris/f-pointnet/2011_09_26_drive_0001_sync/2011_09_26_calib/2011_09_26')
    #dataset = kitti_object_infer('D:\\Detectron_Data\\2011_09_26_drive_0001_sync')
    net = gluoncv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True,
                                      ctx=mx.gpu(0))
    sess, ops = get_session_and_ops(batch_size=BATCH_SIZE, num_point=NUM_POINT)
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    for i in range(len(dataset)):
        time1 = time.time()
        data = extract_data(dataset, net, i)  # ~0.9 s
        TEST_DATASET = frustum_data_infer(data, 1024, rotate_to_center=True,
                                          one_hot=True)  # microsecond-level
        box_3d_list = test_from_rgb_detection(TEST_DATASET, sess, ops,
                                              FLAGS.output + '.pickle',
                                              FLAGS.output)  # ~0.1 s
        time2 = time.time()
        print('time: ', time2 - time1)
        mlab.clf(fig)
        img, _ = dataset.get_image(i)
        pc = dataset.get_lidar(i)[:, 0:3]
        show_lidar(pc, calibs, fig, img_fov=True, img_width=img.shape[1],
                   img_height=img.shape[0])
        box3d_pts_3d_velo_list = []
        for box_3d in box_3d_list:
            box3d_pts_3d_velo = calibs.project_rect_to_velo(box_3d)
            box3d_pts_3d_velo_list.append(box3d_pts_3d_velo)
        draw_gt_boxes3d(box3d_pts_3d_velo_list, fig)
        '''
        img, _ = dataset.get_image(i)
        print('img: ', img.shape)
        pc = dataset.get_lidar(i)[:, 0:3]
        cv2.imshow('0', img)
        #show_lidar(pc, calibs, fig, img_fov=False, img_width=img.shape[1], img_height=img.shape[0])
        #show_lidar_on_image(pc, img, calibs, img_width=img.shape[1], img_height=img.shape[0])
        #cv2.waitKey(1)
        class_IDs, scores, bounding_boxs = get_2d_box_yolo(img, net)
        print('shape: ', class_IDs.shape, scores.shape, bounding_boxs.shape)
        '''
        if i % 10 == 0:
            input()
    input()
def viz_sample(self, sample):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    pc_rect = sample['pointcloud']
    proposal = ProposalObject(sample['proposal_box'])
    proposal.ry = 0
    proposal.t = np.zeros((3,))
    _, prop_box = utils.compute_box_3d(proposal, sample['calib'])
    fig = draw_lidar(pc_rect)
    mask = pc_rect[:, 5] == 1
    fig = draw_lidar(pc_rect[mask], fig=fig, pts_color=(1, 1, 1))
    fig = draw_gt_boxes3d([prop_box], fig, draw_text=False, color=(1, 1, 1))
    raw_input()
def test():
    parser = argparse.ArgumentParser()
    parser.add_argument('--avod_config_path', type=str, dest='avod_config_path',
                        required=True, help='avod_config_path')
    parser.add_argument('--sample_idx', type=str, dest='sample_idx',
                        required=True, help='sample id')
    args = parser.parse_args()

    _, _, _, dataset_config = \
        config_builder.get_configs_from_pipeline_file(
            args.avod_config_path, is_training=False)
    dataset = get_dataset(dataset_config, 'val')
    idx = np.argwhere(dataset.sample_names == args.sample_idx).squeeze()
    # print(idx)
    kitti_samples = dataset.load_samples([idx])
    sample = kitti_samples[0]

    label_mask = np.equal(sample[constants.KEY_LABEL_CLASSES],
                          g_type2onehotclass['Car'] + 1)
    gt_cls = sample[constants.KEY_LABEL_CLASSES][label_mask]
    gt_boxes_3d = sample[constants.KEY_LABEL_BOXES_3D][label_mask]
    gt_boxes_bev = []
    for i in range(len(gt_cls)):
        gt_obj = box_3d_encoder.box_3d_to_object_label(gt_boxes_3d[i], gt_cls[i])
        gt_corner_3d = compute_box_3d(gt_obj)
        gt_boxes_bev.append(gt_corner_3d[:4, [0, 2]])
    print(gt_boxes_bev)

    rpn_out = pickle.load(open("rpn_out/%s" % sample[constants.KEY_SAMPLE_NAME], "rb"))
    pos_prop = []
    for prop in rpn_out['proposals_and_scores']:
        corners = compute_box_3d(box_3d_encoder.box_3d_to_object_label(prop[:7]))
        label_idx, iou = find_match_label(corners[:4, [0, 2]], gt_boxes_bev)
        if iou > 0.65:
            pos_prop.append(corners)

    pc = sample[constants.KEY_POINT_CLOUD].T
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_gt_boxes3d
    fig = draw_lidar(pc)
    fig = draw_gt_boxes3d(pos_prop, fig, draw_text=False, color=(1, 1, 1))
    input()
    # visualize_rpn_out(sample, rpn_out['proposals_and_scores'])
    prediction = pickle.load(open("%s" % sample[constants.KEY_SAMPLE_NAME], "rb"))
    print(prediction)
    visualize(dataset, sample, prediction)
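# find_match_label above matches proposals to labels by BEV IoU over (4,2)
# corner polygons; a hedged reference implementation using shapely (the
# project's own helper may compute this differently).
def bev_polygon_iou(corners_a, corners_b):
    """corners_a, corners_b: (4,2) BEV box corners in ring order."""
    from shapely.geometry import Polygon
    pa, pb = Polygon(corners_a), Polygon(corners_b)
    inter = pa.intersection(pb).area
    return inter / (pa.area + pb.area - inter)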
def visualize_rpn_out(sample, proposals_and_scores, rpn_score_threshold=0.1):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_gt_boxes3d

    nms_idxs = nms_on_bev(proposals_and_scores, 0.5)
    proposals_and_scores = proposals_and_scores[nms_idxs]
    proposal_boxes_3d = proposals_and_scores[:, 0:7]
    proposal_scores = proposals_and_scores[:, 7]
    score_mask = proposal_scores > rpn_score_threshold
    # 3D box in the format [x, y, z, l, w, h, ry]
    proposal_boxes_3d = proposal_boxes_3d[score_mask]
    proposal_scores = proposal_scores[score_mask]
    proposal_objs = list(map(
        lambda pair: ProposalObject(pair[0], pair[1], None, None),
        zip(proposal_boxes_3d, proposal_scores)))
    proposal_corners = list(map(lambda obj: compute_box_3d(obj), proposal_objs))

    pc = sample[constants.KEY_POINT_CLOUD].T
    fig = draw_lidar(pc)
    fig = draw_gt_boxes3d(proposal_corners, fig, draw_text=False, color=(1, 1, 1))
    input()
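# Usage sketch for visualize_rpn_out with one hand-made proposal in the
# [x, y, z, l, w, h, ry] + score layout noted above (values are fabricated
# for illustration; `sample` is assumed to come from dataset.load_samples).
def _visualize_rpn_out_example(sample):
    fake = np.array([[10.0, 1.0, 0.0, 3.9, 1.6, 1.5, 0.0, 0.9]])  # one car-sized box, score 0.9
    visualize_rpn_out(sample, fake, rpn_score_threshold=0.1)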
def show_lidar_with_depth(
    pc_velo,
    objects,
    calib,
    fig,
    img_fov=False,
    img_width=None,
    img_height=None,
    objects_pred=None,
    depth=None,
    cam_img=None,
    constraint_box=False,
    pc_label=False,
    save=False,
):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    if img_fov:
        pc_velo_index = get_lidar_index_in_image_fov(
            pc_velo[:, :3], calib, 0, 0, img_width, img_height)
        pc_velo = pc_velo[pc_velo_index, :]
        print(("FOV point num: ", pc_velo.shape))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pc_label=pc_label)

    # Draw depth
    if depth is not None:
        depth_pc_velo = calib.project_depth_to_velo(depth, constraint_box)
        intensity = np.ones((depth_pc_velo.shape[0], 1)) * 0.5
        depth_pc_velo = np.hstack((depth_pc_velo, intensity))
        print("depth_pc_velo:", depth_pc_velo.shape)
        print("depth_pc_velo:", type(depth_pc_velo))
        print(depth_pc_velo[:5])
        draw_lidar(depth_pc_velo, fig=fig, pts_color=(1, 1, 1))
        if save:
            data_idx = 0
            vely_dir = "data/obj/training/depth_pc"
            save_filename = os.path.join(vely_dir, "%06d.bin" % (data_idx))
            print(save_filename)
            # np.save(save_filename + ".npy", np.array(depth_pc_velo))
            depth_pc_velo = depth_pc_velo.astype(np.float32)
            depth_pc_velo.tofile(save_filename)

    color = (0, 1, 0)
    for obj in objects:
        if obj.type == "DontCare":
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        print("box3d_pts_3d_velo:")
        print(box3d_pts_3d_velo)
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)

    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type == "DontCare":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )
    mlab.show(1)
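# show_lidar_with_depth expects the caller to own the mayavi figure; a hedged
# driver sketch (the image size is typical KITTI and only illustrative;
# pc_velo, objects, calib are assumed to come from a kitti_object-style dataset).
def _show_lidar_with_depth_example(pc_velo, objects, calib):
    import mayavi.mlab as mlab
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    show_lidar_with_depth(pc_velo, objects, calib, fig,
                          img_fov=True, img_width=1242, img_height=375)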
def demo(data_idx=11, object_idx=0, show_images=True, show_lidar=True,
         show_lidar_2d=True, show_lidar_box=True, show_project=True,
         show_lidar_frustum=True):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))

    # Load data from dataset
    objects = dataset.get_label_objects(data_idx)  # objects = [Object3d(line) for line in lines]
    objects[object_idx].print_object()
    calib = dataset.get_calibration(data_idx)  # utils.Calibration(calib_filename)
    box2d = objects[object_idx].box2d
    xmin, ymin, xmax, ymax = box2d
    box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
    uvdepth = np.zeros((1, 3))
    uvdepth[0, 0:2] = box2d_center
    uvdepth[0, 2] = 20  # some random depth
    box2d_center_rect = calib.project_image_to_rect(uvdepth)
    frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                    box2d_center_rect[0, 0])
    print('frustum_angle:', frustum_angle)
    '''
    Type, truncation, occlusion, alpha: Pedestrian, 0, 0, -0.200000
    2d bbox (x0,y0,x1,y1): 712.400000, 143.000000, 810.730000, 307.920000
    3d bbox h,w,l: 1.890000, 0.480000, 1.200000
    3d bbox location, ry: (1.840000, 1.470000, 8.410000), 0.010000
    '''
    img = dataset.get_image(data_idx)  # (370, 1224, 3)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image shape: ', img.shape))
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]  # (115384, 3)
    calib = dataset.get_calibration(data_idx)  # utils.Calibration(calib_filename)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()

    # Draw 2d and 3d boxes on image
    if show_images:
        print(' -------- 2D/3D bounding boxes in images --------')
        show_image_with_boxes(img, objects, calib)
        raw_input()

    if show_lidar:
        # Show all LiDAR points. Draw 3d box in LiDAR point cloud
        print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
        #show_lidar_with_boxes(pc_velo, objects, calib)
        #raw_input()
        show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)
        raw_input()

    if show_lidar_2d:
        # Visualize LiDAR points on images
        print(' -------- LiDAR points projected to image plane --------')
        show_lidar_on_image(pc_velo, img, calib, img_width, img_height,
                            showtime=True)
        raw_input()

    if show_lidar_box:
        # Show LiDAR points that are in the 3d box
        print(' -------- LiDAR points in a 3D bounding box --------')
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[object_idx],
                                                          calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
        print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))
        fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                          engine=None, size=(1000, 500))
        draw_lidar(box3droi_pc_velo, fig=fig)
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        mlab.show(1)
        raw_input()

    if show_project:
        # UVDepth Image and its backprojection to point clouds
        print(' -------- LiDAR points in a frustum from a 2D box --------')
        imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
            pc_velo, calib, 0, 0, img_width, img_height, True)
        imgfov_pts_2d = pts_2d[fov_inds, :]
        imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)
        cameraUVDepth = np.zeros_like(imgfov_pc_rect)
        cameraUVDepth[:, 0:2] = imgfov_pts_2d
        cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]
        # Show that the points are exactly the same
        backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
        print(imgfov_pc_velo[0:20])
        print(backprojected_pc_velo[0:20])
        fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                          engine=None, size=(1000, 500))
        draw_lidar(backprojected_pc_velo, fig=fig)
        raw_input()

    if show_lidar_frustum:
        # Only display those points that fall into 2d box
        print(' -------- LiDAR points in a frustum from a 2D box --------')
        xmin, ymin, xmax, ymax = \
            objects[object_idx].xmin, objects[object_idx].ymin, \
            objects[object_idx].xmax, objects[object_idx].ymax
        boxfov_pc_velo = \
            get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
        print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))
        fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                          engine=None, size=(1000, 500))
        draw_lidar(boxfov_pc_velo, fig=fig)
        mlab.show(1)
        raw_input()
def demo_object(data_idx=11, object_idx=0):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    def draw_3d_object(pc, color=None):
        ''' Draw lidar points. simplest set up. '''
        fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                          engine=None, size=(1600, 1000))
        if color is None:
            color = (pc[:, 2] - np.min(pc[:, 2])) / (np.max(pc[:, 2]) - np.min(pc[:, 2]))
        # draw points
        #nodes = mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], colormap='gnuplot',
        #                      scale_factor=0.04, figure=fig)
        #nodes.mlab_source.dataset.point_data.scalars = color
        pts = mlab.pipeline.scalar_scatter(pc[:, 0], pc[:, 1], pc[:, 2])
        pts.add_attribute(color, 'colors')
        pts.data.point_data.set_active_scalars('colors')
        g = mlab.pipeline.glyph(pts)
        g.glyph.glyph.scale_factor = 0.05        # set scaling for all the points
        g.glyph.scale_mode = 'data_scaling_off'  # make all the points same size
        # draw origin
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
        # draw axis
        axes = np.array([
            [2., 0., 0., 0.],
            [0., 2., 0., 0.],
            [0., 0., 2., 0.],
        ], dtype=np.float64)
        mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]],
                    color=(1, 0, 0), tube_radius=None, figure=fig)
        mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]],
                    color=(0, 1, 0), tube_radius=None, figure=fig)
        mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]],
                    color=(0, 0, 1), tube_radius=None, figure=fig)
        mlab.view(azimuth=180, elevation=70,
                  focalpoint=[12.0909996, -1.04700089, -2.03249991],
                  distance=62.0, figure=fig)
        return fig

    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))
    objects = dataset.get_label_objects(data_idx)
    obj = objects[object_idx]
    obj.print_object()
    calib = dataset.get_calibration(data_idx)  # utils.Calibration(calib_filename)
    box2d = obj.box2d
    xmin, ymin, xmax, ymax = box2d
    cx, cy = (xmin + xmax) / 2, (ymin + ymax) / 2
    w, l = xmax - xmin, ymax - ymin
    # box3d
    x, y, z = obj.t

    # show 3d
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
    pc_rect = calib.project_velo_to_rect(pc_velo)
    pc_norm = pc_rect - obj.t
    keep = []
    for i in range(len(pc_norm)):
        if np.sum(pc_norm[i] ** 2) < 4:
            keep.append(i)
    pc_keep = pc_norm[keep, :]
    pc_keep[:, 1] *= -1
    pc_keep = pc_keep[:, [0, 2, 1]]
    fig = draw_3d_object(pc_keep)
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[object_idx], calib.P)
    box3d_pts_3d -= obj.t
    box3d_pts_3d[:, 1] *= -1
    box3d_pts_3d = box3d_pts_3d[:, [0, 2, 1]]
    draw_gt_boxes3d([box3d_pts_3d], fig=fig, draw_text=False)
    input()
def dataset_viz(show_frustum=False):
    sunrgbd = sunrgbd_object(data_dir)
    idxs = np.array(range(1, len(sunrgbd) + 1))
    np.random.shuffle(idxs)
    for idx in range(len(sunrgbd)):
        data_idx = idxs[idx]
        print('--------------------', data_idx)
        pc = sunrgbd.get_depth(data_idx)
        print(pc.shape)

        # Project points to image
        calib = sunrgbd.get_calibration(data_idx)
        uv, d = calib.project_upright_depth_to_image(pc[:, 0:3])
        print(uv)
        print(d)
        raw_input()

        import matplotlib.pyplot as plt
        cmap = plt.cm.get_cmap('hsv', 256)
        cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255

        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for i in range(uv.shape[0]):
            depth = d[i]
            color = cmap[int(120.0 / depth), :]
            cv2.circle(img, (int(np.round(uv[i, 0])), int(np.round(uv[i, 1]))),
                       2, color=tuple(color), thickness=-1)
        Image.fromarray(img).show()
        raw_input()

        # Load box labels
        objects = sunrgbd.get_label_objects(data_idx)
        print(objects)
        raw_input()

        # Draw 2D boxes on image
        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for i, obj in enumerate(objects):
            cv2.rectangle(img, (int(obj.xmin), int(obj.ymin)),
                          (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
            cv2.putText(img, '%d %s' % (i, obj.classname),
                        (max(int(obj.xmin), 15), max(int(obj.ymin), 15)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
        Image.fromarray(img).show()
        raw_input()

        # Draw 3D boxes on depth points
        box3d = []
        ori3d = []
        for obj in objects:
            corners_3d_image, corners_3d = utils.compute_box_3d(obj, calib)
            ori_3d_image, ori_3d = utils.compute_orientation_3d(obj, calib)
            print('Corners 3D: ', corners_3d)
            box3d.append(corners_3d)
            ori3d.append(ori_3d)
        raw_input()

        bgcolor = (0, 0, 0)
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None,
                          engine=None, size=(1600, 1000))
        mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], pc[:, 2], mode='point',
                      colormap='gnuplot', figure=fig)
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
        draw_gt_boxes3d(box3d, fig=fig)
        for i in range(len(ori3d)):
            ori_3d = ori3d[i]
            x1, y1, z1 = ori_3d[0, :]
            x2, y2, z2 = ori_3d[1, :]
            mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
                        tube_radius=None, line_width=1, figure=fig)
        mlab.orientation_axes()
        for i, obj in enumerate(objects):
            print('Orientation: ', i,
                  np.arctan2(obj.orientation[1], obj.orientation[0]))
            print('Dimension: ', i, obj.l, obj.w, obj.h)
        raw_input()

        if show_frustum:
            img = sunrgbd.get_image(data_idx)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            for i, obj in enumerate(objects):
                box2d_fov_inds = (uv[:, 0] < obj.xmax) & (uv[:, 0] >= obj.xmin) & \
                                 (uv[:, 1] < obj.ymax) & (uv[:, 1] >= obj.ymin)
                box2d_fov_pc = pc[box2d_fov_inds, :]
                img2 = np.copy(img)
                cv2.rectangle(img2, (int(obj.xmin), int(obj.ymin)),
                              (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
                cv2.putText(img2, '%d %s' % (i, obj.classname),
                            (max(int(obj.xmin), 15), max(int(obj.ymin), 15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
                Image.fromarray(img2).show()
                fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None,
                                  engine=None, size=(1000, 1000))
                mlab.points3d(box2d_fov_pc[:, 0], box2d_fov_pc[:, 1],
                              box2d_fov_pc[:, 2], box2d_fov_pc[:, 2],
                              mode='point', colormap='gnuplot', figure=fig)
                raw_input()
def demo():
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))
    data_idx = 38

    # Load data from dataset
    objects = dataset.get_label_objects(data_idx)
    objects[0].print_object()
    img = dataset.get_image(data_idx)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image Left shape: ', img.shape))
    img_r = dataset.get_image_r(data_idx)
    img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2RGB)
    img_height_r, img_width_r, img_channel_r = img_r.shape
    print(('Image Right shape: ', img_r.shape))
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
    calib = dataset.get_calibration(data_idx)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()

    # Draw 2d and 3d boxes on image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib)
    raw_input()

    # Show all LiDAR points. Draw 3d box in LiDAR point cloud
    print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    #raw_input()
    show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)
    raw_input()

    ## Visualize LiDAR points on images
    #print(' -------- LiDAR points projected to image plane --------')
    #show_lidar_on_image(pc_velo, img, calib, img_width, img_height)
    #raw_input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P)
    box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
    box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
    print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(box3droi_pc_velo, fig=fig)
    draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
    mlab.show(1)
    raw_input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]
    imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)
    cameraUVDepth = np.zeros_like(imgfov_pc_rect)
    cameraUVDepth[:, 0:2] = imgfov_pts_2d
    cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]
    # Show that the points are exactly the same
    backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
    print(imgfov_pc_velo[0:20])
    print(backprojected_pc_velo[0:20])
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(backprojected_pc_velo, fig=fig)
    raw_input()

    # Only display those points that fall into the left 2d box
    print(' -------- LiDAR points in the left frustum from a 2D box --------')
    xmin, ymin, xmax, ymax = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
    print(('Left 2D box FOV point num: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

    # Only display those points that fall into the right 2d box
    print(' -------- LiDAR points in the right frustums from a 2D box Method 1 --------')
    # We need to project xmin, ymin, xmax, ymax to the right image coordinate
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P3)
    xcord = box3d_pts_2d[:, 0]
    ycord = box3d_pts_2d[:, 1]
    xmin_r = int(min(xcord))
    ymin_r = int(min(ycord))
    xmax_r = int(max(xcord))
    ymax_r = int(max(ycord))
    box2d_r = xmin_r, ymin_r, xmax_r, ymax_r
    boxfov_pc_velo = get_lidar_in_image_fov_right(pc_velo, calib, xmin_r, ymin_r,
                                                  xmax_r, ymax_r)
    print(('Right 2D box FOV point num Method 1: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

    # Draw 2d box in right image
    print(' -------- 2D bounding box in right images using Method 1 --------')
    draw_2d_box(img_r, [xmin_r, ymin_r, xmax_r, ymax_r], calib)
    raw_input()

    # Only display those points that fall into the right 2d box
    print(' -------- LiDAR points in the right frustums from a 2D box Method 2 --------')
    xmin_r, ymin_r, xmax_r, ymax_r = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    point = np.zeros((1, 3))
    point[0, 0] = xmin_r
    point[0, 1] = ymin_r
    point[0, 2] = 30
    point_r = calib.project_rect_to_image_right(calib.project_image_to_rect(point))
    xmin_r = point_r[0, 0]
    ymin_r = point_r[0, 1]
    point[0, 0] = xmax_r
    point[0, 1] = ymax_r
    point_r = calib.project_rect_to_image_right(calib.project_image_to_rect(point))
    xmax_r = point_r[0, 0]
    ymax_r = point_r[0, 1]
    boxfov_pc_velo = get_lidar_in_image_fov_right(pc_velo, calib, xmin_r, ymin_r,
                                                  xmax_r, ymax_r)
    print(('Right 2D box FOV point num Method 2: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

    # Draw 2d box in right image
    print(' -------- 2D bounding box in right images using Method 2 --------')
    draw_2d_box(img_r, [xmin_r, ymin_r, xmax_r, ymax_r], calib)
    raw_input()

    # Display those points that fall into the intersection of the right and left frustums
    print(' -------- LiDAR points in the intersection of right and left frustums --------')
    xmin, ymin, xmax, ymax = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    # We need to project xmin, ymin, xmax, ymax to the right image coordinate
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P3)
    xcord = box3d_pts_2d[:, 0]
    ycord = box3d_pts_2d[:, 1]
    xmin_r = int(min(xcord))
    ymin_r = int(min(ycord))
    xmax_r = int(max(xcord))
    ymax_r = int(max(ycord))
    #box2d_r = xmin_r, ymin_r, xmax_r, ymax_r
    boxfov_pc_velo = get_lidar_in_image_fov_intersect(
        pc_velo, calib, xmin, ymin, xmax, ymax, xmin_r, ymin_r, xmax_r, ymax_r)
    print((' Intersection of frustums point num: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()
def demo(data_idx=0, obj_idx=-1):
    sensor = 'CAM_FRONT'
    import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_gt_boxes3d

    dataset = nuscenes2kitti_object(os.path.join(ROOT_DIR, 'dataset/nuScenes2KITTI'))

    # Load data from dataset
    objects = dataset.get_label_objects(sensor, data_idx)  # objects = [Object3d(line) for line in lines]
    for i, obj in enumerate(objects):
        print('obj %d' % (i))
    objects[obj_idx].print_object()
    calib = dataset.get_calibration(data_idx)  # utils.Calibration(calib_filename)
    box2d = objects[obj_idx].box2d
    xmin, ymin, xmax, ymax = box2d
    box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
    uvdepth = np.zeros((1, 3))
    uvdepth[0, 0:2] = box2d_center
    uvdepth[0, 2] = 20  # some random depth
    #box2d_center_rect = calib.project_image_to_rect(uvdepth)
    #frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
    #                                box2d_center_rect[0, 0])
    #print('frustum_angle:', frustum_angle)
    img = dataset.get_image(sensor, data_idx)  # (370, 1224, 3)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image shape: ', img.shape))
    print(dataset.get_lidar(data_idx).shape)
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]  # (115384, 3)
    calib = dataset.get_calibration(data_idx)  # utils.Calibration(calib_filename)

    # 1. Draw lidar with boxes in LIDAR_TOP coord
    print(' -------- LiDAR points in LIDAR_TOP coordination --------')
    print('pc_velo.shape:', pc_velo.shape)
    print('pc_velo[:10,:]:', pc_velo[:10, :])
    ##view = np.eye(4)
    ##pc_velo[:, :3] = utils.view_points(pc_velo[:, :3].T, view, normalize=False).T
    ##pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_velo)
    show_lidar_with_boxes(pc_velo, objects, calib, sensor, False,
                          img_width, img_height)
    raw_input()

    # 2. Draw frustum lidar with boxes in LIDAR_TOP coord
    print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    show_lidar_with_boxes(pc_velo.copy(), objects, calib, sensor, True,
                          img_width, img_height)
    raw_input()

    # 3. Draw 2d and 3d boxes on CAM_FRONT image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib, sensor)
    raw_input()

    print(' -------- render LiDAR points (and 3D boxes) in LIDAR_TOP coordinate --------')
    render_lidar_bev(pc_velo, objects, calib, sensor)
    raw_input()

    # Visualize LiDAR points on images
    print(' -------- LiDAR points projected to image plane --------')
    show_lidar_on_image(pc_velo, img.copy(), calib, sensor, img_width,
                        img_height)  # pc_velo: (n,3)
    raw_input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    for obj_idx, obj in enumerate(objects):
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[obj_idx],
                                                          np.eye(4))
        box3d_pts_3d_global = calib.project_cam_to_global(box3d_pts_3d.T, sensor)  # (3,8)
        box3d_pts_3d_velo = calib.project_global_to_lidar(box3d_pts_3d_global)  # (3,8)
        box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo.T)
        print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))
        fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                          engine=None, size=(1000, 500))
        utils.draw_nusc_lidar(box3droi_pc_velo, fig=fig)
        draw_gt_boxes3d([box3d_pts_3d_velo.T], fig=fig)
        mlab.show(1)
        raw_input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, sensor, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]  # (n, 3)
    imgfov_pc_global = calib.project_lidar_to_global(imgfov_pc_velo.T)
    imgfov_pc_cam = calib.project_global_to_cam(imgfov_pc_global, sensor)  # (3,n)
    #cameraUVDepth = utils.view_points(imgfov_pc_cam[:3, :], getattr(calib, sensor), normalize=True)  # (3,3067)
    #cameraUVDepth = cameraUVDepth  # (3067, 3)
    #ipdb.set_trace()
    #cameraUVDepth = np.zeros_like(imgfov_pc_cam)
    #cameraUVDepth[:, 0:2] = imgfov_pts_2d[:, 0:2]
    #cameraUVDepth[:, 2] = imgfov_pc_cam[:, 2]

    # missing intrinsic:
    # cameraUVDepth = imgfov_pc_cam
    # backprojected_pc_cam = cameraUVDepth

    # consider intrinsic
    print('imgfov_pc_cam.shape:', imgfov_pc_cam.shape)
    print('imgfov_pc_cam[:,0:5].T:\n', imgfov_pc_cam[:, 0:5].T)
    cameraUVDepth = calib.project_cam_to_image(imgfov_pc_cam, sensor)  # (3,n)
    cameraUVDepth[2, :] = imgfov_pc_cam[2, :]
    print('cameraUVDepth.shape:', cameraUVDepth.shape)
    print('cameraUVDepth[:,0:5].T:\n', cameraUVDepth[:, 0:5].T)
    backprojected_pc_cam = calib.project_image_to_cam(cameraUVDepth, sensor)  # (3,n)
    print('backprojected_pc_cam.shape:', backprojected_pc_cam.shape)
    print('backprojected_pc_cam[:,0:5].T:\n', backprojected_pc_cam[:, 0:5].T)
    print('error:')
    print(np.mean(backprojected_pc_cam - imgfov_pc_cam, axis=1))

    # Show that the points are exactly the same
    backprojected_pc_global = calib.project_cam_to_global(backprojected_pc_cam,
                                                          sensor)  # (3,n)
    backprojected_pc_velo = calib.project_global_to_lidar(
        backprojected_pc_global).T  # (n,3)
    print('imgfov_pc_velo.shape:', imgfov_pc_velo.shape)
    print(imgfov_pc_velo[0:5, :])
    print('backprojected_pc_velo.shape:', backprojected_pc_velo.shape)
    print(backprojected_pc_velo[0:5, :])
    print('error:')
    print(np.mean(backprojected_pc_velo - imgfov_pc_velo, axis=0))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    utils.draw_nusc_lidar(backprojected_pc_velo, fig=fig)
    raw_input()

    # Only display those points that fall into 2d box
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    xmin, ymin, xmax, ymax = \
        objects[obj_idx].xmin, objects[obj_idx].ymin, \
        objects[obj_idx].xmax, objects[obj_idx].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, sensor, xmin, ymin, xmax, ymax)
    print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    utils.draw_nusc_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()
def demo():
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d

    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))
    data_idx = 0

    # Load data from dataset
    objects = dataset.get_label_objects(data_idx)
    objects[0].print_object()
    img = dataset.get_image(data_idx)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image shape: ', img.shape))
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
    calib = dataset.get_calibration(data_idx)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()

    # Draw 2d and 3d boxes on image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib)
    raw_input()

    # Show all LiDAR points. Draw 3d box in LiDAR point cloud
    print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    #raw_input()
    show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)
    raw_input()

    # Visualize LiDAR points on images
    print(' -------- LiDAR points projected to image plane --------')
    show_lidar_on_image(pc_velo, img, calib, img_width, img_height)
    raw_input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P)
    box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
    box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
    print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(box3droi_pc_velo, fig=fig)
    draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
    mlab.show(1)
    raw_input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]
    imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)
    cameraUVDepth = np.zeros_like(imgfov_pc_rect)
    cameraUVDepth[:, 0:2] = imgfov_pts_2d
    cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]
    # Show that the points are exactly the same
    backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
    print(imgfov_pc_velo[0:20])
    print(backprojected_pc_velo[0:20])
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(backprojected_pc_velo, fig=fig)
    raw_input()

    # Only display those points that fall into 2d box
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    xmin, ymin, xmax, ymax = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
    print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None,
                      engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()
def extract_proposal_data(idx_filename, split, output_filename, viz=False,
                          perturb_box3d=False, augmentX=1, type_whitelist=['Car'],
                          kitti_path=os.path.join(ROOT_DIR, 'dataset/KITTI/object'),
                          pos_neg_ratio=None, source='label'):
    ''' Extract point clouds and corresponding annotations in frustums
        generated from 2D bounding boxes.
        Lidar points and 3d boxes are in *rect camera* coord system
        (as in the 3d box label files)

    Input:
        idx_filename: string, each line of the file is a sample ID
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        viz: bool, whether to visualize extracted data
        perturb_box3d: bool, whether to perturb the box2d
            (used for data augmentation in train set)
        augmentX: scalar, how many augmentations to have for each 2D box.
        type_whitelist: a list of strings, object types we are interested in.
    Output:
        None (will write a .pickle file to the disk)
    '''
    # import mayavi.mlab as mlab
    dataset = kitti_object_avod(kitti_path, split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box2d_list = []  # [xmin, ymin, xmax, ymax]
    box3d_list = []  # (8,3) array in rect camera coord
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. Car
    heading_list = []  # ry (along y-axis in rect camera coord), radians of
    # (cont.) clockwise angle from positive x axis in velo coord.
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    roi_feature_list = []  # feature map crop for proposal region

    # type_idxs = {key: [] for key in type_whitelist}  # idxs of each type
    type_idxs = {key: [] for key in type_whitelist[:3]}  # idxs of each type
    type_idxs['NonCar'] = []
    type_idxs['NonPeople'] = []
    prop_idx = 0

    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # 3 by 4 matrix
        # ground truth
        objects = dataset.get_label_objects(data_idx)
        # proposal boxes from avod
        if source == 'avod':
            proposals = dataset.get_proposals(data_idx, rpn_score_threshold=0.1,
                                              nms_iou_thres=0.8)
        else:
            proposals = []
        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]

        gt_boxes_xy = []
        gt_boxes_3d = []
        objects = list(filter(lambda obj: obj.type in type_whitelist, objects))
        for obj in objects:
            _, gt_corners_3d = utils.compute_box_3d(obj, calib.P)
            gt_boxes_xy.append(gt_corners_3d[:4, [0, 2]])
            gt_boxes_3d.append(gt_corners_3d)
            if source == 'label':
                # generate proposal from label to ensure recall
                true_prop = get_proposal_from_label(obj, calib, type_whitelist)
                proposals.append(true_prop)

        pos_proposals_in_frame = []
        neg_proposals_in_frame = []
        for prop_ in proposals:
            prop_corners_image_2d, prop_corners_3d = utils.compute_box_3d(
                prop_, calib.P)
            if prop_corners_image_2d is None:
                print('skip proposal behind camera')
                continue
            prop_box_xy = prop_corners_3d[:4, [0, 2]]
            # find corresponding label object
            obj_idx, iou_with_gt = find_match_label(prop_box_xy, gt_boxes_xy)

            if obj_idx == -1:
                # non-object
                obj_type = 'NonObject'
                gt_box_3d = np.zeros((8, 3))
                heading_angle = 0
                box3d_size = np.zeros((1, 3))
                frustum_angle = 0
                neg_proposals_in_frame.append(prop_corners_3d)

                # get points within proposal box
                _, prop_inds = extract_pc_in_box3d(pc_rect, prop_corners_3d)
                pc_in_prop_box = pc_rect[prop_inds, :]
                # shuffle points order
                np.random.shuffle(pc_in_prop_box)
                label = np.zeros((pc_in_prop_box.shape[0]))

                id_list.append(data_idx)
                # box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                box3d_list.append(gt_box_3d)
                input_list.append(pc_in_prop_box)
                label_list.append(label)
                type_list.append(obj_type)
                heading_list.append(heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)
                roi_feature_list.append(prop_.roi_features)
                # type_idxs[obj_type].append(prop_idx)
                if prop_.l > 2 or prop_.w > 2:
                    type_idxs['NonCar'].append(prop_idx)
                else:
                    type_idxs['NonPeople'].append(prop_idx)
                prop_idx += 1
            else:
                # only do augmentation on objects
                for _ in range(augmentX):
                    prop = copy.deepcopy(prop_)
                    if perturb_box3d:
                        prop = random_shift_box3d(prop)
                    prop_corners_image_2d, prop_corners_3d = utils.compute_box_3d(
                        prop, calib.P)
                    if prop_corners_image_2d is None:
                        print('skip proposal behind camera')
                        continue
                    # get points within proposal box
                    _, prop_inds = extract_pc_in_box3d(pc_rect, prop_corners_3d)
                    pc_in_prop_box = pc_rect[prop_inds, :]
                    # shuffle points order
                    np.random.shuffle(pc_in_prop_box)
                    # segmentation label
                    label = np.zeros((pc_in_prop_box.shape[0]))
                    obj = objects[obj_idx]
                    obj_type = obj.type
                    gt_box_3d = gt_boxes_3d[obj_idx]
                    _, inds = extract_pc_in_box3d(pc_in_prop_box, gt_box_3d)
                    label[inds] = 1
                    # Reject object without points
                    if np.sum(label) == 0:
                        print('Reject object without points')
                        continue
                    pos_proposals_in_frame.append(prop_corners_3d)
                    # Get 3D BOX heading
                    heading_angle = obj.ry
                    # Get 3D BOX size
                    box3d_size = np.array([obj.l, obj.w, obj.h])
                    # Get frustum angle
                    xmin, ymin = prop_corners_image_2d.min(0)
                    xmax, ymax = prop_corners_image_2d.max(0)
                    box2d_center = np.array([(xmin + xmax) / 2.0,
                                             (ymin + ymax) / 2.0])
                    uvdepth = np.zeros((1, 3))
                    uvdepth[0, 0:2] = box2d_center
                    uvdepth[0, 2] = 20  # some random depth
                    box2d_center_rect = calib.project_image_to_rect(uvdepth)
                    frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                                    box2d_center_rect[0, 0])

                    id_list.append(data_idx)
                    # box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                    box3d_list.append(gt_box_3d)
                    input_list.append(pc_in_prop_box)
                    label_list.append(label)
                    type_list.append(obj_type)
                    heading_list.append(heading_angle)
                    box3d_size_list.append(box3d_size)
                    frustum_angle_list.append(frustum_angle)
                    roi_feature_list.append(prop.roi_features)
                    type_idxs[obj_type].append(prop_idx)
                    prop_idx += 1

                    # visualize one proposal
                    if viz and False:
                        import mayavi.mlab as mlab
                        from viz_util import draw_lidar, draw_gt_boxes3d
                        fig = draw_lidar(pc_rect)
                        fig = draw_gt_boxes3d([gt_box_3d], fig, color=(1, 0, 0))
                        fig = draw_gt_boxes3d([prop_corners_3d], fig,
                                              draw_text=False, color=(1, 1, 1))
                        # roi_feature_map
                        roi_features_size = 7 * 7 * 32
                        img_roi_features = prop.roi_features[
                            0:roi_features_size].reshape((7, 7, -1))
                        bev_roi_features = prop.roi_features[
                            roi_features_size:].reshape((7, 7, -1))
                        img_roi_features = np.amax(img_roi_features, axis=-1)
                        bev_roi_features = np.amax(bev_roi_features, axis=-1)
                        fig1 = mlab.figure(figure=None, bgcolor=(0, 0, 0),
                                           fgcolor=None, engine=None,
                                           size=(500, 500))
                        fig2 = mlab.figure(figure=None, bgcolor=(0, 0, 0),
                                           fgcolor=None, engine=None,
                                           size=(500, 500))
                        mlab.imshow(img_roi_features, colormap='gist_earth',
                                    name='img_roi_features', figure=fig1)
                        mlab.imshow(bev_roi_features, colormap='gist_earth',
                                    name='bev_roi_features', figure=fig2)
                        # mlab.plot3d([0, box2d_center_rect[0][0]],
                        #             [0, box2d_center_rect[0][1]],
                        #             [0, box2d_center_rect[0][2]],
                        #             color=(1,1,1), tube_radius=None, figure=fig)
                        raw_input()

        print('%d positive proposal in frame %d' %
              (len(pos_proposals_in_frame), data_idx))
        print('%d negative proposal in frame %d' %
              (len(neg_proposals_in_frame), data_idx))
        # draw all proposals in frame
        if viz:
            import mayavi.mlab as mlab
            from viz_util import draw_lidar, draw_gt_boxes3d
            fig = draw_lidar(pc_rect)
            fig = draw_gt_boxes3d(gt_boxes_3d, fig, color=(1, 1, 1))
            fig = draw_gt_boxes3d(pos_proposals_in_frame, fig, draw_text=False,
                                  color=(1, 0, 0))
            fig = draw_gt_boxes3d(neg_proposals_in_frame, fig, draw_text=False,
                                  color=(0, 1, 0))
            raw_input()

    if pos_neg_ratio is not None:
        keep_idxs = balance(type_idxs, pos_neg_ratio)
    else:
        keep_idxs = [idx for sublist in type_idxs.values() for idx in sublist]
    random.shuffle(keep_idxs)
    id_list = [id_list[i] for i in keep_idxs]
    box3d_list = [box3d_list[i] for i in keep_idxs]
    input_list = [input_list[i] for i in keep_idxs]
    label_list = [label_list[i] for i in keep_idxs]
    type_list = [type_list[i] for i in keep_idxs]
    heading_list = [heading_list[i] for i in keep_idxs]
    box3d_size_list = [box3d_size_list[i] for i in keep_idxs]
    frustum_angle_list = [frustum_angle_list[i] for i in keep_idxs]
    roi_feature_list = [roi_feature_list[i] for i in keep_idxs]

    print_statics(input_list, label_list, type_list, type_whitelist)

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp)
        pickle.dump(box2d_list, fp)
        pickle.dump(box3d_list, fp)
        pickle.dump(input_list, fp)
        pickle.dump(label_list, fp)
        pickle.dump(type_list, fp)
        pickle.dump(heading_list, fp)
        pickle.dump(box3d_size_list, fp)
        pickle.dump(frustum_angle_list, fp)
        pickle.dump(roi_feature_list, fp)
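# A hedged invocation sketch for extract_proposal_data; the index-file path,
# pickle name, and argument values below are illustrative, not the project's
# actual configuration.
def _extract_proposal_data_example():
    extract_proposal_data(
        os.path.join(ROOT_DIR, 'kitti/image_sets/train.txt'),  # hypothetical idx file
        'training',
        'frustum_caronly_avod_train.pickle',
        viz=False, perturb_box3d=True, augmentX=5,
        type_whitelist=['Car'], pos_neg_ratio=1.0, source='avod')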
def show_lidar_with_depth(pc_velo, objects, calib, fig, img_fov=False,
                          img_width=None, img_height=None, objects_pred=None,
                          depth=None, cam_img=None, data_idx=0,
                          constraint_box=False, pc_label=False, save=False,
                          fov_restrict=False):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    if img_fov:
        pc_velo_index = get_lidar_index_in_image_fov(pc_velo[:, :3], calib, 0, 0,
                                                     img_width, img_height)
        pc_velo = pc_velo[pc_velo_index, :]
        print(("FOV point num: ", pc_velo.shape))
        pc_velo = pc_velo[pc_velo[:, 0] < 70.4]
    if fov_restrict:
        x_range = [0, 70.4]
        y_range = [-40, 40]
        z_range = [-1, 3]
        pts_x, pts_y, pts_z = pc_velo[:, 0], pc_velo[:, 1], pc_velo[:, 2]
        range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
            & (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
            & (pts_z >= z_range[0]) & (pts_z <= z_range[1])
        pc_velo = pc_velo[range_flag, :]
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pc_label=pc_label, pts_scale=1.0)

    color = (0, 1, 0)
    for obj in objects:
        if obj.type != "Car" or obj.t[2] > 70.4:
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        print("box3d_pts_3d_velo:")
        print(box3d_pts_3d_velo)
        color_list = [(0, 1, 0), (0, 0, 1)]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color,
                        label=obj.type, color_list=color_list, draw_text=False)

    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type != "Car":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )

    if args.save_img:
        mlab.savefig(args.save_dir + 'lidar/' + str(data_idx).zfill(6) + '.png')
        mlab.clf()
    else:
        mlab.show(1)
        color_list.append((0, 1, 0))
    else:
        color_list.append((1, 0, 0))
        viz_gt_box3d_list.append(gt_box3d_list[i])
    if img_id == 403:
        if i >= 2:
            continue
    cv2.rectangle(img, (int(box2d[0]), int(box2d[1])), (int(box2d[2]), int(box2d[3])), (0, 122, 122), 3)
    cv2.putText(img, '%d-%s' % (i, classname), (int(box2d[0]), int(box2d[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 122, 122), 2, cv2.LINE_AA)  # for later OpenCV versions use LINE_AA
Image.fromarray(img).show()
print(pred2gt_idx_map)
# gt_box3d_list = [box3d for box3d in gt_box3d_list if np.linalg.norm(box3d[0,:]) <= 80]
# draw_gt_boxes3d(box3d_pred_list, fig, color=(0,1,0), text_scale=(0.2,0.2,0.2), line_width=3)
if img_id == 403:
    box3d_pred_list = box3d_pred_list[0:2]
draw_gt_boxes3d(box3d_pred_list, fig, color=(0, 1, 0), text_scale=(0.1, 0.1, 0.1), line_width=3, color_list=color_list)
# draw_gt_boxes3d(box3d_list, fig, color=(0,0,1), draw_text=False, line_width=3)
# mlab.orientation_axes()
# raw_input()
# draw_gt_boxes3d(viz_gt_box3d_list, fig, color=(0,0,1), draw_text=False, line_width=3)
# print(np.max(pc_upright_camera[:, 1]))
fig = mlab.figure(figure=None, bgcolor=(0.9, 0.9, 0.9), fgcolor=None, engine=None, size=(800, 800))
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.1, figure=fig)
axes = np.array([
    [0.3, 0., 0., 0.],
    [0., 0.3, 0., 0.],
    [0., 0., 0.3, 0.],
], dtype=np.float64)
mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
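# Only the X axis of `axes` is drawn above; the Y and Z rows presumably follow
# the same pattern (green and blue under the usual XYZ-to-RGB convention):
mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)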
calib_filename = os.path.join('./kitti/calib/', '{0}.txt'.format(frame_id)) calib = kitti_util.Calibration(calib_filename) fig = draw_lidar(points) # ground truth obj_labels = obj_utils.read_labels('./kitti/label_2', int(frame_id)) gt_boxes = [] for obj in obj_labels: if obj.type not in ['Car']: continue _, corners = kitti_util.compute_box_3d(obj, calib.P) corners_velo = calib.project_rect_to_velo(corners) gt_boxes.append(corners_velo) fig = draw_gt_boxes3d(gt_boxes, fig, color=(1, 0, 0)) # proposals proposal_objs = load_proposals(frame_id) boxes = [] box_scores = [] for obj in proposal_objs: _, corners = kitti_util.compute_box_3d(obj, calib.P) corners_velo = calib.project_rect_to_velo(corners) boxes.append(corners_velo) box_scores.append(obj.score) bev_boxes = list( map( lambda bs: [bs[0][1][0], bs[0][1][1], bs[0][3][0], bs[0][3][1], bs[1]],
ty += h/2.0 return h,w,l,tx,ty,tz,ry if __name__=='__main__': import mayavi.mlab as mlab sys.path.append(os.path.join(ROOT_DIR, 'mayavi')) from viz_util import draw_lidar, draw_gt_boxes3d median_list = [] dataset = FrustumDataset(1024, split='val', rotate_to_center=True, random_flip=True, random_shift=True) for i in range(len(dataset)): data = dataset[i] print(('Center: ', data[2], \ 'angle_class: ', data[3], 'angle_res:', data[4], \ 'size_class: ', data[5], 'size_residual:', data[6], \ 'real_size:', g_type_mean_size[g_class2type[data[5]]]+data[6])) print(('Frustum angle: ', dataset.frustum_angle_list[i])) median_list.append(np.median(data[0][:,0])) print((data[2], dataset.box3d_list[i], median_list[-1])) box3d_from_label = get_3d_box(class2size(data[5],data[6]), class2angle(data[3], data[4],12), data[2]) ps = data[0] seg = data[1] fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4), fgcolor=None, engine=None, size=(1000, 500)) mlab.points3d(ps[:,0], ps[:,1], ps[:,2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig) mlab.points3d(0, 0, 0, color=(1,1,1), mode='sphere', scale_factor=0.2, figure=fig) draw_gt_boxes3d([box3d_from_label], fig, color=(1,0,0)) mlab.orientation_axes() raw_input() print(np.mean(np.abs(median_list)))
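# class2angle and class2size above invert the bin-plus-residual encoding used by
# FrustumDataset. A sketch of the standard decoding, assuming 12 equal heading
# bins over 2*pi and per-class mean sizes in g_type_mean_size / g_class2type
# (dictionary names taken from the code above):
import numpy as np

def class2angle_sketch(pred_cls, residual, num_class=12):
    angle_per_class = 2 * np.pi / float(num_class)
    angle = pred_cls * angle_per_class + residual  # bin center plus residual
    if angle > np.pi:
        angle -= 2 * np.pi  # wrap back to (-pi, pi]
    return angle

def class2size_sketch(pred_cls, residual):
    return g_type_mean_size[g_class2type[pred_cls]] + residual  # class mean size plus residual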
def extract_roi_seg(idx_filename, split, output_filename, viz, perturb_box2d=False, augmentX=1, type_whitelist=[ 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', 'night_stand', 'bookshelf', 'bathtub' ]): dataset = sunrgbd_object( '/home/haianyt/frustum-pointnets/sunrgbd/sunrgbd_data/matlab/SUNRGBDtoolbox/mysunrgbd', split) data_idx_list = [int(line.rstrip()) for line in open(idx_filename)] id_list = [] # int number box2d_list = [] # [xmin,ymin,xmax,ymax] box3d_list = [] # (8,3) array in upright depth coord input_list = [] # channel number = 6, xyz,rgb in upright depth coord label_list = [] # 1 for roi object, 0 for clutter type_list = [] # string e.g. bed heading_list = [ ] # face of object angle, radius of clockwise angle from positive x axis in upright camera coord box3d_size_list = [] # array of l,w,h frustum_angle_list = [ ] # angle of 2d box center from pos x-axis (clockwise) pos_cnt = 0 all_cnt = 0 #debuglimit = 10 for data_idx in data_idx_list: # debuglimit -=1 # if debuglimit <=0: # break print('------------- ', data_idx) calib = dataset.get_calibration(data_idx) objects = dataset.get_label_objects(data_idx) pc_upright_depth = dataset.get_depth(data_idx) pc_upright_camera = np.zeros_like(pc_upright_depth) pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera( pc_upright_depth[:, 0:3]) pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:] if viz: mlab.points3d(pc_upright_camera[:, 0], pc_upright_camera[:, 1], pc_upright_camera[:, 2], pc_upright_camera[:, 1], mode='point') mlab.orientation_axes() raw_input() img = dataset.get_image(data_idx) img_height, img_width, img_channel = img.shape pc_image_coord, _ = calib.project_upright_depth_to_image( pc_upright_depth) #print('PC image coord: ', pc_image_coord) for obj_idx in range(len(objects)): obj = objects[obj_idx] if obj.classname not in type_whitelist: continue print("object pass the type_whitelist augmentX = ", augmentX) # 2D BOX: Get pts rect backprojected box2d = obj.box2d for _ in range(augmentX): # Augment data by box2d perturbation if perturb_box2d: xmin, ymin, xmax, ymax = random_shift_box2d(box2d) print(xmin, ymin, xmax, ymax) else: xmin, ymin, xmax, ymax = box2d box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & ( pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin) pc_in_box_fov = pc_upright_camera[box_fov_inds, :] # Get frustum angle (according to center pixel in 2D BOX) box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]) uvdepth = np.zeros((1, 3)) uvdepth[0, 0:2] = box2d_center uvdepth[0, 2] = 20 # some random depth box2d_center_upright_camera = calib.project_image_to_upright_camerea( uvdepth) print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera) frustum_angle = -1 * np.arctan2( box2d_center_upright_camera[0, 2], box2d_center_upright_camera[0, 0] ) # angle as to positive x-axis as in the Zoox paper print('Frustum angle: ', frustum_angle) # 3D BOX: Get pts velo in 3d box box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib) box3d_pts_3d = calib.project_upright_depth_to_upright_camera( box3d_pts_3d) _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d) print(len(inds)) label = np.zeros((pc_in_box_fov.shape[0])) label[inds] = 1 # Get 3D BOX heading print('Orientation: ', obj.orientation) print('Heading angle: ', obj.heading_angle) # Get 3D BOX size box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h]) print('Box3d size: ', box3d_size) print('Type: ', obj.classname) print('Num of point: ', 
pc_in_box_fov.shape[0]) # Subsample points.. num_point = pc_in_box_fov.shape[0] if num_point > 2048: choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False) pc_in_box_fov = pc_in_box_fov[choice, :] label = label[choice] # Reject object with too few points if np.sum(label) < 5: continue id_list.append(data_idx) box2d_list.append(np.array([xmin, ymin, xmax, ymax])) box3d_list.append(box3d_pts_3d) input_list.append(pc_in_box_fov) label_list.append(label) type_list.append(obj.classname) heading_list.append(obj.heading_angle) box3d_size_list.append(box3d_size) frustum_angle_list.append(frustum_angle) # collect statistics pos_cnt += np.sum(label) all_cnt += pc_in_box_fov.shape[0] # VISUALIZATION if viz: img2 = np.copy(img) cv2.rectangle(img2, (int(obj.xmin), int(obj.ymin)), (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2) utils.draw_projected_box3d(img2, box3d_pts_2d) Image.fromarray(img2).show() p1 = input_list[-1] seg = label_list[-1] fig = mlab.figure(figure=None, bgcolor=(0.4, 0.4, 0.4), fgcolor=None, engine=None, size=(500, 500)) mlab.points3d(p1[:, 0], p1[:, 1], p1[:, 2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig) mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2) draw_gt_boxes3d([box3d_pts_3d], fig=fig) mlab.orientation_axes() raw_input() print('Average pos ratio: ', pos_cnt / float(all_cnt)) print('Average npoints: ', float(all_cnt) / len(id_list)) utils.save_zipped_pickle([ id_list, box2d_list, box3d_list, input_list, label_list, type_list, heading_list, box3d_size_list, frustum_angle_list ], output_filename)
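# A hypothetical invocation of extract_roi_seg as defined above; the index-file
# path, split name, and output filename are illustrative assumptions:
if __name__ == '__main__':
    extract_roi_seg('sunrgbd/image_sets/val.txt', 'training',
                    output_filename='sunrgbd_frustums_val.pickle',
                    viz=False, perturb_box2d=False, augmentX=1)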
def show_lidar_with_numpy_boxes(pc_rect, objects, calib, save_figure, save_figure_dir='', img_name='', img_fov=False, img_width=None, img_height=None, color=(1, 1, 1)): ''' Show all LiDAR points. Draw 3d box in LiDAR point cloud (in velo coord system) ''' if 'mlab' not in sys.modules: import mayavi.mlab as mlab from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d pc_velo = calib.project_rect_to_velo(pc_rect) print(('All point num: ', pc_velo.shape[0])) fig = mlab.figure(figure=None, bgcolor=(0.5, 0.5, 0.5), fgcolor=None, engine=None, size=(1600, 1000)) if img_fov: pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0, img_width, img_height) print(('FOV point num: ', pc_velo.shape[0])) draw_lidar(pc_velo, fig=fig) for obj in objects: # Draw 3d bounding box box3d_pts_2d, box3d_pts_3d = utils.compute_numpy_boxes_3d(obj, calib.P) box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d) # Draw heading arrow ori3d_pts_2d, ori3d_pts_3d = utils.compute_numpy_orientation_3d( obj, calib.P) ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d) x1, y1, z1 = ori3d_pts_3d_velo[0, :] x2, y2, z2 = ori3d_pts_3d_velo[1, :] draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color, draw_text=False) mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.8, 0.8, 0.8), tube_radius=None, line_width=1, figure=fig) # mlab.show(1) # mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance='auto', figure=fig) mlab.view(azimuth=180, elevation=60, focalpoint=[12.0909996, -1.04700089, 5.03249991], distance=62.0, figure=fig) if save_figure: if save_figure_dir != '': save_figure_dir = os.path.join(root_dir(), save_figure_dir) print(save_figure_dir) if not os.path.exists(save_figure_dir): os.makedirs(save_figure_dir) print("done!!!!") filename = os.path.join(save_figure_dir, img_name) mlab.savefig(filename) time.sleep(0.03)
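# show_lidar_with_numpy_boxes above saves one PNG per call with sequential names,
# which suggests frames meant for a video. A hypothetical helper to stitch such
# frames together with OpenCV (directory name, codec, and fps are assumptions):
import os
import cv2

def frames_to_video_sketch(frame_dir='viz_frames', out_path='viz.avi', fps=10):
    names = sorted(f for f in os.listdir(frame_dir) if f.endswith('.png'))
    first = cv2.imread(os.path.join(frame_dir, names[0]))
    h, w = first.shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
    for name in names:
        writer.write(cv2.imread(os.path.join(frame_dir, name)))  # append frame
    writer.release()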
def demo(): import mayavi.mlab as mlab from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object')) data_idx = 0 # Load data from dataset objects = dataset.get_label_objects(data_idx) objects[0].print_object() img = dataset.get_image(data_idx) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img_height, img_width, img_channel = img.shape print(('Image shape: ', img.shape)) pc_velo = dataset.get_lidar(data_idx)[:, 0:3] calib = dataset.get_calibration(data_idx) ## Draw lidar in rect camera coord #print(' -------- LiDAR points in rect camera coordination --------') #pc_rect = calib.project_velo_to_rect(pc_velo) #fig = draw_lidar_simple(pc_rect) #raw_input() # Draw 2d and 3d boxes on image print(' -------- 2D/3D bounding boxes in images --------') show_image_with_boxes(img, objects, calib) raw_input() # Show all LiDAR points. Draw 3d box in LiDAR point cloud print( ' -------- LiDAR points and 3D boxes in velodyne coordinate --------') #show_lidar_with_boxes(pc_velo, objects, calib) #raw_input() show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height) raw_input() # Visualize LiDAR points on images print(' -------- LiDAR points projected to image plane --------') show_lidar_on_image(pc_velo, img, calib, img_width, img_height) raw_input() # Show LiDAR points that are in the 3d box print(' -------- LiDAR points in a 3D bounding box --------') box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P) box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d) box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo) print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0])) fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500)) draw_lidar(box3droi_pc_velo, fig=fig) draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig) mlab.show(1) raw_input() # UVDepth Image and its backprojection to point clouds print(' -------- LiDAR points in a frustum from a 2D box --------') imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov( pc_velo, calib, 0, 0, img_width, img_height, True) imgfov_pts_2d = pts_2d[fov_inds, :] imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo) cameraUVDepth = np.zeros_like(imgfov_pc_rect) cameraUVDepth[:, 0:2] = imgfov_pts_2d cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2] # Show that the points are exactly the same backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth) print(imgfov_pc_velo[0:20]) print(backprojected_pc_velo[0:20]) fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500)) draw_lidar(backprojected_pc_velo, fig=fig) raw_input() # Only display those points that fall into 2d box print(' -------- LiDAR points in a frustum from a 2D box --------') xmin,ymin,xmax,ymax = \ objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax boxfov_pc_velo = \ get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax) print(('2d box FOV point num: ', boxfov_pc_velo.shape[0])) fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500)) draw_lidar(boxfov_pc_velo, fig=fig) mlab.show(1) raw_input()
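# extract_pc_in_box3d, used throughout the demo above, is not defined in this
# excerpt. A common implementation (a sketch, assuming scipy is available) treats
# the 8 box corners as a convex hull and keeps the points that fall inside it:
import numpy as np
from scipy.spatial import Delaunay

def in_hull_sketch(p, hull):
    # find_simplex returns -1 for points outside the triangulated hull
    if not isinstance(hull, Delaunay):
        hull = Delaunay(hull)
    return hull.find_simplex(p) >= 0

def extract_pc_in_box3d_sketch(pc, box3d_corners):
    inds = in_hull_sketch(pc[:, 0:3], box3d_corners)  # box3d_corners: (8, 3)
    return pc[inds, :], inds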
def extract_roi_seg(idx_filename, split, output_filename, viz, perturb_box2d=False, augmentX=1, type_whitelist=['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']): dataset = sunrgbd_object('/home/rqi/Data/mysunrgbd', split) data_idx_list = [int(line.rstrip()) for line in open(idx_filename)] id_list = [] # int number box2d_list = [] # [xmin,ymin,xmax,ymax] box3d_list = [] # (8,3) array in upright depth coord input_list = [] # channel number = 6, xyz,rgb in upright depth coord label_list = [] # 1 for roi object, 0 for clutter type_list = [] # string e.g. bed heading_list = [] # face of object angle, radius of clockwise angle from positive x axis in upright camera coord box3d_size_list = [] # array of l,w,h frustum_angle_list = [] # angle of 2d box center from pos x-axis (clockwise) pos_cnt = 0 all_cnt = 0 for data_idx in data_idx_list: print('------------- ', data_idx) calib = dataset.get_calibration(data_idx) objects = dataset.get_label_objects(data_idx) pc_upright_depth = dataset.get_depth(data_idx) pc_upright_camera = np.zeros_like(pc_upright_depth) pc_upright_camera[:,0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:,0:3]) pc_upright_camera[:,3:] = pc_upright_depth[:,3:] if viz: mlab.points3d(pc_upright_camera[:,0], pc_upright_camera[:,1], pc_upright_camera[:,2], pc_upright_camera[:,1], mode='point') mlab.orientation_axes() raw_input() img = dataset.get_image(data_idx) img_height, img_width, img_channel = img.shape pc_image_coord,_ = calib.project_upright_depth_to_image(pc_upright_depth) #print('PC image coord: ', pc_image_coord) for obj_idx in range(len(objects)): obj = objects[obj_idx] if obj.classname not in type_whitelist: continue # 2D BOX: Get pts rect backprojected box2d = obj.box2d for _ in range(augmentX): try: # Augment data by box2d perturbation if perturb_box2d: xmin,ymin,xmax,ymax = random_shift_box2d(box2d) print(xmin,ymin,xmax,ymax) else: xmin,ymin,xmax,ymax = box2d box_fov_inds = (pc_image_coord[:,0]<xmax) & (pc_image_coord[:,0]>=xmin) & (pc_image_coord[:,1]<ymax) & (pc_image_coord[:,1]>=ymin) pc_in_box_fov = pc_upright_camera[box_fov_inds,:] # Get frustum angle (according to center pixel in 2D BOX) box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0]) uvdepth = np.zeros((1,3)) uvdepth[0,0:2] = box2d_center uvdepth[0,2] = 20 # some random depth box2d_center_upright_camera = calib.project_image_to_upright_camerea(uvdepth) print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera) frustum_angle = -1 * np.arctan2(box2d_center_upright_camera[0,2], box2d_center_upright_camera[0,0]) # angle as to positive x-axis as in the Zoox paper print('Frustum angle: ', frustum_angle) # 3D BOX: Get pts velo in 3d box box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib) box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d) _,inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d) print(len(inds)) label = np.zeros((pc_in_box_fov.shape[0])) label[inds] = 1 # Get 3D BOX heading print('Orientation: ', obj.orientation) print('Heading angle: ', obj.heading_angle) # Get 3D BOX size box3d_size = np.array([2*obj.l,2*obj.w,2*obj.h]) print('Box3d size: ', box3d_size) print('Type: ', obj.classname) print('Num of point: ', pc_in_box_fov.shape[0]) # Subsample points.. 
                    num_point = pc_in_box_fov.shape[0]
                    if num_point > 2048:
                        choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
                        pc_in_box_fov = pc_in_box_fov[choice, :]
                        label = label[choice]
                    # Reject object with too few points
                    if np.sum(label) < 5:
                        continue
                    id_list.append(data_idx)
                    box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                    box3d_list.append(box3d_pts_3d)
                    input_list.append(pc_in_box_fov)
                    label_list.append(label)
                    type_list.append(obj.classname)
                    heading_list.append(obj.heading_angle)
                    box3d_size_list.append(box3d_size)
                    frustum_angle_list.append(frustum_angle)
                    # collect statistics
                    pos_cnt += np.sum(label)
                    all_cnt += pc_in_box_fov.shape[0]
                    # VISUALIZATION
                    if viz:
                        img2 = np.copy(img)
                        cv2.rectangle(img2, (int(obj.xmin), int(obj.ymin)), (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
                        utils.draw_projected_box3d(img2, box3d_pts_2d)
                        Image.fromarray(img2).show()
                        p1 = input_list[-1]
                        seg = label_list[-1]
                        fig = mlab.figure(figure=None, bgcolor=(0.4, 0.4, 0.4), fgcolor=None, engine=None, size=(500, 500))
                        mlab.points3d(p1[:, 0], p1[:, 1], p1[:, 2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
                        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
                        draw_gt_boxes3d([box3d_pts_3d], fig=fig)
                        mlab.orientation_axes()
                        raw_input()
                except Exception:
                    # skip objects where projection/extraction fails; catching
                    # Exception rather than everything keeps Ctrl-C working,
                    # though silently dropping objects can still hide real errors
                    pass

    print('Average pos ratio: ', pos_cnt / float(all_cnt))
    print('Average npoints: ', float(all_cnt) / len(id_list))
    utils.save_zipped_pickle([id_list, box2d_list, box3d_list, input_list, label_list, type_list,
                              heading_list, box3d_size_list, frustum_angle_list], output_filename)
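# random_shift_box2d, used for the perturb_box2d augmentation above, is not shown
# in this excerpt. A sketch of the usual jitter, assuming a 10% shift ratio:
# move the box center by up to +-10% of the box size and rescale width/height
# by up to +-10%:
import numpy as np

def random_shift_box2d_sketch(box2d, shift_ratio=0.1):
    r = shift_ratio
    xmin, ymin, xmax, ymax = box2d
    h, w = ymax - ymin, xmax - xmin
    cx, cy = (xmin + xmax) / 2.0, (ymin + ymax) / 2.0
    cx2 = cx + w * r * (np.random.random() * 2 - 1)  # jitter center x
    cy2 = cy + h * r * (np.random.random() * 2 - 1)  # jitter center y
    w2 = w * (1 + r * (np.random.random() * 2 - 1))  # rescale width
    h2 = h * (1 + r * (np.random.random() * 2 - 1))  # rescale height
    return np.array([cx2 - w2 / 2.0, cy2 - h2 / 2.0, cx2 + w2 / 2.0, cy2 + h2 / 2.0])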
def show_lidar_with_boxes(pc_velo, objects, calib, img_fov=False, img_width=None,
                          img_height=None, objects_pred=None, depth=None, cam_img=None,
                          data_idx=None, pseudo_lidar=None):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    if "mlab" not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    fig = mlab.figure(
        figure=None,
        # bgcolor=(1, 1, 1),  # white background
        bgcolor=(0, 0, 0),  # black background
        fgcolor=None,
        engine=None,
        size=(1000, 500)
        # figure=None
    )
    if img_fov:
        # filter points outside the camera FOV
        pc_velo = get_lidar_in_image_fov(pc_velo[:, 0:3], calib, 0, 0, img_width, img_height)
        print(pc_velo.shape)
        print(("FOV point num: ", pc_velo.shape[0]))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pts_color=(0, 1, 0))
    # show_box_lidar(objects, calib, data_idx, fig)
    car_obj = []
    color = (1, 0, 0)
    for obj in objects:
        if obj.type == "DontCare":
            print("############## Dont care gt: t: {}, (h: {}, l: {}, w: {})".format(obj.t, obj.h, obj.l, obj.w))
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        if obj.type == "Car":
            car_obj.append(box3d_pts_3d)
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color, data_idx=data_idx,
                        type=obj.type, occlu=obj.occlusion)
        # Draw depth from ego-vehicle to objects
        if depth is not None:
            # import pdb; pdb.set_trace()
            depth_pt3d = depth_region_pt3d(depth, obj)
            depth_UVDepth = np.zeros_like(depth_pt3d)
            depth_UVDepth[:, 0] = depth_pt3d[:, 1]
            depth_UVDepth[:, 1] = depth_pt3d[:, 0]
            depth_UVDepth[:, 2] = depth_pt3d[:, 2]
            print("depth_pt3d:", depth_UVDepth)
            dep_pc_velo = calib.project_image_to_velo(depth_UVDepth)
            print("dep_pc_velo:", dep_pc_velo)
            draw_lidar(dep_pc_velo, fig=fig, pts_color=(1, 1, 1))
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        mlab.plot3d(
            [x1, x2],
            [y1, y2],
            [z1, z2],
            color=color,
            tube_radius=None,
            line_width=1,
            figure=fig,
        )
    mlab.show()
def show_lidar_with_boxes(
    pc_velo,
    objects,
    calib,
    img_fov=False,
    img_width=None,
    img_height=None,
    objects_pred=None,
    depth=None,
    cam_img=None,
):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    if "mlab" not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo[:, 0:3], calib, 0, 0, img_width, img_height)
        print(("FOV point num: ", pc_velo.shape[0]))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig)
    # pc_velo = pc_velo[:, 0:3]

    color = (0, 1, 0)
    for obj in objects:
        if obj.type == "Car":
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw depth
            if depth is not None:
                # import pdb; pdb.set_trace()
                depth_pt3d = depth_region_pt3d(depth, obj)
                depth_UVDepth = np.zeros_like(depth_pt3d)
                depth_UVDepth[:, 0] = depth_pt3d[:, 1]
                depth_UVDepth[:, 1] = depth_pt3d[:, 0]
                depth_UVDepth[:, 2] = depth_pt3d[:, 2]
                print("depth_pt3d:", depth_UVDepth)
                dep_pc_velo = calib.project_image_to_velo(depth_UVDepth)
                print("dep_pc_velo:", dep_pc_velo)
                # mayavi expects RGB floats in [0, 1], so blue is (0, 0, 1)
                draw_lidar(dep_pc_velo, fig=fig, pts_color=(0, 0, 1))
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )
    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type != "Car":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2, figure=fig)
    mlab.points3d(ps[:, 0], ps[:, 1], ps[:, 2], segp, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
    draw_gt_boxes3d([box3d], fig, color=(0, 0, 1), draw_text=False)
    draw_gt_boxes3d([corners_3d_pred], fig, color=(0, 1, 0), draw_text=False)
    mlab.points3d(center[0], center[1], center[2], color=(0, 1, 0), mode='sphere', scale_factor=0.4, figure=fig)
    mlab.orientation_axes()
    raw_input()
    print('-----------------------')