Code example #1
File: test.py  Project: wx-b/VR3Dense
    zoom_diff_range = list(np.arange(0.2, 0.257, 0.001)) + list(
        np.arange(0.257, 0.2, -0.005))
    # iterate through all files
    for idx, pc_filename in enumerate(pc_filenames):
        # read point-cloud
        velo_pc = read_velo_bin(pc_filename)

        # read corresponding image
        fname, file_ext = os.path.splitext(pc_filename)
        fname = fname.split('/')[-1]
        img_fname = os.path.join(args.img_dir, fname + '.png')
        img_bgr = cv2.imread(img_fname)
        img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

        # perform prediction
        pred_tuple, dt = trainer.predict(velo_pc, img_rgb)
        if args.dense_depth:
            label_dict, dense_depth = pred_tuple

        # tracking
        label_tracked_dict = []
        if TRACKING:
            # get detection list for mot_tracker
            mot_det = []
            mot_other_info = []
            for label_ in label_dict:
                det = [
                    label_['h'], label_['w'], label_['l'], label_['x'],
                    label_['y'], label_['z'], label_['yaw']
                ]
                other_info = (label_['class'], label_['conf'])
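For context, `read_velo_bin` is a project utility that loads a LiDAR scan from a KITTI-style velodyne `.bin` file. Below is a minimal sketch of such a reader; the 4-column return layout is an assumption about the VR3Dense helper, not taken from the source.

import numpy as np

def read_velo_bin(path):
    # KITTI velodyne scans are stored as back-to-back float32
    # (x, y, z, reflectance) records; returning an (N, 4) array
    # is an assumed convention for this helper.
    return np.fromfile(path, dtype=np.float32).reshape(-1, 4)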
Code example #2
                      train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only)

    # show 100 samples
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('VR3Dense', 800, 1440)
    for i in range(100):
        sample = trainer.dataset[i]
        ## get true labels visualization
        pc_bbox_img_true = draw_point_cloud_w_bbox(sample['cloud'], sample['label_dict'], \
                                                    xlim=trainer.xlim, ylim=trainer.ylim, zlim=trainer.zlim)
        pc_bbox_img_true_bgr = cv2.cvtColor(pc_bbox_img_true,
                                            cv2.COLOR_RGB2BGR)

        ## get predicted labels visualization
        # perform prediction
        pred_tuple, dt = trainer.predict(sample['cloud'], sample['left_image'])
        if args.dense_depth:
            label_dict, dense_depth = pred_tuple

        # get visualization
        pc_bbox_img_pred = draw_point_cloud_w_bbox(sample['cloud'], label_dict, \
                                                    xlim=trainer.xlim, ylim=trainer.ylim, zlim=trainer.zlim)
        pc_bbox_img_pred_bgr = cv2.cvtColor(pc_bbox_img_pred,
                                            cv2.COLOR_RGB2BGR)

        # stack the ground-truth (top) and predicted (bottom) visualizations and draw a white separator line between them
        img_viz = cv2.vconcat([pc_bbox_img_true_bgr, pc_bbox_img_pred_bgr])
        cv2.line(
            img_viz, (0, pc_bbox_img_true_bgr.shape[0]),
            (pc_bbox_img_true_bgr.shape[1] - 1, pc_bbox_img_true_bgr.shape[0]),
            color=(255, 255, 255),
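The snippet is cut off inside the cv2.line call. For reference, here is a self-contained sketch of how such a stacked visualization is commonly displayed in the window created above; the window name reuse and the 'q' quit key are assumptions, not taken from the source.

import cv2
import numpy as np

def show_viz(img_viz, window='VR3Dense', delay_ms=1):
    # display the composite image and report whether the user asked to quit with 'q'
    # (assumed interaction pattern, not from the original test.py)
    cv2.imshow(window, img_viz)
    return (cv2.waitKey(delay_ms) & 0xFF) != ord('q')

if __name__ == '__main__':
    # usage sketch on a dummy image
    dummy = np.zeros((720, 400, 3), dtype=np.uint8)
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    show_viz(dummy)
    cv2.destroyAllWindows()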