Example #1
def copy_kitti_native_code(kitti_native_code_copy):
    """Copies and compiles kitti native code.

    It also creates the necessary directories for storing the results
    of the kitti native evaluation code.
    """

    # Only copy if the code has not already been copied over
    if not os.path.exists(kitti_native_code_copy + '/run_make.sh'):

        os.makedirs(kitti_native_code_copy)
        original_kitti_native_code = _sys_init.root_dir() + \
            '/lib/evaluator/offline_eval/kitti_native_eval/'

        # Copy the native evaluation code into place
        dir_util.copy_tree(original_kitti_native_code, kitti_native_code_copy)
        # Run the script that compiles the C++ code
        script_folder = kitti_native_code_copy
        make_script = script_folder + '/run_make.sh'
        subprocess.call([make_script, script_folder])

    # Set up the results folders if they don't exist
    results_dir = _sys_init.root_dir() + '/experiments/results'
    results_05_dir = _sys_init.root_dir() + '/experiments/results_05_iou'
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    if not os.path.exists(results_05_dir):
        os.makedirs(results_05_dir)
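
As a usage sketch (mirroring the call in Example #7 below), the helper is simply pointed at a writable destination for the compiled evaluator; the exact path is whatever the experiment layout uses:

# Usage sketch only; the destination path follows Example #7 below.
native_code_copy = _sys_init.root_dir() + '/experiments/predictions/kitti_native_eval/'
copy_kitti_native_code(native_code_copy)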
Example #2
def run_kitti_native_script_with_05_iou(kitti_native_code_copy,
                                        checkpoint_name, score_threshold,
                                        global_step):
    """Runs the kitti native code script."""

    make_script = kitti_native_code_copy + '/run_eval_05_iou.sh'
    script_folder = kitti_native_code_copy

    # Results for the 0.5 IoU evaluation are written to the results_05_iou folder
    results_dir = _sys_init.root_dir() + '/experiments/results_05_iou/'

    # Round this because protobuf encodes default values as full decimal
    score_threshold = round(score_threshold, 3)

    subprocess.call([
        make_script, script_folder,
        str(score_threshold),
        str(global_step),
        str(checkpoint_name),
        str(results_dir)
    ])
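
Example #7 below launches this script in a background process so the native evaluation does not block validation; a minimal sketch of that pattern (evaluate_name, score_threshold and global_step are placeholders supplied by the caller):

from multiprocessing import Process

native_eval_proc_05_iou = Process(
    target=run_kitti_native_script_with_05_iou,
    args=(native_code_copy, evaluate_name, score_threshold, global_step))
native_eval_proc_05_iou.start()  # intentionally not joined, as in validate() below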
Example #3
def get_kitti_predictions(score_threshold, global_epoch=-1):
    # Get available prediction folders
    root_dir = _sys_init.root_dir()
    predictions_root_dir = os.path.join(root_dir, 'experiments', 'predictions')

    # 3D prediction directories
    kitti_predictions_3d_dir = os.path.join(
        predictions_root_dir, 'kitti_native_eval',
        str(score_threshold), str(global_epoch), 'data')
    if not os.path.exists(kitti_predictions_3d_dir):
        os.makedirs(kitti_predictions_3d_dir)

    print('3D Detections being saved to:', kitti_predictions_3d_dir)
    return kitti_predictions_3d_dir
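
The returned directory is where per-sample detections are written before the native evaluation runs; a short sketch following Example #7 (save_predictions_in_kitti_format and its arguments are taken from that example):

predictions_3d_dir = get_kitti_predictions(score_threshold, epoch)
# Later, once per sample:
evaluator_utils.save_predictions_in_kitti_format(
    dataset, rois_per_points_cloud[:, 1:], img_ids[b_ix],
    predictions_3d_dir, score_threshold)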
Example #4
def show_image_with_boxes(img,
                          objects,
                          calib,
                          show3d=True,
                          save_figure=False,
                          save_figure_dir='',
                          img_name=''):
    ''' Show image with 2D bounding boxes and, optionally, projected 3D boxes '''
    img1 = np.copy(img)  # for 2d bbox
    img2 = img.copy()  # for 3d bbox
    for obj in objects:
        if isinstance(obj, np.ndarray):
            box3d_pts_2d, box3d_pts_3d = utils.compute_numpy_boxes_3d(
                obj, calib.P)
        else:
            if obj.type == 'DontCare': continue
            cv2.rectangle(img1, (int(obj.xmin), int(obj.ymin)),
                          (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        if box3d_pts_2d is not None:
            height, width, _ = img.shape
            # box3d_pts_2d[:, 0] = np.clip(box3d_pts_2d[:, 0], 0, height-1)
            # box3d_pts_2d[:, 1] = np.clip(box3d_pts_2d[:, 1], 0, width-1)
            img2 = utils.draw_projected_box3d(img2, box3d_pts_2d)
    if not isinstance(objects, np.ndarray):
        Image.fromarray(img1).show()
    if show3d:
        # img2_tmp = Image.fromarray(img2)
        # img2_tmp.show()
        cv2.imshow(img_name, img2)
        cv2.waitKey(1)  # give the OpenCV window a chance to refresh
        if save_figure:
            if save_figure_dir != '':
                save_figure_dir = os.path.join(root_dir(), save_figure_dir)
                print(save_figure_dir)
            if not os.path.exists(save_figure_dir):
                os.makedirs(save_figure_dir)
                print("done!!!!")
            filename = os.path.join(save_figure_dir, img_name)
            img2 = cv2.resize(img2, (1242, 376), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(filename, img2)
        # img2_tmp.close()
        time.sleep(0.03)
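
For reference, Example #7 calls this visualizer on the filtered proposals roughly as follows (the proposal slicing and the args.* values are specific to that script):

show_image_with_boxes(img,
                      rois_per_points_cloud[score_filter, 1:1 + 7],
                      calib,
                      True,
                      save_figure=args.save_as_figure,
                      save_figure_dir=args.figdir,
                      img_name='img_%06d.jpg' % img_ids[b_ix])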
Example #5
def show_lidar_with_numpy_boxes(pc_rect,
                                objects,
                                calib,
                                save_figure,
                                save_figure_dir='',
                                img_name='',
                                img_fov=False,
                                img_width=None,
                                img_height=None,
                                color=(1, 1, 1)):
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    pc_velo = calib.project_rect_to_velo(pc_rect)

    print('All point num:', pc_velo.shape[0])
    fig = mlab.figure(figure=None,
                      bgcolor=(0.5, 0.5, 0.5),
                      fgcolor=None,
                      engine=None,
                      size=(1600, 1000))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0, img_width,
                                         img_height)
        print('FOV point num:', pc_velo.shape[0])
    draw_lidar(pc_velo, fig=fig)
    for obj in objects:
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_numpy_boxes_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_numpy_orientation_3d(
            obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo],
                        fig=fig,
                        color=color,
                        draw_text=False)
        mlab.plot3d([x1, x2], [y1, y2], [z1, z2],
                    color=(0.8, 0.8, 0.8),
                    tube_radius=None,
                    line_width=1,
                    figure=fig)
    # mlab.show(1)
    # mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance='auto', figure=fig)
    mlab.view(azimuth=180,
              elevation=60,
              focalpoint=[12.0909996, -1.04700089, 5.03249991],
              distance=62.0,
              figure=fig)
    if save_figure:
        if save_figure_dir != '':
            save_figure_dir = os.path.join(root_dir(), save_figure_dir)
            print(save_figure_dir)
        if not os.path.exists(save_figure_dir):
            os.makedirs(save_figure_dir)
            print("done!!!!")
        filename = os.path.join(save_figure_dir, img_name)
        mlab.savefig(filename)
    time.sleep(0.03)
Example #6
''' Helper class and functions for loading KITTI objects

Author: Charles R. Qi
Date: September 2017
'''
from __future__ import print_function

import os
import sys
import numpy as np
import cv2
import time
from PIL import Image
from _sys_init import root_dir
sys.path.append(os.path.join(root_dir(), 'lib', 'mayavi'))
import lib.dataset.kitti_util as utils
#from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3


class kitti_object(object):
    '''Load and parse object data into a usable format.'''
    def __init__(self, root_dir, split='training'):
        '''root_dir contains training and testing folders'''
        self.root_dir = root_dir
        self.split = split
        self.split_dir = os.path.join(root_dir, split)
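
Only the constructor is shown in this excerpt; a minimal instantiation sketch (the KITTI root path is a placeholder and is expected to contain the training and testing folders mentioned in the docstring):

dataset = kitti_object('/path/to/KITTI/object', split='training')
print(dataset.split_dir)  # e.g. /path/to/KITTI/object/training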
Example #7
def validate(dataset, dataloader, model, cfg, epoch=-1):
    # switch to evaluate mode
    logger = logging.getLogger('global')
    torch.cuda.set_device(0)
    model.cuda()
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    model.eval()

    total_rc = 0
    total_gt = 0
    area_extents = np.asarray(cfg['shared']['area_extents']).reshape(-1, 2)
    bev_extents = area_extents[[0, 2]]

    score_threshold = cfg['test_rpn_proposal_cfg']['score_threshold']
    valid_samples = 0
    native_code_copy = _sys_init.root_dir() + \
        '/experiments/predictions/kitti_native_eval/'
    evaluator_utils.copy_kitti_native_code(native_code_copy)
    predictions_3d_dir = evaluator_utils.get_kitti_predictions(
        score_threshold, epoch)

    logger.info('start validate')
    for iter, _input in enumerate(dataloader):
        gt_boxes = _input[9]
        voxel_with_points = _input[6]
        batch_size = voxel_with_points.shape[0]
        # assert batch_size == 1
        img_ids = _input[10]

        x = {
            'cfg': cfg,
            'image': _input[0],
            'points': _input[1],
            'indices': _input[2],
            'num_pts': _input[3],
            'leaf_out': _input[4],
            'voxel_indices': _input[5],
            'voxel_points': torch.autograd.Variable(_input[6]).cuda(),
            'ground_plane': _input[7],
            'gt_bboxes_2d': _input[8],
            'gt_bboxes_3d': _input[9],
            'num_divisions': _input[11]
        }

        t0 = time.time()
        outputs = model(x)
        outputs = outputs['predict']
        t2 = time.time()
        proposals = outputs[0].data.cpu().numpy()

        if torch.is_tensor(gt_boxes):
            gt_boxes = gt_boxes.cpu().numpy()

        for b_ix in range(batch_size):
            rois_per_points_cloud = proposals[proposals[:, 0] == b_ix]
            if gt_boxes.shape[0] != 0:
                gts_per_points_cloud = gt_boxes[b_ix]

                rois_per_points_cloud_anchor = box_3d_encoder.box_3d_to_anchor(
                    rois_per_points_cloud[:, 1:1 + 7])
                gts_per_points_cloud_anchor = box_3d_encoder.box_3d_to_anchor(
                    gts_per_points_cloud)
                rois_per_points_cloud_bev, _ = anchor_projector.project_to_bev(
                    rois_per_points_cloud_anchor, bev_extents)
                gts_per_points_cloud_bev, _ = anchor_projector.project_to_bev(
                    gts_per_points_cloud_anchor, bev_extents)

                # rpn recall
                num_rc, num_gt = bbox_helper.compute_recall(
                    rois_per_points_cloud_bev, gts_per_points_cloud_bev)
                total_gt += num_gt
                total_rc += num_rc

                if args.visual:
                    calib_dir = os.path.join(args.datadir, 'training/calib',
                                             '%06d.txt' % (img_ids[b_ix]))
                    calib = Calibration(calib_dir)

                    # Show all LiDAR points. Draw 3d box in LiDAR point cloud
                    print(
                        ' -------- LiDAR points and 3D boxes in velodyne coordinate --------'
                    )
                    show_lidar_with_numpy_boxes(
                        x['points'][b_ix, :, 0:3].numpy(),
                        gts_per_points_cloud,
                        calib,
                        save_figure=False,
                        color=(1, 1, 1))
                    input()

                    score_filter = rois_per_points_cloud[:, -1] > score_threshold
                    print('img: {}, proposals shape:{}'.format(
                        img_ids[b_ix],
                        rois_per_points_cloud[score_filter].shape))

                    img = x['image'][b_ix].numpy() * 255.
                    img = img.astype(np.uint8)
                    img = np.array(np.transpose(img, (1, 2, 0)))
                    show_image_with_boxes(
                        img,
                        rois_per_points_cloud[score_filter, 1:1 + 7],
                        calib,
                        True,
                        save_figure=args.save_as_figure,
                        save_figure_dir=args.figdir,
                        img_name='img_%06d.jpg' % (img_ids[b_ix]))
                    # input()
                    #
                    show_lidar_with_numpy_boxes(
                        x['points'][b_ix, :, 0:3].numpy(),
                        rois_per_points_cloud[score_filter, 1:1 + 7][:10],
                        calib,
                        save_figure=args.save_as_figure,
                        save_figure_dir=args.figdir,
                        img_name='points_%06d.jpg' % (img_ids[b_ix]),
                        color=(1, 1, 1))
                    input()
                    # anchors = outputs[1]
                    # total_anchors, _ = anchors.shape
                    # idx = np.random.choice(total_anchors, 50)
                    # show_lidar_with_numpy_boxes(x['points'][b_ix, :, 0:3].numpy(), anchors[idx, :], calib, save_figure=False,
                    #                             color=(1, 1, 1))
                    # input()

            valid, total_samples = evaluator_utils.save_predictions_in_kitti_format(
                dataset, rois_per_points_cloud[:, 1:], img_ids[b_ix],
                predictions_3d_dir, score_threshold)
            valid_samples += valid
            logger.info('valid samples: %d/%d' %
                        (valid_samples, total_samples))
        logger.info('Test valid instance: [%d/%d] Time: %.3f %d/%d' %
                    (iter, len(dataloader), t2 - t0, total_rc, total_gt))
        log_helper.print_speed(iter + 1, t2 - t0, len(dataloader))

    logger.info('rpn300 recall=%f' % (total_rc / total_gt))
    evaluate_name = dataset.id2names[1] + '_' + dataset.split

    # Create separate processes to run the native evaluation scripts
    native_eval_proc = Process(target=evaluator_utils.run_kitti_native_script,
                               args=(native_code_copy, evaluate_name,
                                     score_threshold, epoch))
    native_eval_proc_05_iou = Process(
        target=evaluator_utils.run_kitti_native_script_with_05_iou,
        args=(native_code_copy, evaluate_name, score_threshold, epoch))
    # Don't join these processes because we do not want to block;
    # this leaves zombie processes and should be fixed later.
    native_eval_proc.start()
    native_eval_proc_05_iou.start()
    # evaluator_utils.run_kitti_native_script(native_code_copy, evaluate_name, score_threshold, epoch)
    # evaluator_utils.run_kitti_native_script_with_05_iou(native_code_copy, evaluate_name, score_threshold, epoch)
    return total_rc / total_gt