Example #1
def extract_frustum_data_rgb_detection(det_filename, split, output_filename,
                                       type_whitelist=['Car'],
                                       img_height_threshold=5,
                                       lidar_point_threshold=1):
    ''' Extract point clouds in frustums extruded from 2D detection boxes.
        Update: lidar points and 3D boxes are in the *rect camera* coordinate
            system (as in the 3D box label files)

    Input:
        det_filename: string, each line is
            img_path typeid confidence xmin ymin xmax ymax
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        type_whitelist: a list of strings, object types we are interested in.
        img_height_threshold: int, neglect 2D boxes whose pixel height is
            below this.
        lidar_point_threshold: int, neglect frustums with fewer lidar points
            than this.
    Output:
        None (will write a .pickle file to the disk)
    '''
    dataset = kitti_object(os.path.join(ROOT_DIR, 'data/kitti'), split=split)
    if det_filename.split('.')[-1] == 'pkl':
        det_id_list, det_type_list, det_box2d_list, det_prob_list = \
            read_det_pkl_file(det_filename)
    else:
        det_id_list, det_type_list, det_box2d_list, det_prob_list = \
            read_det_file(det_filename)
    cache_id = -1
    cache = None

    id_list = []
    type_list = []
    box2d_list = []
    prob_list = []
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    calib_list = []

    for det_idx in range(len(det_id_list)):
        data_idx = det_id_list[det_idx]
        print('det idx: %d/%d, data idx: %d' %
              (det_idx, len(det_id_list), data_idx))
        if cache_id != data_idx:
            calib = dataset.get_calibration(data_idx)  # calibration object (holds the 3x4 projection matrix P)
            pc_velo = dataset.get_lidar(data_idx)
            pc_rect = np.zeros_like(pc_velo)
            pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
            pc_rect[:, 3] = pc_velo[:, 3]
            img = dataset.get_image(data_idx)
            img_height, img_width, img_channel = img.shape
            _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(
                pc_velo[:, 0:3], calib, 0, 0, img_width, img_height, True)
            cache = [calib, pc_rect, pc_image_coord, img_fov_inds]
            cache_id = data_idx
        else:
            calib, pc_rect, pc_image_coord, img_fov_inds = cache

        if det_type_list[det_idx] not in type_whitelist:
            continue

        # 2D BOX: Get pts rect backprojected
        det_box2d = det_box2d_list[det_idx].copy()
        det_box2d[[0, 2]] = np.clip(det_box2d[[0, 2]], 0, img_width - 1)
        det_box2d[[1, 3]] = np.clip(det_box2d[[1, 3]], 0, img_height - 1)

        xmin, ymin, xmax, ymax = det_box2d
        box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                       (pc_image_coord[:, 0] >= xmin) & \
                       (pc_image_coord[:, 1] < ymax) & \
                       (pc_image_coord[:, 1] >= ymin)
        box_fov_inds = box_fov_inds & img_fov_inds
        pc_in_box_fov = pc_rect[box_fov_inds, :]

        pc_box_image_coord = pc_image_coord[box_fov_inds, :]

        # Get frustum angle (according to center pixel in 2D BOX)
        box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
        uvdepth = np.zeros((1, 3))
        uvdepth[0, 0:2] = box2d_center
        uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
        box2d_center_rect = calib.project_image_to_rect(uvdepth)
        frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                        box2d_center_rect[0, 0])

        # Skip 2D boxes that are too small or frustums with too few points
        if ymax - ymin < img_height_threshold or xmax - xmin < 1 or \
                len(pc_in_box_fov) < lidar_point_threshold:
            continue

        id_list.append(data_idx)
        type_list.append(det_type_list[det_idx])
        box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
        prob_list.append(det_prob_list[det_idx])
        input_list.append(pc_in_box_fov.astype(np.float32, copy=False))
        frustum_angle_list.append(frustum_angle)
        calib_list.append(calib.calib_dict)
    
    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp, -1)
        pickle.dump(box2d_list, fp, -1)
        pickle.dump(input_list, fp, -1)
        pickle.dump(type_list, fp, -1)
        pickle.dump(frustum_angle_list, fp, -1)
        pickle.dump(prob_list, fp, -1)
        pickle.dump(calib_list, fp, -1)

    print('total_objects %d' % len(id_list))
    print('saved to {}'.format(output_filename))
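
The seven pickle.dump calls fix the on-disk field order, so a reader must load the fields back in exactly that order. A minimal sketch of a matching loader (the function name is hypothetical, not part of the original code):

import pickle

def load_frustum_rgb_detection_pickle(filename):
    # Load the fields in the same order they were dumped above.
    with open(filename, 'rb') as fp:
        id_list = pickle.load(fp)
        box2d_list = pickle.load(fp)
        input_list = pickle.load(fp)
        type_list = pickle.load(fp)
        frustum_angle_list = pickle.load(fp)
        prob_list = pickle.load(fp)
        calib_list = pickle.load(fp)
    return (id_list, box2d_list, input_list, type_list,
            frustum_angle_list, prob_list, calib_list)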
Example #2
def extract_frustum_data(idx_filename, split, output_filename,
                         perturb_box2d=False, augmentX=1, type_whitelist=['Car']):
    ''' Extract point clouds and corresponding annotations in frustums
        generated from 2D bounding boxes.
        Lidar points and 3D boxes are in the *rect camera* coordinate system
        (as in the 3D box label files)

    Input:
        idx_filename: string, each line of the file is a sample ID
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        perturb_box2d: bool, whether to perturb the box2d
            (used for data augmentation in train set)
        augmentX: scalar, how many augmentations to have for each 2D box.
        type_whitelist: a list of strings, object types we are interested in.
    Output:
        None (will write a .pickle file to the disk)
    '''
    dataset = kitti_object(os.path.join(ROOT_DIR, 'data/kitti'), split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in rect camera coord
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. Car
    heading_list = []  # ry (rotation around y-axis in rect camera coord), in radians;
    # (cont.) clockwise angle from positive x-axis in velo coord.
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis

    gt_box2d_list = []

    calib_list = []

    pos_cnt = 0
    all_cnt = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # calibration object (holds the 3x4 projection matrix P)
        objects = dataset.get_label_objects(data_idx)
        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:, 0:3],
                                                                 calib, 0, 0, img_width, img_height, True)

        for obj_idx in range(len(objects)):
            if objects[obj_idx].type not in type_whitelist:
                continue

            # 2D BOX: Get pts rect backprojected
            box2d = objects[obj_idx].box2d
            for _ in range(augmentX):
                # Augment data by box2d perturbation
                if perturb_box2d:
                    xmin, ymin, xmax, ymax = random_shift_box2d(box2d, img_height, img_width, 0.1)
                else:
                    xmin, ymin, xmax, ymax = box2d
                box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                               (pc_image_coord[:, 0] >= xmin) & \
                               (pc_image_coord[:, 1] < ymax) & \
                               (pc_image_coord[:, 1] >= ymin)
                box_fov_inds = box_fov_inds & img_fov_inds
                pc_in_box_fov = pc_rect[box_fov_inds, :]

                pc_box_image_coord = pc_image_coord[box_fov_inds]

                # Get frustum angle (according to center pixel in 2D BOX)
                box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
                uvdepth = np.zeros((1, 3))
                uvdepth[0, 0:2] = box2d_center
                uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
                box2d_center_rect = calib.project_image_to_rect(uvdepth)
                frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                                box2d_center_rect[0, 0])
                # 3D BOX: Get pts velo in 3d box
                obj = objects[obj_idx]
                box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
                _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                label = np.zeros((pc_in_box_fov.shape[0]))
                label[inds] = 1

                # Get 3D BOX heading
                heading_angle = obj.ry
                # Get 3D BOX size
                box3d_size = np.array([obj.l, obj.w, obj.h])

                # Reject objects that are too far away (2D box shorter than
                # 25 px) or with no lidar points inside the 3D box
                if (box2d[3] - box2d[1]) < 25 or np.sum(label) == 0:
                    continue

                id_list.append(data_idx)
                box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_box_fov.astype(np.float32, copy=False))
                label_list.append(label)
                type_list.append(objects[obj_idx].type)
                heading_list.append(heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)

                gt_box2d_list.append(box2d)
                calib_list.append(calib.calib_dict)
                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_box_fov.shape[0]

    print('total_objects %d' % len(id_list))
    print('Average pos ratio: %f' % (pos_cnt / float(all_cnt)))
    print('Average npoints: %f' % (float(all_cnt) / len(id_list)))

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp, -1)
        pickle.dump(box2d_list, fp, -1)
        pickle.dump(box3d_list, fp, -1)
        pickle.dump(input_list, fp, -1)
        pickle.dump(label_list, fp, -1)
        pickle.dump(type_list, fp, -1)
        pickle.dump(heading_list, fp, -1)
        pickle.dump(box3d_size_list, fp, -1)
        pickle.dump(frustum_angle_list, fp, -1)
        pickle.dump(gt_box2d_list, fp, -1)
        pickle.dump(calib_list, fp, -1)

    print('saved to {}'.format(output_filename))
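
This example relies on a random_shift_box2d helper that is not shown. A plausible minimal sketch, assuming it jitters the box center and size by up to shift_ratio and clips the result to the image bounds (the repository's actual implementation may differ):

import numpy as np

def random_shift_box2d(box2d, img_height, img_width, shift_ratio=0.1):
    # Hypothetical sketch: perturb the 2D box center and size, then clip.
    xmin, ymin, xmax, ymax = box2d
    h, w = ymax - ymin, xmax - xmin
    cx, cy = (xmin + xmax) / 2.0, (ymin + ymax) / 2.0
    r = shift_ratio
    cx2 = cx + w * r * (np.random.random() * 2 - 1)   # shift center x
    cy2 = cy + h * r * (np.random.random() * 2 - 1)   # shift center y
    w2 = w * (1 + r * (np.random.random() * 2 - 1))   # rescale width
    h2 = h * (1 + r * (np.random.random() * 2 - 1))   # rescale height
    return np.array([max(cx2 - w2 / 2.0, 0.0),
                     max(cy2 - h2 / 2.0, 0.0),
                     min(cx2 + w2 / 2.0, img_width - 1.0),
                     min(cy2 + h2 / 2.0, img_height - 1.0)])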
Example #3
def extract_frustum_det_data(idx_filename, split, output_filename, det_filename,
                             perturb_box2d=False, augmentX=1, type_whitelist=['Car']):
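    ''' Like extract_frustum_data, but the frustums come from external 2D
        detections (det_filename) rather than ground-truth boxes. Each
        detection is matched to the ground-truth box with the highest 2D IoU
        (threshold 0.5 for Car, 0.25 otherwise) and inherits that box's 3D
        annotation; unmatched detections are skipped. Writes a .pickle file
        to output_filename. '''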
    dataset = kitti_object(os.path.join(ROOT_DIR, 'data/kitti'), split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    det_id_list, det_type_list, det_box2d_list, det_prob_list = \
        read_det_file(det_filename)

    all_boxes_2d = {}

    for i, det_idx in enumerate(det_id_list):
        if det_idx not in all_boxes_2d:
            all_boxes_2d[det_idx] = []

        all_boxes_2d[det_idx] += [
            {
                'type': det_type_list[i],
                'box2d': det_box2d_list[i],
                'prob': det_prob_list[i]
            }
        ]

    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in rect camera coord
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. Car
    heading_list = []  # ry (rotation around y-axis in rect camera coord), in radians;
    # (cont.) clockwise angle from positive x-axis in velo coord.
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis

    gt_box2d_list = []

    calib_list = []

    pos_cnt = 0
    all_cnt = 0
    thresh = 0.5 if 'Car' in type_whitelist else 0.25
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # calibration object (holds the 3x4 projection matrix P)
        gt_objects = dataset.get_label_objects(data_idx)

        gt_objects, gt_boxes_2d, gt_boxes_3d = extract_boxes(gt_objects, type_whitelist)

        if len(gt_objects) == 0:
            continue

        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:, 0:3],
                                                                 calib, 0, 0, img_width, img_height, True)

        det_objects = all_boxes_2d.get(data_idx)
        if det_objects is None:
            continue

        for obj_idx in range(len(det_objects)):

            cur_obj = det_objects[obj_idx]

            if cur_obj['type'] not in type_whitelist:
                continue

            overlap = bbox_overlaps_2d(cur_obj['box2d'].reshape(-1, 4), gt_boxes_2d)
            overlap = overlap[0]
            max_overlap = overlap.max(0)
            max_idx = overlap.argmax(0)

            if max_overlap < thresh:
                continue

            assign_obj = gt_objects[max_idx]

            # 2D BOX: Get pts rect backprojected
            box2d = cur_obj['box2d']
            for _ in range(augmentX):
                # Augment data by box2d perturbation
                if perturb_box2d:
                    xmin, ymin, xmax, ymax = random_shift_box2d(box2d, img_height, img_width, 0.1)
                else:
                    xmin, ymin, xmax, ymax = box2d
                box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                               (pc_image_coord[:, 0] >= xmin) & \
                               (pc_image_coord[:, 1] < ymax) & \
                               (pc_image_coord[:, 1] >= ymin)
                box_fov_inds = box_fov_inds & img_fov_inds
                pc_in_box_fov = pc_rect[box_fov_inds, :]

                pc_box_image_coord = pc_image_coord[box_fov_inds]

                # Get frustum angle (according to center pixel in 2D BOX)
                box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
                uvdepth = np.zeros((1, 3))
                uvdepth[0, 0:2] = box2d_center
                uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
                box2d_center_rect = calib.project_image_to_rect(uvdepth)
                frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                                box2d_center_rect[0, 0])
                # 3D BOX: Get pts velo in 3d box
                obj = assign_obj
                box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
                _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                label = np.zeros((pc_in_box_fov.shape[0]))
                label[inds] = 1

                # Get 3D BOX heading
                heading_angle = obj.ry
                # Get 3D BOX size
                box3d_size = np.array([obj.l, obj.w, obj.h])
                gt_box2d = obj.box2d
                # Reject objects that are too far away (ground-truth 2D box
                # shorter than 25 px) or with no lidar points inside the 3D box
                if (gt_box2d[3] - gt_box2d[1]) < 25 or np.sum(label) == 0:
                    continue

                id_list.append(data_idx)
                box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_box_fov.astype(np.float32, copy=False))
                label_list.append(label)
                type_list.append(obj.type)
                heading_list.append(heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)

                gt_box2d_list.append(gt_box2d)
                calib_list.append(calib.calib_dict)
                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_box_fov.shape[0]

    print('total_objects %d' % len(id_list))
    print('Average pos ratio: %f' % (pos_cnt / float(all_cnt)))
    print('Average npoints: %f' % (float(all_cnt) / len(id_list)))

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp, -1)
        pickle.dump(box2d_list, fp, -1)
        pickle.dump(box3d_list, fp, -1)
        pickle.dump(input_list, fp, -1)
        pickle.dump(label_list, fp, -1)
        pickle.dump(type_list, fp, -1)
        pickle.dump(heading_list, fp, -1)
        pickle.dump(box3d_size_list, fp, -1)
        pickle.dump(frustum_angle_list, fp, -1)
        pickle.dump(gt_box2d_list, fp, -1)
        pickle.dump(calib_list, fp, -1)

    print('saved to {}'.format(output_filename))
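
The matching step above uses bbox_overlaps_2d, which is not shown. A minimal sketch under the assumption that it returns the pairwise IoU matrix between (N, 4) detection boxes and (K, 4) ground-truth boxes in [xmin, ymin, xmax, ymax] form:

import numpy as np

def bbox_overlaps_2d(boxes, gt_boxes):
    # Hypothetical sketch: pairwise intersection-over-union, shape (N, K).
    ixmin = np.maximum(boxes[:, None, 0], gt_boxes[None, :, 0])
    iymin = np.maximum(boxes[:, None, 1], gt_boxes[None, :, 1])
    ixmax = np.minimum(boxes[:, None, 2], gt_boxes[None, :, 2])
    iymax = np.minimum(boxes[:, None, 3], gt_boxes[None, :, 3])
    inter = np.clip(ixmax - ixmin, 0, None) * np.clip(iymax - iymin, 0, None)
    area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    gt_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
    union = area[:, None] + gt_area[None, :] - inter
    return inter / np.maximum(union, 1e-10)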
Example #4
def extract_frustum_data_rgb_detection(idx_filename, split, output_filename, res_label_dir,
                                       type_whitelist=['Car'],
                                       img_height_threshold=5,
                                       lidar_point_threshold=1):
    ''' Extract point clouds in enlarged cuboids around predicted 3D boxes
        (read from res_label_dir). Lidar points and 3D boxes are in the
        *rect camera* coordinate system (as in the 3D box label files)

    Input:
        idx_filename: string, each line of the file is a sample ID
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        res_label_dir: string, directory with predicted KITTI-format label
            files (one %06d.txt per sample)
        type_whitelist: a list of strings, object types we are interested in.
        img_height_threshold: int, neglect 2D boxes whose pixel height is
            below this.
        lidar_point_threshold: int, neglect cuboids with fewer lidar points
            than this.
    Output:
        None (will write a .pickle file to the disk)
    '''

    dataset = kitti_object(os.path.join(ROOT_DIR, 'data/kitti'), split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []
    type_list = []
    box2d_list = []
    prob_list = []
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    box3d_pred_list = []

    calib_list = []
    enlarge_box3d_list = []
    enlarge_box3d_size_list = []
    enlarge_box3d_angle_list = []

    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # calibration object (holds the 3x4 projection matrix P)

        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:, 0:3],
                                                                 calib, 0, 0, img_width, img_height, True)

        pc_image_coord = pc_image_coord[img_fov_inds]
        pc_rect = pc_rect[img_fov_inds]

        label_filename = os.path.join(res_label_dir, '%06d.txt' % (data_idx))

        objects = utils.read_label(label_filename)

        for obj_idx in range(len(objects)):
            if objects[obj_idx].type not in type_whitelist:
                continue

            # 2D BOX: Get pts rect backprojected
            box2d = objects[obj_idx].box2d
            xmin, ymin, xmax, ymax = box2d

            obj = objects[obj_idx]

            l, w, h = obj.l, obj.w, obj.h
            cx, cy, cz = obj.t
            ry = obj.ry
            cy = cy - h / 2  # obj.t is the box bottom center; shift to the geometric center (y points down in rect coords)

            obj_array = np.array([cx, cy, cz, l, w, h, ry])

            box3d_pts_3d = compute_box_3d_obj_array(obj_array)

            ratio = 1.2  # enlarge each box dimension by 20%
            enlarge_obj_array = obj_array.copy()
            enlarge_obj_array[3:6] = enlarge_obj_array[3:6] * ratio
            box3d_pts_3d_l = compute_box_3d_obj_array(enlarge_obj_array)

            _, inds = extract_pc_in_box3d(pc_rect, box3d_pts_3d_l)
            pc_in_cuboid = pc_rect[inds]
            pc_box_image_coord = pc_image_coord[inds]

            box3d_center = enlarge_obj_array[:3]

            frustum_angle = -1 * np.arctan2(box3d_center[2],
                                            box3d_center[0])

            # Skip 2D boxes that are too small or cuboids with too few points
            if ymax - ymin < img_height_threshold or xmax - xmin < 1 or \
                    len(pc_in_cuboid) < lidar_point_threshold:
                continue

            id_list.append(data_idx)
            input_list.append(pc_in_cuboid.astype(np.float32, copy=False))
            type_list.append(objects[obj_idx].type)
            frustum_angle_list.append(frustum_angle)
            prob_list.append(obj.score)
            box2d_list.append(box2d)

            box3d_pred_list.append(box3d_pts_3d)

            enlarge_box3d_list.append(box3d_pts_3d_l)
            enlarge_box3d_size_list.append(enlarge_obj_array[3:6])
            enlarge_box3d_angle_list.append(enlarge_obj_array[-1])

            calib_list.append(calib.calib_dict)

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp, -1)
        pickle.dump(box2d_list, fp, -1)
        pickle.dump(input_list, fp, -1)
        pickle.dump(type_list, fp, -1)
        pickle.dump(frustum_angle_list, fp, -1)
        pickle.dump(prob_list, fp, -1)

        pickle.dump(calib_list, fp, -1)

        pickle.dump(enlarge_box3d_list, fp, -1)
        pickle.dump(enlarge_box3d_size_list, fp, -1)
        pickle.dump(enlarge_box3d_angle_list, fp, -1)

    print('total_objects %d' % len(id_list))
    print('saved to {}'.format(output_filename))
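
compute_box_3d_obj_array converts the [cx, cy, cz, l, w, h, ry] array into the (8, 3) corner matrix used above. A minimal sketch, assuming a center-based box (note the cy - h / 2 shift earlier) rotated about the camera y-axis; the exact corner ordering is an assumption:

import numpy as np

def compute_box_3d_obj_array(obj_array):
    # Hypothetical sketch: 7-parameter box to 8 corners in rect camera coords.
    cx, cy, cz, l, w, h, ry = obj_array
    x_corners = np.array([ l,  l, -l, -l,  l,  l, -l, -l]) / 2.0
    y_corners = np.array([ h,  h,  h,  h, -h, -h, -h, -h]) / 2.0
    z_corners = np.array([ w, -w, -w,  w,  w, -w, -w,  w]) / 2.0
    c, s = np.cos(ry), np.sin(ry)
    R = np.array([[ c, 0.0,  s],
                  [0.0, 1.0, 0.0],
                  [-s, 0.0,  c]])  # rotation about the camera y-axis
    corners = R @ np.vstack([x_corners, y_corners, z_corners])  # (3, 8)
    return (corners + np.array([[cx], [cy], [cz]])).T  # (8, 3)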
Example #5
def extract_frustum_det_data(idx_filename, split, output_filename, res_label_dir,
                             perturb_box2d=False, augmentX=1, type_whitelist=['Car'], remove_diff=False):
    ''' Extract point clouds and corresponding annotations in enlarged
        cuboids generated from predicted 3D boxes (read from res_label_dir).
        Lidar points and 3D boxes are in the *rect camera* coordinate system
        (as in the 3D box label files)

    Input:
        idx_filename: string, each line of the file is a sample ID
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        res_label_dir: string, directory with predicted KITTI-format label
            files (one %06d.txt per sample)
        perturb_box2d: bool, whether to perturb the enlarged 3D box
            (used for data augmentation in train set)
        augmentX: scalar, how many augmentations to have for each box.
        type_whitelist: a list of strings, object types we are interested in.
        remove_diff: bool, passed to extract_boxes to filter the ground-truth
            objects.
    Output:
        None (will write a .pickle file to the disk)
    '''
    dataset = kitti_object(os.path.join(ROOT_DIR, 'data/kitti'), split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box3d_list = []  # (8,3) array in rect camera coord
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. Car
    heading_list = []  # ry (rotation around y-axis in rect camera coord), in radians;
    # (cont.) clockwise angle from positive x-axis in velo coord.
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis

    gt_box2d_list = []
    calib_list = []

    enlarge_box3d_list = []
    enlarge_box3d_size_list = []
    enlarge_box3d_angle_list = []

    pos_cnt = 0
    all_cnt = 0
    thresh = 0.5 if 'Car' in type_whitelist else 0.25

    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # calibration object (holds the 3x4 projection matrix P)

        gt_objects = dataset.get_label_objects(data_idx)
        gt_objects, gt_boxes_2d, gt_boxes_3d = extract_boxes(
            gt_objects, type_whitelist, remove_diff)

        if len(gt_objects) == 0:
            continue

        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:, 0:3],
                                                                 calib, 0, 0, img_width, img_height, True)

        pc_rect = pc_rect[img_fov_inds, :]
        pc_image_coord = pc_image_coord[img_fov_inds]

        label_filename = os.path.join(res_label_dir, '%06d.txt' % (data_idx))

        objects = utils.read_label(label_filename)

        for obj_idx in range(len(objects)):
            if objects[obj_idx].type not in type_whitelist:
                continue

            obj = objects[obj_idx]

            l, w, h = obj.l, obj.w, obj.h
            cx, cy, cz = obj.t
            ry = obj.ry
            cy = cy - h / 2  # obj.t is the box bottom center; shift to the geometric center (y points down in rect coords)

            obj_array = np.array([cx, cy, cz, l, w, h, ry])
            ratio = 1.2  # enlarge each box dimension by 20%
            enlarge_obj_array = obj_array.copy()
            enlarge_obj_array[3:6] = enlarge_obj_array[3:6] * ratio

            overlap = rbbox_iou_3d(obj_array.reshape(-1, 7), gt_boxes_3d)
            overlap = overlap[0]
            max_overlap = overlap.max(0)
            max_idx = overlap.argmax(0)
            if max_overlap < thresh:
                continue

            gt_obj = gt_objects[max_idx]
            gt_box2d = gt_objects[max_idx].box2d

            l, w, h = gt_obj.l, gt_obj.w, gt_obj.h
            cx, cy, cz = gt_obj.t
            ry = gt_obj.ry
            cy = cy - h / 2

            gt_obj_array = np.array([cx, cy, cz, l, w, h, ry])

            box3d_pts_3d = compute_box_3d_obj_array(gt_obj_array)

            for _ in range(augmentX):
                # Augment data by perturbing the enlarged 3D box
                if perturb_box2d:
                    enlarge_obj_array = random_shift_rotate_box3d(
                        enlarge_obj_array, 0.05)
                box3d_corners_enlarge = compute_box_3d_obj_array(
                    enlarge_obj_array)

                _, inds = extract_pc_in_box3d(pc_rect, box3d_corners_enlarge)

                pc_in_cuboid = pc_rect[inds]

                _, inds = extract_pc_in_box3d(pc_in_cuboid, box3d_pts_3d)

                label = np.zeros((pc_in_cuboid.shape[0]))
                label[inds] = 1

                # Get 3D BOX heading
                heading_angle = gt_obj.ry
                # Get 3D BOX size
                box3d_size = np.array([gt_obj.l, gt_obj.w, gt_obj.h])

                # Reject boxes with no lidar points inside the ground-truth 3D box
                if np.sum(label) == 0:
                    continue

                box3d_center = enlarge_obj_array[:3]

                frustum_angle = -1 * np.arctan2(box3d_center[2],
                                                box3d_center[0])

                id_list.append(data_idx)
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_cuboid)
                label_list.append(label)
                type_list.append(objects[obj_idx].type)
                heading_list.append(heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)

                gt_box2d_list.append(gt_box2d)
                calib_list.append(calib.calib_dict)
                enlarge_box3d_list.append(box3d_corners_enlarge)
                enlarge_box3d_size_list.append(enlarge_obj_array[3:6])
                enlarge_box3d_angle_list.append(enlarge_obj_array[-1])
                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_cuboid.shape[0]

    print('total_objects %d' % len(id_list))
    print('Average pos ratio: %f' % (pos_cnt / float(all_cnt)))
    print('Average npoints: %f' % (float(all_cnt) / len(id_list)))

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp, -1)
        pickle.dump(box3d_list, fp, -1)
        pickle.dump(input_list, fp, -1)
        pickle.dump(label_list, fp, -1)
        pickle.dump(type_list, fp, -1)
        pickle.dump(heading_list, fp, -1)
        pickle.dump(box3d_size_list, fp, -1)
        pickle.dump(frustum_angle_list, fp, -1)
        pickle.dump(gt_box2d_list, fp, -1)
        pickle.dump(calib_list, fp, -1)
        pickle.dump(enlarge_box3d_list, fp, -1)
        pickle.dump(enlarge_box3d_size_list, fp, -1)
        pickle.dump(enlarge_box3d_angle_list, fp, -1)

    print('saved to {}'.format(output_filename))
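
extract_pc_in_box3d, used throughout these examples, selects the lidar points that fall inside an (8, 3) corner box. In the original Frustum PointNets code this kind of test is done with a Delaunay convex-hull membership check; a sketch along those lines:

import numpy as np
from scipy.spatial import Delaunay

def extract_pc_in_box3d(pc, box3d_corners):
    # Triangulate the 8 corners and keep points inside some simplex.
    hull = Delaunay(box3d_corners)
    inds = hull.find_simplex(pc[:, 0:3]) >= 0
    return pc[inds, :], inds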
Example #6
def extract_frustum_data(
    data_name,
    object_i,
    dataset,
    classes_mapper,
    perturb_box2d=False,
    augmentX=1,
    type_whitelist=['Car'],
):
    ''' Extract the point cloud and corresponding annotations in the frustum
        generated from a single object's 2D bounding box.
        Lidar points and 3D boxes are in the *rect camera* coordinate system
        (as in the 3D box label files)

    Input:
        data_name: sample name/ID used to index the dataset
        object_i: a single label object (with box2d, type, t, ry, l, w, h)
        dataset: kitti_object-style dataset providing calibration, lidar and images
        classes_mapper: dict mapping type strings to class labels
        perturb_box2d: bool, whether to perturb the box2d
            (used for data augmentation in train set)
        augmentX: scalar, how many augmentations to have for each 2D box.
        type_whitelist: a list of strings, object types we are interested in
            (accepted but unused here; filtering must happen in the caller).
    Output:
        dict with the extracted frustum sample, or None if the image is
        missing or no points fall inside the 3D box
    '''
    data = {}

    calib = dataset.get_calibration(data_name)  # calibration object (holds the 3x4 projection matrix P)
    pc_velo = dataset.get_lidar(data_name)
    pc_velo = dataset.get_lidar(data_name)
    pc_rect = np.zeros_like(pc_velo)
    pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
    pc_rect[:, 3] = pc_velo[:, 3]
    img = dataset.get_image(data_name)
    if img is None:
        print('SKIP IMAGE!')
        return None
    img_height, img_width, img_channel = img.shape
    _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(
        pc_velo[:, 0:3], calib, 0, 0, img_width, img_height, True)

    # 2D BOX: Get pts rect backprojected
    box2d = object_i.box2d
    for _ in range(augmentX):
        # Augment data by box2d perturbation.
        # NOTE: with augmentX > 1 only the last iteration's sample is kept,
        # since `data` is rebuilt on every pass and returned after the loop.
        if perturb_box2d:
            xmin, ymin, xmax, ymax = random_shift_box2d(
                box2d, img_height, img_width, 0.1)
        else:
            xmin, ymin, xmax, ymax = box2d
        box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                       (pc_image_coord[:, 0] >= xmin) & \
                       (pc_image_coord[:, 1] < ymax) & \
                       (pc_image_coord[:, 1] >= ymin)
        box_fov_inds = box_fov_inds & img_fov_inds
        pc_in_box_fov = pc_rect[box_fov_inds, :]

        pc_box_image_coord = pc_image_coord[box_fov_inds]

        # Get frustum angle (according to center pixel in 2D BOX)
        box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
        uvdepth = np.zeros((1, 3))
        uvdepth[0, 0:2] = box2d_center
        uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
        box2d_center_rect = calib.project_image_to_rect(uvdepth)
        frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                        box2d_center_rect[0, 0])
        # 3D BOX: Get pts velo in 3d box
        obj = object_i
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
        label = np.zeros((pc_in_box_fov.shape[0]))
        label[inds] = 1

        # Get 3D BOX heading
        heading_angle = obj.ry
        # Get 3D BOX size
        box3d_size = np.array([obj.l, obj.w, obj.h])

        # Reject boxes with no lidar points inside the 3D box
        if np.sum(label) == 0:
            return None

        id_i = data_name
        box2d_i = np.array([xmin, ymin, xmax, ymax])
        box3d_i = box3d_pts_3d
        input_i = pc_in_box_fov.astype(np.float32, copy=False)
        label_i = label
        type_i = object_i.type
        heading_i = heading_angle
        box3d_size_i = box3d_size
        frustum_angle_i = frustum_angle

        gt_box2d_i = box2d
        calib_i = calib.calib_dict
        data = {
            'id_i': id_i,
            'box2d_i': box2d_i,
            'box3d_i': box3d_i,
            'input_i': input_i,
            'label_i': label_i,
            'type_i': classes_mapper[type_i],
            'heading_i': heading_i,
            'box3d_size_i': box3d_size_i,
            'frustum_angle_i': frustum_angle_i,
            'gt_box2d_i': gt_box2d_i,
            'calib_i': calib_i
        }
    return data
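
Unlike the earlier examples, this variant processes one object at a time and returns a dict (or None), and its type_whitelist parameter is unused, so class filtering must happen in the caller. A hypothetical driver loop (data_name_list and the dataset interface here are assumptions, not part of the original code):

samples = []
for data_name in data_name_list:
    for object_i in dataset.get_label_objects(data_name):
        if object_i.type not in type_whitelist:
            continue  # filter here, since the function itself does not
        sample = extract_frustum_data(data_name, object_i, dataset,
                                      classes_mapper,
                                      perturb_box2d=True, augmentX=1)
        if sample is not None:
            samples.append(sample)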