Example #1
def generate_rpn_training_labels(pts_rect, gt_boxes3d):
    cls_label = np.zeros((pts_rect.shape[0]), dtype=np.int32)
    reg_label = np.zeros((pts_rect.shape[0], 7),
                         dtype=np.float32)  # dx, dy, dz, h, w, l, ry
    gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, rotate=True)
    extend_gt_boxes3d = kitti_utils.enlarge_box3d(gt_boxes3d, extra_width=0.2)
    extend_gt_corners = kitti_utils.boxes3d_to_corners3d(extend_gt_boxes3d,
                                                         rotate=True)

    for k in range(gt_boxes3d.shape[0]):
        box_corners = gt_corners[k]
        fg_pt_flag = in_hull(pts_rect, box_corners)
        fg_pts_rect = pts_rect[fg_pt_flag]
        cls_label[fg_pt_flag] = 1

        # enlarge the bbox3d, ignore nearby points
        extend_box_corners = extend_gt_corners[k]
        fg_enlarge_flag = in_hull(pts_rect, extend_box_corners)
        ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)
        cls_label[ignore_flag] = -1

        # offset from each foreground point to the object center
        center3d = gt_boxes3d[k][0:3].copy()  # (x, y, z)
        center3d[1] -= gt_boxes3d[k][3] / 2  # y is now the true center of the 3D box
        reg_label[fg_pt_flag, 0:3] = center3d - fg_pts_rect

        # size and angle encoding
        reg_label[fg_pt_flag, 3] = gt_boxes3d[k][3]  # h
        reg_label[fg_pt_flag, 4] = gt_boxes3d[k][4]  # w
        reg_label[fg_pt_flag, 5] = gt_boxes3d[k][5]  # l
        reg_label[fg_pt_flag, 6] = gt_boxes3d[k][6]  # ry

    return cls_label, reg_label
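# A minimal usage sketch with hypothetical data: pts_rect is (N, 3) points in the
# camera-rectified frame, gt_boxes3d is (M, 7) boxes as [x, y, z, h, w, l, ry].
# Assumes the repo's kitti_utils and in_hull used above are importable.
pts_rect = np.random.uniform(-10.0, 10.0, size=(1024, 3)).astype(np.float32)
gt_boxes3d = np.array([[0.0, 1.6, 5.0, 1.5, 1.6, 3.9, 0.1]], dtype=np.float32)
cls_label, reg_label = generate_rpn_training_labels(pts_rect, gt_boxes3d)
print(cls_label.shape, reg_label.shape)  # -> (1024,) (1024, 7)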
    def aug_roi_by_noise(self, roi_info):
        """
        Add noise to the original roi to get aug_box3d
        :param roi_info: dict with 'roi_box3d', 'gt_box3d', 'iou3d', 'type'
        :return: aug_box3d: (7)
        """
        roi_box3d, gt_box3d = roi_info['roi_box3d'], roi_info['gt_box3d']
        original_iou = roi_info['iou3d']
        temp_iou = cnt = 0
        pos_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
        gt_corners = kitti_utils.boxes3d_to_corners3d(gt_box3d.reshape(-1, 7))
        aug_box3d = roi_box3d
        while temp_iou < pos_thresh and cnt < 10:
            if roi_info['type'] == 'gt':
                aug_box3d = self.random_aug_box3d(roi_box3d)  # a GT box must always be perturbed
            else:
                if np.random.rand() < 0.2:
                    aug_box3d = roi_box3d  # p=0.2 to keep the original roi box
                else:
                    aug_box3d = self.random_aug_box3d(roi_box3d)
            aug_corners = kitti_utils.boxes3d_to_corners3d(aug_box3d.reshape(-1, 7))
            iou3d = kitti_utils.get_iou3d(aug_corners, gt_corners)
            temp_iou = iou3d[0][0]
            cnt += 1
            if original_iou < pos_thresh:  # original roi was background, one try is enough
                break
        return aug_box3d
    def aug_roi_by_noise_batch(self, roi_boxes3d, gt_boxes3d, aug_times=10):
        """
        :param roi_boxes3d: (N, 7)
        :param gt_boxes3d: (N, 7)
        :param aug_times: max number of augmentation attempts per roi
        :return: roi_boxes3d: (N, 7), iou_of_rois: (N)
        """
        iou_of_rois = np.zeros(roi_boxes3d.shape[0], dtype=np.float32)
        for k in range(roi_boxes3d.__len__()):
            temp_iou = cnt = 0
            roi_box3d = roi_boxes3d[k]
            gt_box3d = gt_boxes3d[k]
            pos_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
            gt_corners = kitti_utils.boxes3d_to_corners3d(gt_box3d.reshape(1, 7))
            aug_box3d = roi_box3d
            while temp_iou < pos_thresh and cnt < aug_times:
                if np.random.rand() < 0.2:
                    aug_box3d = roi_box3d  # p=0.2 to keep the original roi box
                else:
                    aug_box3d = self.random_aug_box3d(roi_box3d)
                aug_corners = kitti_utils.boxes3d_to_corners3d(aug_box3d.reshape(1, 7))
                iou3d = kitti_utils.get_iou3d(aug_corners, gt_corners)
                temp_iou = iou3d[0][0]
                cnt += 1
            roi_boxes3d[k] = aug_box3d
            iou_of_rois[k] = temp_iou
        return roi_boxes3d, iou_of_rois
def save_kitti_format(calib, bbox3d, obj_list, img_shape, save_fp):
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)

    img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
    img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
    img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
    img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)

    # Discard boxes that are larger than 80% of the image width OR height
    img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
    img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
    box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)

    for k in range(bbox3d.shape[0]):
        if box_valid_mask[k] == 0:
            continue
        x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
        beta = np.arctan2(z, x)
        alpha = -np.sign(beta) * np.pi / 2 + beta + ry

        print('%s %.2f %d %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
              (args.class_name, obj_list[k].trucation, int(obj_list[k].occlusion), alpha, img_boxes[k, 0], img_boxes[k, 1],
               img_boxes[k, 2], img_boxes[k, 3],
               bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
               bbox3d[k, 6]), file=save_fp)
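# Note: alpha is the KITTI observation angle. For boxes in front of the camera
# (z > 0) the sign-based expression above equals the familiar
# alpha = ry - arctan2(x, z); a quick numeric check with hypothetical values:
x, z, ry = 3.0, 10.0, 0.4
beta = np.arctan2(z, x)
assert np.isclose(-np.sign(beta) * np.pi / 2 + beta + ry, ry - np.arctan2(x, z))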
Example #5
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)

    img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
    img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
    img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
    img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)

    img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
    img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
    box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)

    kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
    with open(kitti_output_file, 'w') as f:
        for k in range(bbox3d.shape[0]):
            if box_valid_mask[k] == 0:
                continue
            x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
            beta = np.arctan2(z, x)
            alpha = -np.sign(beta) * np.pi / 2 + beta + ry
            # TODO: instead of cfg.CLASSES, write the string version of bbox3d[k, 7] here
            print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
                  (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
                   bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
                   bbox3d[k, 6], scores[k]), file=f)
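# For reference, each line written above follows the KITTI detection-result
# layout: type, truncated, occluded, alpha, 2D bbox (left, top, right, bottom),
# h, w, l, location (x, y, z), rotation_y, score. A small parser sketch:
def parse_kitti_result_line(line):
    vals = line.split()
    return {
        'type': vals[0],
        'truncated': float(vals[1]),
        'occluded': int(float(vals[2])),
        'alpha': float(vals[3]),
        'bbox2d': [float(v) for v in vals[4:8]],  # left, top, right, bottom
        'h_w_l': [float(v) for v in vals[8:11]],
        'xyz': [float(v) for v in vals[11:14]],   # camera-frame location
        'ry': float(vals[14]),
        'score': float(vals[15]),
    }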
Example #6
def dist_to_boxes(points, boxes):
    """
    Calculates combined distance for each point to all boxes
    :param points: (N, 3)
    :param boxes: (N, 7) [x, y, z, h, w, l, ry]
    :return: distances_array: (M) torch.Tensor of [(N), (N), ...] distances
    """
    distances_array = torch.Tensor([])
    box_corners = kitti_utils.boxes3d_to_corners3d(boxes)

    for box in box_corners:
        # centroid of the box from the axis-aligned extent of its 8 corners
        centroid = np.array([(box[:, 0].max() + box[:, 0].min()) / 2,
                             (box[:, 1].max() + box[:, 1].min()) / 2,
                             (box[:, 2].max() + box[:, 2].min()) / 2])
        dists_to_curr_box = dist_to_box_centroid(
            torch.from_numpy(points),
            torch.from_numpy(centroid)).reshape(1, len(points))
        distances_array = torch.cat(
            (distances_array.float(), dists_to_curr_box.float()), 0)

    return distances_array
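# Growing a tensor with torch.cat inside the loop copies repeatedly; a
# vectorized sketch, assuming dist_to_box_centroid is plain Euclidean distance:
def dist_to_boxes_vectorized(points, boxes):
    box_corners = kitti_utils.boxes3d_to_corners3d(boxes)                # (M, 8, 3)
    centroids = (box_corners.max(axis=1) + box_corners.min(axis=1)) / 2  # (M, 3)
    return torch.cdist(torch.from_numpy(centroids).float(),
                       torch.from_numpy(points).float())                 # (M, N)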
def save_argo_format(sample_id, bbox3d, argo_output_dir, scores,
                     lidar_idx_table):
    data = []
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    argo_output_file = os.path.join(
        argo_output_dir, lidar_idx_table['%06d' % sample_id] + '.json')
    with open(argo_output_file, 'w') as f:
        for k in range(bbox3d.shape[0]):
            temp = {}
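            # assumed axis remap: camera (x right, y down, z forward) -> ego (x forward, y left, z up)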
            temp['center'] = {
                'x': float(bbox3d[k, 2]),
                'y': -float(bbox3d[k, 0]),
                'z': -float(bbox3d[k, 1])
            }
            temp['length'] = float(bbox3d[k, 5])
            temp['width'] = float(bbox3d[k, 4])
            temp['height'] = float(bbox3d[k, 3])
            q = pyquaternion.Quaternion(axis=(0.0, 0.0, 1.0),
                                        radians=(np.pi / 2. - bbox3d[k, 6]))
            temp['rotation'] = {'w': q.w, 'x': q.x, 'y': q.y, 'z': q.z}
            temp['score'] = float(scores[k])
            temp['label_class'] = cfg.CLASSES

            data.append(temp)
        json.dump(data, f)
def save_kitti_format(sample_id, bbox3d, kitti_output_dir, scores,
                      lidar_idx_table):
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    print("--------------------")
    kitti_output_file = os.path.join(
        kitti_output_dir, lidar_idx_table['%06d' % sample_id] + '.txt')
    with open(kitti_output_file, 'w') as f:
        for k in range(bbox3d.shape[0]):
            x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
            beta = np.arctan2(z, x)
            alpha = -np.sign(beta) * np.pi / 2 + beta + ry
            print('%s %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
                  (cfg.CLASSES, bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5],
                   bbox3d[k, 2], -bbox3d[k, 0], -bbox3d[k, 1],
                   (np.pi / 2. - bbox3d[k, 6]), scores[k]),
                  file=f)
    def visualize_lidar_plane(self, bbox3d, frame_id):
        marker_array = MarkerArray()
        marker = Marker()
        marker.header.frame_id = frame_id
        marker.type = marker.LINE_LIST
        marker.action = marker.ADD
        marker.header.stamp = rospy.Time.now()

        # marker scale (scale y and z not used due to being linelist)
        marker.scale.x = 0.08
        # marker color
        marker.color.a = 1.0
        marker.color.r = 1.0
        marker.color.g = 1.0
        marker.color.b = 0.0

        marker.pose.position.x = 0.0
        marker.pose.position.y = 0.0
        marker.pose.position.z = 0.0

        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 1.0
        marker.points = []
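        # corner-index pairs: LINE_LIST consumes points two at a time, so these 24 indices draw the 12 box edges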
        corner_for_box_list = [0, 1, 0, 3, 2, 3, 2, 1, 4, 5, 4, 7, 6, 7, 6, 5, 3, 7, 0, 4, 1, 5, 2, 6]
        corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)  # (N,8,3)
        for box_nr in range(corners3d.shape[0]):
            box3d_pts_3d_velo = corners3d[box_nr]  # (8,3)
            for corner in corner_for_box_list:
                corner_xyz = np.array(box3d_pts_3d_velo[corner, 0:3])  # a corner has only 3 coords
                transformed_p = transform_point(corner_xyz, np.linalg.inv(self.Tr_velo_kitti_cam))
                p = Point()
                p.x = transformed_p[0]
                p.y = transformed_p[1]
                p.z = transformed_p[2]
                marker.points.append(p)
        marker_array.markers.append(marker)

        for i, m in enumerate(marker_array.markers):
            m.id = i
        self.mk_pub.publish(marker_array)
        marker_array.markers = []
Example #10
        for label in labels:
            x_list.append(label.pos[0])
            y_list.append(label.pos[1])
            z_list.append(label.pos[2])
            l_list.append(label.l)
            w_list.append(label.w)
            h_list.append(label.h)
            ry_list.append(label.ry)
        points = align_img_and_pc(img_dir, pc_dir, calib_dir)
        # print("numpoints: ", len(points))
        num_pts_per_scene.append(len(points))

        # Get the foreground and background label
        bboxes3d = kitti_utils.objs_to_boxes3d(labels)
        # print("Number of bboxes: ",len(bboxes3d))
        bboxes3d_rotated_corners = kitti_utils.boxes3d_to_corners3d(bboxes3d)
        box3d_roi_inds_overall = None
        sub_box3d_roi_inds_overall = None
        valid_labels = []
        for i, bbox3d_corners in enumerate(bboxes3d_rotated_corners):
            # print("bboxes3d_rotated_corners: ", bboxes3d_rotated_corners[i])
            box3d_roi_inds = kitti_utils.in_hull(points[:, :3], bbox3d_corners)
            # box3d_roi_inds = kitti_utils.in_hull(bbox3d_corners[:,:3], bbox3d_corners)
            # print("xmin: ", np.min(points[:,0]), " xmax: ", np.max(points[:,0]))
            # print("ymin: ", np.min(points[:,1]), " ymax: ", np.max(points[:,1]))
            # print("zmin: ", np.min(points[:,2]), " zmax: ", np.max(points[:,2]))
            # pc_filter = kitti_utils.extract_pc_in_box3d(points[:,:3], bbox3d_corners)
            # sub_pc_filter = kitti_utils.extract_pc_in_box3d(points[:,:3], bbox3d_corners)
            # print("pc_filter.shape: ", pc_filter[0].shape)
            # print("interested indices shape: ", box3d_roi_inds.shape)
            # print("interested indices: ", box3d_roi_inds)
    def get_rcnn_training_sample_batch(self, index):
        sample_id = int(self.sample_id_list[index])
        rpn_xyz, rpn_features, rpn_intensity, seg_mask = \
            self.get_rpn_features(self.rcnn_training_feature_dir, sample_id)

        # load rois and gt_boxes3d for this sample
        roi_file = os.path.join(self.rcnn_training_roi_dir, '%06d.txt' % sample_id)
        roi_obj_list = kitti_utils.get_objects_from_label(roi_file)
        roi_boxes3d = kitti_utils.objs_to_boxes3d(roi_obj_list)
        # roi_scores = kitti_utils.objs_to_scores(roi_obj_list)

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

        # calculate original iou
        iou3d = kitti_utils.get_iou3d(kitti_utils.boxes3d_to_corners3d(roi_boxes3d),
                                      kitti_utils.boxes3d_to_corners3d(gt_boxes3d))
        max_overlaps, gt_assignment = iou3d.max(axis=1), iou3d.argmax(axis=1)
        max_iou_of_gt, roi_assignment = iou3d.max(axis=0), iou3d.argmax(axis=0)
        roi_assignment = roi_assignment[max_iou_of_gt > 0].reshape(-1)

        # sample fg, easy_bg, hard_bg
        fg_rois_per_lidar = int(np.round(cfg.RCNN.FG_RATIO * cfg.RCNN.ROI_PER_lidar))
        fg_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
        fg_inds = np.nonzero(max_overlaps >= fg_thresh)[0]
        fg_inds = np.concatenate((fg_inds, roi_assignment), axis=0)  # consider the roi which has max_overlaps with gt as fg

        easy_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH_LO))[0]
        hard_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH) &
                                  (max_overlaps >= cfg.RCNN.CLS_BG_THRESH_LO))[0]

        fg_num_rois = fg_inds.size
        bg_num_rois = hard_bg_inds.size + easy_bg_inds.size

        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_lidar = min(fg_rois_per_lidar, fg_num_rois)
            rand_num = np.random.permutation(fg_num_rois)
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_lidar]]

            # sampling bg
            bg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar - fg_rois_per_this_lidar
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar)

        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg
            rand_num = np.floor(np.random.rand(cfg.RCNN.ROI_PER_lidar) * fg_num_rois)
            rand_num = torch.from_numpy(rand_num).type_as(gt_boxes3d).long()
            fg_inds = fg_inds[rand_num]
            fg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar
            bg_rois_per_this_lidar = 0
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar)
            fg_rois_per_this_lidar = 0
        else:
            # neither fg nor bg rois available for this sample
            raise NotImplementedError

        # augment the rois by noise
        roi_list, roi_iou_list, roi_gt_list = [], [], []
        if fg_rois_per_this_lidar > 0:
            fg_rois_src = roi_boxes3d[fg_inds].copy()
            gt_of_fg_rois = gt_boxes3d[gt_assignment[fg_inds]]
            fg_rois, fg_iou3d = self.aug_roi_by_noise_batch(fg_rois_src, gt_of_fg_rois, aug_times=10)
            roi_list.append(fg_rois)
            roi_iou_list.append(fg_iou3d)
            roi_gt_list.append(gt_of_fg_rois)

        if bg_rois_per_this_lidar > 0:
            bg_rois_src = roi_boxes3d[bg_inds].copy()
            gt_of_bg_rois = gt_boxes3d[gt_assignment[bg_inds]]
            bg_rois, bg_iou3d = self.aug_roi_by_noise_batch(bg_rois_src, gt_of_bg_rois, aug_times=1)
            roi_list.append(bg_rois)
            roi_iou_list.append(bg_iou3d)
            roi_gt_list.append(gt_of_bg_rois)

        rois = np.concatenate(roi_list, axis=0)
        iou_of_rois = np.concatenate(roi_iou_list, axis=0)
        gt_of_rois = np.concatenate(roi_gt_list, axis=0)

        # collect extra features for point cloud pooling
        if cfg.RCNN.USE_INTENSITY:
            pts_extra_input_list = [rpn_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
        else:
            pts_extra_input_list = [seg_mask.reshape(-1, 1)]

        if cfg.RCNN.USE_DEPTH:
            pts_depth = (np.linalg.norm(rpn_xyz, ord=2, axis=1) / 70.0) - 0.5
            pts_extra_input_list.append(pts_depth.reshape(-1, 1))
        pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)

        pts_input, pts_features, pts_empty_flag = roipool3d_utils.roipool3d_cpu(rois, rpn_xyz, rpn_features,
                                                                                pts_extra_input,
                                                                                cfg.RCNN.POOL_EXTRA_WIDTH,
                                                                                sampled_pt_num=cfg.RCNN.NUM_POINTS,
                                                                                canonical_transform=False)


        valid_mask = (pts_empty_flag == 0).astype(np.int32)

        # regression valid mask
        reg_valid_mask = (iou_of_rois > cfg.RCNN.REG_FG_THRESH).astype(np.int32) & valid_mask

        # classification label
        cls_label = (iou_of_rois > cfg.RCNN.CLS_FG_THRESH).astype(np.int32)
        invalid_mask = (iou_of_rois > cfg.RCNN.CLS_BG_THRESH) & (iou_of_rois < cfg.RCNN.CLS_FG_THRESH)
        cls_label[invalid_mask] = -1
        cls_label[valid_mask == 0] = -1

        # canonical transform and sampling
        pts_input_ct, gt_boxes3d_ct = self.canonical_transform_batch(pts_input, rois, gt_of_rois)

        sample_info = {'sample_id': sample_id,
                       'pts_input': pts_input_ct,
                       'pts_features': pts_features,
                       'cls_label': cls_label,
                       'reg_valid_mask': reg_valid_mask,
                       'gt_boxes3d_ct': gt_boxes3d_ct,
                       'roi_boxes3d': rois,
                       'roi_size': rois[:, 3:6],
                       'gt_boxes3d': gt_of_rois}

        return sample_info
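    # sample_bg_inds is not shown in this snippet; given the hard/easy split
    # above, it presumably mixes hard and easy background rois roughly like
    # this (cfg.RCNN.HARD_BG_RATIO is an assumed config key):
    def sample_bg_inds(self, hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar):
        if hard_bg_inds.size > 0 and easy_bg_inds.size > 0:
            hard_bg_num = min(int(bg_rois_per_this_lidar * cfg.RCNN.HARD_BG_RATIO),
                              hard_bg_inds.size)
            easy_bg_num = bg_rois_per_this_lidar - hard_bg_num
            # sample with replacement so small pools can still fill the quota
            hard = hard_bg_inds[np.random.randint(0, hard_bg_inds.size, hard_bg_num)]
            easy = easy_bg_inds[np.random.randint(0, easy_bg_inds.size, easy_bg_num)]
            return np.concatenate((hard, easy), axis=0)
        pool = hard_bg_inds if hard_bg_inds.size > 0 else easy_bg_inds
        return pool[np.random.randint(0, pool.size, bg_rois_per_this_lidar)]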
    def get_rcnn_sample_info(self, roi_info):
        sample_id, gt_box3d = roi_info['sample_id'], roi_info['gt_box3d']
        rpn_xyz, rpn_features, rpn_intensity, seg_mask = self.rpn_feature_list[sample_id]

        # augmentation original roi by adding noise
        roi_box3d = self.aug_roi_by_noise(roi_info)

        # point cloud pooling based on roi_box3d
        pooled_boxes3d = kitti_utils.enlarge_box3d(roi_box3d.reshape(1, 7), cfg.RCNN.POOL_EXTRA_WIDTH)

        boxes_pts_mask_list = roipool3d_utils.pts_in_boxes3d_cpu(torch.from_numpy(rpn_xyz),
                                                                 torch.from_numpy(pooled_boxes3d))
        pt_mask_flag = (boxes_pts_mask_list[0].numpy() == 1)
        cur_pts = rpn_xyz[pt_mask_flag].astype(np.float32)

        # data augmentation
        aug_pts = cur_pts.copy()
        aug_gt_box3d = gt_box3d.copy().astype(np.float32)
        aug_roi_box3d = roi_box3d.copy()
        if cfg.AUG_DATA and self.mode == 'TRAIN':
            # calculate alpha by ry
            temp_boxes3d = np.concatenate([aug_roi_box3d.reshape(1, 7), aug_gt_box3d.reshape(1, 7)], axis=0)
            temp_x, temp_z, temp_ry = temp_boxes3d[:, 0], temp_boxes3d[:, 2], temp_boxes3d[:, 6]
            temp_beta = np.arctan2(temp_z, temp_x).astype(np.float64)
            temp_alpha = -np.sign(temp_beta) * np.pi / 2 + temp_beta + temp_ry

            # data augmentation
            aug_pts, aug_boxes3d, aug_method = self.data_augmentation(aug_pts, temp_boxes3d, temp_alpha, mustaug=True, stage=2)
            aug_roi_box3d, aug_gt_box3d = aug_boxes3d[0], aug_boxes3d[1]
            aug_gt_box3d = aug_gt_box3d.astype(gt_box3d.dtype)

        # Pool input points
        valid_mask = 1  # whether the input is valid

        if aug_pts.shape[0] == 0:
            pts_features = np.zeros((1, 128), dtype=np.float32)
            input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
            pts_input = np.zeros((1, input_channel), dtype=np.float32)
            valid_mask = 0
        else:
            pts_features = rpn_features[pt_mask_flag].astype(np.float32)
            pts_intensity = rpn_intensity[pt_mask_flag].astype(np.float32)

            if cfg.RCNN.USE_INTENSITY:
                pts_input_list = [aug_pts, pts_intensity.reshape(-1, 1)]
            else:
                pts_input_list = [aug_pts]

            if cfg.RCNN.USE_MASK:
                if cfg.RCNN.MASK_TYPE == 'seg':
                    pts_mask = seg_mask[pt_mask_flag].astype(np.float32)
                elif cfg.RCNN.MASK_TYPE == 'roi':
                    pts_mask = roipool3d_utils.pts_in_boxes3d_cpu(torch.from_numpy(aug_pts),
                                                                  torch.from_numpy(aug_roi_box3d.reshape(1, 7)))
                    pts_mask = (pts_mask[0].numpy() == 1).astype(np.float32)
                else:
                    raise NotImplementedError

                pts_input_list.append(pts_mask.reshape(-1, 1))

            if cfg.RCNN.USE_DEPTH:
                pts_depth = np.linalg.norm(aug_pts, axis=1, ord=2)
                pts_depth_norm = (pts_depth / 70.0) - 0.5
                pts_input_list.append(pts_depth_norm.reshape(-1, 1))

            pts_input = np.concatenate(pts_input_list, axis=1)  # (N, C)

        aug_gt_corners = kitti_utils.boxes3d_to_corners3d(aug_gt_box3d.reshape(-1, 7))
        aug_roi_corners = kitti_utils.boxes3d_to_corners3d(aug_roi_box3d.reshape(-1, 7))
        iou3d = kitti_utils.get_iou3d(aug_roi_corners, aug_gt_corners)
        cur_iou = iou3d[0][0]

        # regression valid mask
        reg_valid_mask = 1 if cur_iou >= cfg.RCNN.REG_FG_THRESH and valid_mask == 1 else 0

        # classification label
        cls_label = 1 if cur_iou > cfg.RCNN.CLS_FG_THRESH else 0
        if cfg.RCNN.CLS_BG_THRESH < cur_iou < cfg.RCNN.CLS_FG_THRESH or valid_mask == 0:
            cls_label = -1

        # canonical transform and sampling
        pts_input_ct, gt_box3d_ct = self.canonical_transform(pts_input, aug_roi_box3d, aug_gt_box3d)
        pts_input_ct, pts_features = self.rcnn_input_sample(pts_input_ct, pts_features)

        sample_info = {'sample_id': sample_id,
                       'pts_input': pts_input_ct,
                       'pts_features': pts_features,
                       'cls_label': cls_label,
                       'reg_valid_mask': reg_valid_mask,
                       'gt_boxes3d_ct': gt_box3d_ct,
                       'roi_boxes3d': aug_roi_box3d,
                       'roi_size': aug_roi_box3d[3:6],
                       'gt_boxes3d': aug_gt_box3d}

        return sample_info
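    # canonical_transform is not shown here; in PointRCNN-style code it
    # re-expresses the pooled points and the gt box in the roi's local frame:
    # translate by the roi center, then rotate by the roi yaw. A sketch under
    # that assumption, using the repo's kitti_utils.rotate_pc_along_y:
    @staticmethod
    def canonical_transform(pts_input, roi_box3d, gt_box3d):
        roi_ry = roi_box3d[6] % (2 * np.pi)
        roi_center = roi_box3d[0:3]
        pts_input[:, 0:3] = pts_input[:, 0:3] - roi_center
        pts_input[:, 0:3] = kitti_utils.rotate_pc_along_y(pts_input[:, 0:3], roi_ry)
        gt_box3d_ct = np.copy(gt_box3d)
        gt_box3d_ct[0:3] = gt_box3d_ct[0:3] - roi_center
        gt_box3d_ct[0:3] = kitti_utils.rotate_pc_along_y(
            gt_box3d_ct[0:3].reshape(1, 3), roi_ry).reshape(3)
        gt_box3d_ct[6] = gt_box3d_ct[6] - roi_ry
        return pts_input, gt_box3d_ct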
    def apply_gt_aug_to_one_scene(self, sample_id, pts_rect, pts_intensity, all_gt_boxes3d):
        """
        :param pts_rect: (N, 3)
        :param all_gt_boxex3d: (M2, 7)
        :return:
        """
        assert self.gt_database is not None
        # extra_gt_num = np.random.randint(10, 15)
        # try_times = 50
        if cfg.GT_AUG_RAND_NUM:
            extra_gt_num = np.random.randint(10, cfg.GT_EXTRA_NUM)
        else:
            extra_gt_num = cfg.GT_EXTRA_NUM
        try_times = 100
        cnt = 0
        cur_gt_boxes3d = all_gt_boxes3d.copy()
        cur_gt_boxes3d[:, 4] += 0.5  # TODO: consider different objects
        cur_gt_boxes3d[:, 5] += 0.5  # enlarge new added box to avoid too nearby boxes
        cur_gt_corners = kitti_utils.boxes3d_to_corners3d(cur_gt_boxes3d)

        extra_gt_obj_list = []
        extra_gt_boxes3d_list = []
        new_pts_list, new_pts_intensity_list = [], []
        src_pts_flag = np.ones(pts_rect.shape[0], dtype=np.int32)

        road_plane = self.get_road_plane(sample_id)
        a, b, c, d = road_plane

        while try_times > 0:
            if cnt > extra_gt_num:
                break

            try_times -= 1
            if cfg.GT_AUG_HARD_RATIO > 0:
                p = np.random.rand()
                if p > cfg.GT_AUG_HARD_RATIO:
                    # use easy sample
                    rand_idx = np.random.randint(0, len(self.gt_database[0]))
                    new_gt_dict = self.gt_database[0][rand_idx]
                else:
                    # use hard sample
                    rand_idx = np.random.randint(0, len(self.gt_database[1]))
                    new_gt_dict = self.gt_database[1][rand_idx]
            else:
                rand_idx = np.random.randint(0, self.gt_database.__len__())
                new_gt_dict = self.gt_database[rand_idx]

            new_gt_box3d = new_gt_dict['gt_box3d'].copy()
            new_gt_points = new_gt_dict['points'].copy()
            new_gt_intensity = new_gt_dict['intensity'].copy()
            new_gt_obj = new_gt_dict['obj']
            center = new_gt_box3d[0:3]
            if cfg.PC_REDUCE_BY_RANGE and not self.check_pc_range(center):
                continue

            if new_gt_points.__len__() < 5:  # too few points
                continue

            # put it on the road plane
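            # solve the road plane a*x + b*y + c*z + d = 0 for y (height) at the box center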
            cur_height = (-d - a * center[0] - c * center[2]) / b
            move_height = new_gt_box3d[1] - cur_height
            new_gt_box3d[1] -= move_height
            new_gt_points[:, 1] -= move_height
            new_gt_obj.pos[1] -= move_height

            new_enlarged_box3d = new_gt_box3d.copy()
            new_enlarged_box3d[4] += 0.5
            new_enlarged_box3d[5] += 0.5  # enlarge new added box to avoid too nearby boxes

            cnt += 1
            new_corners = kitti_utils.boxes3d_to_corners3d(new_enlarged_box3d.reshape(1, 7))
            iou3d = kitti_utils.get_iou3d(new_corners, cur_gt_corners)
            valid_flag = iou3d.max() < 1e-8
            if not valid_flag:
                continue

            enlarged_box3d = new_gt_box3d.copy()
            enlarged_box3d[3] += 2  # remove the points above and below the object

            boxes_pts_mask_list = roipool3d_utils.pts_in_boxes3d_cpu(
                torch.from_numpy(pts_rect), torch.from_numpy(enlarged_box3d.reshape(1, 7)))
            pt_mask_flag = (boxes_pts_mask_list[0].numpy() == 1)
            src_pts_flag[pt_mask_flag] = 0  # remove the original points which are inside the new box

            new_pts_list.append(new_gt_points)
            new_pts_intensity_list.append(new_gt_intensity)
            cur_gt_boxes3d = np.concatenate((cur_gt_boxes3d, new_enlarged_box3d.reshape(1, 7)), axis=0)
            cur_gt_corners = np.concatenate((cur_gt_corners, new_corners), axis=0)
            extra_gt_boxes3d_list.append(new_gt_box3d.reshape(1, 7))
            extra_gt_obj_list.append(new_gt_obj)

        if new_pts_list.__len__() == 0:
            return False, pts_rect, pts_intensity, None, None

        extra_gt_boxes3d = np.concatenate(extra_gt_boxes3d_list, axis=0)
        # remove original points and add new points
        pts_rect = pts_rect[src_pts_flag == 1]
        pts_intensity = pts_intensity[src_pts_flag == 1]
        new_pts_rect = np.concatenate(new_pts_list, axis=0)
        new_pts_intensity = np.concatenate(new_pts_intensity_list, axis=0)
        pts_rect = np.concatenate((pts_rect, new_pts_rect), axis=0)
        pts_intensity = np.concatenate((pts_intensity, new_pts_intensity), axis=0)

        return True, pts_rect, pts_intensity, extra_gt_boxes3d, extra_gt_obj_list
Example #14
        box_id = -1
        fg_flag = False
        gt_box = np.zeros(7)
        gt_mask = np.zeros((cur_prob_mask.shape))
        if foreground_flag[i]:
            fg_flag = True
        if foreground_flag_G[i]:
            box_id = proposal_gt_index[i].detach().cpu().numpy()
            gt_box = gt_boxes3d_cam[box_id].detach().cpu().numpy().reshape(7)
            gt_box[0] = gt_box[0] - cur_pts_center[0]
            gt_box[2] = gt_box[2] - cur_pts_center[2]
            gt_box[3] = gt_box[3] * 1.2
            gt_box[4] = gt_box[4] * 1.2
            gt_box[5] = gt_box[5] * 1.2
            gt_corners = kitti_utils.boxes3d_to_corners3d(gt_box.reshape(
                -1, 7),
                                                          rotate=True)
            gt_mask = kitti_utils.in_hull(cur_box_point,
                                          gt_corners.reshape(-1, 3)).reshape(
                                              -1, 1)

            gt_box = gt_boxes3d_cam[box_id].detach().cpu().numpy().reshape(7)
            gt_box[0] = gt_box[0] - cur_pts_center[0]
            gt_box[2] = gt_box[2] - cur_pts_center[2]

        # fig, ax = plt.subplots(figsize=(5, 5))
        # plt.title('%d / %d'%(box_id, gt_boxes3d_cam.shape[0]))
        # ax.axis([-4, 4, -4, 4])
        # plt.scatter(cur_box_point[:, 0], cur_box_point[:, 2], s=15, c=cur_prob_mask[:,0], edgecolor='none',
        #             cmap=plt.get_cmap('rainbow'), alpha=1, marker='.', vmin=0, vmax=1)
        # plt.scatter(np.zeros(1), np.zeros(1), s=200, c='black',
    def get_proposal_from_file(self, index):
        sample_id = int(self.lidar_idx_list[index])
        proposal_file = os.path.join(self.rcnn_eval_roi_dir, '%06d.txt' % sample_id)
        roi_obj_list = kitti_utils.get_objects_from_label(proposal_file)

        rpn_xyz, rpn_features, rpn_intensity, seg_mask = self.get_rpn_features(self.rcnn_eval_feature_dir, sample_id)
        pts_rect, pts_rpn_features, pts_intensity = rpn_xyz, rpn_features, rpn_intensity

        roi_box3d_list, roi_scores = [], []
        for obj in roi_obj_list:
            box3d = np.array([obj.pos[0], obj.pos[1], obj.pos[2], obj.h, obj.w, obj.l, obj.ry], dtype=np.float32)
            roi_box3d_list.append(box3d.reshape(1, 7))
            roi_scores.append(obj.score)

        roi_boxes3d = np.concatenate(roi_box3d_list, axis=0)  # (N, 7)
        roi_scores = np.array(roi_scores, dtype=np.float32)  # (N)

        if cfg.RCNN.ROI_SAMPLE_JIT:
            sample_dict = {'sample_id': sample_id,
                           'rpn_xyz': rpn_xyz,
                           'rpn_features': rpn_features,
                           'seg_mask': seg_mask,
                           'roi_boxes3d': roi_boxes3d,
                           'roi_scores': roi_scores,
                           'pts_depth': np.linalg.norm(rpn_xyz, ord=2, axis=1)}

            if self.mode != 'TEST':
                gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
                gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

                if gt_boxes3d.shape[0] > 0:
                    roi_corners = kitti_utils.boxes3d_to_corners3d(roi_boxes3d)
                    gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d)
                    iou3d = kitti_utils.get_iou3d(roi_corners, gt_corners)
                    gt_iou = iou3d.max(axis=1)
                else:
                    gt_iou = np.zeros(roi_boxes3d.shape[0], dtype=np.float32)

                sample_dict['gt_boxes3d'] = gt_boxes3d
                sample_dict['gt_iou'] = gt_iou
            return sample_dict

        if cfg.RCNN.USE_INTENSITY:
            pts_extra_input_list = [pts_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
        else:
            pts_extra_input_list = [seg_mask.reshape(-1, 1)]

        if cfg.RCNN.USE_DEPTH:
            cur_depth = np.linalg.norm(pts_rect, axis=1, ord=2)
            cur_depth_norm = (cur_depth / 70.0) - 0.5
            pts_extra_input_list.append(cur_depth_norm.reshape(-1, 1))

        pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)
        pts_input, pts_features = roipool3d_utils.roipool3d_cpu(roi_boxes3d, pts_rect, pts_rpn_features,
                                                                pts_extra_input, cfg.RCNN.POOL_EXTRA_WIDTH,
                                                                sampled_pt_num=cfg.RCNN.NUM_POINTS)

        sample_dict = {'sample_id': sample_id,
                       'pts_input': pts_input,
                       'pts_features': pts_features,
                       'roi_boxes3d': roi_boxes3d,
                       'roi_scores': roi_scores,
                       'roi_size': roi_boxes3d[:, 3:6]}

        if self.mode == 'TEST':
            return sample_dict

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = np.zeros((gt_obj_list.__len__(), 7), dtype=np.float32)

        for k, obj in enumerate(gt_obj_list):
            gt_boxes3d[k, 0:3], gt_boxes3d[k, 3], gt_boxes3d[k, 4], gt_boxes3d[k, 5], gt_boxes3d[k, 6] \
                = obj.pos, obj.h, obj.w, obj.l, obj.ry

        if gt_boxes3d.__len__() == 0:
            gt_iou = np.zeros((roi_boxes3d.shape[0]), dtype=np.float32)
        else:
            roi_corners = kitti_utils.boxes3d_to_corners3d(roi_boxes3d)
            gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d)
            iou3d = kitti_utils.get_iou3d(roi_corners, gt_corners)
            gt_iou = iou3d.max(axis=1)
        sample_dict['gt_boxes3d'] = gt_boxes3d
        sample_dict['gt_iou'] = gt_iou

        return sample_dict
Example #16
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores,
                      img_shape):
    corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
    img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)

    # frame number cumsum for kitti_tracking dataset, from 0000 to 0020
    img_count_cumsum = [
        154, 597, 830, 974, 1288, 1585, 1855, 2655, 3045, 3848, 4142, 4515,
        4593, 4933, 5039, 5415, 5624, 5769, 6108, 7167, 8004
    ]

    def convert_idx(idx, image_count_cumsum):
        for i in range(len(image_count_cumsum)):
            if idx >= image_count_cumsum[i]:
                continue
            # first interval whose cumulative count exceeds idx
            frame_id = idx if i == 0 else idx - image_count_cumsum[i - 1]
            return i, frame_id
        raise ValueError('sample idx out of range of img_count_cumsum')

    video_id, frame_id = convert_idx(sample_id, img_count_cumsum)

    img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
    img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
    img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
    img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)

    img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
    img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
    box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8,
                                    img_boxes_h < img_shape[0] * 0.8)

    # kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
    kitti_output_file = os.path.join(kitti_output_dir, '%04d.txt' % video_id)
    # with open(kitti_output_file, 'w') as f:
    with open(kitti_output_file, 'a') as f:
        for k in range(bbox3d.shape[0]):
            if box_valid_mask[k] == 0:
                continue
            x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
            beta = np.arctan2(z, x)
            alpha = -np.sign(beta) * np.pi / 2 + beta + ry

            # print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
            #       (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
            #        bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
            #        bbox3d[k, 6], scores[k]), file=f)

            print(
                '%d,2,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f'
                % (frame_id, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2],
                   img_boxes[k, 3], scores[k], bbox3d[k, 3], bbox3d[k, 4],
                   bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
                   bbox3d[k, 6], alpha),
                file=f)
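# The nested convert_idx above is a linear scan over the cumulative frame
# counts; an equivalent using the standard-library bisect module (a sketch,
# assuming idx stays below the final cumulative count):
import bisect

def convert_idx_bisect(idx, image_count_cumsum):
    video_id = bisect.bisect_right(image_count_cumsum, idx)  # first count > idx
    frame_id = idx if video_id == 0 else idx - image_count_cumsum[video_id - 1]
    return video_id, frame_id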
Example #17
def eval_one_epoch_joint(model, dataloader, epoch_id, result_dir, logger):
    np.random.seed(666)
    MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
    mode = 'TEST' if args.test else 'EVAL'

    final_output_dir = os.path.join(result_dir, 'final_result', 'data')

    if os.path.exists(final_output_dir): shutil.rmtree(final_output_dir)
    os.makedirs(final_output_dir, exist_ok=True)

    logger.info('---- EPOCH %s JOINT EVALUATION ----' % epoch_id)
    logger.info('==> Output file: %s' % result_dir)
    model.eval()

    thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
    total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
    total_roi_recalled_bbox_list = [0] * 5
    dataset = dataloader.dataset
    cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
    obj_num = 0
    progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')

    iou_list = []
    iou_p_score_list = []
    rcnn_p_score_list = []
    prop_count = 0
    for data in dataloader:

        # Loading sample
        sample_id_list, pts_input = data['sample_id'], data['pts_input']
        sample_id = sample_id_list[0]
        cnt += len(sample_id_list)
        # load labels
        if not args.test:
            gt_boxes3d = data['gt_boxes3d']  # (B, M, 7)
            obj_num += gt_boxes3d.shape[1]

        # rpn model inference
        inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        #inputs = inputs[:,torch.argsort(-inputs[0,:,2])]
        input_data = {'pts_input': inputs}
        ret_dict = model.rpn_forward(input_data)
        rpn_cls, rpn_reg = ret_dict['rpn_cls'], ret_dict['rpn_reg']
        rpn_backbone_xyz, rpn_backbone_features = ret_dict[
            'backbone_xyz'], ret_dict['backbone_features']

        # stage score parsing
        rpn_scores_raw = rpn_cls[:, :, 0]
        rpn_scores_norm = torch.sigmoid(rpn_cls[:, :, 0])
        rcnn_input_scores = rpn_scores_norm.view(-1).clone()
        inputs = inputs.view(-1, inputs.shape[-1])
        rpn_backbone_features = rpn_backbone_features.view(
            -1, rpn_backbone_features.shape[-2])
        rpn_backbone_xyz = rpn_backbone_xyz.view(-1,
                                                 rpn_backbone_xyz.shape[-1])

        # if VISUAL:
        #     order = torch.argsort(-rpn_scores_norm).view(-1)
        #     inputs = inputs.view(-1,inputs.shape[-1])[order]
        #     rpn_scores_norm = rpn_scores_norm.view(-1)[order]
        #     rpn_backbone_features = rpn_backbone_features.view(-1,rpn_backbone_features.shape[-1])[order]
        #
        #     norm_feature = F.normalize(rpn_backbone_features)
        #     similarity = norm_feature.mm(norm_feature.t())
        #
        #     inputs_plt = inputs.detach().cpu().numpy()
        #     scores_plt = rpn_scores_norm.detach().cpu().numpy()
        #     similarity_plt = similarity.detach().cpu().numpy()
        #
        #
        #     fig = plt.figure(figsize=(10, 10))
        #     plt.axes(facecolor='silver')
        #     plt.axis([-30,30,0,70])
        #     plt.title('point_regressed_center %06d'%sample_id)
        #     plt.scatter(inputs_plt[:, 0], inputs_plt[:, 2], s=15, c=scores_plt[:], edgecolor='none',
        #                 cmap=plt.get_cmap('rainbow'), alpha=1, marker='.', vmin=0, vmax=1)
        #     if args.test==False:
        #         gt_boxes3d = gt_boxes3d.reshape(-1,7)
        #         plt.scatter(gt_boxes3d[:, 0], gt_boxes3d[:, 2], s=200, c='blue',
        #                     alpha=0.5, marker='+', vmin=-1, vmax=1)
        #     plt.show()
        #
        #     for i in range(similarity_plt.shape[0]):
        #         fig = plt.figure(figsize=(10, 10))
        #         plt.axes(facecolor='silver')
        #         plt.axis([-30, 30, 0, 70])
        #         sm_plt = similarity_plt[i]
        #         plt.scatter(inputs_plt[i, 0].reshape(-1), inputs_plt[i, 2].reshape(-1), s=400, c='blue',
        #                     alpha=0.5, marker='+', vmin=-1, vmax=1)
        #         plt.scatter(inputs_plt[:, 0], inputs_plt[:, 2], s=15, c=(sm_plt[:]+scores_plt[:])/2, edgecolor='none',
        #                     cmap=plt.get_cmap('rainbow'), alpha=1, marker='.', vmin=0, vmax=1)
        #         plt.show()

        # thresh select and jump out
        # rpn_mask = rpn_scores_norm.view(-1) > cfg.RPN.SCORE_THRESH
        # if rpn_mask.float().sum() == 0: continue
        # rpn_scores_raw = rpn_scores_raw.view(-1)[rpn_mask]
        # rpn_scores_norm = rpn_scores_norm.view(-1)[rpn_mask]
        # rpn_reg = rpn_reg.view(-1, rpn_reg.shape[-1])[rpn_mask]
        # rpn_backbone_xyz = rpn_backbone_xyz.view(-1, rpn_backbone_xyz.shape[-1])[rpn_mask]

        # generate rois

        rpn_rois = decode_center_target(
            rpn_backbone_xyz,
            rpn_reg.view(-1, rpn_reg.shape[-1]),
            loc_scope=cfg.RPN.LOC_SCOPE,
            loc_bin_size=cfg.RPN.LOC_BIN_SIZE,
        ).view(-1, 3)
        rpn_reg_dist = (rpn_rois - rpn_backbone_xyz).clone()
        #similarity = torch.cosine_similarity(rpn_backbone_xyz[:, [0, 2]], rpn_reg_dist[:, [0, 2]], dim=1)

        # # thresh select and jump out
        rpn_mask = (rpn_scores_norm.view(-1) > cfg.RPN.SCORE_THRESH) & (
            rpn_reg_dist[:, [0, 2]].pow(2).sum(-1).sqrt() > 0.2)  #\
        #& (similarity > -0.7)
        if rpn_mask.float().sum() == 0: continue
        rpn_scores_raw = rpn_scores_raw.view(-1)[rpn_mask]
        rpn_scores_norm = rpn_scores_norm.view(-1)[rpn_mask]
        rpn_rois = rpn_rois[rpn_mask]
        rpn_backbone_xyz = rpn_backbone_xyz.view(
            -1, rpn_backbone_xyz.shape[-1])[rpn_mask]

        # radius NMS
        # sort by center score
        sort_points = torch.argsort(-rpn_scores_raw)
        rpn_rois = rpn_rois[sort_points]
        rpn_scores_norm = rpn_scores_norm[sort_points]
        rpn_scores_raw = rpn_scores_raw[sort_points]

        if rpn_rois.shape[0] > 1:
            keep_id = [0]
            prop_prop_distance = distance_2(rpn_rois[:, [0, 2]],
                                            rpn_rois[:, [0, 2]])
            for i in range(1, rpn_rois.shape[0]):
                #if torch.min(prop_prop_distance[:i, i], dim=-1)[0] > 0.3:
                if torch.min(prop_prop_distance[keep_id, i], dim=-1)[0] > 0.3:
                    keep_id.append(i)
            rpn_center = rpn_rois[keep_id][:, [0, 2]]
            rpn_scores_norm = rpn_scores_norm[keep_id]
            rpn_scores_raw = rpn_scores_raw[keep_id]

        else:
            rpn_center = rpn_rois[:, [0, 2]]

        # #rcnn input select:
        point_center_distance = distance_2(rpn_center, inputs[:, [0, 2]])
        cur_proposal_points_index = (torch.min(point_center_distance,
                                               dim=-1)[0] < 4.0)

        point_center_distance = point_center_distance[
            cur_proposal_points_index]
        inputs = inputs[cur_proposal_points_index]
        rcnn_input_scores = rcnn_input_scores.view(
            -1)[cur_proposal_points_index]

        if VISUAL:
            inputs_plt = inputs.detach().cpu().numpy()
            scores_plt = rcnn_input_scores.detach().cpu().numpy()
            # point_center= rpn_center[rpn_scores_norm > 0.5]
            # point_center_score = rpn_scores_norm[rpn_scores_norm > 0.5]
            point_center = rpn_center
            point_center_score = rpn_scores_norm
            fig = plt.figure(figsize=(10, 10))
            plt.axes(facecolor='silver')
            plt.axis([-30, 30, 0, 70])
            point_center_plt = point_center.cpu().numpy()
            plt.title('point_regressed_center %06d' % sample_id)
            plt.scatter(inputs_plt[:, 0],
                        inputs_plt[:, 2],
                        s=15,
                        c=scores_plt[:],
                        edgecolor='none',
                        cmap=plt.get_cmap('rainbow'),
                        alpha=1,
                        marker='.',
                        vmin=0,
                        vmax=1)
            if point_center.shape[0] > 0:
                plt.scatter(point_center_plt[:, 0],
                            point_center_plt[:, 1],
                            s=200,
                            c='white',
                            alpha=0.5,
                            marker='x',
                            vmin=-1,
                            vmax=1)
            if not args.test:
                gt_boxes3d = gt_boxes3d.reshape(-1, 7)
                plt.scatter(gt_boxes3d[:, 0],
                            gt_boxes3d[:, 2],
                            s=200,
                            c='blue',
                            alpha=0.5,
                            marker='+',
                            vmin=-1,
                            vmax=1)
            plt.savefig('../visual/rpn.jpg')

        # RCNN stage
        box_list = []
        raw_score_list = []
        iou_score_list = []
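        # shift points down by the assumed camera height above ground (1.65 m in KITTI);
        # the same offset is added back to the predicted boxes below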
        inputs[:, 1] -= 1.65
        point_center_distance = distance_2(rpn_center[:, :], inputs[:, [0, 2]])
        #for c in range(min(rpn_center.shape[0],100)):
        prop_count += rpn_center.shape[0]
        print('avg proposals per sample: %.1f' % (prop_count / float(cnt)))
        for c in range(rpn_center.shape[0]):
            # rcnn input generate
            cur_input = inputs.clone()
            cur_input_score = rcnn_input_scores.clone()

            # if COSINE_DISTANCE:
            #     cur_center_points_index = ((point_center_distance[:, c] < 4.0) & \
            #                                (point_prop_cos_matrix[:, c] > COS_THRESH) | \
            #                                (point_center_distance[:, c].view(-1) < 0.7)).view(-1)
            # else:
            cur_center_points_index = (point_center_distance[:, c] <
                                       4.0).view(-1)
            if cur_center_points_index.long().sum() == 0: continue

            cur_center_points_xyz = cur_input[cur_center_points_index, :3]
            cur_center_points_xyz[:, 0] -= rpn_center[c, 0]
            cur_center_points_xyz[:, 2] -= rpn_center[c, 1]
            cur_center_points_r = cur_input[cur_center_points_index,
                                            3].view(-1, 1)
            cur_center_points_mask = (cur_input_score[cur_center_points_index]
                                      > 0.5).view(-1, 1).float()

            # # easy sample sampling
            # if pts_input.shape[0]>512:
            #     cur_input = torch.cat((cur_center_points_xyz, cur_center_points_r,
            #                            (cur_input_score[cur_center_points_index] > 0.5).view(-1, 1).float()), dim=-1)
            #     pts_input = cur_input
            #     pts_input = pts_input[:min(pts_input.shape[0], 2000), :]
            #     pts_input = pts_input[:, :]
            #     sample_index = fps(pts_input[:, 0:3].contiguous(), ratio=min(512 / pts_input.shape[0], 0.99),
            #                        random_start=False)
            #     perm = sample_index
            #     while sample_index.shape[0] < 512:
            #         sample_index = torch.cat(
            #             (sample_index, perm[:min(perm.shape[0], 512 - sample_index.shape[0])]), dim=0)
            #
            #     cur_center_points_xyz = pts_input[sample_index, 0:3]
            #     cur_center_points_r = pts_input[sample_index, 3].reshape(-1, 1)
            #     cur_center_points_mask = pts_input[sample_index, 4].reshape(-1, 1)

            cur_center_points_xyz = cur_center_points_xyz.unsqueeze(0).float()
            cur_center_points_r = cur_center_points_r.unsqueeze(0).float()
            cur_center_points_mask = cur_center_points_mask.unsqueeze(
                0).float() - 0.5

            input_data = {
                'cur_box_point': cur_center_points_xyz,
                'cur_box_reflect': cur_center_points_r,
                'train_mask': cur_center_points_mask,
            }

            # # globaly random sampling
            # pts_input = pts_input[:min(pts_input.shape[0], self.npoints), :]
            # sample_index = np.arange(0, pts_input.shape[0], 1).astype(np.int)
            # perm = np.copy(sample_index)
            # while sample_index.shape[0] < self.npoints:
            #     sample_index = np.concatenate(
            #         (sample_index, perm[:min(perm.shape[0], self.npoints - sample_index.shape[0])]))
            #
            # cur_box_point = pts_input[sample_index, 0:3]
            # cur_box_reflect = pts_input[sample_index, 3].reshape(-1, 1)
            # cur_prob_mask = pts_input[sample_index, 4].reshape(-1, 1)
            # gt_mask = pts_input[sample_index, 5].reshape(-1, 1)

            # rcnn model inference
            ret_dict = model.rcnn_forward(input_data)
            rcnn_cls = ret_dict['rcnn_cls']
            ioun_cls = ret_dict['ioun_cls']
            rcnn_reg = ret_dict['rcnn_reg']
            rcnn_iou = ret_dict['rcnn_iou']
            rcnn_ref = ret_dict['rcnn_ref'].view(1, 1, -1)
            rcnn_box3d = ret_dict['pred_boxes3d']
            refined_box = ret_dict['refined_box']

            rcnn_box3d = refined_box
            rcnn_box3d[:, :, 6] = rcnn_box3d[:, :, 6] % (np.pi * 2)
            if rcnn_box3d[:, :, 6] > np.pi: rcnn_box3d[:, :, 6] -= np.pi * 2

            rcnn_box3d[:, :, 0] += rpn_center[c][0]
            rcnn_box3d[:, :, 2] += rpn_center[c][1]
            rcnn_box3d[:, :, 1] += 1.65

            box_list.append(rcnn_box3d)

            raw_score_list.append(rcnn_cls.view(1, 1))
            #raw_score_list.append(ioun_cls.view(1,1))

            iou_score_list.append(rcnn_iou.view(1, 1))

        rcnn_box3d = torch.cat((box_list), dim=1)
        raw_rcnn_score = torch.cat((raw_score_list),
                                   dim=0).unsqueeze(0).float()
        norm_ioun_score = torch.cat((iou_score_list),
                                    dim=0).unsqueeze(0).float()

        # scoring
        pred_boxes3d = rcnn_box3d
        norm_rcnn_score = torch.sigmoid(raw_rcnn_score)

        # scores thresh
        pred_h = pred_boxes3d[:, :, 3].view(-1)
        pred_w = pred_boxes3d[:, :, 4].view(-1)
        pred_l = pred_boxes3d[:, :, 5].view(-1)
        inds = (norm_rcnn_score > cfg.RCNN.SCORE_THRESH) & (
            norm_ioun_score > cfg.IOUN.SCORE_THRESH)
        inds = inds.view(-1)
        # size filter
        # inds = inds & \
        #         (pred_h > 1.2) & (pred_h < 2.2) & \
        #         (pred_w > 1.3) & (pred_w < 2.0) & \
        #         (pred_l > 2.2) & (pred_l < 5.0)
        inds = inds & \
                (pred_h > 1.1) & (pred_h < 2.3) & \
                (pred_w > 1.2) & (pred_w < 2.1) & \
                (pred_l > 2.1) & (pred_l < 5.1)

        pred_boxes3d = pred_boxes3d[:, inds]
        norm_rcnn_score = norm_rcnn_score[:, inds]
        norm_ioun_score = norm_ioun_score[:, inds]
        raw_rcnn_score = raw_rcnn_score[:, inds]

        if pred_boxes3d.shape[1] == 0: continue
        # evaluation
        recalled_num = gt_num = 0

        if not args.test:
            gt_boxes3d = data['gt_boxes3d']

            for k in range(1):
                # calculate recall
                cur_gt_boxes3d = gt_boxes3d[k]
                tmp_idx = cur_gt_boxes3d.__len__() - 1

                while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
                    tmp_idx -= 1

                if tmp_idx >= 0:
                    cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]

                    cur_gt_boxes3d = torch.from_numpy(cur_gt_boxes3d).cuda(
                        non_blocking=True).float()
                    _, iou3d = iou3d_utils.boxes_iou3d_gpu(
                        pred_boxes3d[k], cur_gt_boxes3d)
                    gt_max_iou, _ = iou3d.max(dim=0)
                    refined_iou, _ = iou3d.max(dim=1)

                    iou_list.append(refined_iou.view(-1, 1))
                    iou_p_score_list.append(norm_ioun_score.view(-1, 1))
                    rcnn_p_score_list.append(norm_rcnn_score.view(-1, 1))

                    for idx, thresh in enumerate(thresh_list):
                        total_recalled_bbox_list[idx] += (gt_max_iou >
                                                          thresh).sum().item()
                    recalled_num += (gt_max_iou > 0.7).sum().item()
                    gt_num += cur_gt_boxes3d.shape[0]
                    total_gt_bbox += cur_gt_boxes3d.shape[0]

        if cnt == 1000:
            iou_clloe = torch.cat(iou_list, dim=0).detach().cpu().numpy()
            iou_score_clloe = torch.cat(iou_p_score_list,
                                        dim=0).detach().cpu().numpy()
            plt.axis([-.1, 1.1, -.1, 1.1])
            plt.scatter(iou_clloe,
                        iou_score_clloe,
                        s=20,
                        c='blue',
                        edgecolor='none',
                        cmap=plt.get_cmap('YlOrRd'),
                        alpha=1,
                        marker='.')
            plt.savefig(os.path.join(result_dir, 'distributercnn.png'))

        disp_dict = {
            'mode': mode,
            'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)
        }
        progress_bar.set_postfix(disp_dict)
        progress_bar.update()

        if VISUAL:
            fig, ax = plt.subplots(figsize=(10, 10))
            inputs_plt = inputs.detach().cpu().numpy()
            plt.axis([-35, 35, 0, 70])
            plt.scatter(inputs_plt[:, 0],
                        inputs_plt[:, 2],
                        s=15,
                        c=inputs_plt[:, 1],
                        edgecolor='none',
                        cmap=plt.get_cmap('Blues'),
                        alpha=1,
                        marker='.',
                        vmin=-1,
                        vmax=2)
            pred_boxes3d_numpy = pred_boxes3d[0].detach().cpu().numpy()
            pred_boxes3d_corner = kitti_utils.boxes3d_to_corners3d(
                pred_boxes3d_numpy, rotate=True)
            for o in range(pred_boxes3d_corner.shape[0]):
                # connect the first four corners in the x-z plane to draw the
                # box footprint; the first edge green, the remaining three red
                xs = pred_boxes3d_corner[o, 0:4, 0]
                zs = pred_boxes3d_corner[o, 0:4, 2]
                for i in range(4):
                    j = (i + 1) % 4
                    ax.add_line(
                        Line2D((xs[i], xs[j]), (zs[i], zs[j]),
                               linewidth=1,
                               color='green' if i == 0 else 'red'))

            # overlay ground-truth boxes: first edge yellow, the rest purple
            if not args.test and data['gt_boxes3d'].shape[1] > 0:
                gt_boxes3d_corner = kitti_utils.boxes3d_to_corners3d(
                    data['gt_boxes3d'].reshape(-1, 7), rotate=True)
                for o in range(gt_boxes3d_corner.shape[0]):
                    xs = gt_boxes3d_corner[o, 0:4, 0]
                    zs = gt_boxes3d_corner[o, 0:4, 2]
                    for i in range(4):
                        j = (i + 1) % 4
                        ax.add_line(
                            Line2D((xs[i], xs[j]), (zs[i], zs[j]),
                                   linewidth=1,
                                   color='yellow' if i == 0 else 'purple'))
            plt.savefig('../visual/rcnn.jpg')

        # score thresholding: keep boxes that clear both the RCNN
        # classification confidence and the predicted-IoU confidence
        inds = (norm_rcnn_score > cfg.RCNN.SCORE_THRESH) & (
            norm_ioun_score > cfg.IOUN.SCORE_THRESH)
        # alternative: inds = (norm_ioun_score > cfg.IOUN.SCORE_THRESH)

        for k in range(1):  # process only the first (and only) sample in the batch
            cur_inds = inds[k].view(-1)
            if cur_inds.sum() == 0:
                continue

            pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
            norm_iou_scores_selected = norm_ioun_score[k, cur_inds]
            raw_rcnn_score_selected = raw_rcnn_score[k, cur_inds]

            # alternative: rotated BEV NMS via nms_normal_gpu (disabled in
            # favor of the greedy self-NMS below)
            # boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
            # # boxes_bev_selected[:, -1] += np.pi / 2
            # keep_idx = iou3d_utils.nms_normal_gpu(boxes_bev_selected, norm_iou_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
            # pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
            # norm_iou_scores_selected = norm_iou_scores_selected[keep_idx]
            # raw_rcnn_score_selected = raw_rcnn_score_selected[keep_idx]

            # greedy self-NMS: sort by predicted IoU score, then keep a box
            # only if its BEV IoU with every already-kept box is below 0.01
            sort_boxes = torch.argsort(-norm_iou_scores_selected.view(-1))
            pred_boxes3d_selected = pred_boxes3d_selected[sort_boxes]
            norm_iou_scores_selected = norm_iou_scores_selected[sort_boxes]

            if pred_boxes3d_selected.shape[0] > 1:
                keep_id = [0]
                iou2d, iou3d = iou3d_utils.boxes_iou3d_gpu(
                    pred_boxes3d_selected, pred_boxes3d_selected)
                for i in range(1, pred_boxes3d_selected.shape[0]):
                    # alternative gating by proposal center distance:
                    # if torch.min(prop_prop_distance[:i, i], dim=-1)[0] > 0.3:
                    if torch.max(iou2d[keep_id, i], dim=-1)[0] < 0.01:
                        keep_id.append(i)
                pred_boxes3d_selected = pred_boxes3d_selected[keep_id]
                norm_iou_scores_selected = norm_iou_scores_selected[keep_id]

            pred_boxes3d_selected = pred_boxes3d_selected.cpu().numpy()
            norm_iou_scores_selected = norm_iou_scores_selected.cpu().numpy()

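            # write this frame's surviving detections in the KITTI label format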
            cur_sample_id = sample_id
            calib = dataset.get_calib(cur_sample_id)
            final_total += pred_boxes3d_selected.shape[0]
            image_shape = dataset.get_image_shape(cur_sample_id)
            save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,
                              final_output_dir, norm_iou_scores_selected,
                              image_shape)

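            # same BEV debug view as above, but showing only the detections
            # that survived score thresholding and NMS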
            if VISUAL:
                fig, ax = plt.subplots(figsize=(10, 10))
                inputs_plt = inputs.detach().cpu().numpy()
                plt.axis([-35, 35, 0, 70])
                plt.scatter(inputs_plt[:, 0],
                            inputs_plt[:, 2],
                            s=15,
                            c=inputs_plt[:, 1],
                            edgecolor='none',
                            cmap=plt.get_cmap('Blues'),
                            alpha=1,
                            marker='.',
                            vmin=-1,
                            vmax=2)
                # pred_boxes3d_selected is already a NumPy array at this point
                pred_boxes3d_corner = kitti_utils.boxes3d_to_corners3d(
                    pred_boxes3d_selected, rotate=True)
                for o in range(pred_boxes3d_corner.shape[0]):
                    xs = pred_boxes3d_corner[o, 0:4, 0]
                    zs = pred_boxes3d_corner[o, 0:4, 2]
                    for i in range(4):
                        j = (i + 1) % 4
                        ax.add_line(
                            Line2D((xs[i], xs[j]), (zs[i], zs[j]),
                                   linewidth=1,
                                   color='green' if i == 0 else 'red'))

                # overlay ground-truth boxes
                if not args.test and data['gt_boxes3d'].shape[1] > 0:
                    gt_boxes3d_corner = kitti_utils.boxes3d_to_corners3d(
                        data['gt_boxes3d'].reshape(-1, 7), rotate=True)
                    for o in range(gt_boxes3d_corner.shape[0]):
                        xs = gt_boxes3d_corner[o, 0:4, 0]
                        zs = gt_boxes3d_corner[o, 0:4, 2]
                        for i in range(4):
                            j = (i + 1) % 4
                            ax.add_line(
                                Line2D((xs[i], xs[j]), (zs[i], zs[j]),
                                       linewidth=1,
                                       color='yellow' if i == 0 else 'purple'))
                plt.savefig('../visual/ioun.jpg')

    progress_bar.close()
    # the KITTI evaluation expects one result file per frame, so dump an
    # empty file for every frame that produced no detections
    split_file = os.path.join(dataset.imageset_dir, '..', 'ImageSets',
                              dataset.split + '.txt')
    split_file = os.path.abspath(split_file)
    with open(split_file) as f:
        image_idx_list = [x.strip() for x in f.readlines()]
    empty_cnt = 0
    for idx_str in image_idx_list:
        cur_file = os.path.join(final_output_dir, '%s.txt' % idx_str)
        if not os.path.exists(cur_file):
            with open(cur_file, 'w'):
                pass
            empty_cnt += 1
            logger.info('empty_cnt=%d: dump empty file %s' %
                        (empty_cnt, cur_file))

    ret_dict = {'empty_cnt': empty_cnt}

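    # per-epoch summary statistics (skipped when args.eval_all is set)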
    if not args.eval_all:
        logger.info(
            '-------------------performance of epoch %s---------------------' %
            epoch_id)
        logger.info(str(datetime.now()))

        avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
        avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
        avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
        avg_det_num = (final_total / max(len(dataset), 1.0))
        logger.info('final average detections: %.3f' % avg_det_num)
        logger.info('final average rpn_iou refined: %.3f' % avg_rpn_iou)
        logger.info('final average cls acc: %.3f' % avg_cls_acc)
        logger.info('final average cls acc refined: %.3f' %
                    avg_cls_acc_refined)
        ret_dict['rpn_iou'] = avg_rpn_iou
        ret_dict['rcnn_cls_acc'] = avg_cls_acc
        ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
        ret_dict['rcnn_avg_num'] = avg_det_num

        for idx, thresh in enumerate(thresh_list):
            cur_recall = total_recalled_bbox_list[idx] / max(
                total_gt_bbox, 1.0)
            logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' %
                        (thresh, total_recalled_bbox_list[idx], total_gt_bbox,
                         cur_recall))
            ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall

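    # run the official KITTI average-precision evaluation on the dumped files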
    if cfg.TEST.SPLIT != 'test':
        logger.info('Average Precision:')
        name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
        ap_result_str, ap_dict = kitti_evaluate(
            dataset.label_dir,
            final_output_dir,
            label_split_file=split_file,
            current_class=name_to_class[cfg.CLASSES])
        if not args.eval_all:
            logger.info(ap_result_str)
            ret_dict.update(ap_dict)

    logger.info('result is saved to: %s' % result_dir)
    # note: ap_dict is only defined when cfg.TEST.SPLIT != 'test'; this
    # "precision" is the sum of the easy/moderate/hard Car 3D AP values
    precision = (ap_dict['Car_3d_easy'] + ap_dict['Car_3d_moderate'] +
                 ap_dict['Car_3d_hard'])
    recall = total_recalled_bbox_list[3] / max(total_gt_bbox, 1.0)
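    # F2_score below is a placeholder that is never computed; a sketch of a
    # real F-beta score (beta = 2), assuming precision and recall were both
    # on a [0, 1] scale, would be:
    #   F2_score = 5 * precision * recall / (4 * precision + recall)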
    F2_score = 0
    return precision, recall, F2_score