Example #1
def evaluate(label_path,
             result_path,
             save_path,
             current_class=['Car', 'Pedestrian', 'Cyclist'],
             coco=False,
             score_thresh=-1):
    class_to_name = {0: 'Car',
                     1: 'Pedestrian',
                     2: 'Cyclist',
                     3: 'DontCare'}

    gt_annos = kitti.get_label_annos(label_path)
    dt_annos = kitti.get_label_annos(result_path)
    # visualize(gt_annos, dt_annos)
    print(len(dt_annos))
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)

    if coco:
        print(get_coco_eval_result(gt_annos, dt_annos, current_class))  # compare GT against detections, not detections against themselves
    else:
        result_str, _ = get_official_eval_result(gt_annos, dt_annos, current_class, class_to_name)
        print(result_str)
        with open(save_path, 'w+') as f:
            f.write("\n")
            f.write(result_str)
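A minimal invocation sketch for the snippet above; the paths are hypothetical placeholders, and kitti is assumed to be the kitti_common module from kitti-object-eval-python:

# Hypothetical paths; adjust to your KITTI-style layout.
evaluate(label_path='data/kitti/training/label_2',
         result_path='output/detections',
         save_path='output/eval_result.txt',
         score_thresh=0.3)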
Example #2
def evaluate(label_path,
             result_path,
             current_class=0,
             coco=False,
             score_thresh=-1,
             eval_dist=None):
    dt_annos, image_ids = kitti.get_label_annos(result_path,
                                                return_image_ids=True,
                                                eval_dist=eval_dist)
    print('Eval {} images'.format(len(dt_annos)))
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    #val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = kitti.get_label_annos(label_path,
                                     image_ids,
                                     eval_dist=eval_dist)

    # Playground
    # print(gt_annos[48].keys())
    # print(gt_annos[48]["name"])
    # print(gt_annos[48]["dimensions"])
    # print(gt_annos[48]["location"])
    # Playground end
    if coco:
        print(get_coco_eval_result(gt_annos, dt_annos, current_class))
    else:
        print(get_official_eval_result(gt_annos, dt_annos, current_class))
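Unlike Example #1, this variant recovers the image ids from the result directory itself, and eval_dist (if set) presumably restricts evaluation to objects within a given range; a hypothetical call:

# Hypothetical: evaluate the Car class (index 0) within 40 m only.
evaluate(label_path='data/kitti/training/label_2',
         result_path='output/detections',
         current_class=0,
         eval_dist=40)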
Example #3
def analyze(front_path, frontleft_path, left_path, save_path):

    gt_front = kitti.get_label_annos(front_path)
    gt_frontleft = kitti.get_label_annos(frontleft_path)
    gt_left = kitti.get_label_annos(left_path)
    gt_dataset = [gt_front, gt_frontleft, gt_left]

    annos_name = ["Front", "FrontLeft", "Left"]
    split = "left_pred_"
    for i in range(0, 3):
        count = {'Car': 0, 'Pedestrian': 0, 'Cyclist': 0, 'DontCare': 0}
        ranges = {'Car': [], 'Pedestrian': [], 'Cyclist': [], 'DontCare': []}
        rot_y = {'Car': [], 'Pedestrian': [], 'Cyclist': [], 'DontCare': []}
        for frame in gt_dataset[i]:
            for j in range(0, len(frame['name'])):
                cls = frame['name'][j]
                if cls not in count:  # skip classes outside the four tracked categories (e.g. Van, Truck)
                    continue
                distance = frame['location'][j][2]
                ranges[cls].append(distance)
                ry = frame['rotation_y'][j] * (180 / np.pi)
                rot_y[cls].append(ry)
                count[cls] += 1
        print("Count of classes: " + str(count) + '\n')
        with open(save_path + split + "classes.txt", 'a+') as f:
            f.write(annos_name[i] + ' ' + str(count) + '\n')
        sns_plot = sns.distplot(ranges["Car"], color="skyblue", label="Car").set_title(
            annos_name[i] + " Car Distances")
        fig = sns_plot.get_figure()
        fig.savefig("output/eval/dataset_vis/" + split + annos_name[i] + "_car.png")
        plt.clf()

        sns_plot_ped = sns.distplot(ranges["Pedestrian"], color="red", label="Pedestrian").set_title(
            annos_name[i] + " Pedestrian Distances")
        fig_ped = sns_plot_ped.get_figure()
        fig_ped.savefig("output/eval/dataset_vis/" + split + annos_name[i] + "_ped.png")
        plt.clf()

        sns_plot_cyc = sns.distplot(ranges["Cyclist"], color="teal", label="Cyclist").set_title(
            annos_name[i] + " Cyclist Distances")
        fig_cyc = sns_plot_cyc.get_figure()
        fig_cyc.savefig("output/eval/dataset_vis/" + split + annos_name[i] + "_cyc.png")
        plt.clf()

        sns_plot = sns.distplot(rot_y["Car"], color="skyblue", label="Car").set_title(annos_name[i] + " Car Rotation")
        fig = sns_plot.get_figure()
        fig.savefig("output/eval/dataset_vis/" + split + "ry_" + annos_name[i] + "_car.png")
        plt.clf()

        sns_plot_ped = sns.distplot(rot_y["Pedestrian"], color="red", label="Pedestrian").set_title(
            annos_name[i] + " Pedestrian Rotation")
        fig_ped = sns_plot_ped.get_figure()
        fig_ped.savefig("output/eval/dataset_vis/" + split + "ry_" + annos_name[i] + "_ped.png")
        plt.clf()

        sns_plot_cyc = sns.distplot(rot_y["Cyclist"], color="teal", label="Cyclist").set_title(
            annos_name[i] + " Cyclist Rotation")
        fig_cyc = sns_plot_cyc.get_figure()
        fig_cyc.savefig("output/eval/dataset_vis/" + split + "ry_" + annos_name[i] + "_cyc.png")
        plt.clf()
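The loops above assume each annotation dict holds parallel per-object arrays under 'name', 'location', and 'rotation_y'; a minimal hand-built frame satisfying those assumptions would look like:

import numpy as np

# Hypothetical single-frame annotation with the keys used above.
frame = {
    'name': np.array(['Car', 'Pedestrian']),
    'location': np.array([[1.5, 1.7, 20.3],    # x, y, z in camera coords; z is the plotted distance
                          [-4.2, 1.6, 12.8]]),
    'rotation_y': np.array([0.1, -1.5]),        # radians; converted to degrees in the loop
}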
Example #4
def training_density_analysis(front_path, frontleft_path, left_path):

    gt_front = kitti.get_label_annos(front_path)
    gt_frontleft = kitti.get_label_annos(frontleft_path)
    gt_left = kitti.get_label_annos(left_path)
    gt_dataset = [gt_front, gt_frontleft, gt_left]

    x = ['Car', 'Pedestrian', 'Cyclist', 'DontCare']
    annos_name = ["Front", "FrontLeft", "Left"]
    count = {"Front": [0, 0, 0, 0], "FrontLeft": [0, 0, 0, 0], "Left": [0, 0, 0, 0]}

    total_images = 0
    total_objects = 0
    for i in range(0, 3):
        view = annos_name[i]
        for frame in gt_dataset[i]:
            if len(frame['name']) != 0:
                total_images += 1
            for j in range(0, len(frame['name'])):
                cls = frame['name'][j]
                if cls in x:
                    count[view][x.index(cls)] += 1  # the original never filled count, so every bar plotted as zero
                if cls in ('Car', 'Pedestrian', 'Cyclist'):
                    total_objects += 1
    print("Average Car/Pedestrian/Cyclist labels per non-empty image: " + str(total_objects / total_images))

    # set width of bar
    barWidth = 0.25

    # set height of bar
    bars1 = count["Front"]
    bars2 = count["FrontLeft"]
    bars3 = count["Left"]

    # Set position of bar on X axis
    r1 = np.arange(len(bars1))
    r2 = [x + barWidth for x in r1]
    r3 = [x + barWidth for x in r2]

    # Make the plot
    plt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='Front')
    plt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='FrontLeft')
    plt.bar(r3, bars3, color='#2d7f5e', width=barWidth, edgecolor='white', label='Left')

    # Add xticks in the middle of each group of bars
    plt.xlabel('Class', fontweight='bold')
    plt.xticks([r + barWidth for r in range(len(bars1))], x)

    plt.title("Class Count Across Camera Views")
    # Create legend & Show graphic
    plt.legend()
    plt.savefig("output/eval/dataset_vis/train_class_count.png")
Example #5
def evaluate(label_path,
             result_path,
             label_split_file,
             current_class=0,
             coco=False,
             score_thresh=-1):
    dt_annos = get_label_annos(result_path)
    if score_thresh > 0:
        dt_annos = filter_annos_low_score(dt_annos, score_thresh)
    val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = get_label_annos(label_path, val_image_ids)
    if coco:
        return get_coco_eval_result(gt_annos, dt_annos, current_class)
    else:
        return get_official_eval_result(gt_annos, dt_annos, current_class)
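A hypothetical call for the variant above; _read_imageset_file (defined in Example #9 below) expects a split file with one image index per line:

# Hypothetical paths; val.txt lists one KITTI image index per line.
result = evaluate(label_path='data/kitti/training/label_2',
                  result_path='output/detections',
                  label_split_file='data/kitti/ImageSets/val.txt',
                  current_class=0)
print(result)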
Example #6
File: evaluate.py Project: zyg11/DSGN
def evaluate(label_path,
             result_path,
             current_class=0,
             coco=False,
             score_thresh=-1,
             eval_dist=None):
    dt_annos, image_ids = kitti.get_label_annos(result_path, return_image_ids=True, eval_dist=eval_dist)
    print('Eval {} images'.format(len(dt_annos)))
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    #val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = kitti.get_label_annos(label_path, image_ids, eval_dist=eval_dist)
    if coco:
        print(get_coco_eval_result(gt_annos, dt_annos, current_class))
    else:
        print(get_official_eval_result(gt_annos, dt_annos, current_class))
Example #7
def evaluate(label_path,
             result_path,
             label_split_file,
             current_class=0,
             coco=False,
             score_thresh=-1):

    val_image_ids = _read_imageset_file(label_split_file)
    val_image_ids = exists(val_image_ids, result_path)
    gt_annos = kitti.get_label_annos(label_path, val_image_ids)
    dt_annos = kitti.get_label_annos(result_path, val_image_ids)
    if score_thresh > 0:
        # filter after the final load; the original filtered first and then reloaded, discarding the filter
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    if coco:
        print(get_coco_eval_result(gt_annos, dt_annos, current_class))
    else:
        print(get_official_eval_result(gt_annos, dt_annos, current_class))
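exists() is not defined in this snippet; it presumably keeps only the ids that actually have a prediction file. A minimal sketch under that assumption, using the %06d.txt naming seen in Example #11:

import os

# Hypothetical helper: keep only image ids whose result file exists.
def exists(image_ids, result_path):
    return [i for i in image_ids
            if os.path.isfile(os.path.join(result_path, '%06d.txt' % int(i)))]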
Example #8
def evaluate_cuda_wrapper(result_dir,
                          gt_dirs,
                          classes_mapper,
                          image_set='val',
                          async_eval=False):
    # https://github.com/traveller59/kitti-object-eval-python
    # Sometimes we cannot reproduce the official evaluation result for car BEV HARD detection (+-6%)
    if cfg.DATA.CAR_ONLY:
        classes_idx = '0'
    elif cfg.DATA.PEOPLE_ONLY:
        classes_idx = '1,2'
    else:
        classes_idx = '0,1,2'
    classes = [int(c) for c in classes_idx.split(',')]  # parse once; previously computed but never used

    for gt_dir in gt_dirs:

        gt_dir_labels = os.path.join(gt_dir, 'label_2')
        gt_annos = kitti.get_label_annos(gt_dir_labels, classes_mapper)
        result_annos = kitti.get_label_annos(result_dir, classes_mapper)
        print(get_official_eval_result_my(gt_annos, result_annos, classes))
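A hypothetical invocation; each gt_dir is expected to contain a KITTI-style label_2 subdirectory, and classes_mapper is forwarded to kitti.get_label_annos as-is:

# Hypothetical call; classes_mapper=None assumes the loader tolerates a missing mapper.
evaluate_cuda_wrapper(result_dir='output/detections',
                      gt_dirs=['data/kitti/training'],
                      classes_mapper=None)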
Example #9
import kitti_common as kitti
import json
from eval import get_official_eval_result, get_coco_eval_result
def _read_imageset_file(path):
    with open(path, 'r') as f:
        lines = f.readlines()
    return [line.rstrip('\n') for line in lines]  # safer than line[:-1] when the last line lacks a newline
gt_split_file = "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/nusc_val.txt" 
val_image_ids = _read_imageset_file(gt_split_file)
# print("Val Image IDs: ", val_image_ids)
det_path = "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/infer_2/"
dt_annos = kitti.get_label_annos(det_path, val_image_ids)
gt_path = "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/label_2/"
gt_annos = kitti.get_label_annos(gt_path, val_image_ids)

# print(dt_annos)
# print(gt_annos)
with open("./outputs/eval_result.json", "w", encoding="utf-8") as f:
    json.dump(get_official_eval_result(gt_annos, dt_annos, 0), f, ensure_ascii=False, indent=4)
    # f.write("\nCOCO Eval Result:\n")
    # print(get_coco_eval_result(gt_annos, dt_annos, 0))
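Depending on the eval version, get_official_eval_result returns either a plain string or a (string, dict) pair (compare Examples #1 and #11); a defensive variant of the dump might look like:

result = get_official_eval_result(gt_annos, dt_annos, 0)
if isinstance(result, tuple):  # (result_str, result_dict) style return
    result = result[0]
with open("./outputs/eval_result.json", "w", encoding="utf-8") as f:
    json.dump(result, f, ensure_ascii=False, indent=4)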
Example #10
def kitti_eval(detpath,
               db,
               frameset,
               classname,
               cachedir,
               mode,
               ovthresh=0.5,
               eval_type='2d',
               d_levels=0):
    #Min overlap is 0.7 for cars, 0.5 for ped/bike
    """mrec, mprec, map = kitti_eval(detpath,
                                     db,
                                     frameset,
                                     classname,
                                     cachedir,
                                     mode,
                                     [ovthresh])

  Top level function that does a PASCAL VOC style evaluation.

  detpath: Path to detections
      detpath.format(classname) should produce the detection results file.
  db: Dataset handle used to resolve frame paths and the output directory.
  frameset: List of frames to evaluate.
  classname: Category name
  cachedir: Directory for caching the annotations
  mode: Dataset split passed through to get_frame_path and load_recs.
  [ovthresh]: Overlap threshold (default = 0.5)
  eval_type: IoU type understood by eval_utils.iou (default '2d')
  d_levels: Number of difficulty levels tracked (easy/medium/hard)
  """

    #Misc hardcoded variables
    idx = 0
    ovthresh_dc = 0.5
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(framename)
    # assumes framesetfile is a text file with each line an frame name
    # cachedir caches the annotations in a pickle file

    frame_path = get_frame_path(db, mode, eval_type)
    class_recs = load_recs(frameset, frame_path, db, mode, classname)
    # read dets
    detfile = detpath.format(classname)
    print('Opening det file: ' + detfile)

    # NOTE: label_path / result_path are not defined in this function and the
    # two annos below are never used; this looks like leftover debug code.
    gt_annos = kitti.get_label_annos(label_path)
    dt_annos = kitti.get_label_annos(result_path)

    #sys.exit('donezo')
    with open(detfile, 'r') as f:
        lines = f.readlines()
    #Extract detection file into array
    splitlines = [x.strip().split(' ') for x in lines]
    #Many entries have the same idx & token
    frame_idx = [x[0] for x in splitlines]  # TODO: I don't like how this is along many frames
    frame_tokens = [x[1] for x in splitlines]
    confidence = np.array([float(x[2]) for x in splitlines])
    #All detections for specific class
    bbox_elem = cfg[cfg.NET_TYPE.upper()].NUM_BBOX_ELEM
    BB = np.array([[float(z) for z in x[3:3 + bbox_elem]] for x in splitlines])
    det_cnt = np.zeros((cfg.KITTI.MAX_FRAME))
    _, uncertainties = eval_utils.extract_uncertainties(bbox_elem, splitlines)
    #Repeated for X detections along every frame presented
    idx = len(frame_idx)
    #DEPRECATED ---- 3 types, easy medium hard
    tp = np.zeros((idx, d_levels))
    fp = np.zeros((idx, d_levels))
    fn = np.zeros((idx))
    tp_frame = np.zeros(cfg.KITTI.MAX_FRAME)
    fp_frame = np.zeros(cfg.KITTI.MAX_FRAME)
    npos_frame = np.zeros(cfg.KITTI.MAX_FRAME)
    npos = np.zeros((len(class_recs), d_levels))
    #Count number of total labels in all frames
    count_npos(class_recs, npos, npos_frame)
    det_results = []
    frame_uncertainties = []
    #Check if there are any dets at all
    if BB.shape[0] > 0:
        # sort by confidence (highest first)
        sorted_ind = np.argsort(-confidence)
        sorted_scores = np.sort(-confidence)
        idx_sorted = [int(frame_idx[x]) for x in sorted_ind]
        frame_tokens_sorted = [frame_tokens[x] for x in sorted_ind]
        #print(frame_ids)

        # go down dets and mark true positives and false positives
        #Zip together sorted_ind with frame tokens sorted.
        #sorted_ind -> Needed to know which detection we are selecting next
        #frame_tokens_sorted -> Needed to know which set of GT's are for the same frame as the det
        print('num dets {}'.format(len(sorted_ind)))
        idx = 0
        for det_idx, token in zip(sorted_ind, frame_tokens_sorted):
            det_confidence = confidence[det_idx]
            #R is a subset of detections for a specific class
            #print('doing det for frame {}'.format(frame_idx[d]))
            #Need to find associated GT frame ID alongside its detection id 'd'
            #Only one such frame, why appending?
            #print(confidence[det_idx])
            R = None
            skip_iter = True
            R = eval_utils.find_rec(class_recs, token)
            if (R is None):
                continue
            #Deprecated
            #R = class_recs[frame_ids[d]]
            bb = BB[det_idx, :].astype(float)
            var = {}
            #Variance extraction, collect on a per scene basis
            for key, val in uncertainties.items():
                #uc_avg[key][int(R['idx'])] += val[det_idx, :]
                var[key] = val[det_idx, :]
            det_cnt[int(R['idx'])] += 1
            #Variance extraction, collect on a per scene basis
            ovmax = -np.inf
            #Multiple possible bounding boxes, perhaps for multi car detection
            BBGT = R['boxes'].astype(float)
            BBGT_dc = R['boxes_dc'].astype(float)
            #Preload all GT boxes and count number of true positive GT's
            #Not sure why we're setting ignore to false here if it were true
            #for i, BBGT_elem in enumerate(BBGT):
            #    BBGT_height = BBGT_elem[3] - BBGT_elem[1]
            ovmax_dc = 0
            if BBGT_dc.size > 0 and cfg.TEST.IGNORE_DC:
                overlaps_dc = eval_utils.iou(BBGT_dc, bb, eval_type)
                ovmax_dc = np.max(overlaps_dc)
            #Compute IoU
            if BBGT.size > 0:
                overlaps = eval_utils.iou(BBGT, bb, eval_type)
                ovmax = np.max(overlaps)
                #Index of max overlap between a BBGT and BB
                jmax = np.argmax(overlaps)
            else:
                jmax = 0
            # Minimum IoU Threshold for a true positive
            if ovmax > ovthresh and ovmax_dc < ovthresh_dc:
                #if ovmax > ovthresh:
                #ignore if not contained within easy, medium, hard
                if not R['ignore'][jmax]:
                    if not R['hit'][jmax]:
                        #print('TP')
                        if (R['difficulty'][jmax] <= 2):
                            tp[idx, 2] += 1
                        if (R['difficulty'][jmax] <= 1):
                            tp[idx, 1] += 1
                        if (R['difficulty'][jmax] <= 0):
                            tp[idx, 0] += 1
                            #print('ez')
                        tp_frame[int(R['idx'])] += 1
                        R['hit'][jmax] = True
                        det_results.append(
                            write_det(R, det_confidence, ovmax, bb, var, jmax))
                    else:
                        #print('FP-hit')
                        #If it already exists, cant double classify on same spot.
                        if (R['difficulty'][jmax] <= 2):
                            fp[idx, 2] += 1
                        if (R['difficulty'][jmax] <= 1):
                            fp[idx, 1] += 1
                        if (R['difficulty'][jmax] <= 0):
                            fp[idx, 0] += 1
                        fp_frame[int(R['idx'])] += 1
                        det_results.append(
                            write_det(R, det_confidence, ovmax, bb, var))
            #If your IoU is less than required, its simply a false positive.
            elif (BBGT.size > 0 and ovmax_dc < ovthresh_dc):
                #print('FP-else')
                #elif(BBGT.size > 0)
                #if(R['difficulty'][jmax] <= 2):
                #    fp[det_idx,2] += 1
                #if(R['difficulty'][jmax] <= 1):
                #    fp[det_idx,1] += 1
                #if(R['difficulty'][jmax] <= 0):
                #    fp[det_idx,0] += 1
                fp[idx, 2] += 1
                fp[idx, 1] += 1
                fp[idx, 0] += 1
                fp_frame[int(R['idx'])] += 1
                det_results.append(write_det(R, det_confidence, ovmax, bb,
                                             var))
            idx = idx + 1
    else:
        print('kitti eval: no detections to evaluate')
    #for i in np.arange(cfg.KITTI.MAX_FRAME):
    #    frame_dets = np.sum(det_cnt[i])
    #    frame_uc = eval_utils.write_frame_uncertainty(uc_avg,frame_dets,i)
    #    if(frame_uc != '' and cfg.DEBUG.PRINT_SCENE_RESULT):
    #        print(frame_uc)
    #    frame_uncertainties.append(frame_uc)

    if (cfg.DEBUG.TEST_FRAME_PRINT):
        eval_utils.display_frame_counts(tp_frame, fp_frame, npos_frame)
    out_dir = get_output_dir(db, mode='test')
    out_file = '{}_detection_results.txt'.format(classname)
    eval_utils.save_detection_results(det_results, out_dir, out_file)
    #if(len(frame_uncertainties) != 0):
    #    uc_out_file = '{}_frame_uncertainty_results.txt'.format(classname)
    #    eval_utils.save_detection_results(frame_uncertainties, out_dir, uc_out_file)

    mrec = np.zeros((d_levels, ))
    mprec = np.zeros((d_levels, ))
    map = np.zeros((d_levels, ))  # separate arrays; the original chained assignment aliased all three
    prec = 0
    rec = 0
    fp_sum = np.cumsum(fp, axis=0)
    tp_sum = np.cumsum(tp, axis=0)
    #fn     = 1-fp
    #fn_sum = np.cumsum(fn, axis=0)
    npos_sum = np.sum(npos, axis=0)
    print(tp_sum)
    print(fp_sum)
    print(npos_sum)
    #print('Difficulty Level: {:d}, fp sum: {:f}, tp sum: {:f} npos: {:d}'.format(i, fp_sum[i], tp_sum[i], npos[i]))
    #recall
    #Per frame per class AP
    for i in range(0, d_levels):
        npos_sum_d = npos_sum[i]
        #Override to avoid division by zero
        if (npos_sum_d == 0):
            npos_sum_d = 1
        rec = tp_sum[:, i] / npos_sum_d.astype(float)
        prec = tp_sum[:, i] / np.maximum(tp_sum[:, i] + fp_sum[:, i],
                                         np.finfo(np.float64).eps)
        #print(rec)
        #print(prec)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth precision
        rec, prec = zip(*sorted(zip(rec, prec)))
        #plt.scatter(rec,prec)
        #plt.show()
        mprec[i] = np.average(prec)
        mrec[i] = np.average(rec)
        map[i] = eval_utils.ap(rec, prec)
    return mrec, mprec, map
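For reference, a common all-point (area-under-PR) AP computation looks like the sketch below; this is an assumption about what eval_utils.ap computes, not its actual implementation:

import numpy as np

def ap_area(rec, prec):
    # Pad with sentinels, take the monotone envelope of precision,
    # then integrate precision over recall (VOC-style all-point AP).
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    for i in range(mpre.size - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])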
Example #11
def evaluate(result_path,
             dataset_path=None,
             label_split_file=None,
             label_path=None,
             metric="new",
             dataset="kitti",
             current_class=0,
             coco=False,
             score_thresh=-1,
             toground=False,
             rescale_pred=None,
             align_size=False,
             align_front=False,
             reverse_align=False,
             dense_sample=False,
             direct_save=False,
             output_iou=False,
             adapted=False):
    label_split_file = label_split_file or os.path.join(
        dataset_path, "val.txt")
    label_path = label_path or os.path.join(dataset_path, "training",
                                            "label_2")
    if metric == "old":
        from eval_old import get_official_eval_result, get_coco_eval_result, calculate_iou_partly
    else:
        from eval2 import get_official_eval_result, get_coco_eval_result, calculate_iou_partly
    val_image_ids = _read_imageset_file(label_split_file)
    dt_annos = kitti.get_label_annos(result_path, val_image_ids)
    # for i in range(len(dt_annos)):
    #     if len(dt_annos[i]['name']) > 0:
    #         assert np.max(dt_annos[i]['location'][:, 2]) < 80, f"{os.path.join(result_path, '%06d.txt' % val_image_ids[i])}, Some detection > 80m!!!"

    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    if toground:
        dt_annos = annos_to_ground(
            dt_annos, os.path.join(os.path.dirname(label_path), "planes"),
            val_image_ids)
        save_labels(dt_annos,
                    os.path.join(os.path.dirname(result_path), "grounded"),
                    val_image_ids)

    if rescale_pred is not None:
        for anno in dt_annos:
            anno['dimensions'] *= rescale_pred

    gt_annos = kitti.get_label_annos(label_path, val_image_ids)

    # for i in range(len(gt_annos)):
    #     if len(gt_annos[i]['name']) > 0:
    #         assert np.max(gt_annos[i]['location'][:, 2]) < 70, f"{os.path.join(label_path, '%06d.txt' % val_image_ids[i])}, Some label > 70m!!!"
    if output_iou:
        target_dir = os.path.join(os.path.dirname(result_path), "with_iou")

        os.makedirs(target_dir, exist_ok=True)

        overlaps, _, _, _ = calculate_iou_partly(dt_annos, gt_annos, 1)
        assert len(overlaps) == len(dt_annos) == len(gt_annos)
        for i in range(len(overlaps)):
            assert overlaps[i].shape == (len(dt_annos[i]['name']),
                                         len(gt_annos[i]['name']))
            if len(dt_annos[i]['name']) > 0 and len(gt_annos[i]['name']) > 0:
                val = np.max(overlaps[i], axis=1)
            else:
                val = np.zeros(len(dt_annos[i]['name']))
            try:
                n = len(dt_annos[i]["name"])
                kitti_str = []
                for j in range(n):
                    kitti_str.append(
                        '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f'
                        %
                        (dt_annos[i]['name'][j], dt_annos[i]['truncated'][j],
                         dt_annos[i]['occluded'][j], dt_annos[i]['alpha'][j],
                         dt_annos[i]['bbox'][j, 0], dt_annos[i]['bbox'][j, 1],
                         dt_annos[i]['bbox'][j, 2], dt_annos[i]['bbox'][j, 3],
                         dt_annos[i]['dimensions'][j, 1],
                         dt_annos[i]['dimensions'][j, 2],
                         dt_annos[i]['dimensions'][j, 0],
                         dt_annos[i]['location'][j, 0],
                         dt_annos[i]['location'][j, 1],
                         dt_annos[i]['location'][j, 2],
                         dt_annos[i]['rotation_y'][j], dt_annos[i]['score'][j],
                         val[j]))
                with open(
                        os.path.join(target_dir,
                                     "%06d.txt" % val_image_ids[i]), "w") as f:
                    f.write("\n".join(kitti_str))
            except Exception:  # drop into the debugger on malformed annos
                pdb.set_trace()

        target_dir = os.path.join(os.path.dirname(result_path), "with_iou_gt")

        os.makedirs(target_dir, exist_ok=True)

        for i in range(len(overlaps)):
            assert overlaps[i].shape == (len(dt_annos[i]['name']),
                                         len(gt_annos[i]['name']))
            if len(dt_annos[i]['name']) > 0 and len(gt_annos[i]['name']) > 0:
                val = np.max(overlaps[i], axis=0)
            else:
                val = np.zeros(len(gt_annos[i]['name']))
            try:
                n = len(gt_annos[i]["name"])
                kitti_str = []
                for j in range(n):
                    kitti_str.append(
                        '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f'
                        %
                        (gt_annos[i]['name'][j], gt_annos[i]['truncated'][j],
                         gt_annos[i]['occluded'][j], gt_annos[i]['alpha'][j],
                         gt_annos[i]['bbox'][j, 0], gt_annos[i]['bbox'][j, 1],
                         gt_annos[i]['bbox'][j, 2], gt_annos[i]['bbox'][j, 3],
                         gt_annos[i]['dimensions'][j, 1],
                         gt_annos[i]['dimensions'][j, 2],
                         gt_annos[i]['dimensions'][j, 0],
                         gt_annos[i]['location'][j, 0],
                         gt_annos[i]['location'][j, 1],
                         gt_annos[i]['location'][j, 2],
                         gt_annos[i]['rotation_y'][j], gt_annos[i]['score'][j],
                         val[j]))
                with open(
                        os.path.join(target_dir,
                                     "%06d.txt" % val_image_ids[i]), "w") as f:
                    f.write("\n".join(kitti_str))
            except Exception:  # drop into the debugger on malformed annos
                pdb.set_trace()

    if align_size:
        overlaps, _, _, _ = calculate_iou_partly(dt_annos, gt_annos, 1)
        assert len(overlaps) == len(dt_annos) == len(gt_annos)
        for i in range(len(overlaps)):
            assert overlaps[i].shape == (len(dt_annos[i]['name']),
                                         len(gt_annos[i]['name']))
            if len(dt_annos[i]['name']) > 0 and len(gt_annos[i]['name']) > 0:
                val = np.max(overlaps[i], axis=1)
                idx = np.argmax(overlaps[i], axis=1)
                for j in range(len(dt_annos[i]['name'])):
                    if val[j] > 0.2:
                        dt_annos[i]['dimensions'][
                            j, :] = gt_annos[i]['dimensions'][idx[j], :]
        save_labels(dt_annos,
                    os.path.join(os.path.dirname(result_path), "align_size"),
                    val_image_ids)

    if align_front:
        overlaps, _, _, _ = calculate_iou_partly(dt_annos, gt_annos, 1)
        assert len(overlaps) == len(dt_annos) == len(gt_annos)
        for i in range(len(overlaps)):
            assert overlaps[i].shape == (len(dt_annos[i]['name']),
                                         len(gt_annos[i]['name']))
            if len(dt_annos[i]['name']) > 0 and len(gt_annos[i]['name']) > 0:
                val = np.max(overlaps[i], axis=1)
                idx = np.argmax(overlaps[i], axis=1)
                for j in range(len(dt_annos[i]['name'])):
                    if val[j] > 0.2:
                        dist = np.linalg.norm(dt_annos[i]['location'][j, :])
                        alpha = dt_annos[i]['alpha'][j]
                        alpha = np.arctan2(np.sin(alpha), np.cos(alpha))
                        if np.abs(np.sin(alpha)) * dist > dt_annos[i][
                                'dimensions'][j, 2] / 2.0:
                            shift = (
                                dt_annos[i]['dimensions'][j, 2] -
                                gt_annos[i]['dimensions'][idx[j], 2]) / 2.0
                            if 0 < alpha:
                                angle = -dt_annos[i]['rotation_y'][j]
                            else:
                                angle = -dt_annos[i]['rotation_y'][j] + np.pi
                            dt_annos[i]['location'][j,
                                                    0] += shift * np.cos(angle)
                            dt_annos[i]['location'][j,
                                                    2] += shift * np.sin(angle)
                        if np.abs(np.cos(alpha)) * dist > dt_annos[i][
                                'dimensions'][j, 1] / 2.0:
                            shift = (
                                dt_annos[i]['dimensions'][j, 1] -
                                gt_annos[i]['dimensions'][idx[j], 1]) / 2.0
                            if -np.pi / 2.0 < alpha < np.pi / 2.0:
                                angle = -dt_annos[i]['rotation_y'][
                                    j] - np.pi / 2.0
                            else:
                                angle = -dt_annos[i]['rotation_y'][
                                    j] + np.pi / 2.0
                            dt_annos[i]['location'][j,
                                                    0] += shift * np.cos(angle)
                            dt_annos[i]['location'][j,
                                                    2] += shift * np.sin(angle)
                        dt_annos[i]['dimensions'][
                            j, :] = gt_annos[i]['dimensions'][idx[j], :]
        save_labels(dt_annos,
                    os.path.join(os.path.dirname(result_path), "align_front"),
                    val_image_ids)

    if reverse_align:
        import sys
        sys.path.insert(0, "..")
        from config_path import dataset_paths

        src = get_model(label_path)
        dst = get_model(result_path)
        print("label_path:", label_path)
        print("result_path:", result_path)
        print(f"{src} -> {dst}")
        with open(os.path.join(dataset_paths[src],
                               "label_normal_val.json")) as f:
            src = json.load(f)
        with open(os.path.join(dataset_paths[dst],
                               "label_normal_val.json")) as f:
            dst = json.load(f)
        mapping = get_scale_map(src, dst)
        for i in range(len(gt_annos)):
            if len(gt_annos[i]['name']) > 0:
                gt_annos[i]["dimensions"] = mapping(gt_annos[i]["dimensions"])
        save_labels(
            gt_annos,
            os.path.join(os.path.dirname(result_path), "reverse_align"),
            val_image_ids)

    if not output_iou:
        if coco:
            return get_coco_eval_result(gt_annos, dt_annos, current_class)
        else:
            ap_result_str, ap_dict = get_official_eval_result(
                gt_annos,
                dt_annos,
                current_class,
                dataset,
                dense_sample=dense_sample)
            if direct_save:
                result_path = os.path.dirname(result_path)
                fname = os.path.basename(result_path) + "_val20"
                if toground:
                    fname += "_ground"
                if align_size:
                    fname += "_align_size"
                if reverse_align:
                    fname += "_reverse_align"
                if adapted:
                    fname += "_adapted"

                print(
                    f"Saving to {os.path.join(os.path.dirname(result_path), fname+'.pkl')}"
                )
                with open(
                        os.path.join(os.path.dirname(result_path),
                                     fname + '.pkl'), "wb") as fb:
                    pickle.dump(ap_dict["result"], fb)
                with open(
                        os.path.join(os.path.dirname(result_path),
                                     fname + '.txt'), "w") as f:
                    f.write(ap_result_str)
            return ap_result_str, ap_dict
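A hypothetical direct call to the evaluate above, assuming a KITTI-style dataset_path layout with training/label_2 and a val.txt split:

# Hypothetical paths and flags.
ap_result_str, ap_dict = evaluate(
    result_path='output/model_x/detections',
    dataset_path='data/kitti',
    current_class=0,       # 0 = Car in the usual KITTI class mapping
    score_thresh=0.1)
print(ap_result_str)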