Example #1
import matplotlib.pyplot as plt
import numpy as np


def score_th_results(anns, models_res, models_labels):
    # Sweep confidence thresholds and record AP / TP / FP for each model.
    # eval_map here is a project-local variant of mmdet's eval_map that also
    # returns a summary DataFrame, recalls and precisions (inferred from the
    # unpacking below).
    ths = np.arange(0.05, 1, 0.05)
    aps = [[] for _ in range(len(models_res))]
    tps = [[] for _ in range(len(models_res))]
    fps = [[] for _ in range(len(models_res))]
    for sc in ths:
        for j, m in enumerate(models_res):
            for i in range(len(m)):
                # Filter the first class's boxes in place. Because `ths`
                # ascends, this cumulative filtering equals filtering the
                # original detections at `sc`, but it mutates the caller's
                # arrays.
                m[i][0] = m[i][0][m[i][0][:, 4] > sc]
            mean_ap, eval_results, df_summary, recalls, precisions = eval_map(
                m, anns, nproc=4, model_name="Ensemble")
            aps[j].append(mean_ap)
            tps[j].append(df_summary["TP"].values[0])
            fps[j].append(df_summary["FP"].values[0])

    def plot_score_th(res, label):
        plt.figure()
        for i, r in enumerate(res):
            plt.plot(ths, r, label=models_labels[i])
        plt.legend()
        plt.xlabel("Score Threshold")
        plt.ylabel(label)
        plt.show()

    plot_score_th(aps, "AP")
    plot_score_th(tps, "TP")
    plot_score_th(fps, "FP")
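A minimal usage sketch, assuming mmdet-style result pickles and a dataset built as in the later examples; all paths and labels below are placeholders:

# Hypothetical usage of score_th_results; paths and labels are placeholders.
import mmcv
from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/waymo_detection_1280x1920.py')
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)

anns = [dataset.get_ann_info(i) for i in range(len(dataset))]
# Each results .pkl holds, per image, one (n, 5) array of
# [x1, y1, x2, y2, score] rows per class, as produced by mmdet testing.
models_res = [mmcv.load('results_a.pkl'), mmcv.load('results_b.pkl')]
score_th_results(anns, models_res, ['model_a', 'model_b'])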
Example #2

def voc_eval(result_file,
             dataset,
             iou_thr=0.5,
             nproc=4,
             conf_thresh=0.5,
             show=False):
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(det_results,
                                     annotations,
                                     scale_ranges=None,
                                     iou_thr=iou_thr,
                                     dataset=dataset_name,
                                     logger='print',
                                     nproc=nproc,
                                     conf_thresh=conf_thresh,
                                     show=show)
    return mean_ap, eval_results
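A hedged usage sketch for voc_eval; the config and result paths are placeholders:

# Hypothetical call; the config and result paths are placeholders.
from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile('configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py')
cfg.data.test.test_mode = True  # keep images without ground truths
dataset = build_dataset(cfg.data.test)
mean_ap, eval_results = voc_eval('results.pkl', dataset, iou_thr=0.5)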
Example #3
def ensemble_models(models):
    # NOTE: this snippet is truncated upstream. The signature and the list
    # head below are reconstructed by analogy with stats_results() and are
    # assumptions, not the original code.
    resFiles = [
        "saved_models/study/{}/results.pkl".format(model)
        for model in models
    ]

    cfg = Config.fromfile("saved_models/study/{}/{}.py".format(
        models[0], models[0]))
    cfg.data.test.test_mode = True  # To avoid filtering out images without gts
    dataset = build_dataset(cfg.data.test)

    # Fusion config for ensembleDetections: plain NMS by default; the
    # commented alternatives use soft-NMS or weighted boxes fusion (WBF).
    # `nms_cfg` keeps these settings separate from the mmcv Config above.
    nms_cfg = {'type': 'nms', 'iou_threshold': 0.7}
    # nms_cfg = {'type': 'soft_nms', 'iou_threshold': 0.3, 'sigma': 0.5, 'min_score': 1e-3, 'method': 'linear'}
    # nms_cfg = {'type': 'wbf', 'iou_threshold': 0.7}

    combined_dets = loadResults(resFiles)
    ensemble_dets = [ensembleDetections(dets, nms_cfg) for dets in combined_dets]
    # time = [n[1] for n in nms_output]
    # print(np.mean(time))
    # res = dataset.evaluate(ensemble_dets)
    print()

    # Keep only boxes with score > 0.05 for each of the three classes.
    for i in range(len(ensemble_dets)):
        for c in range(3):
            ensemble_dets[i][c] = ensemble_dets[i][c][
                ensemble_dets[i][c][:, 4] > 0.05]

    anns = [dataset.get_ann_info(n) for n in range(len(ensemble_dets))]
    mean_ap, eval_results, df_summary, recalls, precisions = eval_map(
        ensemble_dets, anns, nproc=4, model_name=models[0])
    mmcv.dump(ensemble_dets, "results_sample.pkl")
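Assuming the reconstructed signature above, an invocation might look like this; the model names are placeholders:

# Hypothetical call to the reconstructed ensemble_models(); the model names
# are placeholders for directories under saved_models/study/.
ensemble_models(['faster_rcnn_r50', 'retinanet_r50'])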
Example #4

def stats_results(
        models,
        filt_scores=(0,),  # tuple default avoids the mutable-default pitfall
        training_results=False,
        output='stat_res.csv',
        dataset_config='configs/_base_/datasets/waymo_detection_1280x1920.py',
        full_stats=False):
    list_eval_results = []

    resFiles = [
        "saved_models/study/{}/results.pkl".format(model) for model in models
    ]

    cfg = Config.fromfile(dataset_config)
    cfg.data.test.test_mode = True  # To avoid filtering out images without gts
    dataset = build_dataset(cfg.data.test)

    anns = [dataset.get_ann_info(n) for n in range(len(dataset.img_ids))]

    df = pd.DataFrame()
    for i, res in enumerate(resFiles):
        dets = mmcv.load(res)

        # img_index = 0
        # dets = [dets[img_index]]
        # anns = [anns[img_index]]
        # dataset.data_infos = [dataset.data_infos[img_index]]
        # dataset.img_ids = [img_index]

        for sc in filt_scores:
            filt_dets = dets
            model_name = models[i]
            if sc != 0:
                filt_dets = [[
                    dets_class[dets_class[:, 4] > sc]
                    for dets_class in dets_img
                ] for dets_img in dets]
                model_name += "_" + str(sc)
            mean_ap, eval_results, df_summary = eval_map(filt_dets,
                                                         anns,
                                                         nproc=4,
                                                         model_name=model_name)
            list_eval_results.append(eval_results)
            df = pd.concat([df, df_summary])

            if full_stats:
                stats_gt_dets(eval_results, dataset, dets, anns, model_name)

            # mean_ap, eval_results = eval_map(filt_dets, anns, nproc=4, tpfp_fn=tpfp_imagenet, iou_thr=0.7)
            # mean_ap, eval_results = eval_map(filt_dets, anns, nproc=4, tpfp_fn=tpfp_imagenet)
            # mean_ap, eval_results = eval_map(filt_dets, anns, nproc=4, iou_thr=0.7)
            # mean_ap, eval_results = eval_map(filt_dets, anns, nproc=4)
            #
            # dataset.coco.load_anns(232)
            # dataset.coco.dataset['images'] = [dataset.coco.dataset['images'][232]]
            # anns = [v for k, v in dataset.coco.anns.items() if v['image_id']==232 ]
            # dataset.coco.anns = {0: dataset.coco.anns[232]}
            # dataset.coco.imgs = {0: dataset.coco.imgs[232]}
            # # dataset.coco["annotations"] = {0: dataset.coco.anns[232]}

            # dataset.evaluate(filt_dets)

    if training_results:
        cfg.data.train.test_mode = True
        dataset_train = build_dataset(cfg.data.train)
        anns_train = [
            dataset_train.get_ann_info(n)
            for n in range(len(dataset_train.img_ids))
        ]

        # Keep models and result files aligned: filtering resFiles alone
        # would desynchronize the models[i] lookup below whenever a model
        # has no training results file.
        train_models = [
            model for model in models if os.path.exists(
                "saved_models/study/{}/results_training.pkl".format(model))
        ]
        resFiles = [
            "saved_models/study/{}/results_training.pkl".format(model)
            for model in train_models
        ]

        for i, res in enumerate(resFiles):
            dets = mmcv.load(res)
            for sc in filt_scores:
                filt_dets = dets
                model_name = train_models[i] + "_train"
                if sc != 0:
                    filt_dets = [[
                        dets_class[dets_class[:, 4] > sc]
                        for dets_class in dets_img
                    ] for dets_img in dets]
                    model_name += "_" + str(sc)
                mean_ap, eval_results, df_summary = eval_map(
                    filt_dets, anns_train, nproc=4, model_name=model_name)
                list_eval_results.append(eval_results)
                df = pd.concat([df, df_summary])

    df.to_csv("stats_res/" + output)
    return list_eval_results
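A hedged usage sketch; the model names are placeholders for directories under saved_models/study/:

# Hypothetical call. Writes a per-model summary to stats_res/stat_res.csv
# and returns the per-class eval results.
eval_res = stats_results(['faster_rcnn_r50', 'retinanet_r50'],
                         filt_scores=(0, 0.3),
                         training_results=True)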
Example #5
def main():
    print('cuda available:', torch.cuda.is_available())
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    gpu_num = torch.cuda.device_count()

    with open('cfg.yaml', 'r') as fd:
        cfg = yaml.load(fd, Loader=yaml.FullLoader)
    print(cfg)

    dataset = Dataset(cfg=cfg, phase='val')
    valloader = data.DataLoader(dataset, batch_size=16)

    category_id_to_name = dataset.category_id_to_name
    category_name_to_id = dataset.category_name_to_id
    data_list = dataset.data_list

    # input_size = [3, *cfg['image_size']]
    # num_classes = len(cfg['class_names'])
    # model = ATSSModel(input_size, num_classes=num_classes)
    # model_path = 'checkpoints/1027/160/model.pth'
    # status = model.load_state_dict(torch.load(model_path, map_location=device))
    # print(status)

    checkpoint_path = 'checkpoints/1027/epoch=159.ckpt'
    # load_from_checkpoint is a classmethod; calling it on the class avoids
    # constructing a throwaway LitATSS first. cfg is forwarded to __init__.
    lit_model = LitATSS.load_from_checkpoint(checkpoint_path, cfg=cfg)
    model = lit_model.model

    model.to(device)
    model.eval()

    det_results = []
    annotations = []
    t2 = time.time()
    for i, batch in enumerate(valloader):
        input_images = batch[0].to(device)
        annos = batch[1]
        scale_shift = annos['transform']

        t1 = time.time()
        # det_result = detect_single_image(model, input_images, scale_shift[0][0], scale_shift[0][1:3])
        with torch.no_grad():  # inference only; no autograd bookkeeping needed
            det_result = detect_images(model, input_images, scale_shift[:, 0], scale_shift[:, 1:3])
        det_results.extend(det_result)
        # print(len(det_result))
        print('detect_images time:', time.time() - t1)

        for j in range(input_images.shape[0]):

            gt_num = annos['obj_num'][j]

            # Map GT boxes back to original image coordinates by undoing the
            # resize/pad transform: subtract the x/y shift, then divide by
            # the scale factor.
            gt_bboxes_per_image = annos['bboxes'][j][:gt_num]
            gt_bboxes_per_image[:, [0, 2]] -= scale_shift[j][1]
            gt_bboxes_per_image[:, [1, 3]] -= scale_shift[j][2]
            gt_bboxes_per_image = gt_bboxes_per_image / scale_shift[j][0]
            gt_bboxes_per_image = gt_bboxes_per_image.numpy()

            gt_labels = annos['cls'][j][:gt_num].numpy()

            annotations.append({'bboxes': gt_bboxes_per_image, 'labels': gt_labels})
    print('total detect time:', time.time() - t2)

    mean_ap, eval_results = eval_map(det_results, annotations, iou_thr=0.75)
    print('mAP@0.75:', mean_ap)
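A standard entry-point guard, assumed here since the listing ends at the eval_map call:

# Assumed entry point (not part of the original snippet): run the
# evaluation when the file is executed directly.
if __name__ == '__main__':
    main()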