Code example #1
File: mot_test.py Project: siyliepfl/bdd100k
 def test_mot(self) -> None:
     """Check mot evaluation correctness."""
     cur_dir = os.path.dirname(os.path.abspath(__file__))
     gts = group_and_sort(
         load("{}/testcases/track_sample_anns.json".format(cur_dir)))
     preds = group_and_sort(
         load("{}/testcases/track_predictions.json".format(cur_dir)))
     result = evaluate_track(acc_single_video_mot, gts, preds)
     overall_reference = {
         "IDF1": 0.7101073676416142,
         "MOTA": 0.6420070762302992,
         "MOTP": 0.871614396957838,
         "FP": 126,
         "FN": 942,
         "IDSw": 45,
         "MT": 62,
         "PT": 47,
         "ML": 33,
         "FM": 66,
         "mIDF1": 0.32247819436558384,
         "mMOTA": 0.24324204637536687,
         "mMOTP": 0.5001285135514636,
     }
     for key in result["OVERALL"]:
         self.assertAlmostEqual(result["OVERALL"][key],
                                overall_reference[key])
Code example #2
File: to_rle.py Project: bdd100k/bdd100k
def main() -> None:
    """Main."""
    args = parse_args()

    assert os.path.isdir(args.input)

    dataset = load(args.input, args.nproc)
    if args.config is not None:
        bdd100k_config = load_bdd100k_config(args.config)
    elif dataset.config is not None:
        bdd100k_config = BDD100KConfig(config=dataset.config)
    else:
        bdd100k_config = load_bdd100k_config(args.mode)

    categories = get_leaf_categories(bdd100k_config.scalabel.categories)

    convert_funcs: Dict[str, ToRLEFunc] = dict(
        ins_seg=insseg_to_rle,
        sem_seg=semseg_to_rle,
        drivable=semseg_to_rle,
        seg_track=segtrack_to_rle,
    )

    if args.mode == "ins_seg":
        assert args.score_file is not None
        frames = load(args.score_file).frames

        assert all(
            os.path.exists(
                os.path.join(args.input, frame.name.replace(".jpg", ".png")))
            for frame in frames), "Missing some bitmasks."
    elif args.mode in ("sem_seg", "drivable", "seg_track"):
        files = list_files(args.input)
        frames = []
        for file in files:
            if not file.endswith(".png") and not file.endswith(".jpg"):
                continue
            frame = Frame(name=file.replace(".png", ".jpg"), labels=[])
            frames.append(frame)
    else:
        return

    if args.nproc > 1:
        with Pool(args.nproc) as pool:
            frames = pool.map(
                partial(
                    convert_funcs[args.mode],
                    input_dir=args.input,
                    categories=categories,
                ),
                tqdm(frames),
            )
    else:
        frames = [
            convert_funcs[args.mode](frame, args.input, categories)
            for frame in tqdm(frames)
        ]

    save(args.output, frames)
Code example #3
def _load_frames(gt_base: str,
                 result_path: str,
                 config: BDD100KConfig,
                 nproc: int = NPROC) -> Tuple[List[Frame], List[Frame]]:
    """Load ground truth and prediction frames."""
    gt_frames = bdd100k_to_scalabel(load(gt_base, nproc).frames, config)
    result_frames = bdd100k_to_scalabel(
        load(result_path, nproc).frames, config)
    return gt_frames, result_frames
Code example #4
File: detect.py Project: siyliepfl/bdd100k
def evaluate_det(
    ann_file: Union[str, List[Frame]],
    pred_file: Union[str, List[Frame]],
    cfg_path: str,
    out_dir: str = "none",
    nproc: int = 4,
) -> Dict[str, float]:
    """Load the ground truth and prediction results.

    Args:
        ann_file: path to the ground truth annotations. "*.json"
        pred_file: path to the prediction results in BDD format. "*.json"
        cfg_path: path to the config file
        out_dir: output directory
        nproc: number of processes for loading JSON files

    Returns:
        dict: detection metric scores

    """
    # Convert the annotation file to COCO format
    if isinstance(ann_file, str):
        ann_frames = load(ann_file, nproc)
    else:
        ann_frames = ann_file
    ann_frames = sorted(ann_frames, key=lambda frame: frame.name)

    categories, name_mapping, ignore_mapping = load_coco_config(
        mode="det",
        filepath=cfg_path,
    )
    ann_coco = scalabel2coco_detection(SHAPE, ann_frames, categories,
                                       name_mapping, ignore_mapping)
    coco_gt = COCOV2(None, ann_coco)

    # Load results and convert the predictions
    if isinstance(pred_file, str):
        pred_frames = load(pred_file, nproc)
    else:
        pred_frames = pred_file
    pred_frames = sorted(pred_frames, key=lambda frame: frame.name)

    pred_res = scalabel2coco_detection(SHAPE, pred_frames, categories,
                                       name_mapping,
                                       ignore_mapping)["annotations"]
    coco_dt = coco_gt.loadRes(pred_res)

    cat_ids = coco_dt.getCatIds()
    cat_names = [cat["name"] for cat in coco_dt.loadCats(cat_ids)]

    img_ids = sorted(coco_gt.getImgIds())
    ann_type = "bbox"
    coco_eval = COCOeval(coco_gt, coco_dt, ann_type)
    coco_eval.params.imgIds = img_ids

    return evaluate_workflow(coco_eval, cat_ids, cat_names, out_dir)
Code example #5
File: utils_test.py Project: scalabel/scalabel
 def test_reorder_preds(self) -> None:
     """Test reorder_preds function."""
     pred_file = f"{self.cur_dir}/testcases/utils/preds.json"
     pred_frames = load(pred_file).frames
     gt_file = f"{self.cur_dir}/testcases/utils/gts.json"
     gt_frames = load(gt_file).frames
     pred_frames = reorder_preds(gt_frames, pred_frames)
     self.assertEqual(len(pred_frames), len(gt_frames))
     self.assertEqual(len(set(f.videoName for f in pred_frames)), 2)
     for frame in pred_frames:
         assert frame.labels is not None
         self.assertGreater(len(frame.labels), 0)
Code example #6
File: utils_test.py Project: scalabel/scalabel
 def test_combine_stuff_masks(self) -> None:
     """Test combine_stuff_masks function."""
     pred_file = f"{self.cur_dir}/testcases/utils/overlap_masks.json"
     config = load_label_config(
         f"{self.cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
     )
     pred_frames = load(pred_file).frames
     categories = config.categories
     category_names = [category.name for category in categories]
     rles, cids, iids = [], [], []
     assert pred_frames[0].labels is not None
     for label in pred_frames[0].labels:
         assert label.category is not None
         assert label.rle is not None
         rles.append(label.rle)
         cids.append(category_names.index(label.category))
         iids.append(int(label.id))
     out_rle, out_cid, out_iid = combine_stuff_masks(
         rles, cids, iids, categories
     )
     self.assertEqual(len(out_rle), 3)
     self.assertEqual(len(out_cid), 3)
     self.assertEqual(len(out_iid), 3)
     self.assertEqual(out_cid, [0, 34, 34])
     self.assertEqual(len(out_iid), len(set(out_iid)))
Code example #7
File: to_mask_test.py Project: bdd100k/bdd100k
    def task_specific_test(
        self,
        task_name: str,
        file_name: str,
        output_name: str,
        convert_func: Callable[[List[Frame], str, Config, int], None],
    ) -> None:
        """General test function for different tasks."""
        cur_dir = os.path.dirname(os.path.abspath(__file__))

        if task_name == "pan_seg":
            json_path = (
                f"{cur_dir}/testcases/panseg_bdd100k/panseg_bdd100k.json")
        else:
            json_path = f"{cur_dir}/testcases/example_annotation.json"
        frames = load(json_path).frames
        bdd100k_config = load_bdd100k_config(task_name)
        convert_func(frames, self.test_out, bdd100k_config.scalabel, 1)
        output_path = os.path.join(self.test_out, output_name)
        mask: NDArrayU8 = np.asarray(Image.open(output_path), dtype=np.uint8)

        gt_mask: NDArrayU8 = np.asarray(
            Image.open(f"{cur_dir}/testcases/{file_name}"),
            dtype=np.uint8,
        )

        self.assertTrue((mask == gt_mask).all())
Code example #8
File: to_coco.py Project: bdd100k/bdd100k
def main() -> None:
    """Main function."""
    args = parse_args()

    if args.only_mask:
        assert args.mode in ["ins_seg", "seg_track"]
        convert_function = dict(
            ins_seg=bitmask2coco_ins_seg,
            seg_track=bitmask2coco_seg_track,
        )[args.mode]

        cfg_path = args.config if args.config is not None else args.mode
        bdd100k_config = load_bdd100k_config(cfg_path)
        logger.info("Start format converting...")
        coco = convert_function(args.input, bdd100k_config.scalabel,
                                args.nproc)
    else:
        logger.info("Loading annotations...")
        dataset = load(args.input, args.nproc)
        if args.config is not None:
            bdd100k_config = load_bdd100k_config(args.config)
        elif dataset.config is not None:
            bdd100k_config = BDD100KConfig(config=dataset.config)
        else:
            bdd100k_config = load_bdd100k_config(args.mode)

        if args.mode in ["det", "box_track", "pose"]:
            convert_func = dict(
                det=scalabel2coco_detection,
                box_track=scalabel2coco_box_track,
                pose=scalabel2coco_pose,
            )[args.mode]
        else:
            if args.mask_base is not None:
                convert_func = partial(
                    dict(
                        ins_seg=bdd100k2coco_ins_seg,
                        seg_track=bdd100k2coco_seg_track,
                    )[args.mode],
                    mask_base=args.mask_base,
                    nproc=args.nproc,
                )
            else:
                convert_func = partial(
                    dict(
                        ins_seg=scalabel2coco_ins_seg,
                        seg_track=scalabel2coco_seg_track,
                    )[args.mode],
                    nproc=args.nproc,
                )

        logger.info("Start format converting...")
        frames = bdd100k_to_scalabel(dataset.frames, bdd100k_config)
        coco = convert_func(frames=frames, config=bdd100k_config.scalabel)

    logger.info("Saving converted annotations to disk...")
    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(coco, f)
    logger.info("Finished!")
Code example #9
File: utils_test.py Project: scalabel/scalabel
 def test_check_overlap(self) -> None:
     """Test check_overlap function."""
     pred_file = f"{self.cur_dir}/testcases/utils/overlap_masks.json"
     config = load_label_config(
         f"{self.cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
     )
     pred_frames = load(pred_file).frames
     self.assertTrue(check_overlap(pred_frames, config, nproc=1))
     self.assertFalse(check_overlap(pred_frames, config, nproc=1))
Code example #10
File: to_coco.py Project: guarin/bdd100k
def start_converting(args: argparse.Namespace) -> List[Frame]:
    """Logs settings and load annoatations."""
    logger.info(
        "Mode: %s\nremove-ignore: %s\nignore-as-class: %s",
        args.mode,
        args.remove_ignore,
        args.ignore_as_class,
    )
    logger.info("Loading annotations...")
    labels = load(args.label, args.nproc)
    logger.info("Start format converting...")

    return labels
Code example #11
File: mot_test.py Project: siyliepfl/bdd100k
class TestRenderResults(unittest.TestCase):
    """Test cases for mot render results."""

    cur_dir = os.path.dirname(os.path.abspath(__file__))
    gts = load("{}/testcases/track_sample_anns.json".format(cur_dir))
    preds = load("{}/testcases/track_predictions.json".format(cur_dir))

    metrics = list(METRIC_MAPS.keys())
    accs = [acc_single_video_mot(gts, preds)]
    names, accs, items = aggregate_accs(accs)
    summaries = [
        evaluate_single_class(name, acc) for name, acc in zip(names, accs)
    ]
    eval_results = render_results(summaries, items, metrics)

    def test_categories(self) -> None:
        """Check the correctness of the 1st-level keys in eval_results."""
        cate_names = ["OVERALL"]
        for super_category, categories in SUPER_CLASSES.items():
            cate_names.append(super_category)
            cate_names.extend(categories)

        self.assertEqual(len(self.eval_results), len(cate_names))
        for key in self.eval_results:
            self.assertIn(key, cate_names)

    def test_metrics(self) -> None:
        """Check the correctness of the 2nd-level keys in eval_results."""
        cate_metrics = list(METRIC_MAPS.values())
        overall_metrics = cate_metrics + ["mIDF1", "mMOTA", "mMOTP"]

        for cate, metrics in self.eval_results.items():
            if cate == "OVERALL":
                target_metrics = overall_metrics
            else:
                target_metrics = cate_metrics
            self.assertEqual(len(metrics), len(target_metrics))
            for metric in metrics:
                self.assertIn(metric, target_metrics)
Code example #12
File: utils_test.py Project: scalabel/scalabel
 def test_label_ids_to_int(self) -> None:
     """Test label_ids_to_int function."""
     gt_file = f"{self.cur_dir}/testcases/pan_seg/pan_seg_sample.json"
     gt_frames = load(gt_file).frames[0]
     assert gt_frames.labels is not None
     old_ids = [label.id for label in gt_frames.labels]
     label_ids_to_int([gt_frames])
     new_ids = [label.id for label in gt_frames.labels]
     self.assertEqual(len(old_ids), len(new_ids))
     self.assertEqual(len(new_ids), len(set(new_ids)))
     for new_id in new_ids:
         self.assertTrue(isinstance(new_id, str))
         self.assertTrue(new_id.isdigit())
Code example #13
def run() -> None:
    """Main."""
    args = parse_args()

    if args.task == "drivable":
        evaluate_drivable(args.gt, args.result)
    elif args.task == "lane_mark":
        evaluate_lane_marking(args.gt, args.result)
    elif args.task == "sem_seg":
        evaluate_segmentation(args.gt, args.result)
    elif args.task == "det":
        evaluate_det(
            args.gt, args.result, args.config, args.out_dir, args.nproc
        )
    elif args.task == "ins_seg":
        evaluate_ins_seg(args.gt, args.result, args.score_file, args.out_dir)
    elif args.task == "box_track":
        evaluate_track(
            acc_single_video_mot,
            gts=group_and_sort(load(args.gt, args.nproc)),
            results=group_and_sort(load(args.result, args.nproc)),
            iou_thr=args.mot_iou_thr,
            ignore_iof_thr=args.mot_ignore_iof_thr,
            nproc=args.nproc,
        )
    elif args.task == "seg_track":
        evaluate_track(
            acc_single_video_mots,
            gts=group_and_sort_files(
                list_files(args.gt, ".png", with_prefix=True)
            ),
            results=group_and_sort_files(
                list_files(args.result, ".png", with_prefix=True)
            ),
            iou_thr=args.mot_iou_thr,
            ignore_iof_thr=args.mot_ignore_iof_thr,
            nproc=args.nproc,
        )
Code example #14
File: pan_seg_test.py Project: scalabel/scalabel
 def test_summary(self) -> None:
     """Check evaluation scores' correctnes."""
     gt_frames = load(self.gt_file).frames
     pred_frames = load(self.pred_file).frames
     result = evaluate_pan_seg(gt_frames, pred_frames, self.config, nproc=1)
     summary = result.summary()
     gt_summary = {
         "PQ": 0.0,
         "PQ/STUFF": 0.0,
         "PQ/THING": 0.0,
         "SQ": 0.0,
         "SQ/STUFF": 0.0,
         "SQ/THING": 0.0,
         "RQ": 0.0,
         "RQ/STUFF": 0.0,
         "RQ/THING": 0.0,
         "NUM": 15,
         "NUM/STUFF": 12,
         "NUM/THING": 3,
     }
     self.assertSetEqual(set(summary.keys()), set(gt_summary.keys()))
     for name, score in gt_summary.items():
         self.assertAlmostEqual(score, summary[name])
Code example #15
File: pan_seg_test.py Project: scalabel/scalabel
 def test_summary(self) -> None:
     """Check evaluation scores' correctnes."""
     gt_frames = load(self.gt_file).frames
     pred_frames = load(self.pred_file).frames
     result = evaluate_pan_seg(gt_frames, pred_frames, self.config, nproc=1)
     summary = result.summary()
     gt_summary = {
         "PQ": 26.671487325762055,
         "PQ/STUFF": 32.04431646856032,
         "PQ/THING": 5.180170754569009,
         "SQ": 30.815623929417264,
         "SQ/STUFF": 32.04431646856032,
         "SQ/THING": 25.90085377284505,
         "RQ": 34.666666666666664,
         "RQ/STUFF": 41.666666666666664,
         "RQ/THING": 6.666666666666667,
         "NUM": 15,
         "NUM/STUFF": 12,
         "NUM/THING": 3,
     }
     self.assertSetEqual(set(summary.keys()), set(gt_summary.keys()))
     for name, score in gt_summary.items():
         self.assertAlmostEqual(score, summary[name])
Code example #16
File: pan_seg_test.py Project: scalabel/scalabel
 def test_summary(self) -> None:
     """Check evaluation scores' correctnes."""
     gt_frames = load(self.gt_file).frames
     pred_frames = load(self.pred_file).frames
     result = evaluate_pan_seg(gt_frames, pred_frames, self.config, nproc=1)
     summary = result.summary()
     gt_summary = {
         "PQ": 17.780991550508038,
         "PQ/STUFF": 21.362877645706877,
         "PQ/THING": 3.4534471697126734,
         "SQ": 30.815623929417264,
         "SQ/STUFF": 32.04431646856032,
         "SQ/THING": 25.90085377284505,
         "RQ": 23.111111111111104,
         "RQ/STUFF": 27.77777777777777,
         "RQ/THING": 4.444444444444445,
         "NUM": 15,
         "NUM/STUFF": 12,
         "NUM/THING": 3,
     }
     self.assertSetEqual(set(summary.keys()), set(gt_summary.keys()))
     for name, score in gt_summary.items():
         self.assertAlmostEqual(score, summary[name])
Code example #17
File: utils_test.py Project: scalabel/scalabel
 def test_check_overlap_frame(self) -> None:
     """Test check_overlap_frame function."""
     pred_files = [
         f"{self.cur_dir}/testcases/utils/overlap_masks.json",
         f"{self.cur_dir}/testcases/ins_seg/ins_seg_preds.json",
         f"{self.cur_dir}/testcases/sem_seg/sem_seg_preds.json",
         f"{self.cur_dir}/testcases/seg_track/seg_track_preds.json",
         f"{self.cur_dir}/testcases/pan_seg/pan_seg_preds.json",
     ]
     overlaps = [True, False, False, False, False]
     for pred_file, is_overlap in zip(pred_files, overlaps):
         pred_frames = load(pred_file).frames
         self.assertEqual(
             check_overlap_frame(pred_frames[0], ["car"]), is_overlap
         )
Code example #18
    def test_insseg_to_rle(self) -> None:
        """Test ins_seg to rle conversion."""
        mask_dir = "./testcases/to_rle/ins_seg/masks"
        score = "./testcases/to_rle/ins_seg/scores.json"
        categories = get_leaf_categories(
            load_bdd100k_config("ins_seg").scalabel.categories
        )
        frame = load(score).frames[0]
        assert frame.labels is not None

        new_frame = insseg_to_rle(frame, mask_dir, categories)

        assert new_frame.labels is not None
        self.assertEqual(len(new_frame.labels), 22)
        for i, label in enumerate(new_frame.labels):
            self.assertEqual(label.score, frame.labels[i].score)
Code example #19
    def task_specific_test(
        self,
        file_name: str,
        output_name: str,
        convert_func: Callable[[List[Frame], str, bool, bool, int], None],
    ) -> None:
        """General test function for different tasks."""
        cur_dir = os.path.dirname(os.path.abspath(__file__))

        labels = load("{}/testcases/example_annotation.json".format(cur_dir))
        convert_func(labels, self.test_out, False, False, 1)
        output_path = os.path.join(self.test_out, output_name)
        mask = np.asarray(Image.open(output_path))

        gt_mask = np.asarray(
            Image.open("{}/testcases/{}".format(cur_dir, file_name))
        )

        self.assertTrue((mask == gt_mask).all())
Code example #20
def main() -> None:
    """Main function."""
    args = parse_args()
    assert args.mode in [
        "sem_seg",
        "drivable",
        "lane_mark",
        "ins_seg",
        "pan_seg",
        "seg_track",
    ]
    os.environ["QT_QPA_PLATFORM"] = "offscreen"  # matplotlib offscreen render

    convert_funcs: Dict[str, ToMasksFunc] = dict(
        sem_seg=semseg_to_masks,
        drivable=drivable_to_masks,
        lane_mark=lanemark_to_masks,
        pan_seg=panseg_to_bitmasks,
        ins_seg=insseg_to_bitmasks,
        seg_track=segtrack_to_bitmasks,
    )

    dataset = load(args.input, args.nproc)
    if args.config is not None:
        bdd100k_config = load_bdd100k_config(args.config)
    elif dataset.config is not None:
        bdd100k_config = BDD100KConfig(config=dataset.config)
    else:
        bdd100k_config = load_bdd100k_config(args.mode)

    if args.mode in ["ins_seg", "seg_track"]:
        frames = bdd100k_to_scalabel(dataset.frames, bdd100k_config)
    else:
        frames = dataset.frames

    convert_funcs[args.mode](frames, args.output, bdd100k_config.scalabel,
                             args.nproc)

    logger.info("Finished!")
Code example #21
File: to_scalabel_test.py Project: bdd100k/bdd100k
    def test_bdd100k_to_scalabel(self) -> None:
        """Test bdd100k_to_scalabel function."""
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        dataset = load(f"{cur_dir}/testcases/example_ignore_annotation.json")
        frames = dataset.frames
        bdd100k_config = load_bdd100k_config("box_track")
        new_frames = bdd100k_to_scalabel(copy.deepcopy(frames), bdd100k_config)
        self.assertEqual(len(new_frames), 2)
        labels = new_frames[0].labels
        assert labels is not None
        self.assertEqual(len(labels), 2)
        self.assertEqual(labels[0].category, "pedestrian")
        self.assertEqual(labels[1].category, "pedestrian")
        assert labels[0].attributes is not None
        self.assertTrue(labels[0].attributes[IGNORED])
        self.assertEqual(new_frames[1].labels, None)

        bdd100k_config.remove_ignored = True
        new_frames = bdd100k_to_scalabel(frames, bdd100k_config)
        labels = new_frames[0].labels
        assert labels is not None
        self.assertEqual(len(labels), 1)
        self.assertEqual(labels[0].category, "pedestrian")
Code example #22
        action="store_true",
        help="ignore unknown categories for mots evaluation",
    )
    parser.add_argument(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for mots evaluation",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    dataset = load(args.gt)
    gt_frames, cfg = dataset.frames, dataset.config
    if args.config is not None:
        cfg = load_label_config(args.config)
    if cfg is None:
        raise ValueError("Dataset config is not specified. Please use --config"
                         " to specify a config for this dataset.")
    eval_result = evaluate_seg_track(
        acc_single_video_mots,
        group_and_sort(gt_frames),
        group_and_sort(load(args.result).frames),
        cfg,
        args.iou_thr,
        args.ignore_iof_thr,
        args.ignore_unknown_cats,
        args.nproc,
Code example #23
File: pan_seg.py Project: scalabel/scalabel
        "--ignore-unknown-cats",
        action="store_true",
        help="ignore unknown categories for panseg evaluation",
    )
    parser.add_argument(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for panseg evaluation",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    dataset = load(args.gt, args.nproc)
    gts, cfg = dataset.frames, dataset.config
    preds = load(args.result).frames
    if args.config is not None:
        cfg = load_label_config(args.config)
    assert cfg is not None
    eval_result = evaluate_pan_seg(
        gts, preds, cfg, args.ignore_unknown_cats, args.nproc
    )
    logger.info(eval_result)
    logger.info(eval_result.summary())
    if args.out_file:
        with open_write_text(args.out_file) as fp:
            json.dump(eval_result.dict(), fp, indent=2)
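
Every example above follows the same basic pattern: load reads a Scalabel-format JSON label file, and the resulting dataset's frames (plus an optional config) feed the converters and evaluators. The sketch below distills that pattern on its own. It is a minimal sketch, not taken from either repository: the import paths scalabel.label.io and scalabel.label.utils and the file paths are assumptions for illustration only.

# Minimal usage sketch; import paths and file paths are assumed, not copied from the examples above.
from scalabel.label.io import load, save            # assumed location of load/save
from scalabel.label.utils import group_and_sort     # assumed location of group_and_sort

# Read a Scalabel-format label file; the second argument controls parallel JSON loading.
dataset = load("testcases/track_sample_anns.json", 4)
frames, config = dataset.frames, dataset.config

# Group frames per video, as the tracking evaluators above expect.
videos = group_and_sort(frames)
print(f"{len(frames)} frames in {len(videos)} videos; config attached: {config is not None}")

# Write the frames back out in the same JSON format.
save("out/anns_copy.json", frames)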