Code example #1
File: mot_test.py Project: siyliepfl/bdd100k
 def test_mot(self) -> None:
     """Check mot evaluation correctness."""
     cur_dir = os.path.dirname(os.path.abspath(__file__))
     gts = group_and_sort(
         load("{}/testcases/track_sample_anns.json".format(cur_dir)))
     preds = group_and_sort(
         load("{}/testcases/track_predictions.json".format(cur_dir)))
     result = evaluate_track(acc_single_video_mot, gts, preds)
     overall_reference = {
         "IDF1": 0.7101073676416142,
         "MOTA": 0.6420070762302992,
         "MOTP": 0.871614396957838,
         "FP": 126,
         "FN": 942,
         "IDSw": 45,
         "MT": 62,
         "PT": 47,
         "ML": 33,
         "FM": 66,
         "mIDF1": 0.32247819436558384,
         "mMOTA": 0.24324204637536687,
         "mMOTP": 0.5001285135514636,
     }
     for key in result["OVERALL"]:
         self.assertAlmostEqual(result["OVERALL"][key],
                                overall_reference[key])
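Outside the test harness, the same evaluation can be run on arbitrary label files. A minimal sketch mirroring the call above; the import locations and JSON paths are assumptions, and newer versions of the evaluator may additionally require a label config:

# Import locations are assumptions; adjust to your checkout of the repository.
from bdd100k.eval.mot import acc_single_video_mot, evaluate_track
from scalabel.label.io import load
from scalabel.label.utils import group_and_sort

# Placeholder paths to Scalabel-format tracking labels.
gts = group_and_sort(load("path/to/track_gt.json"))
preds = group_and_sort(load("path/to/track_predictions.json"))

result = evaluate_track(acc_single_video_mot, gts, preds)
# result["OVERALL"] aggregates IDF1, MOTA, MOTP, FP, FN, IDSw, MT, PT, ML, FM, ...
print(result["OVERALL"])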
Code example #2
def run_rle(
    config: BDD100KConfig,
    task: str,
    gt_frames: List[Frame],
    pred_frames: List[Frame],
    iou_thr: float = 0.5,
    ignore_iof_thr: float = 0.5,
    nproc: int = NPROC,
) -> Result:
    """Run eval for RLE input."""
    results: Optional[Result] = None
    if task == "ins_seg":
        results = sc_eval_ins_seg(
            gt_frames,
            pred_frames,
            config.scalabel,
            nproc=nproc,
        )
    elif task == "seg_track":
        results = sc_eval_seg_track(
            acc_single_video_mots,
            gts=group_and_sort(gt_frames),
            results=group_and_sort(pred_frames),
            config=config.scalabel,
            iou_thr=iou_thr,
            ignore_iof_thr=ignore_iof_thr,
            nproc=nproc,
        )
    elif task in ("sem_seg", "drivable"):
        results = sc_eval_sem_seg(
            gt_frames,
            pred_frames,
            config.scalabel,
            nproc=nproc,
        )
    elif task == "pan_seg":
        results = sc_eval_pan_seg(
            gt_frames,
            pred_frames,
            config.scalabel,
            nproc=nproc,
        )

    assert (results
            is not None), f"{task} evaluation with RLE format not supported!"

    return results
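run_rle expects the frames to already be in memory. A minimal calling sketch, assuming load_bdd100k_config and load behave as in examples #5 and #9 (load returning a dataset with a .frames attribute); import locations and paths are assumptions:

# Sketch only; import locations are assumptions, and run_rle is the function
# defined above.
from bdd100k.common.utils import load_bdd100k_config
from scalabel.label.io import load

config = load_bdd100k_config("ins_seg")  # look up config by task name, as in example #5
gt_frames = load("path/to/ins_seg_gt.json").frames
pred_frames = load("path/to/ins_seg_pred.json").frames

results = run_rle(config, "ins_seg", gt_frames, pred_frames, nproc=4)
print(results)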
Code example #3
def run() -> None:
    """Main."""
    args = parse_args()

    if args.task == "drivable":
        evaluate_drivable(args.gt, args.result)
    elif args.task == "lane_mark":
        evaluate_lane_marking(args.gt, args.result)
    elif args.task == "sem_seg":
        evaluate_segmentation(args.gt, args.result)
    elif args.task == "det":
        evaluate_det(
            args.gt, args.result, args.config, args.out_dir, args.nproc
        )
    elif args.task == "ins_seg":
        evaluate_ins_seg(args.gt, args.result, args.score_file, args.out_dir)
    elif args.task == "box_track":
        evaluate_track(
            acc_single_video_mot,
            gts=group_and_sort(load(args.gt, args.nproc)),
            results=group_and_sort(load(args.result, args.nproc)),
            iou_thr=args.mot_iou_thr,
            ignore_iof_thr=args.mot_ignore_iof_thr,
            nproc=args.nproc,
        )
    elif args.task == "seg_track":
        evaluate_track(
            acc_single_video_mots,
            gts=group_and_sort_files(
                list_files(args.gt, ".png", with_prefix=True)
            ),
            results=group_and_sort_files(
                list_files(args.result, ".png", with_prefix=True)
            ),
            iou_thr=args.mot_iou_thr,
            ignore_iof_thr=args.mot_ignore_iof_thr,
            nproc=args.nproc,
        )
Code example #4
def segtrack_to_bitmasks(
    frames: List[Frame],
    out_base: str,
    ignore_as_class: bool = False,
    remove_ignore: bool = False,
    nproc: int = 4,
) -> None:
    """Converting segmentation tracking poly2d to bitmasks."""
    frames_list = group_and_sort(frames)
    categories, name_mapping, ignore_mapping = load_coco_config(
        mode="track",
        filepath=DEFAULT_COCO_CONFIG,
        ignore_as_class=ignore_as_class,
    )

    out_paths: List[str] = []
    colors_list: List[List[np.ndarray]] = []
    poly2ds_list: List[List[List[Poly2D]]] = []

    logger.info("Preparing annotations for SegTrack to Bitmasks")

    for video_anns in tqdm(frames_list):
        global_instance_id: int = 1
        instance_id_maps: Dict[str, int] = dict()

        video_name = video_anns[0].video_name
        out_dir = os.path.join(out_base, video_name)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)

        for image_anns in video_anns:
            # Bitmask in .png format
            image_name = image_anns.name.replace(".jpg", ".png")
            image_name = os.path.split(image_name)[-1]
            out_path = os.path.join(out_dir, image_name)
            out_paths.append(out_path)

            colors: List[np.ndarray] = []
            poly2ds: List[List[Poly2D]] = []
            colors_list.append(colors)
            poly2ds_list.append(poly2ds)

            labels_ = image_anns.labels
            if labels_ is None or len(labels_) == 0:
                continue

            # Scores higher, rendering later
            if labels_[0].score is not None:
                labels_ = sorted(labels_, key=lambda label: float(label.score))

            for label in labels_:
                if label.poly2d is None:
                    continue

                category_ignored, category_id = process_category(
                    label.category,
                    categories,
                    name_mapping,
                    ignore_mapping,
                    ignore_as_class=ignore_as_class,
                )
                if category_ignored and remove_ignore:
                    continue

                instance_id, global_instance_id = get_bdd100k_instance_id(
                    instance_id_maps, global_instance_id, str(label.id))

                color = set_instance_color(label, category_id, instance_id,
                                           category_ignored)
                colors.append(color)
                poly2ds.append(label.poly2d)

    logger.info("Start Conversion for SegTrack to Bitmasks")
    frames_to_masks(nproc, out_paths, colors_list, poly2ds_list)
Code example #5
def run() -> None:
    """Main."""
    args = parse_args()
    if args.config is not None:
        bdd100k_config = load_bdd100k_config(args.config)
    elif args.task != "lane_mark":
        bdd100k_config = load_bdd100k_config(args.task)
    else:
        raise ValueError("config not specified")

    if args.task in ["det", "box_track", "pose"]:
        gt_frames, result_frames = _load_frames(args.gt, args.result,
                                                bdd100k_config, args.nproc)
        if args.task == "det":
            results: Result = evaluate_det(
                gt_frames,
                result_frames,
                bdd100k_config.scalabel,
                nproc=args.nproc,
            )
        elif args.task == "box_track":
            results = evaluate_track(
                acc_single_video_mot,
                gts=group_and_sort(gt_frames),
                results=group_and_sort(result_frames),
                config=bdd100k_config.scalabel,
                iou_thr=args.iou_thr,
                ignore_iof_thr=args.ignore_iof_thr,
                nproc=args.nproc,
            )
        else:  # pose
            results = evaluate_pose(
                gt_frames,
                result_frames,
                bdd100k_config.scalabel,
                nproc=args.nproc,
            )
    else:
        # for segmentation tasks, determine if the input contains bitmasks or
        # JSON files and call corresponding evaluation function
        res_files = list_files(args.result)
        if len(res_files) > 0 and all(f.endswith(".png") for f in res_files):
            gt_paths = list_files(args.gt, ".png", with_prefix=True)
            pred_paths = list_files(args.result, ".png", with_prefix=True)
            results = run_bitmask(
                bdd100k_config,
                args.task,
                gt_paths,
                pred_paths,
                args.score_file,
                args.iou_thr,
                args.ignore_iof_thr,
                args.quiet,
                args.nproc,
            )
        elif args.result.endswith(".json") or all(
                f.endswith(".json") for f in res_files):
            gt_frames, result_frames = _load_frames(args.gt, args.result,
                                                    bdd100k_config, args.nproc)
            results = run_rle(
                bdd100k_config,
                args.task,
                gt_frames,
                result_frames,
                args.iou_thr,
                args.ignore_iof_thr,
                args.nproc,
            )
        else:
            raise ValueError(
                "Input should either be a directory of only bitmasks or a "
                "JSON file / directory of only JSON files")

    logger.info(results)
    if args.out_file:
        out_folder = os.path.split(args.out_file)[0]
        if not os.path.exists(out_folder) and out_folder:
            os.makedirs(out_folder)
        with open(args.out_file, "w", encoding="utf-8") as fp:
            json.dump(dict(results), fp, indent=2)
Code example #6
def segtrack_to_bitmasks(frames: List[Frame],
                         out_base: str,
                         config: Config,
                         nproc: int = NPROC) -> None:
    """Converting segmentation tracking poly2d to bitmasks."""
    frames_list = group_and_sort(frames)
    img_shape = config.imageSize

    out_paths: List[str] = []
    shapes: List[ImageSize] = []
    colors_list: List[List[NDArrayU8]] = []
    poly2ds_list: List[List[List[Poly2D]]] = []

    categories = get_leaf_categories(config.categories)
    cat_name2id = {cat.name: i + 1 for i, cat in enumerate(categories)}

    logger.info("Preparing annotations for SegTrack to Bitmasks")

    for video_anns in tqdm(frames_list):
        global_instance_id: int = 1
        instance_id_maps: Dict[str, int] = {}

        video_name = video_anns[0].videoName
        out_dir = os.path.join(out_base, video_name)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)

        for image_anns in video_anns:
            # Bitmask in .png format
            image_name = image_anns.name.replace(".jpg", ".png")
            image_name = os.path.split(image_name)[-1]
            out_path = os.path.join(out_dir, image_name)
            out_paths.append(out_path)

            if img_shape is None:
                if image_anns.size is not None:
                    img_shape = image_anns.size
                else:
                    raise ValueError("Image shape not defined!")
            shapes.append(img_shape)

            colors: List[NDArrayU8] = []
            poly2ds: List[List[Poly2D]] = []
            colors_list.append(colors)
            poly2ds_list.append(poly2ds)

            labels_ = image_anns.labels
            if labels_ is None or len(labels_) == 0:
                continue

            # Scores higher, rendering later
            if labels_[0].score is not None:
                labels_ = sorted(labels_, key=lambda label: float(label.score))

            for label in labels_:
                if label.poly2d is None:
                    continue
                if label.category not in cat_name2id:
                    continue

                instance_id, global_instance_id = get_bdd100k_instance_id(
                    instance_id_maps, global_instance_id, label.id)
                category_id = cat_name2id[label.category]
                color = set_instance_color(label, category_id, instance_id)
                colors.append(color)
                poly2ds.append(label.poly2d)

    logger.info("Start Conversion for SegTrack to Bitmasks")
    frames_to_masks(nproc, out_paths, shapes, colors_list, poly2ds_list)
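A minimal usage sketch for this config-based variant, assuming the dataset is loaded as in example #9 (load returning .frames and .config) and using placeholder paths:

# Sketch only; the import location is an assumption, and segtrack_to_bitmasks
# is the function defined above.
from scalabel.label.io import load

dataset = load("path/to/seg_track_labels.json")  # Scalabel poly2d labels
assert dataset.config is not None, "a label config is needed for category ids"

# One sub-directory of bitmask .png files is written per video.
segtrack_to_bitmasks(
    frames=dataset.frames,
    out_base="path/to/bitmask_out",
    config=dataset.config,
    nproc=4,
)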
Code example #7
File: to_coco.py Project: guarin/bdd100k
def bdd100k2coco_seg_track(
    mask_base: str,
    shape: Tuple[int, int],
    frames: List[Frame],
    categories: List[CatType],
    name_mapping: Optional[Dict[str, str]] = None,
    ignore_mapping: Optional[Dict[str, str]] = None,
    ignore_as_class: bool = False,
    remove_ignore: bool = False,
    mask_mode: str = "rle",
    nproc: int = 4,
) -> GtType:
    """Converting BDD100K Segmentation Tracking Set to COCO format."""
    frames_list = group_and_sort(frames)
    videos: List[VidType] = []
    images: List[ImgType] = []
    video_id, image_id, ann_id = 0, 0, 0

    mask_names: List[str] = []
    category_ids_list: List[List[int]] = []
    instance_ids_list: List[List[int]] = []
    annotations_list: List[List[AnnType]] = []

    for video_anns in tqdm(frames_list):
        global_instance_id: int = 1
        instance_id_maps: Dict[str, int] = dict()

        video_name = video_anns[0].video_name
        video_id += 1
        video = VidType(id=video_id, name=video_name)
        videos.append(video)

        for image_anns in video_anns:
            image_id += 1
            image = ImgType(
                video_id=video_id,
                frame_id=image_anns.frame_index,
                id=image_id,
                file_name=os.path.join(video_name, image_anns.name),
                height=shape[0],
                width=shape[1],
            )
            images.append(image)

            mask_name = os.path.join(
                mask_base,
                video_name,
                image_anns.name.replace(".jpg", ".png"),
            )
            mask_names.append(mask_name)

            category_ids: List[int] = []
            instance_ids: List[int] = []
            annotations: List[AnnType] = []

            for label in image_anns.labels:
                if label.poly_2d is None:
                    continue
                category_ignored, category_id = process_category(
                    label.category,
                    categories,
                    name_mapping,
                    ignore_mapping,
                    ignore_as_class=ignore_as_class,
                )
                if remove_ignore and category_ignored:
                    continue

                scalabel_id = str(label.id)
                instance_id, global_instance_id = get_bdd100k_instance_id(
                    instance_id_maps, global_instance_id, scalabel_id)

                ann_id += 1
                iscrowd, ignore = get_bdd100k_object_attributes(
                    label, category_ignored)
                annotation = AnnType(
                    id=ann_id,
                    image_id=image_id,
                    category_id=category_id,
                    instance_id=instance_id,
                    scalabel_id=scalabel_id,
                    iscrowd=iscrowd,
                    ignore=ignore,
                )

                category_ids.append(category_id)
                instance_ids.append(instance_id)
                annotations.append(annotation)

            category_ids_list.append(category_ids)
            instance_ids_list.append(instance_ids)
            annotations_list.append(annotations)

    annotations = bitmask2coco_with_ids_parallel(
        annotations_list,
        mask_names,
        category_ids_list,
        instance_ids_list,
        mask_mode,
        nproc,
    )

    return GtType(
        type="instances",
        categories=categories,
        videos=videos,
        images=images,
        annotations=annotations,
    )
Code example #8
File: to_coco.py Project: bdd100k/bdd100k
def bdd100k2coco_seg_track(mask_base: str,
                           frames: List[Frame],
                           config: Config,
                           nproc: int = NPROC) -> GtType:
    """Converting BDD100K Segmentation Tracking Set to COCO format."""
    video_id, image_id, ann_id = 0, 0, 0
    img_shape = config.imageSize
    frames_list = group_and_sort(frames)
    videos: List[VidType] = []
    images: List[ImgType] = []

    mask_names: List[str] = []
    category_ids_list: List[List[int]] = []
    instance_ids_list: List[List[int]] = []
    annotations_list: List[List[AnnType]] = []

    categories = get_leaf_categories(config.categories)
    cat_name2id = {cat.name: i + 1 for i, cat in enumerate(categories)}

    logger.info("Collecting annotations...")

    for video_anns in tqdm(frames_list):
        global_instance_id: int = 1
        instance_id_maps: Dict[str, int] = {}

        video_name = video_anns[0].videoName
        video_id += 1
        video = VidType(id=video_id, name=video_name)
        videos.append(video)

        for image_anns in video_anns:
            image_id += 1
            if img_shape is None:
                if image_anns.size is not None:
                    img_shape = image_anns.size
                else:
                    raise ValueError("Image shape not defined!")

            image = ImgType(
                video_id=video_id,
                frame_id=image_anns.frameIndex,
                id=image_id,
                file_name=os.path.join(video_name, image_anns.name),
                height=img_shape.height,
                width=img_shape.width,
            )
            if image_anns.url is not None:
                image["coco_url"] = image_anns.url
            images.append(image)

            mask_name = os.path.join(
                mask_base,
                video_name,
                # Bitmask in .png format, image in .jpg format
                image_anns.name.replace(".jpg", ".png"),
            )
            mask_names.append(mask_name)

            category_ids: List[int] = []
            instance_ids: List[int] = []
            annotations: List[AnnType] = []

            for label in image_anns.labels:
                if label.poly2d is None:
                    continue
                if label.category not in cat_name2id:
                    continue

                ann_id += 1
                instance_id, global_instance_id = get_bdd100k_instance_id(
                    instance_id_maps, global_instance_id, label.id)
                category_id = cat_name2id[label.category]
                annotation = AnnType(
                    id=ann_id,
                    image_id=image_id,
                    instance_id=instance_id,
                    category_id=category_id,
                    scalabel_id=label.id,
                    iscrowd=int(check_crowd(label) or check_ignored(label)),
                    ignore=0,
                )

                category_ids.append(category_id)
                instance_ids.append(instance_id)
                annotations.append(annotation)

            category_ids_list.append(category_ids)
            instance_ids_list.append(instance_ids)
            annotations_list.append(annotations)

    annotations = bitmask2coco_with_ids_parallel(
        annotations_list,
        mask_names,
        category_ids_list,
        instance_ids_list,
        nproc,
    )

    return GtType(
        type="instances",
        categories=get_coco_categories(config),
        videos=videos,
        images=images,
        annotations=annotations,
    )
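The returned GtType is a plain dictionary structure, so it can usually be written straight to disk. A short sketch, again assuming the loading conventions of example #9 and placeholder paths:

import json

# Sketch only; the import location is an assumption, and bdd100k2coco_seg_track
# is the function defined above.
from scalabel.label.io import load

dataset = load("path/to/seg_track_labels.json")
coco = bdd100k2coco_seg_track(
    mask_base="path/to/bitmasks",  # bitmask .png files, e.g. from segtrack_to_bitmasks
    frames=dataset.frames,
    config=dataset.config,
    nproc=4,
)
with open("path/to/seg_track_coco.json", "w", encoding="utf-8") as fp:
    json.dump(coco, fp)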
Code example #9
        default=NPROC,
        help="number of processes for mots evaluation",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    dataset = load(args.gt)
    gt_frames, cfg = dataset.frames, dataset.config
    if args.config is not None:
        cfg = load_label_config(args.config)
    if cfg is None:
        raise ValueError("Dataset config is not specified. Please use --config"
                         " to specify a config for this dataset.")
    eval_result = evaluate_seg_track(
        acc_single_video_mots,
        group_and_sort(gt_frames),
        group_and_sort(load(args.result).frames),
        cfg,
        args.iou_thr,
        args.ignore_iof_thr,
        args.ignore_unknown_cats,
        args.nproc,
    )
    logger.info(eval_result)
    logger.info(eval_result.summary())
    if args.out_file:
        with open_write_text(args.out_file) as fp:
            json.dump(eval_result.dict(), fp, indent=2)
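The argument parser at the top of this script is cut off above. A plausible reconstruction, inferred only from the args.* attributes the snippet uses; the flag names, defaults, and help strings are assumptions:

import argparse

from scalabel.common.parallel import NPROC  # assumed import location for NPROC


def parse_arguments() -> argparse.Namespace:
    """Hypothetical parser matching the attributes used in __main__ above."""
    parser = argparse.ArgumentParser(description="MOTS evaluation")
    parser.add_argument("--gt", help="path to ground-truth labels")
    parser.add_argument("--result", help="path to prediction results")
    parser.add_argument("--config", default=None, help="path to a label config")
    parser.add_argument("--iou-thr", type=float, default=0.5)
    parser.add_argument("--ignore-iof-thr", type=float, default=0.5)
    parser.add_argument("--ignore-unknown-cats", action="store_true")
    parser.add_argument("--out-file", default="", help="path for the result JSON")
    parser.add_argument(
        "--nproc",
        type=int,
        default=NPROC,
        help="number of processes for mots evaluation",
    )
    return parser.parse_args()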