# Example 1
class TestScalabelPanSegEvalMissing(unittest.TestCase):
    """Test cases for Scalabel panoptic segmentation with missing preds."""

    cur_dir = os.path.dirname(os.path.abspath(__file__))
    gt_file = f"{cur_dir}/testcases/pan_seg/pan_seg_sample_2.json"
    pred_file = f"{cur_dir}/testcases/pan_seg/pan_seg_preds.json"
    config = load_label_config(
        f"{cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
    )

    def test_summary(self) -> None:
        """Check that the evaluation summary matches the expected scores."""
        gt_frames = load(self.gt_file).frames
        pred_frames = load(self.pred_file).frames
        summary = evaluate_pan_seg(
            gt_frames, pred_frames, self.config, nproc=1
        ).summary()
        # Expected metric values for this fixture (PQ/SQ/RQ overall and
        # per super-category, plus category counts).
        expected = {
            "PQ": 17.780991550508038,
            "PQ/STUFF": 21.362877645706877,
            "PQ/THING": 3.4534471697126734,
            "SQ": 30.815623929417264,
            "SQ/STUFF": 32.04431646856032,
            "SQ/THING": 25.90085377284505,
            "RQ": 23.111111111111104,
            "RQ/STUFF": 27.77777777777777,
            "RQ/THING": 4.444444444444445,
            "NUM": 15,
            "NUM/STUFF": 12,
            "NUM/THING": 3,
        }
        self.assertSetEqual(set(summary.keys()), set(expected.keys()))
        for metric, expected_score in expected.items():
            self.assertAlmostEqual(expected_score, summary[metric])
# Example 2
class TestScalabelPanSegEval(unittest.TestCase):
    """Test cases for Scalabel panoptic segmentation evaluation."""

    cur_dir = os.path.dirname(os.path.abspath(__file__))
    gt_file = f"{cur_dir}/testcases/pan_seg/pan_seg_sample.json"
    pred_file = f"{cur_dir}/testcases/pan_seg/pan_seg_preds.json"
    config = load_label_config(
        f"{cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
    )

    def test_summary(self) -> None:
        """Check that the evaluation summary matches the expected scores."""
        gt_frames = load(self.gt_file).frames
        pred_frames = load(self.pred_file).frames
        summary = evaluate_pan_seg(
            gt_frames, pred_frames, self.config, nproc=1
        ).summary()
        # Expected metric values for this fixture (PQ/SQ/RQ overall and
        # per super-category, plus category counts).
        expected = {
            "PQ": 26.671487325762055,
            "PQ/STUFF": 32.04431646856032,
            "PQ/THING": 5.180170754569009,
            "SQ": 30.815623929417264,
            "SQ/STUFF": 32.04431646856032,
            "SQ/THING": 25.90085377284505,
            "RQ": 34.666666666666664,
            "RQ/STUFF": 41.666666666666664,
            "RQ/THING": 6.666666666666667,
            "NUM": 15,
            "NUM/STUFF": 12,
            "NUM/THING": 3,
        }
        self.assertSetEqual(set(summary.keys()), set(expected.keys()))
        for metric, expected_score in expected.items():
            self.assertAlmostEqual(expected_score, summary[metric])
# Example 3
class TestScalabelPanSegEvalEmpty(unittest.TestCase):
    """Test cases for Scalabel panoptic segmentation on empty test cases."""

    cur_dir = os.path.dirname(os.path.abspath(__file__))
    gt_file = f"{cur_dir}/testcases/pan_seg/pan_seg_sample.json"
    pred_file = f"{cur_dir}/testcases/pan_seg/pan_seg_preds_empty.json"
    config = load_label_config(
        f"{cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
    )

    def test_summary(self) -> None:
        """Check that empty predictions score zero on all quality metrics."""
        gt_frames = load(self.gt_file).frames
        pred_frames = load(self.pred_file).frames
        summary = evaluate_pan_seg(
            gt_frames, pred_frames, self.config, nproc=1
        ).summary()
        # With no predictions every quality metric is 0, while the
        # category counts still reflect the ground truth.
        expected = {
            "PQ": 0.0,
            "PQ/STUFF": 0.0,
            "PQ/THING": 0.0,
            "SQ": 0.0,
            "SQ/STUFF": 0.0,
            "SQ/THING": 0.0,
            "RQ": 0.0,
            "RQ/STUFF": 0.0,
            "RQ/THING": 0.0,
            "NUM": 15,
            "NUM/STUFF": 12,
            "NUM/THING": 3,
        }
        self.assertSetEqual(set(summary.keys()), set(expected.keys()))
        for metric, expected_score in expected.items():
            self.assertAlmostEqual(expected_score, summary[metric])
# Example 4
 def test_combine_stuff_masks(self) -> None:
     """Test combine_stuff_masks on overlapping prediction masks."""
     config = load_label_config(
         f"{self.cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
     )
     frames = load(
         f"{self.cur_dir}/testcases/utils/overlap_masks.json"
     ).frames
     categories = config.categories
     names = [cat.name for cat in categories]
     labels = frames[0].labels
     assert labels is not None
     # Collect RLE masks, category ids, and instance ids per label.
     rles, cids, iids = [], [], []
     for label in labels:
         assert label.category is not None
         assert label.rle is not None
         rles.append(label.rle)
         cids.append(names.index(label.category))
         iids.append(int(label.id))
     out_rle, out_cid, out_iid = combine_stuff_masks(
         rles, cids, iids, categories
     )
     # The fixture collapses to three masks with these category ids,
     # and every output instance id must be unique.
     self.assertEqual(len(out_rle), 3)
     self.assertEqual(len(out_cid), 3)
     self.assertEqual(len(out_iid), 3)
     self.assertEqual(out_cid, [0, 34, 34])
     self.assertEqual(len(out_iid), len(set(out_iid)))
# Example 5
 def test_check_overlap(self) -> None:
     """Test check_overlap on predictions with overlapping masks."""
     config = load_label_config(
         f"{self.cur_dir}/testcases/pan_seg/pan_seg_configs.toml"
     )
     frames = load(
         f"{self.cur_dir}/testcases/utils/overlap_masks.json"
     ).frames
     # The first call reports overlaps; the identical second call must
     # report none, which implies check_overlap resolves the overlaps
     # in the frames in place — TODO confirm against its implementation.
     self.assertTrue(check_overlap(frames, config, nproc=1))
     self.assertFalse(check_overlap(frames, config, nproc=1))
# Example 6
        "--ignore-unknown-cats",
        action="store_true",
        help="ignore unknown categories for panseg evaluation",
    )
    parser.add_argument(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for panseg evaluation",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    # Load ground truth (which may carry its own label config) and the
    # predictions to be evaluated.
    dataset = load(args.gt, args.nproc)
    gts = dataset.frames
    preds = load(args.result).frames
    # An explicit --config argument overrides the dataset's config.
    cfg = (
        load_label_config(args.config)
        if args.config is not None
        else dataset.config
    )
    assert cfg is not None
    eval_result = evaluate_pan_seg(
        gts, preds, cfg, args.ignore_unknown_cats, args.nproc
    )
    logger.info(eval_result)
    logger.info(eval_result.summary())
    # Persist the full result as JSON when an output path is given.
    if args.out_file:
        with open_write_text(args.out_file) as fp:
            json.dump(eval_result.dict(), fp, indent=2)