def _parsing_covering_helper(
    groundtruth_category_array, groundtruth_instance_array,
    predicted_category_array, predicted_instance_array, num_classes,
    max_instances_per_category, ignored_label, offset,
    normalize_by_image_size):
  """Helper function to compute parsing covering."""
  pc = parsing_covering.ParsingCovering(num_classes, ignored_label,
                                        max_instances_per_category, offset,
                                        normalize_by_image_size)
  pc.compare_and_accumulate(groundtruth_category_array,
                            groundtruth_instance_array,
                            predicted_category_array,
                            predicted_instance_array)
  return pc.weighted_iou_per_class, pc.gt_area_per_class

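# A minimal aggregation sketch (not part of the original module): the helper
# above returns per-image numerators (weighted IoU) and denominators
# (ground-truth area) so that Parsing Covering can be aggregated over a
# dataset by summing before dividing, e.g. when the helper is mapped over
# images in parallel. The function name and the `per_image_results` iterable
# of (weighted_iou, gt_area) pairs are hypothetical; `numpy as np` is assumed
# to be imported at module level.
def _aggregate_parsing_covering_example(per_image_results, num_classes):
  weighted_iou_per_class = np.zeros(num_classes, dtype=np.float64)
  gt_area_per_class = np.zeros(num_classes, dtype=np.float64)
  for weighted_iou, gt_area in per_image_results:
    weighted_iou_per_class += weighted_iou
    gt_area_per_class += gt_area
  # Per-class covering is the ratio of summed weighted IoU to summed
  # ground-truth area; classes with no ground-truth area stay at zero.
  covering_per_class = np.zeros(num_classes, dtype=np.float64)
  valid = gt_area_per_class > 0
  covering_per_class[valid] = (
      weighted_iou_per_class[valid] / gt_area_per_class[valid])
  return covering_per_class
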
def _build_metric(metric,
                  num_categories,
                  ignored_label,
                  max_instances_per_category,
                  intersection_offset=None,
                  normalize_by_image_size=True):
  """Creates a metric aggregator object of the given name."""
  if metric == 'pq':
    logging.warning('One should check Panoptic Quality results against the '
                    'official COCO API code. Small numerical differences '
                    '(< 0.1%) can be magnified by rounding.')
    return panoptic_quality.PanopticQuality(num_categories, ignored_label,
                                            max_instances_per_category,
                                            intersection_offset)
  elif metric == 'pc':
    return parsing_covering.ParsingCovering(
        num_categories, ignored_label, max_instances_per_category,
        intersection_offset, normalize_by_image_size)
  else:
    raise ValueError('No implementation for metric "%s"' % metric)

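# Example usage (an illustrative sketch; the array variables below are
# hypothetical and would come from loading a ground-truth/prediction pair):
#
#   metric = _build_metric(
#       'pc',
#       num_categories=3,
#       ignored_label=0,
#       max_instances_per_category=256,
#       intersection_offset=256 * 256)
#   metric.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
#                                 pred_instances)
#   print(metric.result_per_category(), metric.result())
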
def test_matches_expected_normalize_by_size(self):
  pred_classes = test_utils.read_segmentation_with_rgb_color_map(
      'team_pred_class.png', _CLASS_COLOR_MAP)
  pred_instances = test_utils.read_test_image(
      'team_pred_instance.png', mode='L')

  instance_class_map = {
      0: 0,
      47: 1,
      97: 1,
      133: 1,
      150: 1,
      174: 1,
      198: 2,
      215: 1,
      244: 1,
      255: 1,
  }
  gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
      'team_gt_instance.png', instance_class_map)

  pc = parsing_covering.ParsingCovering(
      num_categories=3,
      ignored_label=0,
      max_instances_per_category=256,
      offset=256 * 256,
      normalize_by_image_size=True)
  pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
                            pred_instances)

  np.testing.assert_array_almost_equal(
      pc.weighted_iou_per_class, [0.0, 0.5002088756, 0.03935002196],
      decimal=4)
  np.testing.assert_array_almost_equal(
      pc.gt_area_per_class, [0.0, 0.7135955832, 0.07277746408], decimal=4)
  # Note that the per-category and overall PCs are identical to those without
  # normalization in test_matches_expected, because we only have a single
  # image.
  np.testing.assert_array_almost_equal(
      pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
  self.assertAlmostEqual(pc.result(), 0.6208296732)

def test_matches_expected(self):
  pred_classes = test_utils.read_segmentation_with_rgb_color_map(
      'team_pred_class.png', _CLASS_COLOR_MAP)
  pred_instances = test_utils.read_test_image(
      'team_pred_instance.png', mode='L')

  instance_class_map = {
      0: 0,
      47: 1,
      97: 1,
      133: 1,
      150: 1,
      174: 1,
      198: 2,
      215: 1,
      244: 1,
      255: 1,
  }
  gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
      'team_gt_instance.png', instance_class_map)

  pc = parsing_covering.ParsingCovering(
      num_categories=3,
      ignored_label=0,
      max_instances_per_category=256,
      offset=256 * 256,
      normalize_by_image_size=False)
  pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
                            pred_instances)

  np.testing.assert_array_almost_equal(
      pc.weighted_iou_per_class, [0.0, 39864.14634, 3136], decimal=4)
  np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 56870, 5800])
  np.testing.assert_array_almost_equal(
      pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
  self.assertAlmostEqual(pc.result(), 0.6208296732)

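# As a sanity check on the numbers above (a worked example, not part of the
# original test file): per-category covering is the ratio of weighted IoU to
# ground-truth area, 39864.14634 / 56870 ~= 0.70097 and 3136 / 5800 ~=
# 0.54069, and the overall result averages the categories with nonzero
# ground-truth area: (0.70097... + 0.54069...) / 2 ~= 0.6208296732.
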
def test_totally_wrong(self):
  categories = np.zeros([6, 6], np.uint16)
  gt_instances = np.array([
      [0, 0, 0, 0, 0, 0],
      [0, 1, 0, 0, 1, 0],
      [0, 1, 1, 1, 1, 0],
      [0, 1, 1, 1, 1, 0],
      [0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0],
  ], dtype=np.uint16)
  # The prediction is the complement of the ground truth, so the predicted
  # instance has zero overlap with its ground-truth counterpart.
  pred_instances = 1 - gt_instances

  pc = parsing_covering.ParsingCovering(
      num_categories=2,
      ignored_label=0,
      max_instances_per_category=1,
      offset=16,
      normalize_by_image_size=False)
  pc.compare_and_accumulate(categories, gt_instances, categories,
                            pred_instances)

  np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 0.0])
  np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 10.0])
  np.testing.assert_array_equal(pc.result_per_category(), [0.0, 0.0])
  self.assertEqual(pc.result(), 0.0)

def test_perfect_match(self):
  categories = np.zeros([6, 6], np.uint16)
  instances = np.array([
      [2, 2, 2, 2, 2, 2],
      [2, 4, 4, 4, 4, 2],
      [2, 4, 4, 4, 4, 2],
      [2, 4, 4, 4, 4, 2],
      [2, 4, 4, 2, 2, 2],
      [2, 4, 2, 2, 2, 2],
  ], dtype=np.uint16)

  pc = parsing_covering.ParsingCovering(
      num_categories=3,
      ignored_label=2,
      max_instances_per_category=2,
      offset=16,
      normalize_by_image_size=False)
  # The prediction is identical to the ground truth, so the only valid class
  # gets a covering of 1.0.
  pc.compare_and_accumulate(categories, instances, categories, instances)

  np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 21.0, 0.0])
  np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 21.0, 0.0])
  np.testing.assert_array_equal(pc.result_per_category(), [0.0, 1.0, 0.0])
  self.assertEqual(pc.result(), 1.0)