Code example #1
    def test_merge_accumulates_all_across_instances(self):
        """merge() must add another accumulator's statistics onto this one.

        Two separate PanopticQuality objects each accumulate the same image
        pair twice; after merging, every per-class statistic must equal the
        sum over all four accumulated comparisons.
        """
        categories = np.zeros([6, 6], np.uint16)
        good_det_labels = np.array([
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 2, 2, 2, 2, 1],
            [1, 2, 2, 2, 1, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
        ],
                                   dtype=np.uint16)
        gt_labels = np.array([
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 2, 2, 2, 1],
            [1, 2, 2, 2, 2, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
        ],
                             dtype=np.uint16)

        good_pq = panoptic_quality.PanopticQuality(
            num_categories=1,
            ignored_label=2,
            max_instances_per_category=16,
            offset=16)
        # Plain range() replaces six.moves.range(): the expected values below
        # rely on Python 3 true division, so the Python 2 compat shim is dead
        # weight and both iterate identically here.
        for _ in range(2):
            good_pq.compare_and_accumulate(categories, gt_labels, categories,
                                           good_det_labels)

        bad_det_labels = np.array([
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 1, 2, 2, 1],
            [1, 1, 1, 2, 2, 1],
            [1, 1, 1, 2, 2, 1],
            [1, 1, 1, 1, 1, 1],
        ],
                                  dtype=np.uint16)

        bad_pq = panoptic_quality.PanopticQuality(
            num_categories=1,
            ignored_label=2,
            max_instances_per_category=16,
            offset=16)
        for _ in range(2):
            bad_pq.compare_and_accumulate(categories, gt_labels, categories,
                                          bad_det_labels)

        good_pq.merge(bad_pq)

        # After the merge, good_pq holds 2 good + 2 bad comparisons: each
        # good image contributes iou 28/30 + 6/8 with 2 TPs, and each bad
        # image contributes iou 27/32 with 1 TP, 1 FN and 1 FP.
        np.testing.assert_array_almost_equal(
            good_pq.iou_per_class, [2 * (28 / 30 + 6 / 8) + 2 * (27 / 32)])
        np.testing.assert_array_equal(good_pq.tp_per_class, [2 * 2 + 2])
        np.testing.assert_array_equal(good_pq.fn_per_class, [2])
        np.testing.assert_array_equal(good_pq.fp_per_class, [2])
        self.assertAlmostEqual(good_pq.result(), 0.63177083)
Code example #2
    def test_totally_wrong(self):
        """Predictions disagreeing with groundtruth everywhere score zero."""
        predicted_categories = np.array(
            [
                [0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 1, 0],
                [0, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 0],
                [0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0],
            ],
            dtype=np.uint16)
        # Groundtruth is the exact complement of the prediction.
        groundtruth_categories = 1 - predicted_categories
        instance_map = np.zeros([6, 6], np.uint16)

        metric = panoptic_quality.PanopticQuality(
            num_categories=2,
            ignored_label=2,
            max_instances_per_category=1,
            offset=16)
        metric.compare_and_accumulate(groundtruth_categories, instance_map,
                                      predicted_categories, instance_map)
        # Nothing overlaps, so each class ends up with one missed groundtruth
        # segment (FN) and one spurious predicted segment (FP).
        np.testing.assert_array_equal(metric.iou_per_class, [0.0, 0.0])
        np.testing.assert_array_equal(metric.tp_per_class, [0, 0])
        np.testing.assert_array_equal(metric.fn_per_class, [1, 1])
        np.testing.assert_array_equal(metric.fp_per_class, [1, 1])
        np.testing.assert_array_equal(metric.result_per_category(), [0.0, 0.0])
        self.assertEqual(metric.result(), 0.0)
Code example #3
    def test_wrong_instances(self):
        """Correct semantics with wrongly split instances is penalized."""
        semantic_labels = np.array(
            [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 2, 2, 1, 2, 2],
                [1, 2, 2, 1, 2, 2],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            dtype=np.uint16)
        # The prediction splits class 2 into two instances, while the
        # groundtruth treats it as a single instance (all-zero instance ids).
        predicted_instances = np.array(
            [
                [0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 1],
                [0, 0, 0, 0, 1, 1],
                [0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0],
            ],
            dtype=np.uint16)
        groundtruth_instances = np.zeros([6, 6], dtype=np.uint16)

        metric = panoptic_quality.PanopticQuality(
            num_categories=3,
            ignored_label=0,
            max_instances_per_category=10,
            offset=100)
        metric.compare_and_accumulate(semantic_labels, groundtruth_instances,
                                      semantic_labels, predicted_instances)

        # Class 1 matches perfectly; class 2's single groundtruth instance is
        # missed (1 FN) and both predicted halves count as FPs. Class 0 is
        # the ignored label and stays empty.
        np.testing.assert_array_equal(metric.iou_per_class, [0.0, 1.0, 0.0])
        np.testing.assert_array_equal(metric.tp_per_class, [0, 1, 0])
        np.testing.assert_array_equal(metric.fn_per_class, [0, 0, 1])
        np.testing.assert_array_equal(metric.fp_per_class, [0, 0, 2])
        np.testing.assert_array_equal(metric.result_per_category(), [0, 1, 0])
        self.assertAlmostEqual(metric.result(), 0.5)
Code example #4
    def test_matches_by_iou(self):
        """Instances match only when they overlap sufficiently."""
        well_matched_prediction = np.array(
            [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 2, 2, 2, 2, 1],
                [1, 2, 2, 2, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            dtype=np.uint16)
        groundtruth_labels = np.array(
            [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 2, 2, 2, 1],
                [1, 2, 2, 2, 2, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            dtype=np.uint16)

        metric = panoptic_quality.PanopticQuality(
            num_categories=1,
            ignored_label=2,
            max_instances_per_category=16,
            offset=16)
        metric.compare_and_accumulate(
            np.zeros_like(groundtruth_labels), groundtruth_labels,
            np.zeros_like(well_matched_prediction), well_matched_prediction)

        # Expected overlaps: iou(1, 1) = 28/30 and iou(2, 2) = 6/8;
        # both instances match, so there are 2 TPs and nothing else.
        np.testing.assert_array_almost_equal(metric.iou_per_class,
                                             [28 / 30 + 6 / 8])
        np.testing.assert_array_equal(metric.tp_per_class, [2])
        np.testing.assert_array_equal(metric.fn_per_class, [0])
        np.testing.assert_array_equal(metric.fp_per_class, [0])
        self.assertAlmostEqual(metric.result(), (28 / 30 + 6 / 8) / 2)

        poorly_matched_prediction = np.array(
            [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 2, 2, 1],
                [1, 1, 1, 2, 2, 1],
                [1, 1, 1, 2, 2, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            dtype=np.uint16)

        metric.reset()
        metric.compare_and_accumulate(
            np.zeros_like(groundtruth_labels), groundtruth_labels,
            np.zeros_like(poorly_matched_prediction),
            poorly_matched_prediction)

        # Only instance 1 matches now, with iou(1, 1) = 27/32; instance 2
        # counts once as a missed groundtruth (FN) and once as a spurious
        # detection (FP).
        np.testing.assert_array_almost_equal(metric.iou_per_class, [27 / 32])
        np.testing.assert_array_equal(metric.tp_per_class, [1])
        np.testing.assert_array_equal(metric.fn_per_class, [1])
        np.testing.assert_array_equal(metric.fp_per_class, [1])
        self.assertAlmostEqual(metric.result(), (27 / 32) * (1 / 2))
Code example #5
def _panoptic_quality_helper(
    groundtruth_category_array, groundtruth_instance_array,
    predicted_category_array, predicted_instance_array, num_classes,
    max_instances_per_category, ignored_label, offset):
  """Accumulates one groundtruth/prediction pair and returns raw PQ stats.

  Returns:
    Tuple of (iou_per_class, tp_per_class, fn_per_class, fp_per_class) from a
    freshly constructed PanopticQuality accumulator.
  """
  metric = panoptic_quality.PanopticQuality(
      num_classes, ignored_label, max_instances_per_category, offset)
  metric.compare_and_accumulate(
      groundtruth_category_array, groundtruth_instance_array,
      predicted_category_array, predicted_instance_array)
  return (metric.iou_per_class, metric.tp_per_class, metric.fn_per_class,
          metric.fp_per_class)
Code example #6
def _build_metric(metric,
                  num_categories,
                  ignored_label,
                  max_instances_per_category,
                  intersection_offset=None,
                  normalize_by_image_size=True):
  """Creates a metric aggregator objet of the given name."""
  if metric == 'pq':
    logging.warning('One should check Panoptic Quality results against the '
                    'official COCO API code. Small numerical differences '
                    '(< 0.1%) can be magnified by rounding.')
    return panoptic_quality.PanopticQuality(num_categories, ignored_label,
                                            max_instances_per_category,
                                            intersection_offset)
  elif metric == 'pc':
    return parsing_covering.ParsingCovering(
        num_categories, ignored_label, max_instances_per_category,
        intersection_offset, normalize_by_image_size)
  else:
    raise ValueError('No implementation for metric "%s"' % metric)
Code example #7
    def test_perfect_match(self):
        """Identical prediction and groundtruth yield a perfect score."""
        semantic_map = np.zeros([6, 6], np.uint16)
        instance_map = np.array(
            [
                [1, 1, 1, 1, 1, 1],
                [1, 2, 2, 2, 2, 1],
                [1, 2, 2, 2, 2, 1],
                [1, 2, 2, 2, 2, 1],
                [1, 2, 2, 1, 1, 1],
                [1, 2, 1, 1, 1, 1],
            ],
            dtype=np.uint16)

        metric = panoptic_quality.PanopticQuality(
            num_categories=1,
            ignored_label=2,
            max_instances_per_category=16,
            offset=16)
        # Compare the image against itself: every instance matches exactly.
        metric.compare_and_accumulate(semantic_map, instance_map,
                                      semantic_map, instance_map)
        # Two instances, each with IoU 1.0, so the per-class IoU sum is 2.0.
        np.testing.assert_array_equal(metric.iou_per_class, [2.0])
        np.testing.assert_array_equal(metric.tp_per_class, [2])
        np.testing.assert_array_equal(metric.fn_per_class, [0])
        np.testing.assert_array_equal(metric.fp_per_class, [0])
        np.testing.assert_array_equal(metric.result_per_category(), [1.0])
        self.assertEqual(metric.result(), 1.0)
Code example #8
    def test_matches_expected(self):
        """Checks PQ statistics against golden values on real test images."""
        predicted_classes = test_utils.read_segmentation_with_rgb_color_map(
            'team_pred_class.png', _CLASS_COLOR_MAP)
        predicted_instances = test_utils.read_test_image(
            'team_pred_instance.png', mode='L')

        # Maps values in the groundtruth instance image to semantic classes
        # (presumably gray levels identifying individual instances — the
        # image content itself is not visible here).
        instance_class_map = {
            0: 0,
            47: 1,
            97: 1,
            133: 1,
            150: 1,
            174: 1,
            198: 2,
            215: 1,
            244: 1,
            255: 1,
        }
        groundtruth_instances, groundtruth_classes = (
            test_utils.panoptic_segmentation_with_class_map(
                'team_gt_instance.png', instance_class_map))

        metric = panoptic_quality.PanopticQuality(
            num_categories=3,
            ignored_label=0,
            max_instances_per_category=256,
            offset=256 * 256)
        metric.compare_and_accumulate(groundtruth_classes,
                                      groundtruth_instances,
                                      predicted_classes, predicted_instances)
        np.testing.assert_array_almost_equal(
            metric.iou_per_class, [2.06104, 5.26827, 0.54069], decimal=4)
        np.testing.assert_array_equal(metric.tp_per_class, [1, 7, 1])
        np.testing.assert_array_equal(metric.fn_per_class, [0, 1, 0])
        np.testing.assert_array_equal(metric.fp_per_class, [0, 0, 0])
        np.testing.assert_array_almost_equal(metric.result_per_category(),
                                             [2.061038, 0.702436, 0.54069])
        self.assertAlmostEqual(metric.result(), 0.62156287)