Example #1
# Assumed imports: eval_instance_segmentation_coco is provided by
# chainercv.evaluations; _convert_mask is a module-local helper that
# turns each sample's masks into the boolean (n_instance, H, W) arrays
# the evaluator expects.
from typing import Dict, List

import numpy as np

from chainercv.evaluations import eval_instance_segmentation_coco


def eval_masks(predictions: List[Dict], gts: List[Dict]) -> Dict:
    """Return the COCO evaluation metrics for instance segmentation.

    Parameters
    ----------
    predictions: List[Dict]
        The predictions. The length of the list is the number of samples.
        Each element holds the predictions for one sample. Keys must be
        'masks', 'labels', and 'scores'.

    gts: List[Dict]
        The ground truths. The length of the list is the number of samples.
        Each element holds the ground truths for one sample. Keys must be
        'masks', 'labels', and 'area'.

    Returns
    -------
    eval: Dict
        The results according to the COCO metric.
    """
    pred_masks, pred_labels, pred_scores = [], [], []
    gt_masks, gt_labels, gt_area = [], [], []
    for prediction, gt in zip(predictions, gts):
        pred_masks.append(_convert_mask(prediction['masks']))
        pred_labels.append(np.array(prediction['labels'], dtype=np.int32))
        pred_scores.append(np.array(prediction['scores']))
        gt_masks.append(_convert_mask(gt['masks']))
        gt_labels.append(np.array(gt['labels'], dtype=np.int32))
        gt_area.append(np.array(gt['area'], dtype=np.float32))
    res = eval_instance_segmentation_coco(pred_masks, pred_labels, pred_scores,
                                          gt_masks, gt_labels, gt_area)
    return res
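For orientation, here is a minimal, hypothetical call to eval_masks on one dummy sample; the mask contents, labels, and scores below are illustrative assumptions, not part of the original snippet:

import numpy as np

# One sample: two predicted instances and one ground-truth instance,
# all covering the same 16x16 patch so that they overlap perfectly.
mask = np.zeros((32, 32), dtype=bool)
mask[:16, :16] = True

predictions = [{
    'masks': np.stack([mask, mask]),  # (n_instance, H, W)
    'labels': [0, 1],
    'scores': [0.9, 0.4],
}]
gts = [{
    'masks': mask[np.newaxis],
    'labels': [0],
    'area': [float(mask.sum())],
}]

metrics = eval_masks(predictions, gts)
print(metrics['map/iou=0.50:0.95/area=all/max_dets=100'])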
Example #2
    def my_evaluate(self):

        self._logger.info("Evaluating {} using {} metric.".format(
            self._dataset_name, "coco"))
        pred_masks = [
            self._mask_preds[i]["pred_masks"] for i in self._mask_preds
        ]
        pred_labels = [
            self._mask_preds[i]["pred_labels"] for i in self._mask_preds
        ]
        pred_scores = [
            self._mask_preds[i]["pred_scores"] for i in self._mask_preds
        ]
        gt_masks = [self._mask_preds[i]["gt_masks"] for i in self._mask_preds]
        gt_labels = [
            self._mask_preds[i]["gt_labels"] for i in self._mask_preds
        ]
        result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                                 pred_scores, gt_masks,
                                                 gt_labels)
        # VOC-style AP at IoU=0.75, computed alongside the COCO metrics;
        # note that result2 is not included in the returned dict.
        result2 = eval_instance_segmentation_voc(pred_masks,
                                                 pred_labels,
                                                 pred_scores,
                                                 gt_masks,
                                                 gt_labels,
                                                 iou_thresh=0.75)
        ret = OrderedDict()
        ret["instance_segmentation"] = {
            "mAP": np.mean(result['ap/iou=0.50:0.95/area=all/max_dets=100']),
            "AP50": np.mean(result['ap/iou=0.50/area=all/max_dets=100']),
            "AP75": np.mean(result['ap/iou=0.75/area=all/max_dets=100'])
        }
        return ret
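One caveat: the 'ap/...' entries in the result are per-class arrays that hold nan for classes absent from the ground truth, so np.mean over them can itself come out nan. A safer variant of the block above, under the same result dict, reads the precomputed class-averaged keys instead (Example #5 below verifies that these are nanmeans of the per-class values):

ret["instance_segmentation"] = {
    "mAP": result['map/iou=0.50:0.95/area=all/max_dets=100'],
    "AP50": result['map/iou=0.50/area=all/max_dets=100'],
    "AP75": result['map/iou=0.75/area=all/max_dets=100'],
}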
Example #3
    def evaluate(self):
        target = self._targets['main']
        if self.comm is not None and self.comm.rank != 0:
            apply_to_iterator(target.predict, None, comm=self.comm)
            return {}

        iterator = self._iterators['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(target.predict,
                                                               it,
                                                               comm=self.comm)
        # delete unused iterators explicitly
        del in_values

        pred_masks, pred_labels, pred_scores = out_values

        if len(rest_values) == 2:
            gt_masks, gt_labels = rest_values
            gt_areas = None
            gt_crowdeds = None
        elif len(rest_values) == 4:
            gt_masks, gt_labels, gt_areas, gt_crowdeds =\
                rest_values
        else:
            raise ValueError('the dataset should return '
                             'sets of (img, mask, label) or sets of '
                             '(img, mask, label, area, crowded).')

        result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                                 pred_scores, gt_masks,
                                                 gt_labels, gt_areas,
                                                 gt_crowdeds)

        report = {}
        for key in result.keys():
            if key.startswith('map') or key.startswith('mar'):
                report[key] = result[key]

        if self.label_names is not None:
            for key in result.keys():
                if key.startswith('ap') or key.startswith('ar'):
                    for l, label_name in enumerate(self.label_names):
                        report_key = '{}/{:s}'.format(key, label_name)
                        try:
                            report[report_key] = result[key][l]
                        except IndexError:
                            report[report_key] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
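Two behaviours of eval_instance_segmentation_coco that this evaluator relies on are pinned down by the tests below: when gt_areas is not supplied, the size-specific keys (area=small/medium/large) are simply absent from the result (Example #4), and ground truths flagged via gt_crowdeds are excluded from matching, so a sample whose only ground truth is crowded evaluates to nan across the board (Example #8).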
Example #4
    def test_area_not_supplied(self):
        result = eval_instance_segmentation_coco(self.pred_masks,
                                                 self.pred_labels,
                                                 self.pred_scores,
                                                 self.gt_masks, self.gt_labels)
        self.assertFalse('map/iou=0.50:0.95/area=small/max_dets=100' in result)
        self.assertFalse(
            'map/iou=0.50:0.95/area=medium/max_dets=100' in result)
        self.assertFalse('map/iou=0.50:0.95/area=large/max_dets=100' in result)
Example #5
    def test(self):
        result = eval_instance_segmentation_coco(self.pred_masks,
                                                 self.pred_labels,
                                                 self.pred_scores,
                                                 self.gt_masks, self.gt_labels)
        self.assertEqual(
            result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (3,))
        self.assertTrue(
            np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
        self.assertEqual(
            np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
            result['map/iou=0.50:0.95/area=all/max_dets=100'])
Example #6
        def eval_(out_values, rest_values):
            pred_masks, pred_labels, pred_scores = out_values
            gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values

            result = eval_instance_segmentation_coco(
                pred_masks, pred_labels, pred_scores,
                gt_masks, gt_labels, gt_areas, gt_crowdeds)

            print()
            for area in ('all', 'large', 'medium', 'small'):
                print('mmAP ({}):'.format(area),
                      result['map/iou=0.50:0.95/area={}/max_dets=100'.format(
                          area)])
Example #7
    def test_area_specified(self):
        result = eval_instance_segmentation_coco(self.pred_masks,
                                                 self.pred_labels,
                                                 self.pred_scores,
                                                 self.gt_masks,
                                                 self.gt_labels,
                                                 gt_areas=[[2048]])
        self.assertFalse(
            np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
Example #8
    def test_crowded(self):
        result = eval_instance_segmentation_coco(self.pred_masks,
                                                 self.pred_labels,
                                                 self.pred_scores,
                                                 self.gt_masks,
                                                 self.gt_labels,
                                                 gt_crowdeds=[[True]])
        # When the only ground truth is crowded, nothing is evaluated.
        # In that case, all the results are nan.
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=all/max_dets=100']))
        self.assertTrue(np.isnan(result['map/iou=0.50/area=all/max_dets=100']))
        self.assertTrue(np.isnan(result['map/iou=0.75/area=all/max_dets=100']))
Example #9
    def test_eval_instance_segmentation_coco(self):
        pred_masks = self.result['masks']
        pred_labels = self.result['labels']
        pred_scores = self.result['scores']

        gt_masks = self.dataset['masks']
        gt_labels = self.dataset['labels']
        gt_crowdeds = self.dataset['crowdeds']
        gt_areas = self.dataset['areas']

        result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                                 pred_scores, gt_masks,
                                                 gt_labels, gt_areas,
                                                 gt_crowdeds)

        expected = {
            'map/iou=0.50:0.95/area=all/max_dets=100': 0.32170935,
            'map/iou=0.50/area=all/max_dets=100': 0.56469292,
            'map/iou=0.75/area=all/max_dets=100': 0.30133106,
            'map/iou=0.50:0.95/area=small/max_dets=100': 0.38737403,
            'map/iou=0.50:0.95/area=medium/max_dets=100': 0.31018272,
            'map/iou=0.50:0.95/area=large/max_dets=100': 0.32693391,
            'mar/iou=0.50:0.95/area=all/max_dets=1': 0.27037258,
            'mar/iou=0.50:0.95/area=all/max_dets=10': 0.41759154,
            'mar/iou=0.50:0.95/area=all/max_dets=100': 0.41898236,
            'mar/iou=0.50:0.95/area=small/max_dets=100': 0.46944986,
            'mar/iou=0.50:0.95/area=medium/max_dets=100': 0.37675923,
            'mar/iou=0.50:0.95/area=large/max_dets=100': 0.38147151
        }

        non_existent_labels = np.setdiff1d(
            np.arange(max(result['existent_labels'])),
            result['existent_labels'])
        for key, item in expected.items():
            non_mean_key = key[1:]
            self.assertIsInstance(result[non_mean_key], np.ndarray)
            self.assertEqual(result[non_mean_key].shape, (80, ))
            self.assertTrue(
                np.all(np.isnan(result[non_mean_key][non_existent_labels])))
            np.testing.assert_almost_equal(result[key],
                                           expected[key],
                                           decimal=5)
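The shape (80,) asserted above corresponds to COCO's 80 foreground classes. 'existent_labels' lists the class ids that actually occur in the ground truth; every class outside that set gets nan in the per-class 'ap/...' and 'ar/...' arrays, which the class-averaged 'map/...' and 'mar/...' keys skip via a nanmean (cf. Example #5).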
Example #10
# Assumed imports for this standalone ChainerCV evaluation script:
import argparse

import chainer
from chainer import iterators

from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.experimental.links import FCISResNet101
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('fcis_resnet101', ),
                        default='fcis_resnet101')
    parser.add_argument('--pretrained-model', default=None)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_resnet101':
        if args.pretrained_model is None:
            args.pretrained_model = 'coco'
        proposal_creator_params = FCISResNet101.proposal_creator_params
        proposal_creator_params['min_size'] = 2
        model = FCISResNet101(
            n_fg_class=len(coco_instance_segmentation_label_names),
            anchor_scales=(4, 8, 16, 32),
            pretrained_model=args.pretrained_model,
            proposal_creator_params=proposal_creator_params)

    model.use_preset('coco_evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOInstanceSegmentationDataset(split='minival',
                                              year='2014',
                                              use_crowded=True,
                                              return_crowded=True,
                                              return_area=True)
    iterator = iterators.SerialIterator(dataset,
                                        1,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                             pred_scores, gt_masks, gt_labels,
                                             gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))


if __name__ == '__main__':
    main()
Example #11
# Assumed imports; FCISPSROIAlignResNet101 is presumably a project-local
# FCIS variant (it is not part of the ChainerCV releases).
import argparse

import chainer
from chainer import iterators

from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    proposal_creator_params = {
        'nms_thresh': 0.7,
        'n_train_pre_nms': 12000,
        'n_train_post_nms': 2000,
        'n_test_pre_nms': 6000,
        'n_test_post_nms': 1000,
        'force_cpu_nms': False,
        'min_size': 0
    }

    model = FCISPSROIAlignResNet101(
        n_fg_class=len(coco_instance_segmentation_label_names),
        min_size=800,
        max_size=1333,
        anchor_scales=(2, 4, 8, 16, 32),
        pretrained_model=args.pretrained_model,
        proposal_creator_params=proposal_creator_params)

    model.use_preset('coco_evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOInstanceSegmentationDataset(split='minival',
                                              use_crowded=True,
                                              return_crowded=True,
                                              return_area=True)
    iterator = iterators.SerialIterator(dataset,
                                        1,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                             pred_scores, gt_masks, gt_labels,
                                             gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))


if __name__ == '__main__':
    main()