Example 1
    def test_eval_instance_segmentation_voc(self):
        pred_masks = self.result['masks']
        pred_labels = self.result['labels']
        pred_scores = self.result['scores']

        gt_masks = self.dataset['masks']
        gt_labels = self.dataset['labels']

        result = eval_instance_segmentation_voc(pred_masks,
                                                pred_labels,
                                                pred_scores,
                                                gt_masks,
                                                gt_labels,
                                                use_07_metric=True)

        # reference values calculated with the original Python implementation
        expected = [
            0.159091, 0.945455, 0.679545, 0.378293, 0.430303, 1.000000,
            0.581055, 0.905195, 0.415757, 0.909091, 1.000000, 0.697256,
            0.856061, 0.681818, 0.793274, 0.362141, 0.948052, 0.545455,
            0.840909, 0.618182
        ]

        np.testing.assert_almost_equal(result['ap'], expected, decimal=5)
        np.testing.assert_almost_equal(result['map'],
                                       np.nanmean(expected),
                                       decimal=5)
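
The test above relies on a prepared fixture dataset. As a minimal, self-contained sketch (not part of the original test suite; the synthetic masks and the 0.9 score are illustrative), the call below shows the input format eval_instance_segmentation_voc expects: per-image boolean mask stacks of shape (R, H, W), integer label arrays of shape (R,), and float confidence scores.

import numpy as np
from chainercv.evaluations import eval_instance_segmentation_voc

# One ground-truth instance and one slightly shifted prediction on a 32x32 image.
gt_mask = np.zeros((1, 32, 32), dtype=bool)
gt_mask[0, 8:24, 8:24] = True
pred_mask = np.zeros((1, 32, 32), dtype=bool)
pred_mask[0, 10:24, 10:24] = True

result = eval_instance_segmentation_voc(
    [pred_mask],                          # iterable of (R, H, W) bool arrays
    [np.array([0], dtype=np.int32)],      # iterable of (R,) label arrays
    [np.array([0.9], dtype=np.float32)],  # iterable of (R,) score arrays
    [gt_mask],
    [np.array([0], dtype=np.int32)],
    iou_thresh=0.5, use_07_metric=True)

print(result['ap'])   # per-class AP; nan for classes without ground truth
print(result['map'])  # nanmean of the per-class APs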
Example 2
    def my_evaluate(self):

        self._logger.info("Evaluating {} using {} metric.".format(
            self._dataset_name, "coco"))
        # Gather the per-image predictions and ground truth accumulated
        # during inference into the list-of-arrays format chainercv expects.
        pred_masks = [
            self._mask_preds[i]["pred_masks"] for i in self._mask_preds
        ]
        pred_labels = [
            self._mask_preds[i]["pred_labels"] for i in self._mask_preds
        ]
        pred_scores = [
            self._mask_preds[i]["pred_scores"] for i in self._mask_preds
        ]
        gt_masks = [self._mask_preds[i]["gt_masks"] for i in self._mask_preds]
        gt_labels = [
            self._mask_preds[i]["gt_labels"] for i in self._mask_preds
        ]
        result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                                 pred_scores, gt_masks,
                                                 gt_labels)
        # VOC-style AP at IoU 0.75, computed as a cross-check; note that it
        # is not included in the returned dictionary.
        result2 = eval_instance_segmentation_voc(pred_masks,
                                                 pred_labels,
                                                 pred_scores,
                                                 gt_masks,
                                                 gt_labels,
                                                 iou_thresh=0.75)
        ret = OrderedDict()
        print(result)  # debug dump of the full COCO result dict
        # np.nanmean guards against nan APs for classes absent from the
        # ground truth, which would otherwise poison a plain np.mean.
        ret["instance_segmentation"] = {
            "mAP": np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100']),
            "AP50": np.nanmean(result['ap/iou=0.50/area=all/max_dets=100']),
            "AP75": np.nanmean(result['ap/iou=0.75/area=all/max_dets=100'])
        }
        return ret
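
Note that result2, the VOC-style evaluation at iou_thresh=0.75, is computed but never folded into the returned dictionary; it presumably serves only as a cross-check against the COCO AP75 figure. Recent chainercv releases also expose precomputed means under 'map/...' keys (e.g. 'map/iou=0.50:0.95/area=all/max_dets=100'), so the np.nanmean calls above are a convenience rather than a necessity.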
Example 3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('fcis_psroi_align_resnet101', ),
                        default='fcis_psroi_align_resnet101')
    parser.add_argument('--pretrained-model')
    parser.add_argument('--iou-thresh', type=float, default=0.5)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_psroi_align_resnet101':
        if args.pretrained_model:
            model = FCISPSROIAlignResNet101(
                n_fg_class=len(sbd_instance_segmentation_label_names),
                pretrained_model=args.pretrained_model)
        else:
            model = FCISPSROIAlignResNet101(pretrained_model='sbd')

    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = SBDInstanceSegmentationDataset(split='val')
    iterator = iterators.SerialIterator(dataset,
                                        1,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(pred_masks,
                                            pred_labels,
                                            pred_scores,
                                            gt_masks,
                                            gt_labels,
                                            args.iou_thresh,
                                            use_07_metric=True)

    print('')
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(sbd_instance_segmentation_label_names):
        # nan (no ground-truth instances of this class) is truthy, so test
        # for it explicitly instead of relying on `if result['ap'][l]:`.
        if not np.isnan(result['ap'][l]):
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
Example 4
def eval_(out_values, rest_values):
    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, use_07_metric=True)

    print('')
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(sbd_instance_segmentation_label_names):
        # As above, test for nan explicitly: a class without ground truth
        # yields nan AP, and nan is truthy.
        if not np.isnan(result['ap'][l]):
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
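
eval_ consumes the (out_values, rest_values) pair produced by chainercv.utils.apply_to_iterator, so the per-class report from Example 3 can be reused inside a training script. A hedged usage sketch, assuming model and iterator are bound as in Example 3:

in_values, out_values, rest_values = apply_to_iterator(model.predict, iterator)
del in_values  # only predictions and ground truth are needed
eval_(out_values, rest_values)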
Example 5
    def evaluate(self):
        target = self._targets['main']
        # Under MPI, non-root ranks only take part in distributed prediction
        # and report nothing; the root rank aggregates and evaluates.
        if self.comm is not None and self.comm.rank != 0:
            apply_to_iterator(target.predict, None, comm=self.comm)
            return {}

        iterator = self._iterators['main']

        # Reuse resettable iterators in place; otherwise evaluate on a
        # shallow copy so the original iterator state is preserved.
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(target.predict,
                                                               it,
                                                               comm=self.comm)
        # delete unused iterators explicitly
        del in_values

        pred_masks, pred_labels, pred_scores = out_values
        gt_masks, gt_labels = rest_values

        result = eval_instance_segmentation_voc(
            pred_masks,
            pred_labels,
            pred_scores,
            gt_masks,
            gt_labels,
            iou_thresh=self.iou_thresh,
            use_07_metric=self.use_07_metric)

        report = {'map': result['map']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
                except IndexError:
                    # fewer evaluated classes than label names: report nan
                    report['ap/{:s}'.format(label_name)] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
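
The evaluate() method above follows the contract of a Chainer Evaluator extension. A typical registration, sketched with conventional chainercv names (val_iterator, model and trainer are assumed to exist):

from chainercv.extensions import InstanceSegmentationVOCEvaluator

# Run VOC-style instance segmentation evaluation once per epoch; the values
# reported above appear under the extension's name prefix in the trainer log.
evaluator = InstanceSegmentationVOCEvaluator(
    val_iterator, model,
    iou_thresh=0.5, use_07_metric=True,
    label_names=sbd_instance_segmentation_label_names)
trainer.extend(evaluator, trigger=(1, 'epoch'))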