def test_eval_detection_coco(self):
    pred_bboxes = self.result['bboxes']
    pred_labels = self.result['labels']
    pred_scores = self.result['scores']

    gt_bboxes = self.dataset['bboxes']
    gt_labels = self.dataset['labels']
    gt_areas = self.dataset['areas']
    gt_crowdeds = self.dataset['crowdeds']

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    expected = {
        'map/iou=0.50:0.95/area=all/max_dets=100': 0.5069852,
        'map/iou=0.50/area=all/max_dets=100': 0.69937725,
        'map/iou=0.75/area=all/max_dets=100': 0.57538619,
        'map/iou=0.50:0.95/area=small/max_dets=100': 0.58562572,
        'map/iou=0.50:0.95/area=medium/max_dets=100': 0.51939969,
        'map/iou=0.50:0.95/area=large/max_dets=100': 0.5013979,
        'mar/iou=0.50:0.95/area=all/max_dets=1': 0.38919373,
        'mar/iou=0.50:0.95/area=all/max_dets=10': 0.59606053,
        'mar/iou=0.50:0.95/area=all/max_dets=100': 0.59773394,
        'mar/iou=0.50:0.95/area=small/max_dets=100': 0.63981096,
        'mar/iou=0.50:0.95/area=medium/max_dets=100': 0.5664206,
        'mar/iou=0.50:0.95/area=large/max_dets=100': 0.5642906,
    }

    for key, expected_value in expected.items():
        non_mean_key = key[1:]  # 'map...' -> 'ap...', 'mar...' -> 'ar...'
        self.assertIsInstance(result[non_mean_key], np.ndarray)
        self.assertEqual(result[non_mean_key].shape, (76,))
        np.testing.assert_almost_equal(result[key], expected_value, decimal=5)
def eval_boxes(predictions: List[Dict], gts: List[Dict]) -> Dict:
    """Returns the COCO evaluation metrics for box detection.

    Parameters
    ----------
    predictions: List[Dict]
        The predictions. The length of the list is the number of samples.
        Each element is a dict of predictions for one sample, with keys
        'boxes', 'scores', and 'labels'.

    gts: List[Dict]
        The ground truths. The length of the list is the number of samples.
        Each element is a dict of ground-truth annotations for one sample,
        with keys 'boxes', 'labels', and 'area'.

    Returns
    -------
    eval: Dict
        The results according to the COCO metric. The entry at IoU=0.5
        corresponds to the VOC metric.
    """
    pred_boxes, pred_labels, pred_scores = [], [], []
    gt_boxes, gt_labels, gt_areas = [], [], []
    for prediction, gt in zip(predictions, gts):
        pred_boxes.append(_convert_box(prediction['boxes']))
        pred_labels.append(np.array(prediction['labels'], dtype=np.int32))
        pred_scores.append(np.array(prediction['scores']))
        gt_boxes.append(_convert_box(gt['boxes']))
        gt_labels.append(np.array(gt['labels'], dtype=np.int32))
        gt_areas.append(np.array(gt['area'], dtype=np.float32))
    res = eval_detection_coco(pred_boxes, pred_labels, pred_scores,
                              gt_boxes, gt_labels, gt_areas)
    return res
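The helper `_convert_box` is not shown in this snippet. A minimal sketch, assuming the incoming boxes use the (x_min, y_min, x_max, y_max) order common to torchvision-style models, while ChainerCV expects (y_min, x_min, y_max, x_max):

import numpy as np

def _convert_box(boxes):
    # Hypothetical helper: reorder (x_min, y_min, x_max, y_max) rows into
    # ChainerCV's (y_min, x_min, y_max, x_max) convention.
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    return boxes[:, [1, 0, 3, 2]]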
def eva_coco(dataset, func, limit=1000, preset='evaluate'):
    total = limit if limit else len(dataset)
    orig_ids = dataset.ids.copy()
    dataset.ids = dataset.ids[:total]
    iterator = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)
    in_values, out_values, rest_values = apply_to_iterator(
        func, iterator, hook=ProgressHook(len(dataset)))
    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values
    result = eval_detection_coco(pred_bboxes, pred_labels, pred_scores,
                                 gt_bboxes, gt_labels, gt_areas, gt_crowdeds)
    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]
    print('')
    results = []
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
        results.append(result[key])
    dataset.ids = orig_ids
    return results
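A usage sketch for `eva_coco`; the detector and dataset setup below is borrowed from the standalone scripts further down, and any model exposing a ChainerCV-style `predict` method should work:

from chainercv.datasets import COCOBboxDataset, coco_bbox_label_names
from chainercv.links import FasterRCNNFPNResNet50

model = FasterRCNNFPNResNet50(
    n_fg_class=len(coco_bbox_label_names), pretrained_model='coco')
model.use_preset('evaluate')
dataset = COCOBboxDataset(split='minival', use_crowded=True,
                          return_area=True, return_crowded=True)
scores = eva_coco(dataset, model.predict, limit=1000)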
def test_area_not_supplied(self):
    result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                 self.pred_scores, self.gt_bboxes,
                                 self.gt_labels)
    self.assertNotIn('map/iou=0.50:0.95/area=small/max_dets=100', result)
    self.assertNotIn('map/iou=0.50:0.95/area=medium/max_dets=100', result)
    self.assertNotIn('map/iou=0.50:0.95/area=large/max_dets=100', result)
Example #5
    def evaluate(self):
        target = self._targets['main']
        if self.comm is not None and self.comm.rank != 0:
            apply_to_iterator(target.predict, None, comm=self.comm)
            return {}

        iterator = self._iterators['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(target.predict,
                                                               it,
                                                               comm=self.comm)
        # delete unused iterators explicitly
        del in_values

        pred_bboxes, pred_labels, pred_scores = out_values

        if len(rest_values) == 2:
            gt_bboxes, gt_labels = rest_values
            gt_areas = None
            gt_crowdeds = None
        elif len(rest_values) == 4:
            gt_bboxes, gt_labels, gt_areas, gt_crowdeds =\
                rest_values
        else:
            raise ValueError('the dataset should return '
                             'sets of (img, bbox, label) or sets of '
                             '(img, bbox, label, area, crowded).')

        result = eval_detection_coco(pred_bboxes, pred_labels, pred_scores,
                                     gt_bboxes, gt_labels, gt_areas,
                                     gt_crowdeds)

        report = {}
        for key in result.keys():
            if key.startswith('map') or key.startswith('mar'):
                report[key] = result[key]

        if self.label_names is not None:
            for key in result.keys():
                if key.startswith('ap') or key.startswith('ar'):
                    for l, label_name in enumerate(self.label_names):
                        report_key = '{}/{:s}'.format(key, label_name)
                        try:
                            report[report_key] = result[key][l]
                        except IndexError:
                            report[report_key] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
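The branch on `len(rest_values)` above encodes the two dataset signatures this evaluator accepts. For reference, both variants can be obtained from `COCOBboxDataset` (constructor flags as used in the scripts below):

from chainercv.datasets import COCOBboxDataset

# Yields (img, bbox, label) per sample; areas and crowded flags stay None.
plain = COCOBboxDataset(split='minival')
# Yields (img, bbox, label, area, crowded), passed through to eval_detection_coco.
full = COCOBboxDataset(split='minival', use_crowded=True,
                       return_area=True, return_crowded=True)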
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--model', choices=('resnet50', 'resnet101'),
                        default='resnet50')  # default keeps `model` bound below
    parser.add_argument(
        '--mean', choices=('chainercv', 'detectron'), default='chainercv')
    parser.add_argument('--batchsize', type=int, default=1)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--pretrained-model')
    group.add_argument('--snapshot')
    args = parser.parse_args()

    if args.model == 'resnet50':
        model = FasterRCNNFPNResNet50(n_fg_class=len(coco_bbox_label_names),
                                      mean=args.mean)
    elif args.model == 'resnet101':
        model = FasterRCNNFPNResNet101(n_fg_class=len(coco_bbox_label_names),
                                       mean=args.mean)

    if args.pretrained_model:
        chainer.serializers.load_npz(args.pretrained_model, model)
    elif args.snapshot:
        chainer.serializers.load_npz(
            args.snapshot, model, path='updater/model:main/model/')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = COCOBboxDataset(
        split='minival',
        use_crowded=True,
        return_area=True,
        return_crowded=True)
    iterator = iterators.MultithreadIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_area, gt_crowded = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_area, gt_crowded)

    print()
    for area in ('all', 'large', 'medium', 'small'):
        print('mmAP ({}):'.format(area),
              result['map/iou=0.50:0.95/area={}/max_dets=100'.format(area)])
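A possible invocation of the script above (the file name and snapshot path are hypothetical):

# python eval_coco_fpn.py --model resnet50 --gpu 0
# python eval_coco_fpn.py --model resnet101 --gpu 0 \
#     --snapshot result/snapshot_iter_90000.npz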
def test(self):
    result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                 self.pred_scores, self.gt_bboxes,
                                 self.gt_labels)
    self.assertEqual(
        result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (3,))
    self.assertTrue(
        np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
    self.assertEqual(
        np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
        result['map/iou=0.50:0.95/area=all/max_dets=100'])
Example #8
def eval_coco(out_values, rest_values):
    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_area, gt_crowded = rest_values

    result = eval_detection_coco(pred_bboxes, pred_labels, pred_scores,
                                 gt_bboxes, gt_labels, gt_area, gt_crowded)

    print()
    for area in ('all', 'large', 'medium', 'small'):
        print('mmAP ({}):'.format(area),
              result['map/iou=0.50:0.95/area={}/max_dets=100'.format(area)])
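A sketch of how `eval_coco` plugs into the prediction loop used throughout these examples, assuming `model`, `dataset`, and `iterator` are set up as in Example #6:

from chainercv.utils import apply_to_iterator, ProgressHook

in_values, out_values, rest_values = apply_to_iterator(
    model.predict, iterator, hook=ProgressHook(len(dataset)))
del in_values  # the input images are not needed for evaluation
eval_coco(out_values, rest_values)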
def test_area_specified(self):
    result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                 self.pred_scores,
                                 self.gt_bboxes, self.gt_labels,
                                 gt_areas=[[2048]])
    self.assertFalse(
        np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
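The assertions follow from the default COCO area bands (pycocotools defaults, which this evaluation inherits); an area of 2048 falls in the medium band:

# small:  area < 32 ** 2             (< 1024)
# medium: 32 ** 2 <= area < 96 ** 2  (1024 to 9216) -> 2048 lands here
# large:  area >= 96 ** 2            (>= 9216)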
def test_crowded(self):
    result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                 self.pred_scores,
                                 self.gt_bboxes, self.gt_labels,
                                 gt_crowdeds=[[True]])
    # When the only ground truth is crowded, nothing is evaluated.
    # In that case, all the results are nan.
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
def test_area_default(self):
    result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                 self.pred_scores,
                                 self.gt_bboxes, self.gt_labels)
    # Test that the original bbox area is used, which is 90.
    # In that case, the ground truth bounding box is assigned to the
    # "small" segment, so the scores for "medium" and "large" are nan.
    self.assertFalse(
        np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
    self.assertTrue(
        np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
Example #13
def eval_boxes(predictions, dataset, indices):
    """Returns the COCO evaluation metrics for box detection.

    Parameters
    ----------
    predictions
        The predictions. They are transposed inside the function so that
        each element corresponds to one sample; each per-sample prediction
        is converted to boxes, labels, and scores via `convert_to_box`.

    dataset
        The dataset providing the ground truth; `dataset[i]` is expected
        to be a pair of (image, annotation array).

    indices: List[int]
        The dataset indices that the predictions correspond to.

    Returns
    -------
    eval: Dict
        The results according to the COCO metric. The entry at IoU=0.5
        corresponds to the VOC metric.
    """
    pred_boxes, pred_labels, pred_scores = [], [], []
    gt_boxes, gt_labels = [], []
    predictions = list(zip(*predictions))
    gts = [dataset[i][1] for i in indices]
    for pred, gt in zip(predictions, gts):
        tmp_boxes, tmp_labels, tmp_scores = convert_to_box(pred)
        pred_boxes.append(tmp_boxes)
        pred_labels.append(tmp_labels)
        pred_scores.append(tmp_scores)
        tmp_boxes, tmp_labels = convert_gt(np.array(gt))
        gt_labels.append(tmp_labels)
        gt_boxes.append(tmp_boxes)
    res = eval_detection_coco(pred_boxes, pred_labels, pred_scores, gt_boxes,
                              gt_labels)
    return res
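The helpers `convert_to_box` and `convert_gt` are not shown in this snippet. A minimal sketch, under the assumption that each per-sample prediction is a per-class list of (score, x_min, y_min, x_max, y_max) rows and that a ground-truth row is (x_min, y_min, x_max, y_max, label):

import numpy as np

def convert_to_box(pred):
    # Hypothetical helper: flatten per-class detections into ChainerCV-ordered
    # (y_min, x_min, y_max, x_max) boxes plus label and score arrays.
    boxes, labels, scores = [], [], []
    for class_id, dets in enumerate(pred):
        for det in dets:
            scores.append(det[0])
            boxes.append([det[2], det[1], det[4], det[3]])
            labels.append(class_id)
    return (np.array(boxes, dtype=np.float32).reshape(-1, 4),
            np.array(labels, dtype=np.int32),
            np.array(scores, dtype=np.float32))

def convert_gt(gt):
    # Hypothetical helper: reorder (x_min, y_min, x_max, y_max, label) rows
    # into ChainerCV's (y_min, x_min, y_max, x_max) boxes plus labels.
    gt = np.asarray(gt, dtype=np.float32).reshape(-1, 5)
    return gt[:, [1, 0, 3, 2]], gt[:, 4].astype(np.int32)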