Example #1
import logging
import numpy as np
from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval  # assumed import path for the VOC evaluator

logger = logging.getLogger(__name__)

def eval_results(results,
                 feed,
                 metric,
                 num_classes,
                 resolution=None,
                 is_bbox_normalized=False,
                 output_file=None):
    """Evaluation for evaluation program results"""
    if metric == 'COCO':
        from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval
        anno_file = getattr(feed.dataset, 'annotation', None)
        with_background = getattr(feed, 'with_background', True)
        if 'proposal' in results[0]:
            output = 'proposal.json'
            if output_file:
                output = '{}_proposal.json'.format(output_file)
            proposal_eval(results, anno_file, output)
        if 'bbox' in results[0]:
            output = 'bbox.json'
            if output_file:
                output = '{}_bbox.json'.format(output_file)
            bbox_eval(results, anno_file, output, with_background)
        if 'mask' in results[0]:
            output = 'mask.json'
            if output_file:
                output = '{}_mask.json'.format(output_file)
            mask_eval(results, anno_file, output, resolution)
    else:
        if 'accum_map' in results[-1]:
            res = np.mean(results[-1]['accum_map'][0])
            logger.info('mAP: {:.2f}'.format(res * 100.))
        elif 'bbox' in results[0]:
            voc_bbox_eval(results,
                          num_classes,
                          is_bbox_normalized=is_bbox_normalized)
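The VOC-style branch of this variant only inspects the last element of results, so it can be exercised without COCO annotations; the dict layout below is inferred from the code and the values are purely illustrative.

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)

# 'accum_map' is assumed to carry accumulated mAP values from the eval
# program; the function averages the first entry and logs it as a percentage.
results = [{'accum_map': [np.array([0.6524])]}]
eval_results(results, feed=None, metric='VOC', num_classes=21)
# -> logs "mAP: 65.24"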
Example #2
import logging
import os

import mmcv
import numpy as np
from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval  # assumed import path for the VOC evaluator

logger = logging.getLogger(__name__)

def eval_results(results,
                 metric,
                 num_classes,
                 resolution=None,
                 is_bbox_normalized=False,
                 output_directory=None,
                 map_type='11point',
                 dataset=None,
                 save_only=False,
                 test_scales=None):
    """Evaluation for evaluation program results"""
    box_ap_stats = []
    if metric == 'COCO':
        from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval
        if output_directory:
            mmcv.mkdir_or_exist(output_directory)
        anno_file = dataset.get_anno()
        with_background = dataset.with_background
        if 'proposal' in results[0]:
            output = 'proposal.json'
            if output_directory:
                output = os.path.join(output_directory, 'proposal.json')
            proposal_eval(results, anno_file, output)
        if 'bbox' in results[0]:
            output = 'bbox_scale{}.json'.format(test_scales) \
                if test_scales else 'bbox.json'
            if output_directory:
                output = os.path.join(output_directory, output)

            box_ap_stats = bbox_eval(results,
                                     anno_file,
                                     output,
                                     with_background,
                                     is_bbox_normalized=is_bbox_normalized,
                                     save_only=save_only)

        if 'mask' in results[0]:
            output = 'mask.json'
            if output_directory:
                output = os.path.join(output_directory, 'mask.json')
            mask_eval(results,
                      anno_file,
                      output,
                      resolution,
                      save_only=save_only)
    else:
        if 'accum_map' in results[-1]:
            res = np.mean(results[-1]['accum_map'][0])
            logger.info('mAP: {:.2f}'.format(res * 100.))
            box_ap_stats.append(res * 100.)
        elif 'bbox' in results[0]:
            box_ap = voc_bbox_eval(results,
                                   num_classes,
                                   is_bbox_normalized=is_bbox_normalized,
                                   map_type=map_type)
            box_ap_stats.append(box_ap)
    return box_ap_stats
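A hypothetical call for the COCO path of this variant: the dataset argument only needs a get_anno() method and a with_background attribute, and save_only=True writes the JSON files under output_directory without scoring them. The stub below is an assumption for illustration and runs only where ppdet and mmcv are installed.

class _StubDataset:
    """Minimal stand-in for the dataset object this variant reads from."""
    with_background = True

    def get_anno(self):
        return 'annotations/instances_val2017.json'  # hypothetical path

# With an empty per-batch dict no COCO branch fires, so this call just
# validates the wiring (and creates eval_out/); real runs pass the
# results list collected from the evaluation program.
results = [{}]
box_ap_stats = eval_results(results,
                            metric='COCO',
                            num_classes=81,
                            output_directory='eval_out',
                            save_only=True,
                            dataset=_StubDataset())  # returns []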
Example #3
import logging
import os

import numpy as np
from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval  # assumed import path for the VOC evaluator

logger = logging.getLogger(__name__)

def eval_results(results,
                 feed,
                 metric,
                 num_classes,
                 resolution=None,
                 is_bbox_normalized=False,
                 output_directory=None,
                 map_type='11point'):
    """Evaluation for evaluation program results"""
    box_ap_stats = []
    if metric == 'COCO':
        from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval
        anno_file = getattr(feed.dataset, 'annotation', None)
        with_background = getattr(feed, 'with_background', True)
        if 'proposal' in results[0]:
            output = 'proposal.json'
            if output_directory:
                output = os.path.join(output_directory, 'proposal.json')
            proposal_eval(results, anno_file, output)
        if 'bbox' in results[0]:
            output = 'bbox.json'
            if output_directory:
                output = os.path.join(output_directory, 'bbox.json')

            box_ap_stats = bbox_eval(results,
                                     anno_file,
                                     output,
                                     with_background,
                                     is_bbox_normalized=is_bbox_normalized)

        if 'mask' in results[0]:
            output = 'mask.json'
            if output_directory:
                output = os.path.join(output_directory, 'mask.json')
            mask_eval(results, anno_file, output, resolution)
    else:
        if 'accum_map' in results[-1]:
            res = np.mean(results[-1]['accum_map'][0])
            logger.info('mAP: {:.2f}'.format(res * 100.))
            box_ap_stats.append(res * 100.)
        elif 'bbox' in results[0]:
            box_ap = voc_bbox_eval(results,
                                   num_classes,
                                   is_bbox_normalized=is_bbox_normalized,
                                   map_type=map_type)
            box_ap_stats.append(box_ap)
    return box_ap_stats
Example #4
import logging

import numpy as np

logger = logging.getLogger(__name__)

def eval_results(results, feed, metric, resolution=None, output_file=None):
    """Evaluation for evaluation program results"""
    if metric == 'COCO':
        from ppdet.utils.coco_eval import bbox_eval, mask_eval
        anno_file = getattr(feed.dataset, 'annotation', None)
        with_background = getattr(feed, 'with_background', True)
        output = 'bbox.json'
        if output_file:
            output = '{}_bbox.json'.format(output_file)
        bbox_eval(results, anno_file, output, with_background)
        if 'mask' in results[0]:
            output = 'mask.json'
            if output_file:
                output = '{}_mask.json'.format(output_file)
            mask_eval(results, anno_file, output, resolution)
    else:
        res = np.mean(results[-1]['accum_map'][0])
        logger.info('Test mAP: {}'.format(res))
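Unlike Example #1, the non-COCO branch here indexes results[-1]['accum_map'] without checking for the key, so the caller must guarantee it is present; a minimal sketch with illustrative values:

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)

# Omitting 'accum_map' from the final result dict would raise a KeyError.
results = [{'accum_map': [np.array([0.7312])]}]
eval_results(results, feed=None, metric='VOC')
# -> logs "Test mAP: 0.7312"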
Example #5
def eval_results(metric,
                 dataset=None):
    """Evaluation for evaluation program results"""
    box_ap_stats = []
    mask_ap_stats = []  # initialize so the non-COCO branch can still return it
    if metric == 'COCO':
        from ppdet.utils.coco_eval import bbox_eval, mask_eval
        anno_file = dataset.get_anno()
        box_ap_stats = bbox_eval(anno_file)
        mask_ap_stats = mask_eval(anno_file)
    else:
        pass
    return box_ap_stats, mask_ap_stats
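This stripped-down variant delegates to single-argument bbox_eval/mask_eval helpers, a different interface from the coco_eval functions in Examples #1-#3 (presumably a fork); a hypothetical invocation, assuming only that the dataset exposes get_anno():

class _StubDataset:
    def get_anno(self):
        return 'annotations/instances_val2017.json'  # hypothetical path

box_stats, mask_stats = eval_results('COCO', dataset=_StubDataset())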