Example #1
    def _evaluate_single(self, detections_dict, groundtruth_list):
        groundtruth_dict = {
            'annotations': groundtruth_list,
            'images': [{
                'id': gt['image_id']
            } for gt in groundtruth_list],
            'categories': self.categories
        }

        if len(groundtruth_list) > 0:
            detections_list = coco_tools.ExportSingleImageDetectionBoxesToCoco(
                image_id=groundtruth_list[0]['image_id'],
                category_id_set=set([c['id'] for c in self.categories]),
                detection_boxes=detections_dict['detection_boxes'],
                detection_scores=detections_dict['detection_scores'],
                detection_classes=detections_dict['detection_classes'])
        else:
            detections_list = []

        # The COCO evaluation prints some information, which we don't care about
        with HiddenPrints():
            groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
            detections = groundtruth.LoadAnnotations(detections_list)
            evaluator = coco_tools.COCOEvalWrapper(groundtruth,
                                                   detections,
                                                   iou_type='bbox')
            summary_metrics, _ = evaluator.ComputeMetrics()

        return summary_metrics['Precision/mAP']
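
`HiddenPrints` is referenced above but not defined in this snippet. A minimal sketch of such a context manager, assuming it only needs to silence stdout during evaluation (an illustration, not necessarily the author's implementation):

import os
import sys


class HiddenPrints:
    """Illustrative context manager that silences stdout inside a `with` block."""

    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout.close()
        sys.stdout = self._original_stdout
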
Example #2
    def evaluate(self):
        """Evaluates the detection boxes and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionBoxes_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05
        increments.
      'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU
      'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU
      'DetectionBoxes_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionBoxes_Precision/mAP (medium)': mean average precision for
        medium sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionBoxes_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
      'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
      'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
      'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects
        with 100 detections.
      'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
        tf.logging.info('Performing evaluation on %d images.',
                        len(self._image_ids))
        groundtruth_dict = {
            'annotations': self._groundtruth_list,
            'images': [{
                'id': image_id
            } for image_id in self._image_ids],
            'categories': self._categories
        }
        coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
        coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
            self._detection_boxes_list)
        box_evaluator = coco_tools.COCOEvalWrapper(coco_wrapped_groundtruth,
                                                   coco_wrapped_detections,
                                                   agnostic_mode=False)
        box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
            include_metrics_per_category=self._include_metrics_per_category,
            all_metrics_per_category=self._all_metrics_per_category)
        box_metrics.update(box_per_category_ap)
        box_metrics = {
            'DetectionBoxes_' + key: value
            for key, value in iter(box_metrics.items())
        }
        return box_metrics
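
The `_groundtruth_list`, `_detection_boxes_list` and `_image_ids` fields consumed above must be filled per image before `evaluate` is called. A hedged sketch of such an accumulation step using the coco_tools single-image export helpers (the method name `add_single_image` and the `_annotation_id` counter are assumptions for illustration, not part of the original class):

    def add_single_image(self, image_id, groundtruth_boxes, groundtruth_classes,
                         detection_boxes, detection_scores, detection_classes):
        """Hypothetical helper that accumulates one image's groundtruth and detections."""
        category_id_set = set(c['id'] for c in self._categories)
        self._groundtruth_list.extend(
            coco_tools.ExportSingleImageGroundtruthToCoco(
                image_id=image_id,
                next_annotation_id=self._annotation_id,
                category_id_set=category_id_set,
                groundtruth_boxes=groundtruth_boxes,       # [N, 4] float boxes
                groundtruth_classes=groundtruth_classes))  # [N] int category ids
        self._annotation_id += groundtruth_boxes.shape[0]
        self._detection_boxes_list.extend(
            coco_tools.ExportSingleImageDetectionBoxesToCoco(
                image_id=image_id,
                category_id_set=category_id_set,
                detection_boxes=detection_boxes,           # [M, 4] float boxes
                detection_scores=detection_scores,         # [M] float scores
                detection_classes=detection_classes))      # [M] int category ids
        self._image_ids.append(image_id)
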
Example #3
    def evaluate(self):
        """Evaluates the detection masks and returns a dictionary of coco metrics.
    Returns:
      A dictionary holding -
      1. summary_metrics:
      'DetectionMasks_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05 increments.
      'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU.
      'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU.
      'DetectionMasks_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionMasks_Precision/mAP (medium)': mean average precision for medium
        sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionMasks_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionMasks_Recall/AR@1': average recall with 1 detection.
      'DetectionMasks_Recall/AR@10': average recall with 10 detections.
      'DetectionMasks_Recall/AR@100': average recall with 100 detections.
      'DetectionMasks_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects
        with 100 detections.
      'DetectionMasks_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.
      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
        groundtruth_dict = {
            'annotations': self._groundtruth_list,
            'images': [{
                'id': image_id,
                'height': shape[1],
                'width': shape[2]
            } for image_id, shape in self._image_id_to_mask_shape_map.items()],
            'categories': self._categories
        }
        coco_wrapped_groundtruth = coco_tools.COCOWrapper(
            groundtruth_dict, detection_type='segmentation')
        coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
            self._detection_masks_list)
        mask_evaluator = coco_tools.COCOEvalWrapper(
            coco_wrapped_groundtruth,
            coco_wrapped_detection_masks,
            agnostic_mode=False,
            iou_type='segm')
        mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
            include_metrics_per_category=self._include_metrics_per_category)
        mask_metrics.update(mask_per_category_ap)
        mask_metrics = {
            'DetectionMasks_' + key: value
            for key, value in mask_metrics.items()
        }
        return mask_metrics
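
For the 'segm' IOU type, each entry of `_detection_masks_list` carries a run-length encoded mask. An illustrative sketch of producing such an entry with pycocotools (the values and variable names are made up; the original class builds this list elsewhere):

import numpy as np
from pycocotools import mask as mask_api

# Encode a binary instance mask (height x width, uint8) as COCO RLE.
binary_mask = np.zeros((480, 640), dtype=np.uint8)
binary_mask[100:200, 150:300] = 1
rle = mask_api.encode(np.asfortranarray(binary_mask))

# Illustrative detection entry in the COCO results format.
detection_mask_entry = {
    'image_id': 1,
    'category_id': 1,
    'segmentation': rle,
    'score': 0.9
}
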
Example #4
    def evaluate(self):
        """Evaluates the detection boxes and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionBoxes_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05
        increments.
      'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU
      'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU
      'DetectionBoxes_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionBoxes_Precision/mAP (medium)': mean average precision for
        medium sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionBoxes_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
      'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
      'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
      'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects
        with 100 detections.
      'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
        groundtruth_dict = {
            'annotations': self._groundtruth_list,
            'images': [{
                'id': image_id
            } for image_id in self._image_ids],
            'categories': self._categories
        }
        coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
        coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
            self._detection_boxes_list)
        box_evaluator = coco_tools.COCOEvalWrapper(
            coco_wrapped_groundtruth,
            coco_wrapped_detections,
            agnostic_mode=False,
            iouThresholds=self.iouThresholds)
        box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
            include_metrics_per_category=self._include_metrics_per_category,
            all_metrics_per_category=self._all_metrics_per_category)
        box_metrics.update(box_per_category_ap)
        box_metrics = {
            'DetectionBoxes_' + key: value
            for key, value in iter(box_metrics.items())
        }
        print(box_metrics)
        y_true = {
            image_id: []
            for image_id in
            [label['image_id'] for label in self._groundtruth_list]
        }
        for label in self._groundtruth_list:
            y_true[label['image_id']].append(label['category_id'])
        image_ids = np.array(sorted(y_true.keys()))
        y_true = [y_true[image_id] for image_id in image_ids]
        labelbinarizer = MultiLabelBinarizer()
        y_true = labelbinarizer.fit_transform(y_true)
        y_pred = np.zeros(y_true.shape)
        for row in self._detection_boxes_list:
            # find image index in groundtruth
            i = np.where(image_ids == row['image_id'])[0]
            j = np.where(
                np.array(labelbinarizer.classes_) == row['category_id'])[0]
            if row['score'] > y_pred[i, j]:
                y_pred[i, j] = row['score']
        roc_auc = roc_auc_score(y_true, y_pred, average='macro')
        print('ROC AUC (macro): ', roc_auc)
        class_names = [cat['name'] for cat in self._categories]
        print(class_names)
        # threshold predictions
        # threshold predictions (np.int was removed in NumPy 1.24; use the builtin int)
        y_pred = (y_pred >= 0.5).astype(int)
        class_report = classification_report(y_true,
                                             y_pred,
                                             target_names=class_names)
        print(class_report)
        acc_scores = {}
        for i, label in enumerate(class_names):
            acc_scores['ImageLevel/BALANCED_ACC_{}'.format(
                label)] = balanced_accuracy_score(y_true[:, i], y_pred[:, i])
            acc_scores['ImageLevel/ACC_{}'.format(label)] = accuracy_score(
                y_true[:, i], y_pred[:, i])
        print(acc_scores)
        box_metrics.update(acc_scores)
        box_metrics['ImageLevel/ROC_AUC_MACRO'] = roc_auc
        return box_metrics
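
The image-level metrics above rely on NumPy and scikit-learn helpers that are not imported in this excerpt; the assumed imports would be roughly:

import numpy as np
from sklearn.metrics import (accuracy_score, balanced_accuracy_score,
                             classification_report, roc_auc_score)
from sklearn.preprocessing import MultiLabelBinarizer
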
Example #5
    def testCocoWrappers(self):
        groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict)
        detections = groundtruth.LoadAnnotations(self._detections_list)
        evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
        summary_metrics, _ = evaluator.ComputeMetrics()
        self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
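
For context, this assertion holds whenever every detection exactly matches a groundtruth box; an illustrative (made-up) fixture in standard COCO layout, as it could be assigned in the test's setUp:

        self._groundtruth_dict = {
            'annotations': [{
                'id': 1,
                'image_id': 'first',
                'category_id': 1,
                'bbox': [100., 100., 100., 100.],  # [x, y, width, height]
                'area': 10000.,
                'iscrowd': 0
            }],
            'images': [{'id': 'first'}],
            'categories': [{'id': 1, 'name': 'cat'}]
        }
        self._detections_list = [{
            'image_id': 'first',
            'category_id': 1,
            'bbox': [100., 100., 100., 100.],
            'score': 0.8
        }]
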
Example #6
                ground_truth_box = []
                ground_truth_class = []
                if len(data) == 0:
                    ground_truth_box.append([])
                for i in range(len(data)):

                    ground_truth_box.append(data[i][1:])
                    ground_truth_class.append(label_map_dict[data[i][0]])
            # np.float was removed in NumPy 1.24; np.float64 keeps the same dtype
            ground_truth_boxes.append(np.array(ground_truth_box, np.float64))
            ground_truth_classes.append(np.array(ground_truth_class, np.int32))
    return image_ids1, ground_truth_boxes, ground_truth_classes


pred_image_ids, detection_boxes, detection_scores, detection_classes = convert_OBJ_DETE_dt(
    pred_path)
truth_image_ids, ground_truth_boxes, ground_truth_classes = convert_OBJ_DETE_gt(
    truth_path)
detections_list = coco_tools.ExportDetectionsToCOCO(pred_image_ids,
                                                    detection_boxes,
                                                    detection_scores,
                                                    detection_classes,
                                                    label_map_dict)
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(truth_image_ids,
                                                      ground_truth_boxes,
                                                      ground_truth_classes,
                                                      label_map_dict)

groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
metrics, ap = evaluator.ComputeMetrics()
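
For reference, the last argument to `ExportGroundtruthToCOCO` and `ExportDetectionsToCOCO` is a categories list of dictionaries (each with at least 'id' and 'name'), so the `label_map_dict` passed above is presumably already prepared in that shape; an illustrative list:

# Illustrative categories list; 'supercategory' is optional.
categories = [
    {'id': 1, 'name': 'person'},
    {'id': 2, 'name': 'car'}
]
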
Example #7
    def evaluate(self):
        groundtruth = coco_tools.COCOWrapper(self.groundtruth)
        detections = groundtruth.LoadAnnotations(self.detections)
        evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
        metrics, _ = evaluator.ComputeMetrics()
        return metrics
Example #8
def main(_):
    model_path = FLAGS.model_path
    voc_path = FLAGS.voc_path
    labelmap_file = FLAGS.labelmap_file
    set_file = FLAGS.set_file
    scaler_file = FLAGS.scaler_file
    side_input = FLAGS.side_input

    # Load Model and read label_map.pbtxt
    model = file_util.load_model(model_path)
    categories, labelmap_dict, category_index = file_util.load_labelmap(
        voc_path, labelmap_file)

    # Get information from groundtruth and detection
    gt_ids, gt_boxes, gt_classes = _read_annotations_for_groundtruth(
        voc_path, set_file, labelmap_dict)
    dt_ids, dt_boxes, dt_classes, dt_scores, time_per_image = inference(
        model, voc_path, model_path, set_file, category_index, scaler_file,
        side_input)

    # COCO Evaluation
    groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
        gt_ids, gt_boxes, gt_classes, categories)
    detections_list = coco_tools.ExportDetectionsToCOCO(
        dt_ids, dt_boxes, dt_scores, dt_classes, categories)
    groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    detections = groundtruth.LoadAnnotations(detections_list)
    evaluator = coco_tools.COCOEvalWrapper(groundtruth,
                                           detections,
                                           agnostic_mode=False)
    summary_metrics, per_category_ap = evaluator.ComputeMetrics(
        include_metrics_per_category=True, all_metrics_per_category=True)

    # Convert to Percent format
    for k, v in summary_metrics.items():
        summary_metrics[k] = v * 100
    for k, v in per_category_ap.items():
        per_category_ap[k] = v * 100
    print(summary_metrics)
    print(per_category_ap)

    # Workaround for a TensorFlow bug: FLOPs cannot be computed for models with custom side inputs
    if not side_input:
        flops = get_flops(model)
    else:
        flops = 0

    metrics_dict = {
        'flops': flops / 1e9,
        'time_per_image': time_per_image * 1000
    }

    # Read Trainable Params and Name Dictionary from Pickle file
    name_params_dict = pickle.load(
        open(os.path.join(model_path, 'metrics', 'name_params.pkl'), 'rb'))
    metrics_dict.update(name_params_dict)

    metrics_dict.update(summary_metrics)
    metrics_dict.update(per_category_ap)

    # Save Metrics to CSV
    metrics_df = pd.DataFrame.from_records([metrics_dict])
    metrics_df.to_csv(os.path.join(model_path, 'metrics', 'metrics.csv'))
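
The `FLAGS` values read at the top of `main` are defined outside this excerpt; assuming the usual absl flag pattern for TensorFlow scripts, the definitions would look roughly like this (flag descriptions are guesses):

from absl import app, flags

flags.DEFINE_string('model_path', None, 'Path to the exported detection model.')
flags.DEFINE_string('voc_path', None, 'Path to the VOC-style dataset root.')
flags.DEFINE_string('labelmap_file', None, 'Name of the label map (.pbtxt) file.')
flags.DEFINE_string('set_file', None, 'Image-set file listing the evaluation ids.')
flags.DEFINE_string('scaler_file', None, 'Optional scaler file for side inputs.')
flags.DEFINE_bool('side_input', False, 'Whether the model takes side inputs.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    app.run(main)
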