Example 1
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      A named tuple with the following fields -
        average_precision: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions: List of precisions, each precision is a float numpy
            array
        recalls: List of recalls, each recall is a float numpy array
        corloc: CorLoc score for each class, float numpy array
        mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warning(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
          self.label_id_offset)

    if self.use_weighted_mean_ap:
      all_scores = np.array([], dtype=float)
      all_tp_fp_labels = np.array([], dtype=bool)
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      if not self.scores_per_class[class_index]:
        scores = np.array([], dtype=float)
        tp_fp_labels = np.array([], dtype=bool)  # tp/fp labels are boolean
      else:
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      if self.use_weighted_mean_ap:
        all_scores = np.append(all_scores, scores)
        all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
      logging.info('Scores and tpfp per class label: %d', class_index)
      logging.info('%s', tp_fp_labels)
      logging.info('%s', scores)
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    if self.use_weighted_mean_ap:
      num_gt_instances = np.sum(self.num_gt_instances_per_class)
      precision, recall = metrics.compute_precision_recall(
          all_scores, all_tp_fp_labels, num_gt_instances)
      mean_ap = metrics.compute_average_precision(precision, recall)
    else:
      mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return ObjectDetectionEvalMetrics(
        self.average_precision_per_class, mean_ap, self.precisions_per_class,
        self.recalls_per_class, self.corloc_per_class, mean_corloc)
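Example 1 builds its result as an ObjectDetectionEvalMetrics value that the snippet never defines. A minimal sketch of a compatible definition, assuming a plain collections.namedtuple with one field per returned value (the field names here are illustrative; only their order matters to the return statement above):

import collections

# Hypothetical definition compatible with the return statement above;
# the field names are assumptions.
ObjectDetectionEvalMetrics = collections.namedtuple(
    'ObjectDetectionEvalMetrics', [
        'average_precisions',  # per-class AP, float numpy array
        'mean_ap',             # scalar mAP over all classes
        'precisions',          # list of per-class precision arrays
        'recalls',             # list of per-class recall arrays
        'corlocs',             # per-class CorLoc, float numpy array
        'mean_corloc',         # scalar mean CorLoc over all classes
    ])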
Example 2
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      A named tuple with the following fields -
        average_precision: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions: List of precisions, each precision is a float numpy
            array
        recalls: List of recalls, each recall is a float numpy array
        corloc: CorLoc score for each class, float numpy array
        mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warning(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
          self.label_id_offset)

    if self.use_weighted_mean_ap:
      all_scores = np.array([], dtype=float)
      all_tp_fp_labels = np.array([], dtype=bool)
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      if not self.scores_per_class[class_index]:
        scores = np.array([], dtype=float)
        tp_fp_labels = np.array([], dtype=bool)  # tp/fp labels are boolean
      else:
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      if self.use_weighted_mean_ap:
        all_scores = np.append(all_scores, scores)
        all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
      logging.info('Scores and tpfp per class label: %d', class_index)
      logging.info('%s', tp_fp_labels)
      logging.info('%s', scores)
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class[class_index] = precision
      self.recalls_per_class[class_index] = recall
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    if self.use_weighted_mean_ap:
      num_gt_instances = np.sum(self.num_gt_instances_per_class)
      precision, recall = metrics.compute_precision_recall(
          all_scores, all_tp_fp_labels, num_gt_instances)
      mean_ap = metrics.compute_average_precision(precision, recall)
    else:
      mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return ObjectDetectionEvalMetrics(
        self.average_precision_per_class, mean_ap, self.precisions_per_class,
        self.recalls_per_class, self.corloc_per_class, mean_corloc)
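Both variants above delegate the curve computation to metrics.compute_precision_recall(scores, tp_fp_labels, num_gt). A minimal sketch of the contract those calls imply (rank detections by descending score, accumulate true and false positives, normalize recall by the ground-truth count); this is a reconstruction under that assumption, not the library's actual implementation:

import numpy as np

def compute_precision_recall(scores, labels, num_gt):
  """Sketch: precision/recall curves from score-ranked detections."""
  sorted_indices = np.argsort(scores)[::-1]    # highest score first
  labels = labels[sorted_indices].astype(int)  # 1 = true positive
  true_positives = np.cumsum(labels)
  false_positives = np.cumsum(1 - labels)
  precision = true_positives.astype(float) / (true_positives + false_positives)
  recall = true_positives.astype(float) / num_gt
  return precision, recall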
Example 3
  def test_compute_cor_loc(self):
    num_gt_imgs_per_class = np.array([100, 1, 5, 1, 1], dtype=int)
    num_images_correctly_detected_per_class = np.array(
        [10, 0, 1, 0, 0], dtype=int)
    corloc = metrics.compute_cor_loc(
        num_gt_imgs_per_class, num_images_correctly_detected_per_class)
    expected_corloc = np.array([0.1, 0, 0.2, 0, 0], dtype=float)
    self.assertTrue(np.allclose(corloc, expected_corloc))
Example 4
  def test_compute_cor_loc_nans(self):
    num_gt_imgs_per_class = np.array([100, 0, 0, 1, 1], dtype=int)
    num_images_correctly_detected_per_class = np.array(
        [10, 0, 1, 0, 0], dtype=int)
    corloc = metrics.compute_cor_loc(
        num_gt_imgs_per_class, num_images_correctly_detected_per_class)
    expected_corloc = np.array([0.1, np.nan, np.nan, 0, 0], dtype=float)
    self.assertAllClose(corloc, expected_corloc)
Example 5
  def test_compute_cor_loc_nans(self):
    num_gt_imgs_per_class = np.array([100, 0, 0, 1, 1], dtype=int)
    num_images_correctly_detected_per_class = np.array(
        [10, 0, 1, 0, 0], dtype=int)
    corloc = metrics.compute_cor_loc(
        num_gt_imgs_per_class, num_images_correctly_detected_per_class)
    expected_corloc = np.array([0.1, np.nan, np.nan, 0, 0], dtype=float)
    self.assertAllClose(corloc, expected_corloc)
Example 6
  def test_compute_cor_loc(self):
    num_gt_imgs_per_class = np.array([100, 1, 5, 1, 1], dtype=int)
    num_images_correctly_detected_per_class = np.array(
        [10, 0, 1, 0, 0], dtype=int)
    corloc = metrics.compute_cor_loc(
        num_gt_imgs_per_class, num_images_correctly_detected_per_class)
    expected_corloc = np.array([0.1, 0, 0.2, 0, 0], dtype=float)
    self.assertTrue(np.allclose(corloc, expected_corloc))
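The four tests above pin down compute_cor_loc's contract: an elementwise ratio of correctly detected images to ground-truth images per class, with NaN wherever a class has no ground-truth images. A minimal sketch consistent with both expected outputs (an assumption, not the implementation under test):

import numpy as np

def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
  """Sketch: per-class CorLoc, NaN for classes without ground-truth images."""
  with np.errstate(divide='ignore', invalid='ignore'):
    return np.where(
        num_gt_imgs_per_class == 0, np.nan,
        num_images_correctly_detected_per_class /
        num_gt_imgs_per_class.astype(float))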
Example 7
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      average_recall_per_class: dict mapping class index to the mean recall
          for that class, float scalar
      corloc_per_class: CorLoc score for each class, float numpy array
      mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warning(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))


    average_recall_per_class = {}
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue

      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])

      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])

      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)

      average_recall_per_class[class_index] = np.mean(recall)

      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, 
            average_recall_per_class,
            self.corloc_per_class, mean_corloc)
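Every variant closes a class's iteration with metrics.compute_average_precision(precision, recall). A minimal sketch of the standard area-under-the-interpolated-precision-envelope definition that these callers appear to rely on (again a reconstruction, not the library source):

import numpy as np

def compute_average_precision(precision, recall):
  """Sketch: AP as area under the interpolated precision-recall curve."""
  recall = np.concatenate([[0.0], recall, [1.0]])
  precision = np.concatenate([[0.0], precision, [0.0]])
  # Make precision monotonically non-increasing from right to left.
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = max(precision[i], precision[i + 1])
  # Sum rectangle areas at the points where recall increases.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  return np.sum((recall[indices] - recall[indices - 1]) * precision[indices])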
Example 8
    def evaluate(self):
        """Compute evaluation result.

        Returns:
          average_precision_per_class: dict mapping subset name to a float
              numpy array of per-class average precision.
          mean_ap: dict mapping subset name to the mean average precision of
              all classes in that subset, float scalar
          precisions_per_class: dict mapping subset name to a list of
              precisions, each precision is a float numpy array
          recalls_per_class: dict mapping subset name to a list of recalls,
              each recall is a float numpy array
          corloc_per_class: CorLoc score for each class, float numpy array
          mean_corloc: mean CorLoc score over all classes, float scalar
        """

        # compute mAP
        mean_ap = {}
        for subset in self.subset_names:
            if (self.num_gt_instances_per_class[subset] == 0).any():
                logging.warning(
                    'The following classes in subset %s have no ground truth '
                    'examples: %s', subset,
                    np.squeeze(
                        np.argwhere(
                            self.num_gt_instances_per_class[subset] == 0)))
            for class_index in range(self.num_class):
                if self.num_gt_instances_per_class[subset][class_index] == 0:
                    continue
                scores = np.concatenate(
                    self.scores_per_class[subset][class_index])
                tp_fp_labels = np.concatenate(
                    self.tp_fp_labels_per_class[subset][class_index])
                precision, recall = metrics.compute_precision_recall(
                    scores, tp_fp_labels,
                    self.num_gt_instances_per_class[subset][class_index])
                self.precisions_per_class[subset].append(precision)
                self.recalls_per_class[subset].append(recall)
                average_precision = metrics.compute_average_precision(
                    precision, recall)
                self.average_precision_per_class[subset][class_index] = \
                    average_precision

            mean_ap[subset] = np.nanmean(
                self.average_precision_per_class[subset])

        # compute CorLoc
        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
            self.num_images_correctly_detected_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)

        return (self.average_precision_per_class, mean_ap,
                self.precisions_per_class, self.recalls_per_class,
                self.corloc_per_class, mean_corloc)
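All of these variants take their final means with np.nanmean, so classes whose AP slot was never written (and therefore stays NaN) do not drag the average down. A two-line illustration of that behavior:

import numpy as np

ap = np.array([0.5, np.nan, 0.7])  # class 1 had no ground truth
print(np.nanmean(ap))              # 0.6 -- NaN entries are skipped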
Example 9
    def evaluate(self):
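        """Compute evaluation result as an ObjectDetectionEvalMetrics tuple."""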
        if (self.num_gt_instances_per_class == 0).any():
            logging.warning(
                'The following classes have no ground truth examples: %s',
                np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
                self.label_id_offset)

        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)
        for class_index in range(self.num_class):
            if self.num_gt_instances_per_class[class_index] == 0:
                continue
            if not self.scores_per_class[class_index]:
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=bool)  # tp/fp labels are boolean
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(
                    self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            precision, recall = metrics.compute_precision_recall(
                scores, tp_fp_labels,
                self.num_gt_instances_per_class[class_index])
            self.precisions_per_class.append(precision)
            self.recalls_per_class.append(recall)
            average_precision = metrics.compute_average_precision(
                precision, recall)
            self.average_precision_per_class[class_index] = average_precision

        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
            self.num_images_correctly_detected_per_class)

        if self.use_weighted_mean_ap:
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            precision, recall = metrics.compute_precision_recall(
                all_scores, all_tp_fp_labels, num_gt_instances)
            mean_ap = metrics.compute_average_precision(precision, recall)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return ObjectDetectionEvalMetrics(self.average_precision_per_class,
                                          mean_ap, self.precisions_per_class,
                                          self.recalls_per_class,
                                          self.corloc_per_class, mean_corloc)
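For np.nanmean(self.average_precision_per_class) to behave as intended, the per-class AP array must start out filled with NaN rather than zero, so that skipped classes are excluded instead of counted as 0.0. A plausible initializer, assuming the evaluator's constructor (not shown here) sets the state up this way:

import numpy as np

num_class = 5  # illustrative value
# Classes never visited by the evaluation loop keep NaN and are
# therefore ignored by np.nanmean.
average_precision_per_class = np.full(num_class, np.nan, dtype=float)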
Example 10
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: CorLoc score for each class, float numpy array
      mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warning(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision
      with open("AP"+str(class_index),"wb") as f:
          save_file = {"rec":recall,"prec":precision,"ap":average_precision}
          cPickle.dump(save_file,f)
    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc)
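Example 10 additionally pickles each class's precision-recall data to a file named AP<class_index>. A hypothetical read-back of one such file, assuming the keys written by the loop above:

import pickle

# Hypothetical: load the per-class dump written by the loop above.
with open('AP0', 'rb') as f:
  saved = pickle.load(f)
print(saved['ap'], saved['rec'].shape, saved['prec'].shape)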
Example 11
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: CorLoc score for each class, float numpy array
      mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warning(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc)
Example 12
    def evaluate(self):
        """Compute evaluation result.

        Returns:
          A named tuple with the following fields -
            average_precision: float numpy array of average precision for
                each class.
            mean_ap: mean average precision of all classes, float scalar
            precisions: List of precisions, each precision is a float numpy
                array
            recalls: List of recalls, each recall is a float numpy array
            corloc: CorLoc score for each class, float numpy array
            mean_corloc: mean CorLoc score over all classes, float scalar
        """
        if (self.num_gt_instances_per_class == 0).any():
            logging.warning(
                'The following classes have no ground truth examples: %s',
                np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
                self.label_id_offset)

        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)

        for class_index in range(self.num_class):
            if self.num_gt_instances_per_class[class_index] == 0:
                continue
            if not self.scores_per_class[class_index]:
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=bool)
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(
                    self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            precision, recall = metrics.compute_precision_recall(
                scores, tp_fp_labels,
                self.num_gt_instances_per_class[class_index])
            self.precisions_per_class.append(precision)
            self.recalls_per_class.append(recall)
            average_precision = metrics.compute_average_precision(
                precision, recall)
            self.average_precision_per_class[class_index] = average_precision

        average_precision_per_iou = []
        for ii in range(len(self.iou_list)):
            for class_index in range(self.num_class):
                if self.num_gt_instances_per_class[class_index] == 0:
                    continue
                if not self.scores_per_class_per_iou[ii][class_index]:
                    scores = np.array([], dtype=float)
                    tp_fp_labels = np.array([], dtype=bool)
                else:
                    scores = np.concatenate(
                        self.scores_per_class_per_iou[ii][class_index])
                    tp_fp_labels = np.concatenate(
                        self.tp_fp_labels_per_class_per_iou[ii][class_index])
                precision, recall = metrics.compute_precision_recall(
                    scores, tp_fp_labels,
                    self.num_gt_instances_per_class[class_index])
                self.precisions_per_class_per_iou[ii].append(precision)
                self.recalls_per_class_per_iou[ii].append(recall)
                average_precision = metrics.compute_average_precision(
                    precision, recall)
                self.average_precision_per_class_per_iou[ii][
                    class_index] = average_precision

        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
            self.num_images_correctly_detected_per_class)

        if self.use_weighted_mean_ap:
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            precision, recall = metrics.compute_precision_recall(
                all_scores, all_tp_fp_labels, num_gt_instances)
            mean_ap = metrics.compute_average_precision(precision, recall)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)

        mean_ap_per_iou = []
        for ii in range(len(self.iou_list)):
            mean_ap_per_iou.append(
                np.nanmean(self.average_precision_per_class_per_iou[ii]))

        print('Mean AP: %.3f' % mean_ap)
        for ii in range(len(self.iou_list)):
            print('Mean AP @ IoU %.2f: %.3f' %
                  (self.iou_list[ii], mean_ap_per_iou[ii]))
        print('Mean AP @ IoU[0.5:0.05:0.95]: %.3f' %
              np.nanmean(np.asarray(mean_ap_per_iou)))
        input()  # pause so the printed metrics can be read (raw_input in Python 2)

        mean_corloc = np.nanmean(self.corloc_per_class)
        return ObjectDetectionEvalMetrics(self.average_precision_per_class,
                                          mean_ap, self.precisions_per_class,
                                          self.recalls_per_class,
                                          self.corloc_per_class, mean_corloc)
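Example 12 extends the evaluation to a list of IoU thresholds and prints a COCO-style mean over them. The snippet never shows how self.iou_list is built; a sketch that matches the printed label IoU[0.5:0.05:0.95] would be the ten COCO thresholds:

import numpy as np

# Assumed: thresholds 0.50, 0.55, ..., 0.95, as in the COCO protocol.
iou_list = np.round(np.arange(0.5, 1.0, 0.05), 2)
print(iou_list)       # [0.5  0.55 0.6  0.65 0.7  0.75 0.8  0.85 0.9  0.95]
print(len(iou_list))  # 10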