def total_iou_matrix(self,
                     item,
                     label_weights=None,
                     algorithm=None,
                     qval=None,
                     per_label=False):
    """
    Build the full pairwise IOU table between this item's shapes and *item*'s shapes.

    Every (groundtruth, prediction) pair whose labels have non-zero text
    similarity contributes one record; pairs with unrelated labels are skipped.

    :param item: eval item to be compared with self
    :param label_weights: mapping label -> weight (missing labels default to 1)
    :param algorithm: text-comparison algorithm passed to get_text_comparator
    :param qval: q value for the text comparator
    :param per_label: accepted for signature compatibility; not used by this
        method as written — TODO confirm whether per-label output was intended
    :return: list of dicts with keys 'iou' (weighted), 'weight',
        'prediction' and 'groundtruth'
    """
    weights_map = label_weights or {}
    text_cmp = get_text_comparator(algorithm, qval)
    pairs = []
    for ground in self.get_values_iter():
        gt_labels = ground[self._shape_key]
        # Weight depends only on the groundtruth labels, so hoist it out of
        # the inner loop.
        pair_weight = sum(weights_map.get(label, 1) for label in gt_labels)
        for predicted in item.get_values_iter():
            similarity = texts_similarity(gt_labels,
                                          predicted[self._shape_key],
                                          text_cmp)
            if similarity == 0:
                # Labels unrelated — this pair carries no IOU information.
                continue
            pairs.append({
                'iou': self._iou(ground, predicted) * pair_weight,
                'weight': pair_weight,
                'prediction': predicted,
                'groundtruth': ground,
            })
    return pairs
 def match(self, item, algorithm='Levenshtein', qval=1):
     """
     Average label-text similarity between positionally paired values.

     Values from self and *item* are zipped in order (extra values on either
     side are ignored by ``zip``).

     :param item: eval item to be compared with self
     :param algorithm: text-comparison algorithm name
     :param qval: q value for the text comparator
     :return: mean similarity over the pairs, or 0 when there are no pairs
     """
     text_cmp = get_text_comparator(algorithm, qval)
     scores = [
         texts_similarity(gt[self._shape_key], pred[self._shape_key], text_cmp)
         for gt, pred in zip(self.get_values_iter(), item.get_values_iter())
     ]
     # max(..., 1) guards the empty case without a separate branch.
     return sum(scores) / max(len(scores), 1)
 def total_iou(self,
               item,
               label_weights=None,
               algorithm=None,
               qval=None,
               per_label=False):
     """
     For each shape in this item, take the best IOU against any identically
     labeled shape in *item*, then aggregate.

     This is a suboptimal metric: it does not account for multiple boxes from
     self coinciding with a single box from item.

     :param item: eval item to be compared with self
     :param label_weights: mapping label -> weight (missing labels default to 1)
     :param algorithm: text-comparison algorithm passed to get_text_comparator
     :param qval: q value for the text comparator
     :param per_label: when True return {label: mean best-IOU}; otherwise
         return a single weighted average
     :return: dict of per-label means, or a weighted-average float (0.0 when
         self has no values)
     """
     weights_map = label_weights or {}
     text_cmp = get_text_comparator(algorithm, qval)
     per_label_scores = defaultdict(list)
     scores, score_weights = [], []
     for ground in self.get_values_iter():
         gt_labels = ground[self._shape_key]
         best_iou = 0
         for predicted in item.get_values_iter():
             similarity = texts_similarity(gt_labels,
                                           predicted[self._shape_key],
                                           text_cmp)
             if similarity == 0:
                 # Different labels — this prediction cannot match.
                 continue
             best_iou = max(best_iou, self._iou(ground, predicted))
         if per_label:
             # Every label on the groundtruth shape receives the same score.
             for label in gt_labels:
                 per_label_scores[label].append(best_iou)
         else:
             weight = sum(weights_map.get(label, 1) for label in gt_labels)
             scores.append(best_iou * weight)
             score_weights.append(weight)
     if per_label:
         return {label: float(np.mean(vals))
                 for label, vals in per_label_scores.items()}
     return np.average(scores, weights=score_weights) if scores else 0.0
# Example #4 — alternate implementation of total_iou scraped from a second source.
 def total_iou(self, item, label_weights=None, algorithm=None, qval=None):
     """
     For each shape in this item, take the best IOU against any identically
     labeled shape in *item*, then return the weighted average.

     This is a suboptimal metric: it does not account for multiple boxes from
     self coinciding with a single box from item.

     :param item: eval item to be compared with self
     :param label_weights: mapping label -> weight (missing labels default to 1)
     :param algorithm: text-comparison algorithm passed to get_text_comparator
     :param qval: q value for the text comparator
     :return: weighted average of the per-shape best IOUs, or 0.0 when self
         has no values
     """
     weights_map = label_weights or {}
     text_cmp = get_text_comparator(algorithm, qval)
     scores, score_weights = [], []
     for ground in self.get_values_iter():
         gt_labels = ground[self._shape_key]
         best_iou = 0
         for predicted in item.get_values_iter():
             similarity = texts_similarity(gt_labels,
                                           predicted[self._shape_key],
                                           text_cmp)
             if similarity != 0:
                 best_iou = max(best_iou, self._iou(ground, predicted))
         weight = sum(weights_map.get(label, 1) for label in gt_labels)
         scores.append(best_iou * weight)
         score_weights.append(weight)
     return np.average(scores, weights=score_weights) if scores else 0.0
 def _match(self, x, y, f):
     """
     Combined match score for a pair of shapes: label-text similarity
     scaled by their span IOU (0 whenever either factor is 0).

     :param x: first shape dict
     :param y: second shape dict
     :param f: text comparator passed through to texts_similarity
     :return: labels-similarity * spans-IOU
     """
     label_score = texts_similarity(x[self._shape_key],
                                    y[self._shape_key],
                                    f)
     return label_score * self.spans_iou(x, y)