def add_pair(self, ann_gt, ann_pred):
    """Accumulate match statistics for one (ground-truth, prediction) annotation pair.

    Ground-truth labels are restricted to the classes in the GT->pred mapping;
    predictions are restricted to the pred->GT mapping and must carry a
    confidence tag — predictions without one are skipped with a warning.
    Matched predictions are appended as correct/incorrect MatchWithConfidence
    entries under their GT class; unmatched predictions are recorded as false
    positives; every retained GT label increments the per-class GT total.
    """
    labels_gt = filter_labels_by_name(ann_gt.labels, self._gt_to_pred_class_mapping)
    # Keep only predictions that carry a confidence value; warn about the rest.
    # (The original wrapped this filter in a redundant list comprehension.)
    labels_pred = []
    for label in filter_labels_by_name(ann_pred.labels, self._pred_to_gt_class_mapping):
        if self._get_confidence_value(label) is None:
            # Fix: logger.warn is a deprecated alias of logger.warning in the
            # stdlib logging API; the message text is unchanged.
            logger.warning(f'Found a label with class {label.obj_class.name!r} that does not have a '
                           f'{self._confidence_tag_name!r} tag attached. Skipping this object for metric computation.')
        else:
            labels_pred.append(label)
    match_result = match_labels_by_iou(labels_1=labels_gt, labels_2=labels_pred,
                                       img_size=ann_gt.img_size,
                                       iou_threshold=self._iou_threshold)
    for match in match_result.matches:
        gt_class = match.label_1.obj_class.name
        label_pred = match.label_2
        # A match is "correct" only when the predicted class is the one the
        # GT class maps to; confidence comes from the prediction's tag.
        self._counters[gt_class][MATCHES].append(
            MatchWithConfidence(is_correct=(label_pred.obj_class.name == self._gt_to_pred_class_mapping[gt_class]),
                                confidence=self._get_confidence_value(label_pred)))
    # Add unmatched predictions to the list as false positive matches.
    # (Fix: renamed misspelled local 'umatched_pred' -> 'unmatched_pred'.)
    for unmatched_pred in match_result.unmatched_labels_2:
        gt_class = self._pred_to_gt_class_mapping[unmatched_pred.obj_class.name]
        self._counters[gt_class][MATCHES].append(
            MatchWithConfidence(is_correct=False,
                                confidence=self._get_confidence_value(unmatched_pred)))
    for label_1 in labels_gt:
        self._counters[label_1.obj_class.name][TOTAL_GROUND_TRUTH] += 1
def add_pair(self, ann_gt: Annotation, ann_pred: Annotation):
    """Update the confusion matrix and unmatched counters from one annotation pair.

    GT labels are filtered to the classes tracked in self._unmatched_gt and
    predictions to those in self._unmatched_pred, then matched by IoU.
    Each match increments one (gt class, pred class) confusion-matrix cell;
    leftovers on either side increment the corresponding unmatched counter.
    """
    gt_labels = filter_labels_by_name(ann_gt.labels, self._unmatched_gt)
    pred_labels = filter_labels_by_name(ann_pred.labels, self._unmatched_pred)
    result = match_labels_by_iou(labels_1=gt_labels,
                                 labels_2=pred_labels,
                                 img_size=ann_gt.img_size,
                                 iou_threshold=self._iou_threshold)
    # Every IoU match bumps the cell keyed by the (gt, pred) class-name pair.
    for pair in result.matches:
        cell = (pair.label_1.obj_class.name, pair.label_2.obj_class.name)
        self._confusion_matrix[cell] += 1
    # Labels left unmatched on either side are tallied per class name.
    for leftover in result.unmatched_labels_1:
        self._unmatched_gt[leftover.obj_class.name] += 1
    for leftover in result.unmatched_labels_2:
        self._unmatched_pred[leftover.obj_class.name] += 1
def add_pair(self, ann_gt, ann_pred):
    """Tally true positives, GT totals, and prediction totals for one pair.

    GT labels are filtered by the GT->pred class mapping, predictions by the
    pred->GT mapping, then matched by IoU. Each match counts as a true
    positive for its GT class; every retained GT label and every retained
    prediction (mapped back to its GT class) bumps the respective total.
    """
    gt_labels = filter_labels_by_name(ann_gt.labels, self._gt_to_pred_class_mapping)
    pred_labels = filter_labels_by_name(ann_pred.labels, self._pred_to_gt_class_mapping)
    result = match_labels_by_iou(labels_1=gt_labels,
                                 labels_2=pred_labels,
                                 img_size=ann_gt.img_size,
                                 iou_threshold=self._iou_threshold)
    # TODO unify with confusion matrix ?
    for pair in result.matches:
        self._counters[pair.label_1.obj_class.name][TRUE_POSITIVE] += 1
    for gt_label in gt_labels:
        self._counters[gt_label.obj_class.name][TOTAL_GROUND_TRUTH] += 1
    for pred_label in pred_labels:
        # Prediction totals are bucketed under the GT class they map to.
        mapped_class = self._pred_to_gt_class_mapping[pred_label.obj_class.name]
        self._counters[mapped_class][TOTAL_PREDICTIONS] += 1
def add_pair(self, ann_gt, ann_pred):
    """Record per-class matches (with confidence) and GT totals for one pair.

    Predictions without a readable confidence value are silently dropped
    before IoU matching (unlike the logging variant elsewhere in this file —
    that difference is deliberate here). Each match is appended as a
    MatchWithConfidence under its GT class, marked correct when the predicted
    class equals the GT class's mapped prediction class; every retained GT
    label increments the per-class GT total.
    """
    gt_labels = filter_labels_by_name(ann_gt.labels, self._gt_to_pred_class_mapping)
    # Drop predictions lacking a confidence value (comprehension in the
    # original, explicit loop here — same result).
    pred_labels = []
    for candidate in filter_labels_by_name(ann_pred.labels, self._predicted_class_names):
        if self._get_confidence_value(candidate) is not None:
            pred_labels.append(candidate)
    result = match_labels_by_iou(labels_1=gt_labels,
                                 labels_2=pred_labels,
                                 img_size=ann_gt.img_size,
                                 iou_threshold=self._iou_threshold)
    for pair in result.matches:
        gt_name = pair.label_1.obj_class.name
        predicted = pair.label_2
        is_correct = predicted.obj_class.name == self._gt_to_pred_class_mapping[gt_name]
        self._counters[gt_name][MATCHES].append(
            MatchWithConfidence(is_correct=is_correct,
                                confidence=self._get_confidence_value(predicted)))
    for gt_label in gt_labels:
        self._counters[gt_label.obj_class.name][TOTAL_GROUND_TRUTH] += 1