Code example #1
0
 def calculate_metric(self):
     """Build per-sample label predictions and compute multi-label metrics.

     Zips the accumulated scores, predictions, and targets into
     LabelListPrediction records, then delegates to
     compute_multi_label_classification_metrics together with the label
     names and the aggregate loss.
     """
     label_predictions = [
         LabelListPrediction(sample_scores, sample_preds, sample_targets)
         for sample_scores, sample_preds, sample_targets in zip(
             self.all_scores, self.all_preds, self.all_targets)
     ]
     return compute_multi_label_classification_metrics(
         label_predictions,
         self.label_names,
         self.calculate_loss(),
     )
Code example #2
0
    def test_compute_multi_label_classification_metrics(self) -> None:
        """Check macro and per-label ROC-AUC produced from the fixture data."""
        expected_roc_auc = {"label1": 1.0, "label2": 0.25, "label3": 0.0}

        metrics = compute_multi_label_classification_metrics(
            PREDICTIONS, LABEL_NAMES, loss=5.0)

        # Macro ROC-AUC is the mean of the three per-label values above.
        self.assertAlmostEqual(metrics.roc_auc, 1.25 / 3)
        for label, soft_scores in metrics.per_label_soft_scores.items():
            # getattr with a None default keeps the assertion failure
            # readable if a label is missing its roc_auc attribute.
            observed = getattr(soft_scores, "roc_auc", None)
            self.assertAlmostEqual(observed, expected_roc_auc[label])
Code example #3
0
 def calculate_metric(self):
     """Build per-sample label predictions and compute multi-label metrics.

     Like the basic variant, but also requests average precisions (skipped
     in memory-efficient mode) and recall-at-precision-threshold metrics.
     """
     triples = zip(self.all_scores, self.all_preds, self.all_targets)
     label_predictions = [
         LabelListPrediction(sample_scores, sample_preds, sample_targets)
         for sample_scores, sample_preds, sample_targets in triples
     ]
     # Average precisions are expensive; skip them when running in
     # memory-efficient mode.
     want_average_precisions = not self.is_memory_efficient
     return compute_multi_label_classification_metrics(
         label_predictions,
         self.label_names,
         self.calculate_loss(),
         average_precisions=want_average_precisions,
         recall_at_precision_thresholds=self.recall_at_precision_thresholds,
     )