Example 1
 def print_metrics(self, report_pep=False) -> None:
     print(f"Accuracy: {self.accuracy * 100:.2f}\n")
     print("Macro P/R/F1 Scores:")
     print("\nSoft Metrics:")
     if self.per_label_soft_scores:
         soft_scores = [
             {
                 "label": label,
                 "avg_pr": f"{metrics.average_precision:.3f}",
                 "roc_auc": f"{(metrics.roc_auc or 0.0):.3f}",
             }
             for label, metrics in sorted(self.per_label_soft_scores.items())
         ]
         columns = {
             "label": "Label",
             "avg_pr": "Average precision",
             "roc_auc": "ROC AUC",
         }
         print(ascii_table(soft_scores, columns))
         all_thresholds = set(
             itertools.chain.from_iterable(
                 metrics.recall_at_precision
                 for metrics in self.per_label_soft_scores.values()
             )
         )
         print("\nRecall at Precision")
         print(
             ascii_table(
                 (
                     dict(
                         {"label": label},
                         **{
                             str(p): f"{r:.3f}"
                             for p, r in metrics.recall_at_precision.items()
                         },
                     )
                     for label, metrics in sorted(self.per_label_soft_scores.items())
                 ),
                 dict(
                     {"label": "Label"},
                     **{str(t): f"R@P {t}" for t in all_thresholds},
                 ),
                 alignments={"label": "<"},
             )
         )
     if self.mcc:
         print(f"\nMatthews correlation coefficient: {self.mcc :.3f}")
     if self.roc_auc:
         print(f"\nROC AUC: {self.roc_auc:.3f}")
     if report_pep:
         self.print_pep()
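
Example 1 reads only three fields from each per-label soft score: average_precision, an optional roc_auc, and a recall_at_precision mapping keyed by precision threshold. A minimal sketch of that shape, assuming a NamedTuple container (the record name and the sample numbers are illustrative, not taken from the snippets):

from typing import Dict, NamedTuple, Optional


class SoftMetricsSketch(NamedTuple):
    # Hypothetical record; the field names mirror what Example 1 reads.
    average_precision: float
    recall_at_precision: Dict[float, float]  # precision threshold -> recall
    roc_auc: Optional[float]


per_label_soft_scores = {
    "negative": SoftMetricsSketch(0.87, {0.2: 1.0, 0.4: 0.90, 0.9: 0.30}, None),
    "positive": SoftMetricsSketch(0.91, {0.2: 1.0, 0.4: 0.95, 0.9: 0.40}, 0.88),
}

# Same idea as the itertools.chain.from_iterable call above: the union of
# thresholds across labels becomes the column set of the "Recall at Precision" table.
all_thresholds = set().union(
    *(m.recall_at_precision for m in per_label_soft_scores.values())
)
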
Example 2
 def print_metrics(self, indentation="") -> None:
     print(
         ascii_table(
             [{
                 "label": label,
                 "precision": f"{metrics.precision:.2f}",
                 "recall": f"{metrics.recall:.2f}",
                 "f1": f"{metrics.f1:.2f}",
                 "support":
                 metrics.true_positives + metrics.false_negatives,
             } for label, metrics in sorted(self.per_label_scores.items())],
             human_column_names={
                 "label": "Label",
                 "precision": "Precision",
                 "recall": "Recall",
                 "f1": "F1",
                 "support": "Support",
             },
             footer={
                 "label": "Overall macro scores",
                 "precision": f"{self.macro_scores.precision:.2f}",
                 "recall": f"{self.macro_scores.recall:.2f}",
                 "f1": f"{self.macro_scores.f1:.2f}",
             },
             alignments={"label": "<"},
             indentation=indentation,
         ))
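
Example 2 touches only a handful of fields per label (precision, recall, f1, true_positives, false_negatives) plus a macro_scores aggregate with the same three rates. A sketch of toy inputs that would produce the same row dicts; the PRF1Sketch name and the numbers are assumptions for illustration:

from typing import NamedTuple


class PRF1Sketch(NamedTuple):
    # Hypothetical record; fields mirror what Example 2 reads per label.
    true_positives: int
    false_positives: int
    false_negatives: int
    precision: float
    recall: float
    f1: float


per_label_scores = {
    "negative": PRF1Sketch(5, 1, 2, 0.83, 0.71, 0.77),
    "positive": PRF1Sketch(8, 2, 1, 0.80, 0.89, 0.84),
}

# Support is derived exactly as above: true positives + false negatives per label.
rows = [
    {
        "label": label,
        "precision": f"{m.precision:.2f}",
        "recall": f"{m.recall:.2f}",
        "f1": f"{m.f1:.2f}",
        "support": m.true_positives + m.false_negatives,
    }
    for label, m in sorted(per_label_scores.items())
]
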
Example 3
    def test_with_headers_and_footer(self):
        two_rows = [
            {
                "stage": "forward",
                "total": "0.011"
            },
            {
                "stage": "comput_loss",
                "total": "0.000"
            },
        ]

        headers = {"stage": "Stage", "total": "Total Time"}
        footers = {
            "stage": "This very long footer is necessary.",
            "total": "0.055"
        }

        in_table_form = ascii_table(two_rows, headers, footers)

        output = ("+-------------------------------------+------------+\n"
                  "| Stage                               | Total Time |\n"
                  "+-------------------------------------+------------+\n"
                  "|                             forward |      0.011 |\n"
                  "|                         comput_loss |      0.000 |\n"
                  "+-------------------------------------+------------+\n"
                  "| This very long footer is necessary. | 0.055      |\n"
                  "+-------------------------------------+------------+")
        self.assertEqual(in_table_form, output)
Example 4
    def test_simple(self):
        two_rows = [
            {
                "stage": "forward",
                "total": "0.011"
            },
            {
                "stage": "comput_loss",
                "total": "0.000"
            },
        ]
        in_table_form = ascii_table(two_rows)

        output = ("+-------------+-------+\n"
                  "|     forward | 0.011 |\n"
                  "| comput_loss | 0.000 |\n"
                  "+-------------+-------+")
        self.assertEqual(in_table_form, output)
Example 5
 def print_metrics(self) -> None:
     print(f"Accuracy: {self.accuracy * 100:.2f}\n")
     print("Macro P/R/F1 Scores:")
     self.macro_prf1_metrics.print_metrics(indentation="\t")
     print("\nSoft Metrics:")
     if self.per_label_soft_scores:
         soft_scores = {
             label: f"{metrics.average_precision * 100:.2f}"
             for label, metrics in self.per_label_soft_scores.items()
         }
         print(
             ascii_table_from_dict(soft_scores,
                                   "Label",
                                   "Average precision",
                                   indentation="\t"))
         all_thresholds = set(
             itertools.chain.from_iterable(
                 metrics.recall_at_precision
                 for metrics in self.per_label_soft_scores.values()))
         print("\n\t Precision at Recall")
         print(
             ascii_table(
                 (dict(
                     {"label": label},
                     **{
                         str(p): f"{r:.2f}"
                         for p, r in metrics.recall_at_precision.items()
                     },
                 )
                  for label, metrics in self.per_label_soft_scores.items()),
                 dict(
                     {"label": "Label"},
                     **{str(t): f"P@R {t}"
                        for t in all_thresholds},
                 ),
                 indentation="\t",
             ))
     if self.mcc:
         print(f"\nMatthews correlation coefficient: {self.mcc :.2f}")
     if self.roc_auc:
         print(f"\nROC AUC: {self.roc_auc:.3f}")
Example 6
    def test_with_headers(self):
        two_rows = [
            {
                "stage": "forward",
                "total": "0.011"
            },
            {
                "stage": "comput_loss",
                "total": "0.000"
            },
        ]

        headers = {"stage": "Stage", "total": "Total Time"}

        in_table_form = ascii_table(two_rows, headers)

        output = ("+-------------+------------+\n"
                  "| Stage       | Total Time |\n"
                  "+-------------+------------+\n"
                  "|     forward |      0.011 |\n"
                  "| comput_loss |      0.000 |\n"
                  "+-------------+------------+")
        self.assertEqual(in_table_form, output)
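
Taken together, the three tests (Examples 3, 4 and 6) pin down the expected layout: an optional header row and footer row are left-aligned, data cells are right-aligned, and each column is as wide as its widest header, cell, or footer value. Below is a simplified re-implementation reconstructed only from those expected strings; it is not the library's code, and it ignores the human_column_names keyword, per-column alignments, and indentation options used elsewhere in these examples:

def ascii_table_sketch(rows, header=None, footer=None):
    # Simplified stand-in reconstructed from the expected strings in the tests
    # above: headers/footers are rendered left-aligned, data cells right-aligned.
    rows = [dict(r) for r in rows]
    columns = list(header) if header else list(rows[0])
    widths = {
        c: max(
            [len(str(r.get(c, ""))) for r in rows]
            + ([len(str(header.get(c, "")))] if header else [])
            + ([len(str(footer.get(c, "")))] if footer else [])
        )
        for c in columns
    }
    rule = "+" + "+".join("-" * (widths[c] + 2) for c in columns) + "+"

    def render(row, align):
        cells = (format(str(row.get(c, "")), align + str(widths[c])) for c in columns)
        return "| " + " | ".join(cells) + " |"

    lines = [rule]
    if header:
        lines += [render(header, "<"), rule]
    lines += [render(r, ">") for r in rows]
    lines.append(rule)
    if footer:
        lines += [render(footer, "<"), rule]
    return "\n".join(lines)

Feeding it the two_rows fixture from test_simple reproduces the four-line table expected there; the header and footer variants follow the same width and alignment rules.
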
Example 7
    def print_metrics(self, report_pep=False) -> None:
        print(f"Accuracy: {self.accuracy * 100:.2f}")
        print("\nSoft Metrics:")
        if self.per_label_soft_scores:
            soft_scores = []
            for label, metrics in sorted(self.per_label_soft_scores.items()):
                total_num_examples = 0
                true_positive = 0
                false_positive = 0
                false_negative = 0
                if label in self.macro_prf1_metrics.per_label_scores:
                    per_label_score = self.macro_prf1_metrics.per_label_scores[label]
                    true_positive = per_label_score.true_positives
                    false_positive = per_label_score.false_positives
                    false_negative = per_label_score.false_negatives
                    total_num_examples = (
                        per_label_score.true_positives + per_label_score.false_negatives
                    )

                soft_scores.append(
                    {
                        "label": label,
                        "avg_pr": f"{metrics.average_precision:.3f}",
                        "roc_auc": f"{(metrics.roc_auc or 0.0):.3f}",
                        "true_positive": f"{true_positive}",
                        "false_positive": f"{false_positive}",
                        "false_negative": f"{false_negative}",
                        "support": f"{total_num_examples}",
                    }
                )
            columns = {
                "label": "Label",
                "avg_pr": "Average precision",
                "roc_auc": "ROC AUC",
                "true_positive": "True positive",
                "false_positive": "False positive",
                "false_negative": "False negative",
                "support": "Support",
            }
            print(ascii_table(soft_scores, columns))
            print("\nRecall at Precision")
            r_at_p_thresholds = set(
                itertools.chain.from_iterable(
                    metrics.recall_at_precision
                    for metrics in self.per_label_soft_scores.values()
                )
            )
            print(
                ascii_table(
                    (
                        dict(
                            {"label": label},
                            **{
                                str(p): f"{r:.3f}"
                                for p, r in metrics.recall_at_precision.items()
                            },
                        )
                        for label, metrics in sorted(self.per_label_soft_scores.items())
                    ),
                    dict(
                        {"label": "Label"},
                        **{str(t): f"R@P {t}" for t in r_at_p_thresholds},
                    ),
                    alignments={"label": "<"},
                )
            )
            print("\nPrecision at Recall")
            p_at_r_thresholds = set(
                itertools.chain.from_iterable(
                    metrics.precision_at_recall
                    for metrics in self.per_label_soft_scores.values()
                )
            )
            print(
                ascii_table(
                    (
                        dict(
                            {"label": label},
                            **{
                                str(p): f"{r:.3f}"
                                for p, r in metrics.precision_at_recall.items()
                            },
                        )
                        for label, metrics in sorted(self.per_label_soft_scores.items())
                    ),
                    dict(
                        {"label": "Label"},
                        **{str(t): f"P@R {t}" for t in p_at_r_thresholds},
                    ),
                    alignments={"label": "<"},
                )
            )
        if self.mcc:
            print(f"\nMatthews correlation coefficient: {self.mcc :.3f}")
        if self.roc_auc:
            print(f"\nROC AUC: {self.roc_auc:.3f}")
        if report_pep:
            self.print_pep()