Example 1
    def _calculate_step_metrics(self, logits, y):
        # compute the loss and per-step classification metrics (binary task, num_classes=2)
        loss = self._loss_function(logits[1], y)
        # loss = F.cross_entropy(logits[1], y)
        preds = torch.argmax(logits[1], dim=1)
        num_correct = torch.eq(preds.view(-1), y.view(-1)).sum()
        acc = accuracy(preds, y)
        f1_score = f1(preds, y, num_classes=2, average='weighted')
        fb05_score = fbeta(preds,
                           y,
                           num_classes=2,
                           average='weighted',
                           beta=0.5)
        fb2_score = fbeta(preds, y, num_classes=2, average='weighted', beta=2)
        cm = confusion_matrix(preds, y, num_classes=2)
        prec = precision(preds, y, num_classes=2, class_reduction='weighted')
        rec = recall(preds, y, num_classes=2, class_reduction='weighted')
        # au_roc = auroc(preds, y, pos_label=1)

        return {
            'loss': loss,
            'acc': acc,
            'f1_score': f1_score,
            'f05_score': fb05_score,
            'f2_score': fb2_score,
            'precision': prec,
            'recall': rec,
            # 'auroc': au_roc,
            'confusion_matrix': cm,
            'num_correct': num_correct
        }
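A dictionary like this is normally reduced once per epoch. A minimal sketch (not part of the original module) of how the per-step outputs could be aggregated in a pre-2.0 LightningModule hook such as validation_epoch_end, reusing the keys returned above:

    def validation_epoch_end(self, outputs):
        # average the loss and sum the correct-prediction counts across steps
        avg_loss = torch.stack([o['loss'] for o in outputs]).mean()
        num_correct = torch.stack([o['num_correct'] for o in outputs]).sum()
        self.log('val_loss', avg_loss)
        self.log('val_num_correct', num_correct)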
Example 2
def test_fbeta_score(pred, target, beta, exp_score):
    score = fbeta(torch.tensor(pred),
                  torch.tensor(target),
                  num_classes=1,
                  beta=beta,
                  average='none')
    assert torch.allclose(score, torch.tensor(exp_score))
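In the Lightning test suite this function is driven by a @pytest.mark.parametrize decorator that the snippet omits. A self-contained sketch with a single hypothetical case (perfect predictions, for which any F-beta score is 1.0), assuming the pre-1.5 pytorch_lightning.metrics.functional namespace; the original test's parametrized cases are not reproduced here:

import pytest
import torch
from pytorch_lightning.metrics.functional import fbeta

# hypothetical parametrization for illustration only
@pytest.mark.parametrize("pred, target, beta, exp_score", [
    ([1., 0., 1., 0.], [1., 0., 1., 0.], 0.5, [1.0]),
])
def test_fbeta_score(pred, target, beta, exp_score):
    score = fbeta(torch.tensor(pred),
                  torch.tensor(target),
                  num_classes=1,
                  beta=beta,
                  average='none')
    assert torch.allclose(score, torch.tensor(exp_score))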
Example 3
def test_v1_5_metric_classif_mix():
    ConfusionMatrix.__init__._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        ConfusionMatrix(num_classes=1)

    FBeta.__init__._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        FBeta(num_classes=1)

    F1.__init__._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        F1(num_classes=1)

    HammingDistance.__init__._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        HammingDistance()

    StatScores.__init__._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        StatScores()

    target = torch.tensor([1, 1, 0, 0])
    preds = torch.tensor([0, 1, 0, 0])
    confusion_matrix._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.equal(
            confusion_matrix(preds, target, num_classes=2).float(),
            torch.tensor([[2.0, 0.0], [1.0, 1.0]]))

    target = torch.tensor([0, 1, 2, 0, 1, 2])
    preds = torch.tensor([0, 2, 1, 0, 0, 1])
    fbeta._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.allclose(fbeta(preds, target, num_classes=3, beta=0.5),
                              torch.tensor(0.3333),
                              atol=1e-4)

    f1._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.allclose(f1(preds, target, num_classes=3),
                              torch.tensor(0.3333),
                              atol=1e-4)

    target = torch.tensor([[0, 1], [1, 1]])
    preds = torch.tensor([[0, 1], [0, 1]])
    hamming_distance._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert hamming_distance(preds, target) == torch.tensor(0.25)

    preds = torch.tensor([1, 0, 2, 1])
    target = torch.tensor([1, 1, 2, 0])
    stat_scores._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.equal(stat_scores(preds, target, reduce="micro"),
                           torch.tensor([2, 2, 6, 2, 4]))
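Each `_warned = False` reset is there because Lightning's deprecation wrapper appears to emit its warning only once per callable; clearing the flag lets every pytest.deprecated_call block observe a fresh warning. The snippet also omits its imports; a sketch of what it presumably relies on, assuming the pre-1.5 pytorch_lightning.metrics namespace:

import pytest
import torch
from pytorch_lightning.metrics import ConfusionMatrix, F1, FBeta, HammingDistance, StatScores
from pytorch_lightning.metrics.functional import (confusion_matrix, f1, fbeta,
                                                  hamming_distance, stat_scores)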
Example 4
    def training_step(self, batch, idx, swa=False):
        x, y = batch
        logits = self(x)
        loss = self.bce_loss(logits, y)
        scores = torch.sigmoid(logits)

        scores = scores.gt(0.5).float()
        # with beta=0.7 this is an F0.7 score (beta < 1 weights precision more
        # than recall), despite the variable being named f1
        f1 = fbeta(scores, y, 1, average='macro', beta=0.7)
        acc = accuracy(scores, y, 2, class_reduction='macro')

        return {"loss": loss, 'f1': f1, 'acc': acc}
Example 5
    def get_test_metrics(self, display=True):
        # Get Precision - Recall
        output = precision_recall(self.preds,
                                  self.targets,
                                  num_classes=2,
                                  class_reduction='none')
        precision = output[0].numpy()
        recall = output[1].numpy()
        # Get Precision-Recall Curve
        precision_curve, recall_curve = self.get_precision_recall_curve(
            pos_label=1, display=display)
        # Confusion Matrix
        cm = self.get_confusion_matrix(display=display)
        # F1 Score
        f1_score = self.get_f1_score()
        # F0.5 score
        f05_score = fbeta(self.preds,
                          self.targets,
                          num_classes=2,
                          beta=0.5,
                          threshold=0.5,
                          average='none',
                          multilabel=False)
        # F2 Score
        f2_score = fbeta(self.preds,
                         self.targets,
                         num_classes=2,
                         beta=2,
                         threshold=0.5,
                         average='none',
                         multilabel=False)
        # Stats_score - Class 0
        tp_0, fp_0, tn_0, fn_0, sup_0 = self.get_stats_score(class_index=0)
        # Stats_score - Class 1
        tp_1, fp_1, tn_1, fn_1, sup_1 = self.get_stats_score(class_index=1)
        # ROC Curve
        roc_auc_0 = self.get_ROC_curve(pos_label=0)
        roc_auc_1 = self.get_ROC_curve(pos_label=1)
        # Classification Report
        report = classification_report(
            self.targets.detach().numpy(),
            (self.preds.argmax(dim=1)).detach().numpy(),
            output_dict=True)
        print("Confusion Matrix")
        print(cm)
        print("Classification Report")
        print(report)

        # Variables are saved in a file
        # List of metric, value for class 0, value for class 1
        metric = [
            'Precision', 'Recall', 'F1 Score', 'F0.5 Score', 'F2 Score', 'TP',
            'FP', 'TN', 'FN', 'ROC'
        ]
        value_class0 = [
            precision[0], recall[0], f1_score[0].numpy(), f05_score[0].numpy(),
            f2_score[0].numpy(), tp_0, fp_0, tn_0, fn_0, roc_auc_0
        ]
        value_class1 = [
            precision[1], recall[1], f1_score[1].numpy(), f05_score[1].numpy(),
            f2_score[1].numpy(), tp_1, fp_1, tn_1, fn_1, roc_auc_1
        ]
        # Dictionary of lists (avoid shadowing the built-in `dict`)
        metrics = {
            'Metric': metric,
            'Class 0': value_class0,
            'Class 1': value_class1
        }
        df = pd.DataFrame(metrics)
        # dictionary of report
        df_report = pd.DataFrame(report)
        # Saving the dataframe
        df.to_csv(self.CSV_PATH, header=True, index=False)
        df_report.to_csv(self.CSV_PATH, mode='a', header=True, index=False)
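Because average='none' is passed, fbeta returns one score per class, which is why the code above indexes [0] and [1]. A minimal standalone illustration, assuming the same pre-1.5 functional API and hypothetical label tensors:

import torch
from pytorch_lightning.metrics.functional import fbeta

preds = torch.tensor([0, 1, 1, 0])    # hypothetical predicted labels
target = torch.tensor([0, 1, 0, 0])   # hypothetical ground truth
per_class = fbeta(preds, target, num_classes=2, beta=0.5, average='none')
print(per_class[0], per_class[1])     # F0.5 for class 0 and for class 1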