import numpy as np
import pytest

from mindspore import Tensor
from mindspore.nn.metrics import ConfusionMatrixMetric


def test_confusion_matrix_metric_init_metric_name_str():
    """An unsupported metric_name ("wwwww") should raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        metric = ConfusionMatrixMetric(skip_channel=True, metric_name="wwwww", calculation_method=False)
        x = Tensor(np.array([[[0], [1]], [[1], [0]]]))
        y = Tensor(np.array([[[0], [1]], [[1], [0]]]))
        metric.update(x, y)
        output = metric.eval()

        assert np.allclose(output, np.array([0.75]))
Example #2
class F1:
    '''
    Calculate the F1 score. Supports "Binary" mode (accumulates TP/FP/FN counters)
    and "MultiLabel" mode (delegates to ConfusionMatrixMetric with "f1 score").
    '''
    def __init__(self, use_crf=False, num_labels=2, mode="Binary"):
        self.TP = 0
        self.FP = 0
        self.FN = 0
        self.use_crf = use_crf
        self.num_labels = num_labels
        self.mode = mode
        if self.mode.lower() not in ("binary", "multilabel"):
            raise ValueError(
                "Assessment mode not supported, support: [Binary, MultiLabel]")
        if self.mode.lower() != "binary":
            self.metric = ConfusionMatrixMetric(skip_channel=False,
                                                metric_name="f1 score",
                                                calculation_method=False,
                                                decrease="mean")

    def update(self, logits, labels):
        '''
        Accumulate F1 statistics from one batch of logits and labels.
        '''
        labels = labels.asnumpy()
        labels = np.reshape(labels, -1)
        if self.use_crf:
            # Decode the best tag sequence from the CRF backpointers.
            backpointers, best_tag_id = logits
            best_path = postprocess(backpointers, best_tag_id)
            logit_id = []
            for ele in best_path:
                logit_id.extend(ele)
        else:
            logits = logits.asnumpy()
            logit_id = np.argmax(logits, axis=-1)
            logit_id = np.reshape(logit_id, -1)

        if self.mode.lower() == "binary":
            pos_eva = np.isin(logit_id, [i for i in range(1, self.num_labels)])
            pos_label = np.isin(labels, [i for i in range(1, self.num_labels)])
            self.TP += np.sum(pos_eva & pos_label)
            self.FP += np.sum(pos_eva & (~pos_label))
            self.FN += np.sum((~pos_eva) & pos_label)
        else:
            # np.int was removed in NumPy 1.24; use the builtin int for the one-hot matrices.
            target = np.zeros((len(labels), self.num_labels), dtype=int)
            pred = np.zeros((len(logit_id), self.num_labels), dtype=int)
            for i, label in enumerate(labels):
                target[i][label] = 1
            for i, label in enumerate(logit_id):
                pred[i][label] = 1
            self.metric.update(pred, target)

    def eval(self):
        if self.mode.lower() == "binary":
            # In binary mode the score comes from the accumulated counters;
            # self.metric only exists in MultiLabel mode.
            precision = self.TP / (self.TP + self.FP + 1e-8)
            recall = self.TP / (self.TP + self.FN + 1e-8)
            return 2 * precision * recall / (precision + recall + 1e-8)
        return self.metric.eval()
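The following is a minimal usage sketch (not part of the original example) showing the Binary mode of this F1 class; the logits/labels shapes and values are illustrative assumptions.

# Usage sketch (illustrative values): one sequence of 3 tokens, 2 labels.
f1 = F1(use_crf=False, num_labels=2, mode="Binary")
logits = Tensor(np.array([[[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]]], dtype=np.float32))  # (batch, seq_len, num_labels)
labels = Tensor(np.array([[0, 1, 0]], dtype=np.int32))                               # (batch, seq_len)
f1.update(logits, labels)
print(f1.TP, f1.FP, f1.FN)   # for these values: 1, 1, 0
print(f1.eval())             # 2*P*R/(P+R) with P=0.5, R=1.0, i.e. about 0.667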
def test_confusion_matrix_metric():
    """Accumulated "tpr" over two update() calls should evaluate to 0.75."""
    metric = ConfusionMatrixMetric(skip_channel=True, metric_name="tpr", calculation_method=False)
    metric.clear()
    x = Tensor(np.array([[[0], [1]], [[1], [0]]]))
    y = Tensor(np.array([[[0], [1]], [[0], [1]]]))
    metric.update(x, y)

    x = Tensor(np.array([[[0], [1]], [[1], [0]]]))
    y = Tensor(np.array([[[0], [1]], [[1], [0]]]))
    metric.update(x, y)
    output = metric.eval()

    assert np.allclose(output, np.array([0.75]))
def test_confusion_matrix_metric_update_len():
    """update() expects both y_pred and y; passing a single input should raise ValueError."""
    x = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.2], [0.9, 0.6, 0.5]]))
    metric = ConfusionMatrixMetric(skip_channel=True, metric_name="ppv", calculation_method=True)
    metric.clear()

    with pytest.raises(ValueError):
        metric.update(x)
def test_confusion_matrix_metric_runtime():
    """Calling eval() before any update() should raise RuntimeError."""
    metric = ConfusionMatrixMetric(skip_channel=True, metric_name="tnr", calculation_method=True)
    metric.clear()

    with pytest.raises(RuntimeError):
        metric.eval()
def test_confusion_matrix_metric_init_compute_sample():
    """calculation_method must be a bool; passing an int should raise TypeError."""
    with pytest.raises(TypeError):
        ConfusionMatrixMetric(calculation_method=1)
def test_confusion_matrix_metric_init_skip_channel():
    """skip_channel must be a bool; passing an int should raise TypeError."""
    with pytest.raises(TypeError):
        ConfusionMatrixMetric(skip_channel=1)
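As a counterpart to the calculation_method=False tests above, here is a hedged sketch of per-sample evaluation (calculation_method=True with decrease="mean"); the input tensors are illustrative, and the expected result assumes both samples predict the single positive channel correctly.

# Sketch: per-sample PPV averaged over the batch (decrease="mean").
metric = ConfusionMatrixMetric(skip_channel=True, metric_name="ppv",
                               calculation_method=True, decrease="mean")
metric.clear()
x = Tensor(np.array([[[0], [1]], [[0], [1]]]))  # predictions: positive channel hit in both samples
y = Tensor(np.array([[[0], [1]], [[0], [1]]]))  # ground truth matches exactly
metric.update(x, y)
print(metric.eval())  # expected to be 1.0 (PPV of 1.0 per sample, averaged)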