Example #1
0
    def test_error_type_labels(self, size):
        """score_metrics must reject target types it cannot score.

        Both multiclass-multioutput (int, shape (size, 2)) and
        continuous-multioutput (float, shape (size, 2)) targets are
        expected to raise ValueError for 'accuracy_score'.
        """
        labels = [0., 1.]
        probs = [.5, .5]

        # Integer (size, 2) arrays -> multiclass-multioutput target type.
        y_true = np.random.choice(labels, p=probs, size=(size, 2)).astype(int)
        y_pred = np.random.choice(labels, p=probs, size=(size, 2)).astype(int)

        with pytest.raises(ValueError):
            sklearn_api.score_metrics(y_true, y_pred, metrics='accuracy_score')

        # Float (size, 2) arrays -> continuous-multioutput target type.
        y_true = np.random.choice(labels, p=probs, size=(size, 2))
        y_pred = np.random.choice(labels, p=probs, size=(size, 2))

        with pytest.raises(ValueError):
            sklearn_api.score_metrics(y_true, y_pred, metrics='accuracy_score')
Example #2
0
    def test_error_metrics(self, size):
        """Unknown metric names must raise ValueError, alone or in a list."""

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, )).tolist()
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, )).tolist()

        # A single unrecognized metric name.
        with pytest.raises(ValueError):
            sklearn_api.score_metrics(y_true, y_pred, metrics='dummy')

        # A valid name mixed with an unrecognized one must still fail.
        with pytest.raises(ValueError):
            sklearn_api.score_metrics(y_true, y_pred,
                                      metrics=['accuracy_score', 'dummy'])
Example #3
0
    def test_sklearn_precision(self, size):
        """Macro and micro precision must agree with sklearn's values."""

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        # Check both averaging modes against sklearn.metrics.precision_score.
        for average in ('macro', 'micro'):
            scorer_res = sklearn_api.score_metrics(
                y_true, y_pred, metrics='precision_{0}'.format(average))
            sklearn_res = sk.precision_score(y_true, y_pred, average=average)

            assert np.isclose(scorer_res, sklearn_res, rtol=1e-05, atol=1e-5)
Example #4
0
    def test_metric(self):
        """Identical string labels must score a perfect accuracy of 1."""

        labels = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
        # Prediction is an exact (separate) copy of the truth.
        predictions = list(labels)

        accuracy = sklearn_api.score_metrics(labels,
                                             predictions,
                                             metrics='accuracy_score')

        assert accuracy == 1
Example #5
0
    def test_sklearn_zero_one_loss(self, size):
        """zero_one_loss must match sklearn's un-normalized error count."""

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        scorer_res = sklearn_api.score_metrics(y_true, y_pred,
                                               metrics='zero_one_loss')
        # NOTE(review): the scorer apparently returns the raw error count,
        # hence normalize=False on the sklearn side — confirm in score_metrics.
        reference = sk.zero_one_loss(y_true, y_pred, normalize=False)

        assert np.isclose(scorer_res, reference, rtol=1e-05, atol=1e-5)
Example #6
0
    def test_sklearn_metrics(self, size, metric):
        """Each parametrized metric must match its sklearn counterpart."""

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        scorer_res = sklearn_api.score_metrics(y_true, y_pred, metrics=metric)
        reference = getattr(sk, metric)(y_true, y_pred)

        # Some metrics return per-class arrays, others a single scalar;
        # pick the matching comparison for each shape.
        if hasattr(scorer_res, '__iter__'):
            np.testing.assert_allclose(scorer_res, reference,
                                       rtol=1e-05, atol=1e-5)
        else:
            assert np.isclose(scorer_res, reference, rtol=1e-05, atol=1e-5)