Code example #1
0
    def _compare_against_f1_score():
        """Verify f1_score_multi_th agrees with per-threshold f1_score calls.

        Draws random binary labels and random probability predictions, then
        compares the batched multi-threshold implementation against a loop of
        single-threshold calls, elementwise, to tight tolerance.
        """
        labels = np.random.randint(0, 2, (64,))
        probs = np.random.uniform(0, 1, (64,))
        thresholds = [.01, .05, .1, .2, .4, .5, .6, .8, .95, .99]

        # Reference: one independent f1_score call per threshold.
        per_threshold = []
        for threshold in thresholds:
            per_threshold.append(f1_score(labels, probs, threshold))

        # Candidate: all thresholds evaluated in a single call.
        batched = f1_score_multi_th(labels, probs, thresholds)
        assert np.allclose(per_threshold, batched, atol=1e-15)
Code example #2
0
    def _test_vs_sklearn():
        """Cross-check f1_score against sklearn.metrics.f1_score.

        Uses an imbalanced random sample (5 positives, 27 negatives) and a .5
        decision threshold; the two implementations must agree to 1e-10.
        """
        labels = np.array([1] * 5 + [0] * 27)  # imbalanced
        np.random.shuffle(labels)
        probs = np.random.uniform(0, 1, 32)

        ours = f1_score(labels, probs, pred_threshold=.5)
        reference = sklearn.metrics.f1_score(labels, probs > .5)
        delta = abs(ours - reference)
        # Informative failure message showing both scores and their gap.
        info = ("sklearn: {:.15f}\ntest:    {:.15f}"
                "\nabsdiff: {}".format(reference, ours, delta))
        assert (delta < 1e-10), info
Code example #3
0
 def _test_no_positive_predictions():
     """f1_score must return 0 when nothing is predicted positive."""
     truth, predicted = [0, 0, 1], [0, 0, 0]
     assert f1_score(truth, predicted) == 0
Code example #4
0
 def _test_no_positive_labels():
     """With all-negative labels, this input is expected to score 0.5."""
     truth = [0 for _ in range(6)]
     predicted = [.1, .2, .3, .65, .7, .8]
     assert f1_score(truth, predicted) == 0.5
Code example #5
0
 def _test_basic():
     """Sanity check: a small hand-made example must score 1/3."""
     truth = [0, 0, 1, 0, 0, 0, 1, 1]
     predicted = [.01, .93, .42, .61, .15, 0, 1, .5]
     score = f1_score(truth, predicted)
     assert abs(score - 1 / 3) < 1e-15
Code example #6
0
File: misc_test.py  Project: adbmd/deeptrain
 def f05_score(y_true, y_pred, pred_threshold=.5):
     """Return the F0.5 score (beta=.5 weights precision over recall).

     Thin convenience wrapper that delegates to metrics.f1_score.
     """
     score = metrics.f1_score(y_true, y_pred, pred_threshold, beta=.5)
     return score