Example #1
import pytest

from creme import metrics  # assumed import; these snippets match the creme (now river) test suite

def test_compose():

    # Mixing a regression metric (MSE) with a classification metric (LogLoss)
    # is rejected at composition time.
    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.LogLoss()

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.MAE() + metrics.LogLoss()
Example #2
import pytest

from creme import metrics  # assumed import (creme, now river)

def test_compose():

    # Metrics that target the same task compose fine.
    metrics.MAE() + metrics.MSE()
    metrics.Accuracy() + metrics.LogLoss()
    metrics.Accuracy() + metrics.ConfusionMatrix()

    # Mixing regression and classification metrics raises.
    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.LogLoss()

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.MAE() + metrics.LogLoss()
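For intuition, here is a minimal sketch of how `+` composition with a task-compatibility check could be implemented. The `task` tag, `BaseMetric`, and `MetricList` names are hypothetical stand-ins for illustration, not creme's actual internals.

# Sketch only: enforce task compatibility in __add__.
class BaseMetric:
    task = None  # hypothetical task tag, e.g. 'regression' or 'classification'

    def __add__(self, other):
        # Refuse to combine metrics built for different tasks.
        if self.task != other.task:
            raise ValueError(f'{type(self).__name__} and '
                             f'{type(other).__name__} are not compatible')
        return MetricList([self, other])

class MetricList(BaseMetric):
    def __init__(self, items):
        self.items = list(items)
        self.task = self.items[0].task

class MSE(BaseMetric):
    task = 'regression'

class MAE(BaseMetric):
    task = 'regression'

class LogLoss(BaseMetric):
    task = 'classification'

MSE() + MAE()  # fine: both are regression metrics

try:
    MSE() + LogLoss()
except ValueError as err:
    print(err)  # MSE and LogLoss are not compatible

Because `MetricList` inherits the same `__add__`, a chained `MSE() + MAE() + LogLoss()` also fails at the last step, matching the second assertion in the test.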
Example #3
def default_metrics(self):
    # Default classification metric suite (method excerpt).
    return [
        metrics.Accuracy(),
        metrics.LogLoss(),
        metrics.Precision(),
        metrics.Recall(),
        metrics.F1()
    ]
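A hedged sketch of how such a default list might be consumed in a streaming loop. The free-function mirror of the method, the toy stream, and the probability threshold are all invented for illustration; the routing reflects that LogLoss consumes predicted probabilities while Accuracy/Precision/Recall/F1 consume hard labels.

from creme import metrics  # assumed import (creme, now river)

def default_metrics():
    # Free-function mirror of the method above, for a self-contained sketch.
    return [
        metrics.Accuracy(),
        metrics.LogLoss(),
        metrics.Precision(),
        metrics.Recall(),
        metrics.F1(),
    ]

stream = [(True, 0.8), (False, 0.3), (True, 0.6)]  # toy (y_true, proba) pairs

scores = default_metrics()
for y_true, proba in stream:
    y_label = proba > 0.5  # threshold for the label-based metrics
    for metric in scores:
        # LogLoss consumes probabilities; the others consume hard labels.
        if isinstance(metric, metrics.LogLoss):
            metric.update(y_true, proba)
        else:
            metric.update(y_true, y_label)

for metric in scores:
    print(type(metric).__name__, metric.get())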
Example #4
import math

from creme import metrics  # assumed import (creme, now river)
from sklearn import metrics as sk_metrics

def test_log_loss():

    y_true = [True, False, False, True]
    y_pred = [0.9, 0.1, 0.2, 0.65]

    metric = metrics.LogLoss()
    for i, (y_t, y_p) in enumerate(zip(y_true, y_pred)):
        metric = metric.update(y_t, y_p)  # update returns the metric, so reassignment is safe
        if i >= 1:
            # Compare with sklearn on the prefix seen so far; use isclose
            # rather than == to avoid spurious floating-point failures.
            assert math.isclose(metric.get(),
                                sk_metrics.log_loss(y_true[:i + 1],
                                                    y_pred[:i + 1]))
Example #5
import math

from creme import metrics  # assumed import (creme, now river)
from sklearn import metrics as sk_metrics

def test_log_loss():

    metric = metrics.LogLoss()

    y_true = [True, False, False, True]
    y_pred = [0.9, 0.1, 0.2, 0.65]

    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):
        metric.update(yt, yp)

        if i >= 1:
            assert math.isclose(metric.get(),
                                sk_metrics.log_loss(y_true[:i + 1], y_pred[:i + 1]))

    # Undo the last update; the running value must match sklearn on the
    # remaining prefix.
    metric.revert(y_true[-1], y_pred[-1])
    assert math.isclose(metric.get(), sk_metrics.log_loss(y_true[:-1], y_pred[:-1]))
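The behaviour being tested follows from log loss being a running mean of per-sample losses. Using the standard binary log loss definition (not part of the snippet), after $n$ updates the metric holds

\[
L_n = -\frac{1}{n}\sum_{i=1}^{n}\bigl[\,y_i \log p_i + (1-y_i)\log(1-p_i)\,\bigr],
\]

so reverting the most recent pair is an O(1) subtraction:

\[
L_{n-1} = \frac{n\,L_n - \ell(y_n, p_n)}{n-1},
\qquad
\ell(y, p) = -\bigl[\,y\log p + (1-y)\log(1-p)\,\bigr].
\]

This is why the final assertion agrees with sklearn recomputed from scratch on the first three pairs.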
Example #6
      [True, False, True, True, True], [True, True, False, True, True]),
     (metrics.MacroFBeta(beta=0.5),
      functools.partial(sk_metrics.fbeta_score, beta=0.5, average='macro'),
      [0, 1, 0, 2, 2], [0, 0, 1, 1, 2]),
     (metrics.MicroFBeta(beta=0.5),
      functools.partial(sk_metrics.fbeta_score, beta=0.5, average='micro'),
      [0, 1, 0, 2, 2], [0, 0, 1, 1, 2]),
     (metrics.F1(), sk_metrics.f1_score,
      [True, False, True, True, True], [True, True, False, True, True]),
     (metrics.MacroF1(),
      functools.partial(sk_metrics.f1_score, average='macro'),
      [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.MicroF1(),
      functools.partial(sk_metrics.f1_score, average='micro'),
      [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.LogLoss(), sk_metrics.log_loss,
      [True, False, False, True], [0.9, 0.1, 0.2, 0.65]),
     (metrics.CrossEntropy(),
      functools.partial(sk_metrics.log_loss, labels=[0, 1, 2]),
      [0, 1, 2, 2],
      [[0.29450637, 0.34216758, 0.36332605],
       [0.21290077, 0.32728332, 0.45981591],
       [0.42860913, 0.33380113, 0.23758974],
       [0.44941979, 0.32962558, 0.22095463]])])
def test_metric(metric, sk_metric, y_true, y_pred):

    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):

        # Multiclass probability vectors arrive as lists; convert them to
        # the {class_label: probability} dicts the metrics expect.
        if isinstance(yp, list):
            yp = dict(enumerate(yp))

        metric.update(yt, yp)
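The `dict(enumerate(yp))` conversion turns a positional probability vector into the {class_label: probability} mapping that multiclass metrics such as CrossEntropy consume. A standalone illustration with one of the probability rows from the parametrization above:

yp = [0.29450637, 0.34216758, 0.36332605]
print(dict(enumerate(yp)))
# {0: 0.29450637, 1: 0.34216758, 2: 0.36332605}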