Example 1
import pandas as pd

from river import datasets, linear_model, metrics, multiclass, preprocessing, stream


def test_online_batch_consistent():

    # Batch

    batch = preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(
        linear_model.LogisticRegression())

    dataset = datasets.ImageSegments()

    batch_metric = metrics.MacroF1()

    # chunksize=1 yields one-row mini-batches, so the batch model sees the
    # same samples, in the same order, as the online model below
    for i, x in enumerate(pd.read_csv(dataset.path, chunksize=1)):
        y = x.pop("category")
        y_pred = batch.predict_many(x)
        batch.learn_many(x, y)

        for yt, yp in zip(y, y_pred):
            if yp is not None:
                batch_metric.update(yt, yp)

        if i == 30:
            break

    # Online

    online = preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(
        linear_model.LogisticRegression())

    online_metric = metrics.MacroF1()

    X = pd.read_csv(dataset.path)
    Y = X.pop("category")

    for i, (x, y) in enumerate(stream.iter_pandas(X, Y)):
        y_pred = online.predict_one(x)
        online.learn_one(x, y)

        if y_pred is not None:
            online_metric.update(y, y_pred)

        if i == 30:
            break

    assert online_metric.get() == batch_metric.get()
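
For reference, the predict-then-learn loop in the online half can also be written with river's progressive validation helper. A minimal sketch, assuming the installed river version ships evaluate.progressive_val_score:

from river import datasets, evaluate, linear_model, metrics, multiclass, preprocessing

model = preprocessing.StandardScaler() | multiclass.OneVsRestClassifier(
    linear_model.LogisticRegression())

# For each sample: predict first, score the prediction, then learn from it.
evaluate.progressive_val_score(
    dataset=datasets.ImageSegments(),
    model=model,
    metric=metrics.MacroF1(),
)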
Example 2
    def __init__(self, cm: "metrics.ConfusionMatrix" = None):

        # Every metric below shares the same confusion matrix, so the
        # underlying counts only have to be updated once per prediction.
        self.cm = metrics.ConfusionMatrix() if cm is None else cm
        self.accuracy = metrics.Accuracy(cm=self.cm)
        self.kappa = metrics.CohenKappa(cm=self.cm)
        self.kappa_m = metrics.KappaM(cm=self.cm)
        self.kappa_t = metrics.KappaT(cm=self.cm)
        self.recall = metrics.Recall(cm=self.cm)
        self.micro_recall = metrics.MicroRecall(cm=self.cm)
        self.macro_recall = metrics.MacroRecall(cm=self.cm)
        self.precision = metrics.Precision(cm=self.cm)
        self.micro_precision = metrics.MicroPrecision(cm=self.cm)
        self.macro_precision = metrics.MacroPrecision(cm=self.cm)
        self.f1 = metrics.F1(cm=self.cm)
        self.micro_f1 = metrics.MicroF1(cm=self.cm)
        self.macro_f1 = metrics.MacroF1(cm=self.cm)
        self.geometric_mean = metrics.GeometricMean(cm=self.cm)
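
The point of the shared matrix is that the counts live in one place: update the ConfusionMatrix once per sample and every metric wrapping it reflects the new counts. A minimal sketch, assuming (as the snippet above does) that river's multiclass metrics accept a cm keyword:

from river import metrics

cm = metrics.ConfusionMatrix()
accuracy = metrics.Accuracy(cm=cm)
macro_f1 = metrics.MacroF1(cm=cm)

# One update to the shared matrix is enough; both metrics read from it.
for y_true, y_pred in [(0, 0), (1, 1), (1, 0), (2, 2)]:
    cm.update(y_true, y_pred)

print(accuracy.get(), macro_f1.get())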
Example 3
from functools import partial

import pytest
from river import metrics
from sklearn import metrics as sk_metrics

TEST_CASES = [
    # ... (earlier test cases elided in this excerpt) ...
    (metrics.WeightedPrecision(),
     partial(sk_metrics.precision_score, average='weighted')),
    (metrics.Recall(), sk_metrics.recall_score),
    (metrics.MacroRecall(), partial(sk_metrics.recall_score, average='macro')),
    (metrics.MicroRecall(), partial(sk_metrics.recall_score, average='micro')),
    (metrics.WeightedRecall(),
     partial(sk_metrics.recall_score, average='weighted')),
    (metrics.FBeta(beta=.5), partial(sk_metrics.fbeta_score, beta=.5)),
    (metrics.MacroFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), partial(sk_metrics.f1_score, average='macro')),
    (metrics.MicroF1(), partial(sk_metrics.f1_score, average='micro')),
    (metrics.WeightedF1(), partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', [
    pytest.param(metric, sk_metric, id=f'{metric.__class__.__name__}')
    for metric, sk_metric in TEST_CASES
])
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings(
    'ignore::sklearn.exceptions.UndefinedMetricWarning')
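
The body of the decorated test is cut off in this excerpt. A plausible sketch of what it could look like (the name test_metric and the random labels are assumptions, not the project's actual code): update the incremental metric one sample at a time and compare it after each step against the sklearn metric computed on the prefix seen so far.

import math
import random

def test_metric(metric, sk_metric):
    # Hypothetical body: feed the same stream to both implementations and
    # check the incremental result against the batch result at every step.
    y_true = [random.randint(0, 1) for _ in range(30)]
    y_pred = [random.randint(0, 1) for _ in range(30)]

    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):
        metric.update(yt, yp)
        assert math.isclose(
            metric.get(),
            sk_metric(y_true[:i + 1], y_pred[:i + 1]),
            abs_tol=1e-10,
        )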
Example 4
from functools import partial

import pytest
from river import metrics
from sklearn import metrics as sk_metrics

TEST_CASES = [
    # ... (earlier test cases elided in this excerpt) ...
    (
        metrics.MicroFBeta(beta=0.5),
        partial(sk_metrics.fbeta_score,
                beta=0.5,
                average="micro",
                zero_division=0),
    ),
    (
        metrics.WeightedFBeta(beta=0.5),
        partial(sk_metrics.fbeta_score,
                beta=0.5,
                average="weighted",
                zero_division=0),
    ),
    (metrics.F1(), partial(sk_metrics.f1_score, zero_division=0)),
    (metrics.MacroF1(),
     partial(sk_metrics.f1_score, average="macro", zero_division=0)),
    (metrics.MicroF1(),
     partial(sk_metrics.f1_score, average="micro", zero_division=0)),
    (metrics.WeightedF1(),
     partial(sk_metrics.f1_score, average="weighted", zero_division=0)),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize(
    "metric, sk_metric",
    [
        pytest.param(metric, sk_metric, id=f"{metric.__class__.__name__}")