Example #1
# Imports assumed from the creme library, whose API (fit_one/fit_many,
# stream.iter_pandas, datasets.ImageSegments) this snippet matches.
import pandas as pd

from creme import (datasets, linear_model, metrics, multiclass,
                   preprocessing, stream)


def test_online_batch_consistent():

    # Batch

    batch = (
        preprocessing.StandardScaler() |
        multiclass.OneVsRestClassifier(
            linear_model.LogisticRegression()
        )
    )

    dataset = datasets.ImageSegments()

    batch_metric = metrics.MacroF1()

    for i, x in enumerate(pd.read_csv(dataset.path, chunksize=1)):
        y = x.pop('category')
        y_pred = batch.predict_many(x)
        batch.fit_many(x, y)

        for yt, yp in zip(y, y_pred):
            if yp is not None:
                batch_metric.update(yt, yp)

        if i == 30:
            break

    # Online

    online = (
        preprocessing.StandardScaler() |
        multiclass.OneVsRestClassifier(
            linear_model.LogisticRegression()
        )
    )

    online_metric = metrics.MacroF1()

    X = pd.read_csv(dataset.path)
    Y = X.pop('category')

    for i, (x, y) in enumerate(stream.iter_pandas(X, Y)):
        y_pred = online.predict_one(x)
        online.fit_one(x, y)

        if y_pred is not None:
            online_metric.update(y, y_pred)

        if i == 30:
            break

    # After seeing the same 31 rows, the chunked (batch) run and the
    # one-by-one (online) run must produce identical scores.
    assert online_metric.get() == batch_metric.get()
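Both halves of the test above score their predictions with the same streaming metric. A minimal sketch of that update/get pattern, assuming the creme library (whose API the snippet matches):

from creme import metrics

# A MacroF1 score is maintained incrementally: feed one (truth, prediction)
# pair at a time and read the running value whenever needed.
metric = metrics.MacroF1()

for yt, yp in zip([0, 1, 2, 2, 1], [0, 2, 2, 2, 1]):
    metric.update(yt, yp)

print(metric.get())  # running macro-averaged F1 over everything seen so far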
Example #2
 def __init__(self, config: Config = None, seed: int = 42):
     self._config = config
     self._rng = RandomState(seed)  # random state, seeded for reproducibility
     self._oracle = self.build_oracle()
     self._oracle_metric = metrics.MacroF1()  # running macro-averaged F1 used to score the oracle
     self._times = 1
     self._arms_selected = []
     self._arms = list(self._config.arms.keys())
     self.init_default_reward()
Example #3
 def default_metrics(self):
     return [
         metrics.Accuracy(),
         metrics.CrossEntropy(),
         metrics.MacroPrecision(),
         metrics.MacroRecall(),
         metrics.MacroF1(),
         metrics.MicroPrecision(),
         metrics.MicroRecall(),
         metrics.MicroF1()
     ]
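A list like the one returned above is typically consumed by updating every metric with the same stream of (truth, prediction) pairs and reading them all back at the end. A minimal sketch of that pattern (the evaluate_all helper is illustrative, not part of the original class), assuming creme-style metrics; note that CrossEntropy expects class probabilities rather than hard labels, so in practice it is fed different predictions than the label-based metrics:

from creme import metrics

def evaluate_all(metric_list, pairs):
    """Update every metric with each (y_true, y_pred) pair and report the results."""
    for y_true, y_pred in pairs:
        for metric in metric_list:
            metric.update(y_true, y_pred)
    return {metric.__class__.__name__: metric.get() for metric in metric_list}

# Example usage with hard class predictions.
scores = evaluate_all(
    [metrics.Accuracy(), metrics.MacroF1(), metrics.MicroF1()],
    pairs=[(0, 0), (1, 2), (2, 2), (2, 1)],
)
print(scores)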
Example #4
                        average='macro'), [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.MicroRecall(),
      functools.partial(sk_metrics.recall_score, average='micro'),
      [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.FBeta(beta=0.5),
      functools.partial(sk_metrics.fbeta_score, beta=0.5),
      [True, False, True, True, True], [True, True, False, True, True]),
     (metrics.MacroFBeta(beta=0.5),
      functools.partial(sk_metrics.fbeta_score, beta=0.5, average='macro'),
      [0, 1, 0, 2, 2], [0, 0, 1, 1, 2]),
     (metrics.MicroFBeta(beta=0.5),
      functools.partial(sk_metrics.fbeta_score, beta=0.5, average='micro'),
      [0, 1, 0, 2, 2], [0, 0, 1, 1, 2]),
     (metrics.F1(), sk_metrics.f1_score,
      [True, False, True, True, True], [True, True, False, True, True]),
     (metrics.MacroF1(),
      functools.partial(sk_metrics.f1_score, average='macro'),
      [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.MicroF1(),
      functools.partial(sk_metrics.f1_score, average='micro'),
      [0, 1, 2, 2, 2], [0, 0, 2, 2, 1]),
     (metrics.LogLoss(), sk_metrics.log_loss,
      [True, False, False, True], [0.9, 0.1, 0.2, 0.65]),
     (metrics.CrossEntropy(),
      functools.partial(sk_metrics.log_loss, labels=[0, 1, 2]),
      [0, 1, 2, 2],
      [[0.29450637, 0.34216758, 0.36332605],
       [0.21290077, 0.32728332, 0.45981591],
       [0.42860913, 0.33380113, 0.23758974],
       [0.44941979, 0.32962558, 0.22095463]])])
def test_metric(metric, sk_metric, y_true, y_pred):
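The body of test_metric is cut off here. As a rough illustration of the kind of check such a parametrized test performs (not the original body), a streaming metric like MacroF1 can be compared against its scikit-learn counterpart by updating it one pair at a time and checking the running value against the batch score on the prefix seen so far, assuming creme's update/get metric API:

import functools
import math

from creme import metrics
from sklearn import metrics as sk_metrics

metric = metrics.MacroF1()
sk_metric = functools.partial(sk_metrics.f1_score, average='macro')
y_true = [0, 1, 2, 2, 2]
y_pred = [0, 0, 2, 2, 1]

for i, (yt, yp) in enumerate(zip(y_true, y_pred)):
    metric.update(yt, yp)
    # The running value should agree with the batch score on the prefix seen so far.
    assert math.isclose(metric.get(), sk_metric(y_true[:i + 1], y_pred[:i + 1]))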
Example #5
    (metrics.MacroRecall(),
     functools.partial(sk_metrics.recall_score, average='macro')),
    (metrics.MicroRecall(),
     functools.partial(sk_metrics.recall_score, average='micro')),
    (metrics.WeightedRecall(),
     functools.partial(sk_metrics.recall_score, average='weighted')),
    (metrics.FBeta(beta=.5), functools.partial(sk_metrics.fbeta_score,
                                               beta=.5)),
    (metrics.MacroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), functools.partial(sk_metrics.f1_score,
                                          average='macro')),
    (metrics.MicroF1(), functools.partial(sk_metrics.f1_score,
                                          average='micro')),
    (metrics.WeightedF1(),
     functools.partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', TEST_CASES)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings(
    'ignore::sklearn.metrics.classification.UndefinedMetricWarning')
Example #6
import pickle
import json

from creme import metrics  # assumed import: the metric names below match creme's API

# read the configuration from standard input
config = json.loads(input())

target = config['target']
predict = config['predict']
m = config['metric']

if m == "Accuracy":
    metric = metrics.Accuracy()
elif m == "CrossEntropy":
    metric = metrics.CrossEntropy()
elif m == "MacroF1":
    metric = metrics.MacroF1()
elif m == "MacroPrecision":
    metric = metrics.MacroPrecision()
elif m == "MacroRecall":
    metric = metrics.MacroRecall()
elif m == "MicroF1":
    metric = metrics.MicroF1()
elif m == "MicroPrecision":
    metric = metrics.MicroPrecision()
elif m == "MicroRecall":
    metric = metrics.MicroRecall()

while True:

    # wait for the next request on standard input
    data = input()
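The if/elif chain above can also be written as a dictionary lookup, which keeps the name-to-metric mapping in one place and makes unknown names fail loudly. A possible alternative sketch (the METRICS mapping and the error message are illustrative, not part of the original script; m is the configuration value read above):

from creme import metrics

# Map the configuration string to a metric constructor.
METRICS = {
    "Accuracy": metrics.Accuracy,
    "CrossEntropy": metrics.CrossEntropy,
    "MacroF1": metrics.MacroF1,
    "MacroPrecision": metrics.MacroPrecision,
    "MacroRecall": metrics.MacroRecall,
    "MicroF1": metrics.MicroF1,
    "MicroPrecision": metrics.MicroPrecision,
    "MicroRecall": metrics.MicroRecall,
}

try:
    metric = METRICS[m]()  # instantiate the selected metric
except KeyError:
    raise ValueError(f"unknown metric name: {m!r}")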
Example #7
TEST_CASES = [
    (metrics.Accuracy(), sk_metrics.accuracy_score),
    (metrics.Precision(), sk_metrics.precision_score),
    (metrics.MacroPrecision(), functools.partial(sk_metrics.precision_score, average='macro')),
    (metrics.MicroPrecision(), functools.partial(sk_metrics.precision_score, average='micro')),
    (metrics.WeightedPrecision(), functools.partial(sk_metrics.precision_score, average='weighted')),
    (metrics.Recall(), sk_metrics.recall_score),
    (metrics.MacroRecall(), functools.partial(sk_metrics.recall_score, average='macro')),
    (metrics.MicroRecall(), functools.partial(sk_metrics.recall_score, average='micro')),
    (metrics.WeightedRecall(), functools.partial(sk_metrics.recall_score, average='weighted')),
    (metrics.FBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5)),
    (metrics.MacroFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5), functools.partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), functools.partial(sk_metrics.f1_score, average='macro')),
    (metrics.MicroF1(), functools.partial(sk_metrics.f1_score, average='micro')),
    (metrics.WeightedF1(), functools.partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', TEST_CASES)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::sklearn.metrics.classification.UndefinedMetricWarning')
def test_metric(metric, sk_metric):

    # Check str works
    str(metric)
Example #8
    (metrics.WeightedPrecision(),
     partial(sk_metrics.precision_score, average='weighted')),
    (metrics.Recall(), sk_metrics.recall_score),
    (metrics.MacroRecall(), partial(sk_metrics.recall_score, average='macro')),
    (metrics.MicroRecall(), partial(sk_metrics.recall_score, average='micro')),
    (metrics.WeightedRecall(),
     partial(sk_metrics.recall_score, average='weighted')),
    (metrics.FBeta(beta=.5), partial(sk_metrics.fbeta_score, beta=.5)),
    (metrics.MacroFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5),
     partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), partial(sk_metrics.f1_score, average='macro')),
    (metrics.MicroF1(), partial(sk_metrics.f1_score, average='micro')),
    (metrics.WeightedF1(), partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', [
    pytest.param(metric, sk_metric, id=f'{metric.__class__.__name__}')
    for metric, sk_metric in TEST_CASES
])
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings(
    'ignore::sklearn.metrics.classification.UndefinedMetricWarning')