def test_getter(self, size):
    y_true = [1] * size
    y_pred = [1] * size
    y_true.append(0)
    y_pred.append(0)

    scorer = Scorer()

    # scores are not available before the evaluation
    with pytest.raises(ValueError):
        print(scorer['ACC(Accuracy)'])

    scorer.evaluate(y_true, y_pred)

    assert scorer.num_classes == 2

    np.testing.assert_allclose(
        scorer['FP(False positive/type 1 error/false alarm)'],
        np.zeros(shape=(len(set(y_true)), )))
    np.testing.assert_allclose(
        scorer.score['FN(False negative/miss/type 2 error)'],
        np.zeros(shape=(len(set(y_true)), )))
    np.testing.assert_allclose(scorer.score['ACC(Accuracy)'],
                               np.ones(shape=(len(set(y_true)), )))

    with pytest.raises(KeyError):
        print(scorer['dummy'])
def test_wrong_nclass(self, size):
    # a single class is not enough to evaluate the score metrics
    y_true = [1] * size
    y_pred = [1] * size

    scorer = Scorer()

    with pytest.raises(ValueError):
        scorer.evaluate(y_true, y_pred)
def test_wrong_size(self, size):
    # mismatched lengths of true and predicted labels must fail
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size + 1, ))

    scorer = Scorer()

    with pytest.raises(ValueError):
        scorer.evaluate(y_true, y_pred)
def test_keys(self):
    y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

    scorer = Scorer()
    assert len(scorer.keys()) == 0

    scorer.evaluate(y_true, y_pred)
    assert len(scorer.keys()) == 116
def test_setter(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)

    # manually setting a score raises a warning
    with pytest.warns(UserWarning):
        scorer['Nico'] = 'Nico'
def test_encoder(self):
    y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)
    str_score = scorer.score

    y_true = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
    y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]

    scorer.evaluate(y_true, y_pred)
    num_score = scorer.score

    assert str_score == num_score
def test_numpy(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

    scorer = Scorer()
    _ = scorer.evaluate(y_true, y_pred)

    assert isinstance(_, type(scorer))
    assert repr(scorer) == '<Scorer (classes: 2)>'
def test():
    args = parse_args()

    pattern = Pattern()
    pattern.load(args.patterns, args.bin, args.delimiter)

    rsgd = rSGD()
    rsgd.load_weights(args.weights)

    start_time = time.time()
    predicted_labels = rsgd.predict(pattern)
    elapsed_time = time.time() - start_time

    print('{0}: Predicted in {1:.2f} seconds'.format(args.patterns, elapsed_time))

    if USE_SCORER:
        scorer = Scorer()
        scorer.evaluate(pattern.labels, predicted_labels)
        print(scorer)
def test_lut_alias(self):
    y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)

    np.testing.assert_allclose(scorer['ACC(Accuracy)'],
                               scorer['class_accuracy'])
    np.testing.assert_allclose(
        scorer['FP(False positive/type 1 error/false alarm)'],
        scorer['class_false_positive'])
    np.testing.assert_allclose(scorer['TOP(Test outcome positive)'],
                               scorer['class_test_outcome_positive'])
    np.testing.assert_allclose(scorer['FDR(False discovery rate)'],
                               scorer['class_false_discovery_rate'])
    np.testing.assert_allclose(scorer['Overall ACC'],
                               scorer['accuracy_score'])

    with pytest.raises(KeyError):
        scorer['dummy']
def test_getter_alias(self):
    y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)

    np.testing.assert_allclose(scorer['ACC(Accuracy)'], scorer.ACC)
    np.testing.assert_allclose(
        scorer['FP(False positive/type 1 error/false alarm)'], scorer.FP)
    np.testing.assert_allclose(scorer['TOP(Test outcome positive)'],
                               scorer.TOP)
    np.testing.assert_allclose(scorer['FDR(False discovery rate)'],
                               scorer.FDR)
    np.testing.assert_allclose(scorer['Overall ACC'], scorer.accuracy_score)
    np.testing.assert_allclose(
        scorer['FP(False positive/type 1 error/false alarm)'],
        scorer.class_false_positive)

    with pytest.raises(AttributeError):
        scorer.dummy
def test_print(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

    scorer = Scorer()
    _ = scorer.evaluate(y_true, y_pred)

    # capture stdout to check the printed summary
    stdout = StringIO()
    sys.stdout = stdout

    print(scorer)

    sys.stdout = sys.__stdout__
    printed = stdout.getvalue().splitlines()

    assert printed[0] == 'Classes: 0.0, 1.0'
def score_metrics(y_true, y_pred, metrics=['accuracy_score']):
    '''
    Scikit-learn compatible API for the Scorer usage.
    Evaluate the required score metrics using the Scorer object.

    Parameters
    ----------
    y_true : array-like
        List of true labels

    y_pred : array-like
        List of predicted labels

    metrics : str or array-like
        Metric name or list of metric names to evaluate

    Returns
    -------
    metrics : float or array-like
        The required metric values

    Example
    -------
    >>> from scorer import sklearn_api
    >>>
    >>> y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    >>> y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']
    >>>
    >>> metrics = sklearn_api.score_metrics(y_true, y_pred, metrics='accuracy_score')

    You can also use the scorer metrics inside a scikit-learn pipeline:

    Example
    -------
    >>> from scorer import sklearn_api
    >>> from sklearn.svm import SVC
    >>> from sklearn.metrics import make_scorer
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.datasets import load_iris
    >>>
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = SVC(kernel='linear', C=1.)
    >>> my_scorer = make_scorer(sklearn_api.score_metrics, metrics='accuracy_score')
    >>>
    >>> scores = cross_val_score(clf,               # classifier
    >>>                          X,                 # training data
    >>>                          y,                 # training labels
    >>>                          cv=5,              # split the data into 5 folds: 4 for training, 1 for scoring
    >>>                          scoring=my_scorer, # which scoring metric?
    >>>                          )
    '''
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type not in {'binary', 'multiclass'}:
        raise ValueError('{0} is not supported'.format(y_type))

    scorer = Scorer()
    available_metrics = scorer._get_available_metrics

    # convert str to iterable
    if isinstance(metrics, str):
        metrics = [metrics]

    # check metric params
    if not all(metric in available_metrics for metric in metrics):
        raise ValueError('score_metrics error: metric {0} not found. '
                         'Available metrics are {1}'.format(
                             metrics, ','.join(available_metrics)))

    scorer.evaluate(y_true, y_pred)

    metrics = [available_metrics[metric] for metric in metrics]
    results = [scorer[metric] for metric in metrics]

    return results if len(results) > 1 else results[0]
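# A minimal usage sketch for score_metrics (not part of the library API).
# It assumes the alias names 'accuracy_score' and 'class_accuracy' are
# registered in the Scorer metric lookup table, as exercised by the alias
# tests above. A single metric name returns a scalar, while a list of
# names returns a list of results.
def _example_score_metrics():
    y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
    y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

    # scalar result for a single metric name
    acc = score_metrics(y_true, y_pred, metrics='accuracy_score')

    # list of results when a list of metric names is requested
    acc, per_class = score_metrics(y_true, y_pred,
                                   metrics=['accuracy_score', 'class_accuracy'])

    return acc, per_class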
            accuracy=['accurate', 'exact'],
            randfact=0.1,
            epsil=0.5,
            protocol='pseudo_reinforcement',
            size=101,
            nth=2)

start_time = time.time()
rfbp.fit(pattern, label)
elapsed_time = time.time() - start_time

pattern = Pattern(X=pattern, y=label)
print('{0}: Training completed in {1:.2f} seconds'.format(pattern, elapsed_time))

predicted_labels = rfbp.predict(pattern)
print('Predictions:')

if USE_SCORER:
    scorer = Scorer()
    scorer.evaluate(pattern.labels, predicted_labels)
    print(scorer)
else:
    print(predicted_labels)

print('Done.')
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import division
from __future__ import print_function

#import numpy as np
from scorer import Scorer

__author__ = ['Nico Curti']
__email__ = ['*****@*****.**']


if __name__ == '__main__':

    y_true = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]  # np.array([2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2], dtype=np.int32)
    y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]  # np.array([0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2], dtype=np.int32)

    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)
    print(scorer)