def test_with_confusion_matrices_on_biclass(self):
    """score() on a list of binary confusion matrices returns a list."""
    table = data.Table("monks-1")
    run = testing.proportion_test([random_learner], table, times=1)
    matrices = scoring.confusion_matrices(run, class_index=1)
    result = self.score(matrices)
    self.assertIsInstance(result, list)
 def test_with_confusion_matrix_on_multiclass(self):
     learner = random_learner
     ds = data.Table("iris")
     pt = testing.proportion_test([learner], ds, times=1)
     cm = scoring.confusion_matrices(pt, class_index=1)
     scores = self.score(cm[0])
     self.assertIsInstance(scores, float)
    def test_with_test_results_on_multiclass(self):
        """score() accepts raw multiclass test results and returns a list."""
        table = data.Table("iris")
        run = testing.proportion_test([random_learner], table, times=1)
        scores = self.score(run)
        self.assertIsInstance(scores, list)
    def test_construct_confusion_matrix_from_multiclass(self):
        """Confusion matrices built from multiclass results come back as a
        per-class list (one binary matrix per class)."""
        learner = random_learner
        ds = data.Table("iris")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt)

        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)) and matches the sibling tests.
        self.assertIsInstance(cm[0], list)
 def test_with_confusion_matrix_on_multiclass(self):
     learner = random_learner
     ds = data.Table("iris")
     pt = testing.proportion_test([learner], ds, times=1)
     cm = scoring.confusion_matrices(pt, class_index=1)
     scores = self.score(cm[0])
     self.assertIsInstance(scores, float)
    def test_construct_confusion_matrix_from_biclass(self):
        """Binary confusion matrices expose a TP attribute."""
        table = data.Table("monks-1")
        run = testing.proportion_test([random_learner], table, times=1)
        matrices = scoring.confusion_matrices(run, class_index=1)

        self.assertTrue(hasattr(matrices[0], "TP"))
    def test_with_test_results_on_multiclass(self):
        """Raw multiclass test results are scored into a list of values."""
        dataset = data.Table("iris")
        results = testing.proportion_test([random_learner], dataset, times=1)

        self.assertIsInstance(self.score(results), list)
 def test_with_confusion_matrices_on_biclass(self):
     learner = random_learner
     ds = data.Table("monks-1")
     pt = testing.proportion_test([learner], ds, times=1)
     cm = scoring.confusion_matrices(pt, class_index=1)
     scores = self.score(cm)
     self.assertIsInstance(scores, list)
    def test_construct_confusion_matrix_from_multiclass(self):
        """Multiclass results yield one binary confusion matrix per class,
        returned as a list."""
        learner = random_learner
        ds = data.Table("iris")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt)

        # Prefer assertIsInstance over assertTrue(isinstance(...)) for a
        # more informative failure message, consistent with the other tests.
        self.assertIsInstance(cm[0], list)
    def test_construct_confusion_matrix_from_biclass(self):
        """A confusion matrix built with class_index on binary data carries
        the TP counter attribute."""
        dataset = data.Table("monks-1")
        results = testing.proportion_test([random_learner], dataset, times=1)
        first = scoring.confusion_matrices(results, class_index=1)[0]
        self.assertTrue(hasattr(first, "TP"))
    def test_auc_on_monks(self):
        """AUC returns one score per learner for both CV and holdout runs."""
        ds = data.Table("monks-1")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        pt = testing.proportion_test([self.learner], ds, times=1)

        # Each evaluation scheme was run with a single learner, so each
        # AUC result must contain exactly one entry.
        for results in (cv, pt):
            self.assertEqual(len(scoring.AUC(results)), 1)
    def test_auc_on_monks(self):
        """Both evaluation schemes produce exactly one AUC entry per learner."""
        dataset = data.Table("monks-1")
        cv_results = testing.cross_validation([self.learner], dataset, folds=5)
        holdout_results = testing.proportion_test([self.learner], dataset,
                                                  times=1)

        cv_auc = scoring.AUC(cv_results)
        self.assertEqual(len(cv_auc), 1)

        holdout_auc = scoring.AUC(holdout_results)
        self.assertEqual(len(holdout_auc), 1)
 def test_ca_from_confusion_matrix_for_classification_on_iris_se(self):
     """CA with report_se=True still returns one entry per learner."""
     table = data.Table("iris")
     run = testing.proportion_test([self.learner], table, times=1)
     self.assertEqual(run.number_of_iterations, 1)
     self.assertEqual(len(scoring.CA(run, report_se=True)), 1)
# Example #14 (score: 0)
def run_tests(datasets, measures, tests, iterations=10):
    """Run every test scheme on every dataset and score the results.

    datasets   -- iterable of (Orange data table, display name) pairs
    measures   -- passed straight through to test()
    tests      -- iterable of (callable(learner, dataset) -> results, name)
    iterations -- independent repetitions per scheme (default 10)
    """
    for ds, ds_name in datasets:
        for t, t_name in tests:
            # Python 2 print statement; this file predates print().
            print "Testing %s on %s" % (t_name, ds_name)
            test_results = [t(random_learner, ds) for _ in xrange(iterations)]
            test(measures, test_results)


# One multiclass (iris) and one binary (monks-1) dataset, with display names.
datasets = (
    (data.Table("iris"), "Iris"),
    (data.Table("monks-1"), "Monks")
    )

# ca() under every report_se/ignore_weights combination; the ""-labelled
# entries are dummy spacer rows that return a throwaway value.
measures = (
    (lambda x:ca(x), "CA"),
    (lambda x:ca(x, report_se=False, ignore_weights=False), "CA-SE-W"),
    (lambda x:ca(x, report_se=True, ignore_weights=False), "CA+SE-W"),
    (lambda x:[lambda x:[None]], ""),
    (lambda x:ca(x, report_se=False, ignore_weights=True), "CA-SE+W"),
    (lambda x:ca(x, report_se=True, ignore_weights=True), "CA+SE+W"),
    (lambda x:[lambda x:[None]], ""),
    )

# Sampling schemes: cross-validation, a single 70/30 holdout, and the
# holdout converted to confusion matrices.
tests = (
    (lambda l, ds: testing.cross_validation([l],ds), "CV"),
    (lambda l, ds: testing.proportion_test([l], ds, .7, 1), "Proportion test"),
    (lambda l, ds: scoring.confusion_matrices(testing.proportion_test([l], ds, .7, 1)), "Confusion matrix"),
    )

run_tests(datasets, measures, tests)
 def test_ca_from_confusion_matrix_for_classification_on_iris_se(self):
     """Requesting the standard error keeps one CA entry per learner."""
     iris = data.Table("iris")
     results = testing.proportion_test([self.learner], iris, times=1)
     self.assertEqual(results.number_of_iterations, 1)
     ca_scores = scoring.CA(results, report_se=True)
     self.assertEqual(len(ca_scores), 1)
# Example #16 (score: 0)
# Compare the self-tuning SVM wrapper against a plain SVM on vehicle data.
from Orange import data
from Orange.classification import svm

vehicle = data.Table("vehicle.tab")

# SVMLearnerEasy tunes its parameters internally (3-fold CV per `folds`).
svm_easy = svm.SVMLearnerEasy(name="svm easy", folds=3)
svm_normal = svm.SVMLearner(name="svm")
learners = [svm_easy, svm_normal]

from Orange.evaluation import testing, scoring

# Single holdout run; print CA and AUC side by side for each learner.
results = testing.proportion_test(learners, vehicle, times=1)
print "Name     CA        AUC"
for learner,CA,AUC in zip(learners, scoring.CA(results), scoring.AUC(results)):
    print "%-8s %.2f      %.2f" % (learner.name, CA, AUC)
# Example #17 (score: 0)
# Same comparison as above: self-tuning SVM vs. plain SVM on vehicle data.
from Orange import data
from Orange.classification import svm

vehicle = data.Table("vehicle.tab")

# SVMLearnerEasy tunes its parameters internally (3-fold CV per `folds`).
svm_easy = svm.SVMLearnerEasy(name="svm easy", folds=3)
svm_normal = svm.SVMLearner(name="svm")
learners = [svm_easy, svm_normal]

from Orange.evaluation import testing, scoring

# Single holdout run; report CA and AUC for each learner.
results = testing.proportion_test(learners, vehicle, times=1)
print "Name     CA        AUC"
for learner, CA, AUC in zip(learners, scoring.CA(results),
                            scoring.AUC(results)):
    print "%-8s %.2f      %.2f" % (learner.name, CA, AUC)
# Example #18 (score: 0)
        print "%8.4f"*n % tuple(m[0](tr)[0] for m in measures)
    print

def run_tests(datasets, measures, tests, iterations=10):
    """Run each sampling scheme on each dataset `iterations` times, then
    score the collected results with every measure via test()."""
    for ds, ds_name in datasets:
        for t, t_name in tests:
            # Python 2 print statement; this file predates print().
            print "Testing %s on %s" % (t_name, ds_name)
            test_results = [t(random_learner, ds) for _ in xrange(iterations)]
            test(measures, test_results)


# One multiclass (iris) and one binary (monks-1) dataset.
datasets = (
    (data.Table("iris"), "Iris"),
    (data.Table("monks-1"), "Monks")
)

# auc() with its default and each explicit multiclass averaging method 0-3.
measures = (
    (lambda x:auc(x), "AUC"),
    (lambda x:auc(x, multiclass=0), "AUC+M0"),
    (lambda x:auc(x, multiclass=1), "AUC+M1"),
    (lambda x:auc(x, multiclass=2), "AUC+M2"),
    (lambda x:auc(x, multiclass=3), "AUC+M3"),
)

# Sampling schemes: cross-validation and a single 70/30 holdout.
tests = (
    (lambda l, ds: testing.cross_validation([l], ds), "CV"),
    (lambda l, ds: testing.proportion_test([l], ds, .7, 1), "Proportion test"),
)

run_tests(datasets, measures, tests)
# Example #19 (score: 0)
    print


def run_tests(datasets, measures, tests, iterations=10):
    """Repeat every sampling scheme `iterations` times per dataset and pass
    the accumulated results to test() for scoring."""
    for ds, ds_name in datasets:
        for t, t_name in tests:
            # Python 2 print statement; this file predates print().
            print "Testing %s on %s" % (t_name, ds_name)
            test_results = [t(random_learner, ds) for _ in xrange(iterations)]
            test(measures, test_results)


# One multiclass (iris) and one binary (monks-1) dataset, with display names.
datasets = ((data.Table("iris"), "Iris"), (data.Table("monks-1"), "Monks"))

# ca() under every report_se/ignore_weights combination; the ""-labelled
# entries are dummy spacer rows that never call ca().
measures = (
    (lambda x: ca(x), "CA"),
    (lambda x: ca(x, report_se=False, ignore_weights=False), "CA-SE-W"),
    (lambda x: ca(x, report_se=True, ignore_weights=False), "CA+SE-W"),
    (lambda x: [lambda x: [None]], ""),
    (lambda x: ca(x, report_se=False, ignore_weights=True), "CA-SE+W"),
    (lambda x: ca(x, report_se=True, ignore_weights=True), "CA+SE+W"),
    (lambda x: [lambda x: [None]], ""),
)

# Sampling schemes: cross-validation, a single 70/30 holdout, and the
# holdout converted to confusion matrices.
tests = (
    (lambda l, ds: testing.cross_validation([l], ds), "CV"),
    (lambda l, ds: testing.proportion_test([l], ds, .7, 1), "Proportion test"),
    (lambda l, ds: scoring.confusion_matrices(
        testing.proportion_test([l], ds, .7, 1)), "Confusion matrix"),
)

run_tests(datasets, measures, tests)