Code example #1
    def test_auc_on_iris_weighted_one_against_all(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC(test_results,
                          multiclass=scoring.AUC.WeightedOneAgainstAll)

        self.assertEqual(len(auc), 1)
Code example #2
    def test_auc_on_iris_pair(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC.pair(test_results, 0, 1)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC.pair(test_results, 0, 2)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC.pair(test_results, 1, 2)
        self.assertEqual(len(auc), 1)
Code example #3
    def test_auc_on_iris_pair(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC_for_pair_of_classes(test_results, 0, 1)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC_for_pair_of_classes(test_results, 0, 2)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC_for_pair_of_classes(test_results, 1, 2)
        self.assertEqual(len(auc), 1)
Code example #4
    def test_auc_on_monks(self):
        ds = data.Table("monks-1")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        pt = testing.proportion_test([self.learner], ds, times=1)

        auc = scoring.AUC(cv)
        self.assertEqual(len(auc), 1)

        auc = scoring.AUC(pt)
        self.assertEqual(len(auc), 1)
Code example #5
    def test_auc_on_monks(self):
        ds = data.Table("monks-1")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        pt = testing.proportion_test([self.learner], ds, times=1)

        auc = scoring.AUC(cv)
        self.assertEqual(len(auc), 1)

        auc = scoring.AUC(pt)
        self.assertEqual(len(auc), 1)
Code example #6
    def test_auc_on_iris_single_class(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC_for_single_class(test_results)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC_for_single_class(test_results, 0)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC_for_single_class(test_results, 1)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC_for_single_class(test_results, 2)
        self.assertEqual(len(auc), 1)
Code example #7
    def test_auc_on_iris_single_class(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC.single_class(test_results)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC.single_class(test_results, 0)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC.single_class(test_results, 1)
        self.assertEqual(len(auc), 1)
        auc = scoring.AUC.single_class(test_results, 2)
        self.assertEqual(len(auc), 1)
Code example #8
File: test_svm.py Project: dreampocketit/dp_for_cct
def acc(sta, sto, subject):

#	print str(sta)+"-"+str(sto)
#	data = Orange.data.Table("../data/"+str(subject)+'-'+str(sta)+"-"+str(sto)+".csv")
	data = Orange.data.Table("../data/"+str(subject)+'-'+str(sta)+"-"+str(sto)+"_attr.csv")
	classes = data.domain.classVar.values
#	print "analyze "+classes[0]+":"
	highest_precision = 0
	highest_precision_recall = 0
	highest_recall = 0
	highest_recall_precision = 0
	for i in range(1,101):
#	for i in range(1,2):
		j=float(i)/100

#		svm_l = Orange.classification.svm.SVMLearner(kernel_type=Orange.classification.svm.kernels.Linear)
#		rfe = Orange.classification.svm.RFE(learner=svm_l)
#		data_with_subset_of_features = rfe(data, 10)
#		print data_with_subset_of_features.domain
		
#		n = 10
#		ma = Orange.feature.scoring.score_all(data)
#		best = Orange.feature.selection.top_rated(ma, n)
#		print 'Best %d features:' % n
#		for s in best:
#			print s

		learner = svm.SVMLearner(gamma=j, verbose=False)
#		learner = svm.SVMLearner(kernel_type=Orange.classification.svm.kernels.Linear ,verbose=False)
#		learner = Orange.feature.selection.FilteredLearner(learner, filter=Orange.feature.selection.FilterBestN(n=10), name='filtered')
		results = testing.cross_validation([learner], data, folds=10)
#		print scoring.CA(results)
#		print "analyze "+classes[0]+":"
		cm = scoring.confusion_matrices( results, class_index=0, ignore_weights=False, cutoff=0.52)[0]
#		print "TP: %i, FP: %i, FN: %s, TN: %i" % (cm.TP, cm.FP, cm.FN, cm.TN)
		
		if cm.TP+cm.FP!=0:
			if cm.TP/(cm.TP+cm.FP)>highest_precision:
				highest_precision = cm.TP/(cm.TP+cm.FP)
				highest_precision_recall = cm.TP/(cm.TP+cm.FN)

		if cm.TP+cm.FN!=0:
			if cm.TP/(cm.TP+cm.FN)>highest_recall:
				highest_recall = cm.TP/(cm.TP+cm.FN)
				highest_recall_precision = cm.TP/(cm.TP+cm.FP)

#		time.sleep(0.1)

	return 	format(highest_precision,'.3f'),format(highest_precision_recall,'.3f'), len(data)
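The precision/recall bookkeeping above can also be read straight off the confusion matrix returned by scoring.confusion_matrices. A minimal sketch of that pattern, assuming Orange 2.x; the dataset name and gamma value are placeholders, not taken from the project above:

import Orange
from Orange.classification import svm
from Orange.evaluation import testing, scoring

data = Orange.data.Table("iris")                  # placeholder dataset
learner = svm.SVMLearner(gamma=0.5)               # placeholder gamma
results = testing.cross_validation([learner], data, folds=10)

# one confusion matrix per learner for the chosen target class
cm = scoring.confusion_matrices(results, class_index=0)[0]
precision = cm.TP / (cm.TP + cm.FP) if (cm.TP + cm.FP) else 0.0
recall = cm.TP / (cm.TP + cm.FN) if (cm.TP + cm.FN) else 0.0
print "precision: %.3f  recall: %.3f" % (precision, recall)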
Code example #9
File: svm.py Project: dreampocketit/dp_for_cct
def acc(name):


	data = Orange.data.Table("../data/"+str(name)+".csv")
	for row in data:
		print row

	highest = 0
	for i in range(1,101):
		j=float(i)/100
		learner = svm.SVMLearner(gamma=j, verbose=True)
		results = testing.cross_validation([learner], data, folds=10)

		if scoring.CA(results)[0]>highest:
			highest = scoring.CA(results)[0]
#		print "CA:  %.2f" % scoring.CA(results)[0]
#		print "AUC: %.2f" % scoring.AUC(results)[0]

	print "highest:"+str(highest)
Code example #10
    def test_split_by_classifier(self):
        learners = [random_learner, random_learner, random_learner]
        ds = data.Table("lenses")
        cv = testing.cross_validation(learners, ds, folds=5, store_examples=True)
        cv_split = scoring.split_by_classifiers(cv)
        ca_scores = scoring.CA(cv)
        auc_scores = scoring.AUC(cv)
        for i, cv1 in enumerate(cv_split):
            self.assertEqual(cv1.class_values, cv.class_values)
            self.assertEqual(cv1.classifier_names, [cv.classifier_names[i]])
            self.assertEqual(cv1.number_of_iterations, cv.number_of_iterations)
            self.assertEqual(cv1.number_of_learners, 1)
            self.assertEqual(cv1.base_class, cv.base_class)
            self.assertEqual(cv1.weights, cv.weights)
            self.assertEqual(len(cv1.results), len(cv.results))
            self.assertEqual(cv1.examples, cv.examples)

            ca_one = scoring.CA(cv1)[0]
            auc_one = scoring.AUC(cv1)[0]
            self.assertAlmostEqual(ca_scores[i], ca_one, delta=1e-10)
            self.assertAlmostEqual(auc_scores[i], auc_one, delta=1e-10)
Code example #11
    def test_split_by_classifier(self):
        learners = [random_learner, random_learner, random_learner]
        ds = data.Table("lenses")
        cv = testing.cross_validation(learners,
                                      ds,
                                      folds=5,
                                      store_examples=True)
        cv_split = scoring.split_by_classifiers(cv)
        ca_scores = scoring.CA(cv)
        auc_scores = scoring.AUC(cv)
        for i, cv1 in enumerate(cv_split):
            self.assertEqual(cv1.class_values, cv.class_values)
            self.assertEqual(cv1.classifier_names, [cv.classifier_names[i]])
            self.assertEqual(cv1.number_of_iterations, cv.number_of_iterations)
            self.assertEqual(cv1.number_of_learners, 1)
            self.assertEqual(cv1.base_class, cv.base_class)
            self.assertEqual(cv1.weights, cv.weights)
            self.assertEqual(len(cv1.results), len(cv.results))
            self.assertEqual(cv1.examples, cv.examples)

            ca_one = scoring.CA(cv1)[0]
            auc_one = scoring.AUC(cv1)[0]
            self.assertAlmostEqual(ca_scores[i], ca_one, delta=1e-10)
            self.assertAlmostEqual(auc_scores[i], auc_one, delta=1e-10)
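Outside of the unit test, split_by_classifiers is convenient for scoring each learner's slice of a shared experiment on its own. A minimal sketch, assuming Orange 2.x; the learners and dataset are placeholders borrowed from other examples on this page:

import Orange
from Orange.classification import bayes, svm
from Orange.evaluation import testing, scoring

ds = Orange.data.Table("vehicle.tab")             # placeholder dataset
learners = [bayes.NaiveLearner(name="bayes"), svm.SVMLearner(name="svm")]
cv = testing.cross_validation(learners, ds, folds=5, store_examples=True)

# one results object per classifier, in the same order as `learners`
for learner, part in zip(learners, scoring.split_by_classifiers(cv)):
    print "%-8s CA=%.3f  AUC=%.3f" % (learner.name,
                                      scoring.CA(part)[0],
                                      scoring.AUC(part)[0])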
Code example #12
import Orange
from Orange.classification import svm
from Orange.evaluation import testing, scoring

data = Orange.data.Table("dataset.tab")
learner = svm.SVMLearner(verbose=True, normalization=True)
results = testing.cross_validation([learner], data, folds=5)  # at least two folds are required
print "CA:  %.4f" % scoring.CA(results)[0]
print "AUC: %.4f" % scoring.AUC(results)[0]
Code example #13
    def test_ca_on_iris(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        ca = scoring.CA(cv, report_se=True)
        self.assertEqual(len(ca), 1)
Code example #14
    def test_ca_from_confusion_matrix_list_on_iris(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv)
        ca = scoring.CA(cm)
        self.assertEqual(len(ca), 1)
Code example #15
    def test_ca_from_confusion_matrix_on_iris_se(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv, class_index=1)
        ca = scoring.CA(cm[0], report_se=True)
        self.assertEqual(len(ca), 1)
Code example #16
from Orange import data
from Orange.classification import svm

vehicle = data.Table("vehicle.tab")

svm_easy = svm.SVMLearnerEasy(name="svm easy", folds=3)
svm_normal = svm.SVMLearner(name="svm")
learners = [svm_easy, svm_normal]

from Orange.evaluation import testing, scoring

results = testing.cross_validation(learners, vehicle, folds=5)
print "Name     CA        AUC"
for learner,CA,AUC in zip(learners, scoring.CA(results), scoring.AUC(results)):
    print "%-8s %.2f      %.2f" % (learner.name, CA, AUC)
Code example #17
    def test_auc_matrix_on_iris(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC.matrix(test_results)
        self.assertEqual(len(auc), 1)
        self.assertEqual(len(auc[0]), 3)
Code example #18
    def test_auc_on_iris_by_weighted_pairs(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC(test_results, multiclass=scoring.AUC.ByWeightedPairs)

        self.assertEqual(len(auc), 1)
Code example #19
import numpy as np
import Orange
from Orange.evaluation.testing import cross_validation
from Orange.evaluation.scoring import CA

def test_solution(factory, weight, data, results, idx, folds):
    weighted_data = data.to_numpy("ac")[0] * np.concatenate((np.sqrt(weight), [1]))
    new_data = Orange.data.Table(data.domain, weighted_data)
    learner = factory()
    results[idx] = CA(cross_validation([learner], new_data, folds=folds))[0]
Code example #20
    def test_auc_on_iris_by_weighted_pairs(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC(test_results, multiclass=scoring.AUC.ByWeightedPairs)

        self.assertEqual(len(auc), 1)
Code example #21
# Description: Naive Bayes Learner with auto-adjusted threshold
# Category:    classification
# Uses:        iris
# Referenced:  Orange.classification.bayes
# Classes:     Orange.classification.bayes.NaiveLearner, Orange.classification.bayes.NaiveClassifier

import Orange
from Orange.classification import bayes
from Orange.evaluation import testing, scoring

adult = Orange.data.Table("adult_sample.tab")

nb = bayes.NaiveLearner(name="Naive Bayes")
adjusted_nb = bayes.NaiveLearner(adjust_threshold=True,
                                 name="Adjusted Naive Bayes")

results = testing.cross_validation([nb, adjusted_nb], adult)
print "%.6f, %.6f" % tuple(scoring.CA(results))
Code example #22
    def test_ca_from_confusion_matrix_list_on_iris(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv)
        ca = scoring.CA(cm)
        self.assertEqual(len(ca), 1)
Code example #23
    def test_auc_on_iris_one_against_all(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC(test_results, multiclass=scoring.AUC.OneAgainstAll)

        self.assertEqual(len(auc), 1)
Code example #24
    def test_auc_on_iris_one_against_all(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC.one_against_all(test_results)

        self.assertEqual(len(auc), 1)
Code example #25
    def test_ca_from_confusion_matrix_on_iris_se(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv, class_index=1)
        ca = scoring.CA(cm[0], report_se=True)
        self.assertEqual(len(ca), 1)
Code example #26
from Orange import data
from Orange.classification import svm

vehicle = data.Table("vehicle.tab")

svm_easy = svm.SVMLearnerEasy(name="svm easy", folds=3)
svm_normal = svm.SVMLearner(name="svm")
learners = [svm_easy, svm_normal]

from Orange.evaluation import testing, scoring

results = testing.cross_validation(learners, vehicle, folds=5)
print "Name     CA        AUC"
for learner, CA, AUC in zip(learners, scoring.CA(results),
                            scoring.AUC(results)):
    print "%-8s %.2f      %.2f" % (learner.name, CA, AUC)
Code example #27
    def test_auc_matrix_on_iris(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC_matrix(test_results)
        self.assertEqual(len(auc), 1)
        self.assertEqual(len(auc[0]), 3)
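AUC_matrix returns, for each learner, a matrix of pairwise class AUCs; the assertions above only check its shape. A minimal sketch that prints the matrix row by row, assuming each row corresponds to one class value as the shape check suggests (the naive Bayes learner is a placeholder):

import Orange
from Orange.classification import bayes
from Orange.evaluation import testing, scoring

ds = Orange.data.Table("iris")
test_results = testing.cross_validation([bayes.NaiveLearner()], ds, folds=5)

auc_matrix = scoring.AUC_matrix(test_results)[0]  # matrix for the single learner
for cls, row in zip(test_results.class_values, auc_matrix):
    print cls, row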
Code example #28
    def test_ca_on_iris(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        ca = scoring.CA(cv, report_se=True)
        self.assertEqual(len(ca), 1)
Code example #29
import Orange
from Orange.evaluation import testing, scoring

jobs = Orange.data.Table("jobs.100.sparse.continuous_classes.tab")

learner = Orange.classification.svm.SVMLearnerEasy()
results = testing.cross_validation([learner], jobs, folds=5)

print "CA:  %.4f" % scoring.CA(results)[0]
# print "AUC: %.4f" % scoring.AUC(results)[0]
Code example #30
    def test_auc_on_iris_weighted_one_against_all(self):
        ds = data.Table("iris")
        test_results = testing.cross_validation([self.learner], ds, folds=5)
        auc = scoring.AUC.weighted_one_against_all(test_results)

        self.assertEqual(len(auc), 1)
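Code examples #1 and #30 spell the same weighted one-against-all computation in two ways: the multiclass keyword on scoring.AUC and the weighted_one_against_all convenience call. A minimal sketch that checks the two agree, assuming the installed Orange 2.x version exposes both spellings (the naive Bayes learner is a placeholder):

import Orange
from Orange.classification import bayes
from Orange.evaluation import testing, scoring

ds = Orange.data.Table("iris")
res = testing.cross_validation([bayes.NaiveLearner()], ds, folds=5)

auc_kw = scoring.AUC(res, multiclass=scoring.AUC.WeightedOneAgainstAll)[0]
auc_fn = scoring.AUC.weighted_one_against_all(res)[0]
print "keyword: %.6f  method: %.6f" % (auc_kw, auc_fn)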
Code example #31
File: test_auc.py Project: yisuax11/orange2
        print "%8.4f"*n % tuple(m[0](tr)[0] for m in measures)
    print

def run_tests(datasets, measures, tests, iterations=10):
    for ds, ds_name in datasets:
        for t, t_name in tests:
            print "Testing %s on %s" % (t_name, ds_name)
            test_results = [t(random_learner, ds) for _ in xrange(iterations)]
            test(measures, test_results)


datasets = (
    (data.Table("iris"), "Iris"),
    (data.Table("monks-1"), "Monks")
)

measures = (
    (lambda x:auc(x), "AUC"),
    (lambda x:auc(x, multiclass=0), "AUC+M0"),
    (lambda x:auc(x, multiclass=1), "AUC+M1"),
    (lambda x:auc(x, multiclass=2), "AUC+M2"),
    (lambda x:auc(x, multiclass=3), "AUC+M3"),
)

tests = (
    (lambda l, ds: testing.cross_validation([l], ds), "CV"),
    (lambda l, ds: testing.proportion_test([l], ds, .7, 1), "Proportion test"),
)

run_tests(datasets, measures, tests)
Code example #32
# Description: Naive Bayes Learner with auto-adjusted threshold
# Category:    classification
# Uses:        iris
# Referenced:  Orange.classification.bayes
# Classes:     Orange.classification.bayes.NaiveLearner, Orange.classification.bayes.NaiveClassifier

import Orange
from Orange.classification import bayes
from Orange.evaluation import testing, scoring

adult = Orange.data.Table("adult_sample.tab")

nb = bayes.NaiveLearner(name="Naive Bayes")
adjusted_nb = bayes.NaiveLearner(adjust_threshold=True, name="Adjusted Naive Bayes")

results = testing.cross_validation([nb, adjusted_nb], adult)
print "%.6f, %.6f" % tuple(scoring.CA(results))