    def test_with_confusion_matrix_on_multiclass(self):
        learner = random_learner
        ds = data.Table("iris")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt, class_index=1)
        scores = self.score(cm[0])
        self.assertIsInstance(scores, float)
    def test_construct_confusion_matrix_from_biclass(self):
        learner = random_learner
        ds = data.Table("monks-1")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt, class_index=1)

        self.assertTrue(hasattr(cm[0], "TP"))
    def test_with_confusion_matrices_on_biclass(self):
        learner = random_learner
        ds = data.Table("monks-1")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt, class_index=1)
        scores = self.score(cm)
        self.assertIsInstance(scores, list)
    def test_construct_confusion_matrix_from_multiclass(self):
        learner = random_learner
        ds = data.Table("iris")
        pt = testing.proportion_test([learner], ds, times=1)
        cm = scoring.confusion_matrices(pt)

        self.assertTrue(isinstance(cm[0], list))
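These methods arrive without their surrounding module; a minimal sketch of the context they assume (random_learner and the class attributes are stand-ins for whatever the real test suite provides, not its actual definitions):

import unittest

from Orange import data
from Orange.classification.majority import MajorityLearner
from Orange.evaluation import testing, scoring

random_learner = MajorityLearner()  # stand-in dummy learner

class TestScore(unittest.TestCase):  # hypothetical wrapper class
    learner = random_learner
    score = staticmethod(scoring.CA)  # assumed scorer under test
    # ... the methods above live in this class body ...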
import random

import Orange.data
import Orange.classification.neural
import Orange.evaluation.testing
from Orange.evaluation import scoring


def acc(sta, sto, subject):
    # Load the window <subject>-<sta>-<sto>.csv prepared for this subject.
    data = Orange.data.Table("../data/" + str(subject) + '-' + str(sta) + "-" + str(sto) + ".csv")
    classes = data.domain.classVar.values  # class_index=0 below scores classes[0]

    highest_precision = 0.0
    highest_precision_recall = 0.0
    highest_recall = 0.0
    highest_recall_precision = 0.0

    # Sweep the hidden-layer size; for each setting estimate a confusion
    # matrix for the first class with 10-fold cross-validation.
    for mid_node in range(5, 20):
        ann = Orange.classification.neural.NeuralNetworkLearner(
            n_mid=mid_node, reg_fact=1, max_iter=200, rand=random, normalize=True)
        results = Orange.evaluation.testing.cross_validation([ann], data, folds=10)
        cm = scoring.confusion_matrices(results, class_index=0,
                                        ignore_weights=False, cutoff=0.5)[0]

        # Track the best precision seen, and the recall achieved there.
        # float() guards against Python 2 integer division.
        if cm.TP + cm.FP != 0:
            if float(cm.TP) / (cm.TP + cm.FP) > highest_precision:
                highest_precision = float(cm.TP) / (cm.TP + cm.FP)
                highest_precision_recall = float(cm.TP) / (cm.TP + cm.FN)

        # Track the best recall seen, and the precision achieved there.
        if cm.TP + cm.FN != 0:
            if float(cm.TP) / (cm.TP + cm.FN) > highest_recall:
                highest_recall = float(cm.TP) / (cm.TP + cm.FN)
                highest_recall_precision = float(cm.TP) / (cm.TP + cm.FP)

    return format(highest_precision, '.3f'), format(highest_precision_recall, '.3f')
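A hypothetical driver for the function above (the window bounds and subject id are invented for illustration; the CSV files themselves are not part of the snippet):

if __name__ == "__main__":
    subject = 1                    # placeholder subject id
    for sta in range(0, 40, 10):   # invented window starts
        sto = sta + 10             # invented window end
        p, r = acc(sta, sto, subject)
        print "window %d-%d: best precision %s (recall at that point %s)" % (sta, sto, p, r)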
Example #10
import Orange.data
from Orange.classification import svm
from Orange.evaluation import testing, scoring


def acc(sta, sto, subject):
    # Load the feature-selected window <subject>-<sta>-<sto>_attr.csv.
    data = Orange.data.Table("../data/" + str(subject) + '-' + str(sta) + "-" + str(sto) + "_attr.csv")
    classes = data.domain.classVar.values  # class_index=0 below scores classes[0]

    highest_precision = 0.0
    highest_precision_recall = 0.0
    highest_recall = 0.0
    highest_recall_precision = 0.0

    # Sweep the RBF gamma from 0.01 to 1.00; for each setting estimate a
    # confusion matrix for the first class with 10-fold cross-validation.
    # (The original also experimented with a linear kernel, RFE and top-N
    # feature selection; those dead branches are omitted here.)
    for i in range(1, 101):
        j = float(i) / 100

        learner = svm.SVMLearner(gamma=j, verbose=False)
        results = testing.cross_validation([learner], data, folds=10)
        cm = scoring.confusion_matrices(results, class_index=0,
                                        ignore_weights=False, cutoff=0.52)[0]

        # Track the best precision seen, and the recall achieved there.
        # float() guards against Python 2 integer division.
        if cm.TP + cm.FP != 0:
            if float(cm.TP) / (cm.TP + cm.FP) > highest_precision:
                highest_precision = float(cm.TP) / (cm.TP + cm.FP)
                highest_precision_recall = float(cm.TP) / (cm.TP + cm.FN)

        # Track the best recall seen, and the precision achieved there.
        if cm.TP + cm.FN != 0:
            if float(cm.TP) / (cm.TP + cm.FN) > highest_recall:
                highest_recall = float(cm.TP) / (cm.TP + cm.FN)
                highest_recall_precision = float(cm.TP) / (cm.TP + cm.FP)

    return format(highest_precision, '.3f'), format(highest_precision_recall, '.3f'), len(data)
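Both acc() variants repeat the same TP/FP/FN arithmetic; a small helper along these lines (my sketch, not part of the original code) would factor it out:

def precision_recall(cm):
    # (precision, recall) for the positive class of a 2x2 confusion
    # matrix exposing TP/FP/FN counts, with zero-denominator guards.
    precision = float(cm.TP) / (cm.TP + cm.FP) if cm.TP + cm.FP else 0.0
    recall = float(cm.TP) / (cm.TP + cm.FN) if cm.TP + cm.FN else 0.0
    return precision, recall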
    def test_ca_from_confusion_matrix_on_iris_se(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv, class_index=1)
        ca = scoring.CA(cm[0], report_se=True)
        self.assertEqual(len(ca), 1)

    def test_ca_from_confusion_matrix_list_on_iris(self):
        ds = data.Table("iris")
        cv = testing.cross_validation([self.learner], ds, folds=5)
        cm = scoring.confusion_matrices(cv)
        ca = scoring.CA(cm)
        self.assertEqual(len(ca), 1)
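Both calls return one score per learner, which is why the length check passes in each case; with report_se=True the single entry is a (value, standard error) pair rather than a bare float. Illustrative only, with made-up numbers:

scoring.CA(cm[0])                  # e.g. [0.95]
scoring.CA(cm[0], report_se=True)  # e.g. [(0.95, 0.018)]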
Example #15
def run_tests(datasets, measures, tests, iterations=10):
    # Run every sampling scheme `iterations` times on every dataset and
    # hand the collected results to test() for scoring.
    for ds, ds_name in datasets:
        for t, t_name in tests:
            print "Testing %s on %s" % (t_name, ds_name)
            test_results = [t(random_learner, ds) for _ in xrange(iterations)]
            test(measures, test_results)


datasets = (
    (data.Table("iris"), "Iris"),
    (data.Table("monks-1"), "Monks")
    )

measures = (
    (lambda x: ca(x), "CA"),
    (lambda x: ca(x, report_se=False, ignore_weights=False), "CA-SE-W"),
    (lambda x: ca(x, report_se=True, ignore_weights=False), "CA+SE-W"),
    (lambda x: [lambda x: [None]], ""),
    (lambda x: ca(x, report_se=False, ignore_weights=True), "CA-SE+W"),
    (lambda x: ca(x, report_se=True, ignore_weights=True), "CA+SE+W"),
    (lambda x: [lambda x: [None]], ""),
)

tests = (
    (lambda l, ds: testing.cross_validation([l], ds), "CV"),
    (lambda l, ds: testing.proportion_test([l], ds, .7, 1), "Proportion test"),
    (lambda l, ds: scoring.confusion_matrices(
        testing.proportion_test([l], ds, .7, 1)), "Confusion matrix"),
)

run_tests(datasets, measures, tests)
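The snippet takes test, ca and random_learner from elsewhere in its module; to make it self-contained, stand-ins along these lines would sit above it (assumptions, not the original definitions: ca is taken to alias scoring.CA, and test to apply every measure to every collected result):

from Orange import data
from Orange.classification.majority import MajorityLearner
from Orange.evaluation import testing, scoring

random_learner = MajorityLearner()  # any cheap dummy learner will do
ca = scoring.CA                     # the measures above call ca(...)

def test(measures, test_results):
    # Apply each named measure to each collected result and print it;
    # the blank-named entries in `measures` act as output separators.
    for measure, name in measures:
        if not name:
            print
            continue
        for res in test_results:
            print "%s: %s" % (name, measure(res))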