def print_results(learners, results): # output the results print "Learner CA IS Brier AUC" for i in range(len(learners)): print "%-8s %5.4f %5.3f %5.3f %5.3f" % ( learners[i].name, orngStat.CA(results)[i], orngStat.IS(results)[i], orngStat.BrierScore(results)[i], orngStat.AUC(results)[i])
def cforange_information_score(input_dict):
    """Widget-style wrapper: compute the information score of results.

    input_dict keys:
      'results'  -- orngTest evaluation results
      'reportSE' -- the string 'true' to request standard errors;
                    any other value means False
    Returns {'is': scores}; when there is a single learner the scores
    list is unwrapped to its lone element.
    """
    import orngStat
    results = input_dict['results']
    # The flag arrives as a string from the caller; compare directly
    # instead of the original's four-line if/else.
    reportSE = input_dict['reportSE'] == 'true'
    scores = orngStat.IS(results, apriori=None, reportSE=reportSE)
    if len(scores) == 1:
        scores = scores[0]
    return {'is': scores}
# Description: Demostration of use of cross-validation as provided in orngEval module # Category: evaluation # Uses: voting.tab # Classes: orngTest.crossValidation # Referenced: c_performance.htm import orange, orngTest, orngStat, orngTree # set up the learners bayes = orange.BayesLearner() tree = orngTree.TreeLearner(mForPruning=2) bayes.name = "bayes" tree.name = "tree" learners = [bayes, tree] # compute accuracies on data data = orange.ExampleTable("voting") results = orngTest.crossValidation(learners, data, folds=10) # output the results print "Learner CA IS Brier AUC" for i in range(len(learners)): print "%-8s %5.3f %5.3f %5.3f %5.3f" % (learners[i].name, \ orngStat.CA(results)[i], orngStat.IS(results)[i], orngStat.BrierScore(results)[i], orngStat.AUC(results)[i])
# Compare Bayes, tree and majority learners by cross-validation on the
# "voting" and "vehicle" data sets, then print CA, AP, Brier score and
# IS for the voting run -- first as plain values, then again with
# reportSE=True so each scorer returns (value, standard-error) pairs.
# NOTE(review): the final ``for l in range(len(learners)):`` has no body
# in this view -- the chunk appears truncated here; do not reformat or
# restructure without the missing lines. ``resVeh`` (the vehicle run) is
# presumably consumed by that missing continuation -- confirm.
learners = [orange.BayesLearner(name = "bayes"), orngTree.TreeLearner(name="tree"), orange.MajorityLearner(name="majrty")] voting = orange.ExampleTable("voting") res = orngTest.crossValidation(learners, voting) vehicle = orange.ExampleTable("vehicle") resVeh = orngTest.crossValidation(learners, vehicle) import orngStat CAs = orngStat.CA(res) APs = orngStat.AP(res) Briers = orngStat.BrierScore(res) ISs = orngStat.IS(res) print print "method\tCA\tAP\tBrier\tIS" for l in range(len(learners)): print "%s\t%5.3f\t%5.3f\t%5.3f\t%6.3f" % (learners[l].name, CAs[l], APs[l], Briers[l], ISs[l]) CAs = orngStat.CA(res, reportSE=True) APs = orngStat.AP(res, reportSE=True) Briers = orngStat.BrierScore(res, reportSE=True) ISs = orngStat.IS(res, reportSE=True) print print "method\tCA\tAP\tBrier\tIS" for l in range(len(learners)):
# Scatter-plot three classes of 2-D data (X11/X12, X21/X22, X31/X32)
# plus, in magenta '^' markers, the points whose data_scored entry is 0,
# then print a CA/IS/Brier/AUC table for the learners.
# NOTE(review): the leading ``data_scored.append(0)`` belongs to a loop
# or conditional that starts before this view -- its enclosing scope and
# indentation cannot be reconstructed here, so the line is left verbatim.
# Presumably data_scored holds 1 for correctly classified rows of X and
# 0 for misclassified ones -- confirm against the missing scoring loop.
data_scored.append(0) import matplotlib.pyplot as plt import matplotlib X1w = [] X2w = [] for i in range(len(X)): if data_scored[i] == 0: X1w.append(X[i][0]) X2w.append(X[i][1]) ######################## # Plot the misclassified data ######################## fig = plt.figure() ax1 = fig.add_subplot(111) ax1.scatter(X11, X12, s=10, c='b', marker="+") ax1.scatter(X21, X22, s=10, c='c', marker="o") ax1.scatter(X31, X32, s=10, c='y', marker="x") ax1.scatter(X1w, X2w, s=10, c='m', marker="^") plt.title('Plot of Three Classes of Data, Showing the Misclassified Elements') plt.show() # output the results print "Learner CA IS Brier AUC" for i in range(len(learners)): print "%-8s %5.3f %5.3f %5.3f %5.3f" % (learners[i].name, \ orngStat.CA(results)[i], orngStat.IS(results)[i], orngStat.BrierScore(results)[i], orngStat.AUC(results)[i])
bayes = orange.BayesLearner() tree = orngTree.TreeLearner(mForPruning=2) bayes.name = "bayes" tree.name = "tree" learners = [bayes, tree] # compute accuracies on data data = orange.ExampleTable("voting") res = orngTest.crossValidation(learners, data, folds=10) cm = orngStat.computeConfusionMatrices( res, classIndex=data.domain.classVar.values.index('democrat')) stat = ( ('CA', lambda res, cm: orngStat.CA(res)), ('Sens', lambda res, cm: orngStat.sens(cm)), ('Spec', lambda res, cm: orngStat.spec(cm)), ('AUC', lambda res, cm: orngStat.AUC(res)), ('IS', lambda res, cm: orngStat.IS(res)), ('Brier', lambda res, cm: orngStat.BrierScore(res)), ('F1', lambda res, cm: orngStat.F1(cm)), ('F2', lambda res, cm: orngStat.Falpha(cm, alpha=2.0)), ('MCC', lambda res, cm: orngStat.MCC(cm)), ('sPi', lambda res, cm: orngStat.scottsPi(cm)), ) scores = [s[1](res, cm) for s in stat] print print "Learner " + "".join(["%-7s" % s[0] for s in stat]) for (i, l) in enumerate(learners): print "%-8s " % l.name + "".join(["%5.3f " % s[i] for s in scores])