Example #1
def AUC_single(res, classIndex=-1, useWeights=True):
    if classIndex < 0:
        if res.baseClass >= 0:
            classIndex = res.baseClass
        else:
            classIndex = 1

    if res.numberOfIterations > 1:
        return orngStat.AUC_iterations(orngStat.AUC_i, orngStat.splitByIterations(res), (classIndex, useWeights, res, res.numberOfIterations))
    else:
        return orngStat.AUC_i(res, classIndex, useWeights)
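
This helper computes the AUC for one target class and averages over the cross-validation folds when the results span several iterations. A minimal usage sketch, assuming the legacy Orange 2 modules (orange, orngTest, orngStat); the dataset and learner are illustrative:

import orange, orngTest, orngStat

data = orange.ExampleTable("voting")                     # any two-class dataset
learners = [orange.BayesLearner(name="bayes")]
res = orngTest.crossValidation(learners, data, folds=5)
print AUC_single(res)    # AUC for the default target class, averaged over folds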
Example #2
def AUC_single(res, classIndex=-1, useWeights=True):
    if classIndex < 0:
        if res.baseClass >= 0:
            classIndex = res.baseClass
        else:
            classIndex = 1

    if res.numberOfIterations > 1:
        return orngStat.AUC_iterations(
            orngStat.AUC_i, orngStat.splitByIterations(res),
            (classIndex, useWeights, res, res.numberOfIterations))
    else:
        return orngStat.AUC_i(res, classIndex, useWeights)
Example #3
    def results(self, dres):
        self.closeContext()

        self.FPcostList = []
        self.FNcostList = []
        self.pvalueList = []

        self.classCombo.clear()
        self.removeGraphs()
        self.testSetsQLB.clear()
        self.classifiersQLB.clear()

        self.dres = dres

        if not dres:
            self.targetClass = None
            self.openContext("", dres)
            return

        if dres and dres.test_type != TEST_TYPE_SINGLE:
            self.warning(0, "Lift curve is supported only for single-target prediction problems.")
            return
        self.warning(0, None)

        self.defaultPerfLinePValues = []
        if self.dres != None:
            ## classQLB
            self.numberOfClasses = len(self.dres.classValues)
            self.graphs = []

            for i in range(self.numberOfClasses):
                self.FPcostList.append(500)
                self.FNcostList.append(500)
                graph = singleClassLiftCurveGraph(self.mainArea, "", "Predicted class: " + self.dres.classValues[i])
                self.graphs.append(graph)
                self.classCombo.addItem(self.dres.classValues[i])

            ## classifiersQLB
            self.classifierColor = []
            self.numberOfClassifiers = self.dres.numberOfLearners
            if self.numberOfClassifiers > 1:
                allCforHSV = self.numberOfClassifiers - 1
            else:
                allCforHSV = self.numberOfClassifiers
            for i in range(self.numberOfClassifiers):
                newColor = QColor()
                newColor.setHsv(i * 255 / allCforHSV, 255, 255)
                self.classifierColor.append(newColor)

            ## testSetsQLB
            self.dresSplitByIterations = orngStat.splitByIterations(self.dres)
            self.numberOfIterations = len(self.dresSplitByIterations)

            self.calcAllClassGraphs()

            ## classifiersQLB
            for i in range(self.numberOfClassifiers):
                newColor = self.classifierColor[i]
                self.classifiersQLB.addItem(QListWidgetItem(ColorPixmap(newColor), self.dres.classifierNames[i]))
            self.classifiersQLB.selectAll()

            ## testSetsQLB
            self.testSetsQLB.addItems([str(i) for i in range(self.numberOfIterations)])
            self.testSetsQLB.selectAll()

            ## calculate default pvalues
            reminder = self.maxp
            for f in orngStat.classProbabilitiesFromRes(self.dres):
                v = int(round(f * self.maxp))
                reminder -= v
                if reminder < 0:
                    v = v + reminder
                self.defaultPerfLinePValues.append(v)
                self.pvalueList.append(v)

            self.targetClass = 0  ## select first target
            self.target()
        else:
            self.classifierColor = None
        self.openContext("", self.dres)
        self.performanceTabCosts.setEnabled(1)
        self.setDefaultPValues()
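
The block under "## calculate default pvalues" scales the class prior probabilities to integer values whose total does not exceed self.maxp, shrinking a value when rounding overshoots. A standalone sketch of that computation (the maxp=100 default is illustrative, not the widget's actual setting):

def default_pvalues(class_probabilities, maxp=100):
    # Scale each prior to an integer share of maxp; when the running remainder
    # goes negative, shrink the current value so the total stays within maxp.
    values, remainder = [], maxp
    for p in class_probabilities:
        v = int(round(p * maxp))
        remainder -= v
        if remainder < 0:
            v += remainder
        values.append(v)
    return values

# default_pvalues([0.5, 0.3, 0.2]) -> [50, 30, 20]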
Example #4
    def test_results(self, dres):
        self.FPcostList = []
        self.FNcostList = []
        self.pvalueList = []

        self.closeContext()

        if not dres:
            self.targetClass = None
            self.classCombo.clear()
            self.testSetsQLB.clear()
            self.classifiersQLB.clear()
            self.removeGraphs()
            self.openContext("", dres)
            return
        self.dres = dres

        self.classifiersQLB.clear()
        self.testSetsQLB.clear()
        self.removeGraphs()
        self.classCombo.clear()

        self.defaultPerfLinePValues = []
        if self.dres != None:
            ## classQLB
            self.numberOfClasses = len(self.dres.classValues)
            self.graphs = []

            for i in range(self.numberOfClasses):
                self.FPcostList.append(500)
                self.FNcostList.append(500)
                graph = singleClassROCgraph(
                    self.mainArea, "",
                    "Predicted class: " + self.dres.classValues[i])
                self.graphs.append(graph)
                self.classCombo.addItem(self.dres.classValues[i])

            ## classifiersQLB
            self.classifierColor = []
            self.numberOfClassifiers = self.dres.numberOfLearners
            if self.numberOfClassifiers > 1:
                allCforHSV = self.numberOfClassifiers - 1
            else:
                allCforHSV = self.numberOfClassifiers
            for i in range(self.numberOfClassifiers):
                newColor = QColor()
                newColor.setHsv(i * 255 / allCforHSV, 255, 255)
                self.classifierColor.append(newColor)

            ## testSetsQLB
            self.dresSplitByIterations = orngStat.splitByIterations(self.dres)
            self.numberOfIterations = len(self.dresSplitByIterations)

            self.calcAllClassGraphs()

            ## classifiersQLB
            for i in range(self.numberOfClassifiers):
                newColor = self.classifierColor[i]
                self.classifiersQLB.addItem(
                    QListWidgetItem(ColorPixmap(newColor),
                                    self.dres.classifierNames[i]))
            self.classifiersQLB.selectAll()

            ## testSetsQLB
            self.testSetsQLB.addItems(
                [str(i) for i in range(self.numberOfIterations)])
            self.testSetsQLB.selectAll()

            ## calculate default pvalues
            reminder = self.maxp
            for f in orngStat.classProbabilitiesFromRes(self.dres):
                v = int(round(f * self.maxp))
                reminder -= v
                if reminder < 0:
                    v = v + reminder
                self.defaultPerfLinePValues.append(v)
                self.pvalueList.append(v)

            self.targetClass = 0  ## select first target
            self.target()
        else:
            self.classifierColor = None
        self.openContext("", self.dres)
        self.performanceTabCosts.setEnabled(self.AveragingMethod == 'merge')
        self.setDefaultPValues()
Example #5
    def __call__(self, examples, weightID=0, **kwds):
        import orngTest, orngStat, statc

        self.__dict__.update(kwds)

        if self.removeThreshold < self.addThreshold:
            raise "'removeThreshold' should be larger or equal to 'addThreshold'"

        classVar = examples.domain.classVar

        indices = orange.MakeRandomIndicesCV(examples,
                                             folds=getattr(self, "folds", 10))
        domain = orange.Domain([], classVar)

        res = orngTest.testWithIndices([self.learner],
                                       orange.ExampleTable(domain, examples),
                                       indices)

        oldStat = self.stat(res)[0]
        oldStats = [self.stat(x)[0] for x in orngStat.splitByIterations(res)]
        print ".", oldStat, domain
        stop = False
        while not stop:
            stop = True
            if len(domain.attributes) >= 2:
                bestStat = None
                for attr in domain.attributes:
                    newdomain = orange.Domain(
                        filter(lambda x: x != attr, domain.attributes),
                        classVar)
                    res = orngTest.testWithIndices(
                        [self.learner],
                        (orange.ExampleTable(newdomain, examples), weightID),
                        indices)

                    newStat = self.stat(res)[0]
                    newStats = [
                        self.stat(x)[0]
                        for x in orngStat.splitByIterations(res)
                    ]
                    print "-", newStat, newdomain
                    ## If stat has increased (i.e. newStat is better than bestStat)
                    if not bestStat or cmp(newStat, bestStat) == self.statsign:
                        if cmp(newStat, oldStat) == self.statsign:
                            bestStat, bestStats, bestAttr = newStat, newStats, attr
                        elif statc.wilcoxont(
                                oldStats, newStats)[1] > self.removeThreshold:
                            bestStat, bestStats, bestAttr = newStat, newStats, attr
                if bestStat:
                    domain = orange.Domain(
                        filter(lambda x: x != bestAttr, domain.attributes),
                        classVar)
                    oldStat, oldStats = bestStat, bestStats
                    stop = False
                    print "removed", bestAttr.name

            bestStat, bestAttr = oldStat, None
            for attr in examples.domain.attributes:
                if not attr in domain.attributes:
                    newdomain = orange.Domain(domain.attributes + [attr],
                                              classVar)
                    res = orngTest.testWithIndices(
                        [self.learner],
                        (orange.ExampleTable(newdomain, examples), weightID),
                        indices)

                    newStat = self.stat(res)[0]
                    newStats = [
                        self.stat(x)[0]
                        for x in orngStat.splitByIterations(res)
                    ]
                    print "+", newStat, newdomain

                    ## If stat has increased (i.e. newStat is better than bestStat)
                    if cmp(newStat,
                           bestStat) == self.statsign and statc.wilcoxont(
                               oldStats, newStats)[1] < self.addThreshold:
                        bestStat, bestStats, bestAttr = newStat, newStats, attr
            if bestAttr:
                domain = orange.Domain(domain.attributes + [bestAttr],
                                       classVar)
                oldStat, oldStats = bestStat, bestStats
                stop = False
                print "added", bestAttr.name

        return self.learner(orange.ExampleTable(domain, examples), weightID)
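
The search above accepts or rejects a candidate attribute by comparing per-fold scores with a paired Wilcoxon signed-rank test: statc.wilcoxont(oldStats, newStats) returns the test statistic and a p-value, which the code checks against addThreshold and removeThreshold. The same check with SciPy, as an illustration with made-up per-fold accuracies rather than part of the original learner:

from scipy.stats import wilcoxon

old_scores = [0.81, 0.78, 0.83, 0.80, 0.79]   # illustrative per-fold CAs
new_scores = [0.84, 0.80, 0.86, 0.83, 0.82]
statistic, pvalue = wilcoxon(old_scores, new_scores)
print pvalue    # compared against addThreshold / removeThreshold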
Example #6
    def results(self, dres):
        self.closeContext()

        self.FPcostList = []
        self.FNcostList = []
        self.pvalueList = []

        self.classCombo.clear()
        self.removeGraphs()
        self.testSetsQLB.clear()
        self.classifiersQLB.clear()

        self.warning([0, 1])

        if dres is not None and dres.class_values is None:
            self.warning(1, "Lift Curve cannot be used for regression results.")
            dres = None

        self.dres = dres

        if not dres:
            self.targetClass = None
            self.openContext("", dres)
            return

        if dres and dres.test_type != TEST_TYPE_SINGLE:
            self.warning(0, "Lift curve is supported only for single-target prediction problems.")
            return

        self.defaultPerfLinePValues = []
        if self.dres != None:
            ## classQLB
            self.numberOfClasses = len(self.dres.classValues)
            self.graphs = []

            for i in range(self.numberOfClasses):
                self.FPcostList.append(500)
                self.FNcostList.append(500)
                graph = singleClassLiftCurveGraph(self.mainArea, "", "Predicted class: " + self.dres.classValues[i])
                self.graphs.append(graph)
                self.classCombo.addItem(self.dres.classValues[i])

            ## classifiersQLB
            self.classifierColor = []
            self.numberOfClassifiers = self.dres.numberOfLearners
            if self.numberOfClassifiers > 1:
                allCforHSV = self.numberOfClassifiers - 1
            else:
                allCforHSV = self.numberOfClassifiers
            for i in range(self.numberOfClassifiers):
                newColor = QColor()
                newColor.setHsv(i * 255 / allCforHSV, 255, 255)
                self.classifierColor.append(newColor)

            ## testSetsQLB
            self.dresSplitByIterations = orngStat.splitByIterations(self.dres)
            self.numberOfIterations = len(self.dresSplitByIterations)

            self.calcAllClassGraphs()

            ## classifiersQLB
            for i in range(self.numberOfClassifiers):
                newColor = self.classifierColor[i]
                self.classifiersQLB.addItem(QListWidgetItem(ColorPixmap(newColor), self.dres.classifierNames[i]))
            self.classifiersQLB.selectAll()

            ## testSetsQLB
            self.testSetsQLB.addItems([str(i) for i in range(self.numberOfIterations)])
            self.testSetsQLB.selectAll()

            ## calculate default pvalues
            reminder = self.maxp
            for f in orngStat.classProbabilitiesFromRes(self.dres):
                v = int(round(f * self.maxp))
                reminder -= v
                if reminder < 0:
                    v = v + reminder
                self.defaultPerfLinePValues.append(v)
                self.pvalueList.append(v)

            self.targetClass = 0  # select first class as default target
            self.openContext("", self.dres)

            # Update target class and selected classifiers from
            # context settings
            self.target()
            self.classifiersSelectionChange()

        else:
            self.classifierColor = None
        self.performanceTabCosts.setEnabled(1)
        self.setDefaultPValues()
Example #7
    # You can test different learning methods alone or all at once
    # by adjusting the simple array of methods here. Orange contains
    # many more methods. In particular, SVM has several variations,
    # such as different basis functions. You can set up many of these
    # and include them here or not.
    learners = [knn, svm, bayes, tree, forest, bs, bg]
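    # The learner objects in the list are not constructed in this fragment; a
    # plausible setup with the Orange 2.5 API (parameters here are assumptions,
    # not the project's actual configuration) would be:
    #
    #   bayes = Orange.classification.bayes.NaiveLearner(name='bayes')
    #   knn = Orange.classification.knn.kNNLearner(k=5, name='knn')
    #   svm = Orange.classification.svm.SVMLearner(name='svm')
    #   tree = Orange.classification.tree.TreeLearner(name='tree')
    #   forest = Orange.ensemble.forest.RandomForestLearner(name='forest')
    #   bg = Orange.ensemble.bagging.BaggedLearner(tree, name='bagging')
    #   bs = Orange.ensemble.boosting.BoostedLearner(tree, name='boosting')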

    # Carry out the cross validation calculations with all of the different learning methods
    results = Orange.evaluation.testing.cross_validation(learners, data, folds=kFolds)

    # Compute statistics on the results and print out
    cm = orngStat.computeConfusionMatrices(results, class_index=data.domain.classVar.values.index(target))
    ma = orngFSS.attMeasure(data)
    t0 = orngStat.CA(results)

    roc = orngStat.splitByIterations(results)
    #print "shape of roc = ", np.shape(roc)

    stat = (('CA', 'CA(results)'),
            ('Sens', 'sens(cm)'),
            ('Spec', 'spec(cm)'),
            ('AUC', 'AUC(results)'),
            ('IS', 'IS(results)'),
            ('Brier', 'BrierScore(results)'))
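
    # One way to use the (label, expression) pairs above, mirroring the classic
    # orngStat example scripts (an assumption about the intended pattern, not
    # code from the original project): evaluate each expression and print a
    # per-learner score table.
    scores = [eval("orngStat." + expr) for label, expr in stat]
    print "Learner   " + "".join(["%-8s" % label for label, expr in stat])
    for i in range(len(learners)):
        print "%-9s " % learners[i].name + \
            "".join(["%7.3f " % score[i] for score in scores])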

    #----------------------------------------------------------------------------------------
    # Perform a permutation analysis to compute an empirical p-value. The
    # method implemented here is described in detail in:
    #
    #   Golland, P., and Fischl, B. (2003). Permutation tests for classification: towards
    #   statistical significance in image-based studies. Inf Process Med Imaging 18, 330-341.
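    # The permutation loop itself is not included in this fragment.  What follows
    # is a minimal sketch of the idea described above (an assumption, not the
    # project's implementation): shuffle the class labels, repeat the
    # cross-validation, and report the fraction of permutations whose accuracy
    # matches or beats the accuracy obtained on the true labels.
    import random

    def permutation_pvalue(learner, table, n_permutations=100, folds=10):
        observed = orngStat.CA(Orange.evaluation.testing.cross_validation(
            [learner], table, folds=folds))[0]
        labels = [ex.getclass() for ex in table]
        hits = 0
        for _ in range(n_permutations):
            random.shuffle(labels)
            shuffled = Orange.data.Table(table.domain, table)   # copy the data
            for ex, lab in zip(shuffled, labels):
                ex.setclass(lab)
            ca = orngStat.CA(Orange.evaluation.testing.cross_validation(
                [learner], shuffled, folds=folds))[0]
            if ca >= observed:
                hits += 1
        return float(hits) / n_permutations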
        roc = "\\multirow{%i}{*}{\includegraphics[scale=0.3]{%s} }" % (learnersCount, os.path.join('..', directory, ROC_PLOT))
        first = False

    print "%s & %s & %5.3f & %5.3f & %i & %i & %i & %i & %5.3f & %5.3f & %5.3f & %s \\\\" % \
        (rowName, learners[index].name, CAs[index], APs[index], \
             CMs[index].TP, CMs[index].FP, CMs[index].FN, CMs[index].TN, \
             orngStat.sens(CMs[index]), orngStat.PPV(CMs[index]), orngStat.F1(CMs[index]), \
             roc)

minimalRows = 9
if (minimalRows - learnersCount) > 0:
    for _ in range(minimalRows - learnersCount):
        print ' &  &  &  &  &  &  &  &  &  &  &  \\\\'


iterations = orngStat.splitByIterations(result)
curves = []
for iteration in iterations:
    ROCs = orngStat.computeROC(iteration)
    for ROC in ROCs:
        curves.append(ROC)

for index in range(learnersCount):
    points = []
    for curve in range(index, len(curves), learnersCount):
        points.extend(curves[curve])
    x = map(lambda a: a[0], points)
    y = map(lambda a: a[1], points)

    mpl.plot(x, y, 'o', label=learners[index].name, alpha=0.5)
mpl.plot([0, 1], [0, 1], '--', color='grey')
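
# The fragment stops here; a typical completion (an assumption, not shown in the
# original) would label the axes, add a legend and save or display the figure:
mpl.xlabel("False positive rate")
mpl.ylabel("True positive rate")
mpl.legend(loc="lower right")
mpl.savefig("roc_points.png")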