Example #1
# assumes the Orange 2.x scripting modules (orange, orngTree) and a local
# `common` module providing standardDeviation() have been imported elsewhere
def nipsEvalFine():
    '''
    Takes the data produced by nipsBuild() and runs it through a set of
    classifiers to see how well the FEs can classify a piece by year,
    assigning an error value to misjudged years.

    Coarse evaluations worked much better.
    '''
    data1 = orange.ExampleTable('d:/desktop/year1.tab')
    data2 = orange.ExampleTable('d:/desktop/year2.tab')

    learners = {}
    learners['maj'] = orange.MajorityLearner
    learners['bayes'] = orange.BayesLearner
    learners['tree'] = orngTree.TreeLearner
    learners['knn'] = orange.kNNLearner

    for cName, cType in learners.items():
        for cData, cStr, matchData, matchStr in [
            (data1, 'file1', data2, 'file2'),
            (data2, 'file2', data1, 'file1'),
        ]:
            # train on one file, then evaluate on the other
            classifier = cType(cData)
            mismatch = []
            for example in matchData:
                c = classifier(example)
                # signed error between the predicted and the actual year
                mismatch.append(int(c) - int(example.getclass()))
            stdDev = common.standardDeviation(mismatch)
            print('%s %s: std. deviation %f on %s' % (cStr, cName, stdDev, matchStr))
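This example (and the next one) call common.standardDeviation(), which is not
defined in the snippet. Below is a minimal sketch of what such a helper might
look like; the name and signature are inferred from the calls here, and the
bassel keyword (seen in the next example) is assumed, from its name only, to
toggle Bessel's (n - 1) correction.

import math

def standardDeviation(coll, bassel=False):
    # population standard deviation by default; with bassel=True, divide by
    # (n - 1) instead of n (Bessel's correction) -- an assumption based only
    # on the keyword's name in the original call
    n = len(coll)
    mean = sum(coll) / float(n)
    denominator = (n - 1) if bassel else n
    variance = sum((x - mean) ** 2 for x in coll) / float(denominator)
    return math.sqrt(variance)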
Example #2
    def _tonalCertainityCorrelationCoefficient(self, *args, **keywords):
        '''
        Combine the correlation coefficient of the best key interpretation
        with those of its alternates into a single tonal-certainty estimate.
        '''
        # possible measures:
        if self.alternateInterpretations is None or len(self.alternateInterpretations) == 0:
            raise KeySignatureException('cannot process ambiguity without a list of .alternateInterpretations')
        focus = []
        focus.append(self.correlationCoefficient)
        for subKey in self.alternateInterpretations:
            cc = subKey.correlationCoefficient
            if cc > 0:
                focus.append(cc)
#         print focus
#         print

        # take abs magnitude as one factor; assume between 0 and 1
        # greater certainty often has a larger number
        absMagnitude = focus[0] 

        # take distance from first to second; greater certainty
        # seems to have a greater span; fall back to 0 when no alternate
        # interpretation had a positive coefficient
        if len(focus) > 1:
            leaderSpan = focus[0] - focus[1]
        else:
            leaderSpan = 0

        # take the mean of the primary coefficient and the positive alternates
        meanMagnitude = sum(focus) / float(len(focus))

        # standard deviation of the same values
        standardDeviation = common.standardDeviation(focus, bassel=False)

        environLocal.printDebug(['absMagnitude', absMagnitude,
                                 'leaderSpan', leaderSpan,
                                 'meanMagnitude', meanMagnitude,
                                 'standardDeviation', standardDeviation])

        # combine factors with a weighting for each; the estimated range of
        # the result is about 2, and it is not normalized to 0-1 here
        return (absMagnitude * 1) + (leaderSpan * 2)
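To see how the weighted combination at the end behaves, here is a tiny
standalone illustration with made-up correlation coefficients (hypothetical
values, not drawn from any real key analysis):

# hypothetical, sorted correlation coefficients: best interpretation first
confident = [0.95, 0.40, 0.30]   # clear winner, wide leader span
ambiguous = [0.60, 0.58, 0.55]   # close race, narrow leader span

for focus in (confident, ambiguous):
    absMagnitude = focus[0]
    leaderSpan = focus[0] - focus[1]
    certainty = (absMagnitude * 1) + (leaderSpan * 2)
    print(focus, '->', round(certainty, 2))

# confident: 0.95 + 2 * 0.55 = 2.05
# ambiguous: 0.60 + 2 * 0.02 = 0.64

A larger best coefficient and a wider gap to the runner-up both push the
certainty figure up, which matches the comments in the method above.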