Example 1: anavec test and evaluation script
import sys
import argparse

# The imports below assume the usual anavec/pynlpl module layout; adjust the
# paths to match your installation.
from anavec.anavec import Corrector, InputTokenState, setup_argparser
from pynlpl.evaluation import ClassEvaluation

def main():
    parser = argparse.ArgumentParser(description="Anavec test and evaluation script", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--referencefile', type=str, help="Reference file (one error#correction#context instance per line)", action='store', required=True)
    setup_argparser(parser) #for anavec
    args = parser.parse_args()


    references = []
    alltestwords = []
    allmask = []
    with open(args.referencefile, 'r', encoding='utf-8') as f:
        # Each instance is a five-token window: two left-context tokens, the
        # correctable token, and two right-context tokens (the last one also
        # marked as end-of-line).
        windowmask = [InputTokenState.CORRECT]*2 + [InputTokenState.CORRECTABLE] + [InputTokenState.CORRECT, InputTokenState.CORRECT | InputTokenState.EOL]
        for line in f:
            # Reference-file line format: error#correction#context, where the
            # context holds exactly four space-separated words (two on either
            # side of the error), yielding a five-token window.
            fields = line.strip().split('#')
            error = fields[0]
            correction = fields[1]
            context = fields[2].split(' ')
            windowwords = context[:2] + [error] + context[2:]
            alltestwords += windowwords
            allmask += windowmask
            references.append((windowwords, windowmask, correction))

    print("Read %d reference instances" % len(references), file=sys.stderr)

    assert len(alltestwords) == len(allmask)

    corrector = Corrector(**vars(args))

    observations = []
    goals = []
    for result, (testwords, mask, reference) in zip(corrector.correct(alltestwords, allmask), references):
        # Candidate corrections for token index 2 (the correctable word), best score first
        output = sorted(result['candidatetree'][2][1], key=lambda x: x.score, reverse=True)
        if output:
            if reference == output[0].text:
                match = "BEST"
            elif reference in [c.text for c in output]:
                match = "YES"
            else:
                match = "NO"
            print(match + "\t[" + reference + "]\t" +  testwords[2] + "\t-->\t" + "\t".join([ c.text + "["+str(c.score)+"]" for c in output]))
            output = output[0].text
        else:
            output = ""
            print("NO\t[" + reference + "]\t" + testwords[2] + "\t-->\tNO-SUGGESTIONS! [0.0]")
        observations.append(output)
        goals.append(reference)

    evaluation = ClassEvaluation(goals, observations)
    print("# Precision = ", evaluation.precision())
    print("# Recall = ", evaluation.recall())
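
For reference, each line of the --referencefile read above is expected to take the form error#correction#context, with exactly four space-separated context words (two on either side of the error). A minimal sketch of how one such line is parsed, using an invented example line:

line = "freind#friend#my old came home"   # hypothetical reference line, tokens invented for illustration
error, correction, context = line.strip().split('#')
contextwords = context.split(' ')
windowwords = contextwords[:2] + [error] + contextwords[2:]
assert windowwords == ['my', 'old', 'freind', 'came', 'home']
# Index 2 ('freind') is the position flagged CORRECTABLE in windowmask.
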
Example 2: ClassEvaluation smoke test (Python 2)
    def test001(self):
        # Python 2 version: bare print statements rather than print() calls
        e = ClassEvaluation(self.goals, self.observations)
        print
        print e
        print e.confusionmatrix()
Example 3: the same test, ported to Python 3 print calls
    def test001(self):
        e = ClassEvaluation(self.goals, self.observations)
        print()
        print(e)
        print(e.confusionmatrix())
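
For context, ClassEvaluation is fed two parallel label sequences: the gold-standard classes and the system's predictions. A minimal sketch with invented labels, assuming the pynlpl.evaluation constructor used above:

from pynlpl.evaluation import ClassEvaluation   # assumed import path, as in Example 1

goals = ['cat', 'cat', 'dog', 'rabbit']          # gold-standard labels (invented)
observations = ['cat', 'dog', 'dog', 'rabbit']   # system predictions (invented)
e = ClassEvaluation(goals, observations)
print(e.accuracy())   # 3 of 4 predictions match the gold labels -> 0.75
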
Example 4: ClassEvaluation test with confusion-matrix assertions
    def test001(self):
        """Class evaluation test -- (See also http://en.wikipedia.org/wiki/Confusion_matrix , using same data)"""
        e = ClassEvaluation(self.goals, self.observations)

        print()
        print(e)
        print(e.confusionmatrix())

        self.assertEqual(e.tp['cat'], 5)
        self.assertEqual(e.fp['cat'], 2)
        self.assertEqual(e.tn['cat'], 17)
        self.assertEqual(e.fn['cat'], 3)

        self.assertEqual(e.tp['rabbit'], 11)
        self.assertEqual(e.fp['rabbit'], 1)
        self.assertEqual(e.tn['rabbit'], 13)
        self.assertEqual(e.fn['rabbit'], 2)

        self.assertEqual(e.tp['dog'], 3)
        self.assertEqual(e.fp['dog'], 5)
        self.assertEqual(e.tn['dog'], 16)
        self.assertEqual(e.fn['dog'], 3)

        self.assertEqual(round(e.precision('cat'), 6), 0.714286)
        self.assertEqual(round(e.precision('rabbit'), 6), 0.916667)
        self.assertEqual(round(e.precision('dog'), 6), 0.375000)

        self.assertEqual(round(e.recall('cat'), 6), 0.625000)
        self.assertEqual(round(e.recall('rabbit'), 6), 0.846154)
        self.assertEqual(round(e.recall('dog'), 6), 0.500000)

        self.assertEqual(round(e.fscore('cat'), 6), 0.666667)
        self.assertEqual(round(e.fscore('rabbit'), 6), 0.880000)
        self.assertEqual(round(e.fscore('dog'), 6), 0.428571)

        self.assertEqual(round(e.accuracy(), 6), 0.703704)
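
The expected values in this test follow directly from the standard definitions, so they can be checked by hand. A quick arithmetic sketch reproducing the 'cat' figures and the overall accuracy from the asserted counts, independent of pynlpl:

tp, fp, fn = 5, 2, 3                       # the 'cat' counts asserted above
precision = tp / (tp + fp)                 # 5/7  -> 0.714286
recall = tp / (tp + fn)                    # 5/8  -> 0.625
fscore = 2 * precision * recall / (precision + recall)   # 10/15 -> 0.666667
accuracy = (5 + 11 + 3) / 27               # all correct predictions over all 27 instances
print(round(precision, 6), round(recall, 6), round(fscore, 6), round(accuracy, 6))
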
Example 5: a formatting variant of the test in Example 4
    def test001(self):
        """Class evaluation test -- (See also http://en.wikipedia.org/wiki/Confusion_matrix , using same data)"""
        e = ClassEvaluation(self.goals, self.observations)
        
        print()
        print(e)
        print(e.confusionmatrix())

        self.assertEqual(e.tp['cat'], 5)
        self.assertEqual(e.fp['cat'], 2)
        self.assertEqual(e.tn['cat'], 17)
        self.assertEqual(e.fn['cat'], 3)

        self.assertEqual(e.tp['rabbit'], 11)
        self.assertEqual(e.fp['rabbit'], 1)
        self.assertEqual(e.tn['rabbit'], 13)
        self.assertEqual(e.fn['rabbit'], 2)

        self.assertEqual(e.tp['dog'], 3)
        self.assertEqual(e.fp['dog'], 5)
        self.assertEqual(e.tn['dog'], 16)
        self.assertEqual(e.fn['dog'], 3)

        self.assertEqual(round(e.precision('cat'), 6), 0.714286)
        self.assertEqual(round(e.precision('rabbit'), 6), 0.916667)
        self.assertEqual(round(e.precision('dog'), 6), 0.375000)

        self.assertEqual(round(e.recall('cat'), 6), 0.625000)
        self.assertEqual(round(e.recall('rabbit'), 6), 0.846154)
        self.assertEqual(round(e.recall('dog'), 6), 0.500000)

        self.assertEqual(round(e.fscore('cat'), 6), 0.666667)
        self.assertEqual(round(e.fscore('rabbit'), 6), 0.880000)
        self.assertEqual(round(e.fscore('dog'), 6), 0.428571)

        self.assertEqual(round(e.accuracy(), 6), 0.703704)