Example #1
0
 def testEvaluatorRegular(self):
     """Certain-only links scored against a no-type gold file are perfect.

     Both fixture files describe the same alignments, so every metric
     must come out at its ideal value.
     """
     from evaluator import evaluate
     gold = loadAlignment("../support/ut_align_no_type.a")
     predicted = loadAlignment("../support/ut_align_no_prob.a")
     # Keep only the "certain" links from each predicted sentence.
     certainLinks = [entry["certain"] for entry in predicted]
     expected = {"Precision": 1.0, "Recall": 1.0, "AER": 0.0, "F-score": 1.0}
     self.assertEqual(evaluate(certainLinks, gold), expected)
Example #2
0
 def testEvaluatorWithType(self):
     """Typed evaluator scores identical alignments as perfect.

     The "clean" fixture's certain + probable links together match the
     original file exactly, so all metrics must be ideal.
     """
     from evaluatorWithType import evaluate
     gold = loadAlignment("../support/ut_align_no_tag.a")
     clean = loadAlignment("../support/ut_align_no_tag_clean.a")
     # Merge certain and probable links per sentence before scoring.
     mergedLinks = [entry["certain"] + entry["probable"] for entry in clean]
     expected = {"Precision": 1.0, "Recall": 1.0, "AER": 0.0, "F-score": 1.0}
     self.assertEqual(evaluate(mergedLinks, gold), expected)
    # Command-line driver: score an alignment file against a gold reference.
    # (Fragment — the enclosing entry-point block starts before this excerpt.)
    optparser = optparse.OptionParser()
    # -v/--testSize: number of sentence pairs to load from each file.
    optparser.add_option("-v",
                         "--testSize",
                         dest="testSize",
                         default=1956,
                         type="int",
                         help="Number of sentences to use for testing")
    # -r/--reference: path to the gold-standard alignment file.
    optparser.add_option("-r",
                         "--reference",
                         dest="reference",
                         default="",
                         help="Location of reference file")
    # -a/--alignment: path to the system-produced alignment file.
    optparser.add_option("-a",
                         "--alignment",
                         dest="alignment",
                         default="",
                         help="Location of alignment file")
    (opts, _) = optparser.parse_args()

    # NOTE(review): missing paths are only logged; execution falls through to
    # loadAlignment with an empty path — confirm whether this should abort.
    if not opts.reference:
        logger.error("reference file missing")
    if not opts.alignment:
        logger.error("alignment file missing")

    alignment = loadAlignment(opts.alignment, opts.testSize)
    goldAlignment = loadAlignment(opts.reference, opts.testSize)

    # Score only the "certain" links from the system output.
    testAlignment = [sentence["certain"] for sentence in alignment]

    evaluate(testAlignment, goldAlignment)
Example #4
0
            # (Fragment — the `if` matching this `else` is outside this
            # excerpt.)  Fall back to the forward-direction result.
            else:
                alignResult = resultAlignment

        if config['intersect'] is True:
            # Intersection is performed here.
            # Keep a forward link only if the reverse run produced the same
            # link with its source/target indices swapped.
            result = []
            for align, alignRev in zip(alignResult, alignResultRev):
                sentenceAlignment = []
                for item in align:
                    if len(item) == 2:
                        # Without alignment type: link is (src, tgt).
                        if (item[1], item[0]) in alignRev:
                            sentenceAlignment.append(item)
                    else:
                        # With alignment type: link is (src, tgt, type);
                        # swap indices but keep the type when matching.
                        if (item[1], item[0], item[2]) in alignRev:
                            sentenceAlignment.append(item)
                result.append(sentenceAlignment)
            alignResult = result

        # Optionally write the (possibly intersected) alignment to disk.
        if config['output'] != "":
            exportToFile(alignResult, config['output'])

        # Score against a gold reference when one is configured; skipped when
        # the aligner's evaluate hook is unset/falsy.
        if config['reference'] != "":
            reference = loadAlignment(config['reference'])
            if aligner.evaluate:
                aligner.evaluate(alignResult, reference, config['showFigure'])
        if config['showFigure'] > 0:
            from models.plot import showPlot
            showPlot()