Example #1: scoring a list of sentences against itself
def testROUGEScorer(self):
    refs = ["Hello world !", "How is it going ?"]
    scorer = scorers.ROUGEScorer()
    # Scoring the references against themselves should give a perfect match.
    score = self._run_scorer(scorer, refs, refs)
    self.assertIsInstance(score, dict)
    self.assertIn("rouge-l", score)
    self.assertIn("rouge-1", score)
    self.assertIn("rouge-2", score)
    self.assertAlmostEqual(1.0, score["rouge-1"])
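
The _run_scorer helper is not shown in this snippet. A minimal sketch of what
it might do, assuming it writes both sentence lists to temporary files and
invokes the scorer on the resulting paths (the file-based call convention is
visible in Example #2; the helper body and file names below are assumptions):

def _run_scorer(self, scorer, refs, hyps):
    # Hypothetical helper: dump references and hypotheses to temporary
    # files, then call the scorer with the two file paths.
    ref_path = os.path.join(self.get_temp_dir(), "ref.txt")
    hyp_path = os.path.join(self.get_temp_dir(), "hyp.txt")
    with open(ref_path, "w") as ref_file, open(hyp_path, "w") as hyp_file:
        ref_file.write("\n".join(refs) + "\n")
        hyp_file.write("\n".join(hyps) + "\n")
    return scorer(ref_path, hyp_path)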
Example #2: scoring a reference file against a hypothesis file
def testROUGEScorer(self):
    rouge_scorer = scorers.ROUGEScorer()
    # The helper returns paths to a reference file and a matching hypothesis file.
    ref_path, hyp_path = self._make_perfect_hypothesis_file()
    score = rouge_scorer(ref_path, hyp_path)
    self.assertIsInstance(score, dict)
    self.assertIn("rouge-l", score)
    self.assertIn("rouge-1", score)
    self.assertIn("rouge-2", score)
    self.assertAlmostEqual(1.0, score["rouge-1"])
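
Example #2 depends on a _make_perfect_hypothesis_file helper that is not
shown. A plausible sketch, assuming it writes the same lines to both a
reference file and a hypothesis file so that every ROUGE variant scores 1.0
(the helper body, sentences, and file names are assumptions):

def _make_perfect_hypothesis_file(self):
    # Hypothetical helper: identical reference and hypothesis files yield
    # a perfect score for any overlap-based metric such as ROUGE.
    lines = ["Hello world !", "How is it going ?"]
    ref_path = os.path.join(self.get_temp_dir(), "ref.txt")
    hyp_path = os.path.join(self.get_temp_dir(), "hyp.txt")
    with open(ref_path, "w") as ref_file, open(hyp_path, "w") as hyp_file:
        ref_file.write("\n".join(lines) + "\n")
        hyp_file.write("\n".join(lines) + "\n")
    return ref_path, hyp_path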
Example #3: using ROUGEScorer inside an Evaluator
def testEvaluationWithRougeScorer(self):
    features_file = os.path.join(self.get_temp_dir(), "features.txt")
    labels_file = os.path.join(self.get_temp_dir(), "labels.txt")
    model_dir = self.get_temp_dir()
    with open(features_file, "w") as features, open(labels_file, "w") as labels:
        features.write("1\n2\n")
        labels.write("1\n2\n")
    model = TestModel()  # Stub model, presumably defined elsewhere in the test suite.
    evaluator = evaluation.Evaluator(
        model,
        features_file,
        labels_file,
        batch_size=1,
        scorers=[scorers.ROUGEScorer()],
        model_dir=model_dir)
    # The scorer contributes one metric per ROUGE variant; the bare name
    # "rouge" is not itself a metric.
    self.assertNotIn("rouge", evaluator.metrics_name)
    self.assertIn("rouge-1", evaluator.metrics_name)
    self.assertIn("rouge-2", evaluator.metrics_name)
    self.assertIn("rouge-l", evaluator.metrics_name)