def score(system_conllu_file, gold_conllu_file):
    """Score a system's lemmas against gold.

    Returns a (precision, recall, f1) tuple for the "Lemmas" metric as
    computed by the UD evaluation module.
    """
    gold = ud_eval.load_conllu_file(gold_conllu_file)
    system = ud_eval.load_conllu_file(system_conllu_file)
    lemmas = ud_eval.evaluate(gold, system)["Lemmas"]
    return lemmas.precision, lemmas.recall, lemmas.f1
def run_eval_script(gold_conllu_file, system_conllu_file, evals=None):
    """Run the UD evaluation on a system file versus a gold file.

    If `evals` is None, return the full verbose evaluation table built by
    `ud_eval.build_evaluation_table` (without counts).  Otherwise `evals`
    is an iterable of metric names, and the result is a single string of
    their F1 scores as percentages with two decimals, space-separated.
    """
    gold = ud_eval.load_conllu_file(gold_conllu_file)
    system = ud_eval.load_conllu_file(system_conllu_file)
    evaluation = ud_eval.evaluate(gold, system)
    if evals is None:
        return ud_eval.build_evaluation_table(evaluation, verbose=True, counts=False)
    # Report the requested metrics' F1 values as "NN.NN" percentages.
    f1_scores = (evaluation[key].f1 for key in evals)
    return " ".join("{:.2f}".format(100 * f1) for f1 in f1_scores)
def ud_scores(gold_conllu_file, system_conllu_file):
    """Evaluate a system CoNLL-U file against gold.

    Returns the raw evaluation mapping produced by `ud_eval.evaluate`,
    keyed by metric name.
    """
    gold = ud_eval.load_conllu_file(gold_conllu_file)
    system = ud_eval.load_conllu_file(system_conllu_file)
    return ud_eval.evaluate(gold, system)