Code example #1
File: utils.py Project: LouisGerard/Eisner
def score_las(filename_test, filename_gold):
    # compare a system-parsed CoNLL-U file against the gold file and return the LAS F1
    score = 0
    with open(filename_test, 'r') as ftest, \
            open(filename_gold, 'r') as fgold:
        test = las.load_conllu(ftest)
        gold = las.load_conllu(fgold)
        score = las.evaluate(gold, test)['LAS'].f1
    return score
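A minimal usage sketch, assuming `las` wraps the CoNLL 2017 UD evaluation script and both files are in CoNLL-U format; the file names below are hypothetical:
# hypothetical CoNLL-U file names
las_f1 = score_las('system_output.conllu', 'gold_standard.conllu')
print('LAS F1:', las_f1)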
Code example #2
File: conllu.py Project: spacy-io/spaCy
def evaluate(nlp, text_loc, gold_loc, sys_loc, limit=None):
    # parse the raw text with the pipeline
    with text_loc.open("r", encoding="utf8") as text_file:
        texts = split_text(text_file.read())
        docs = list(nlp.pipe(texts))
    # write the system output in CoNLL-U format
    with sys_loc.open("w", encoding="utf8") as out_file:
        write_conllu(docs, out_file)
    # load gold and system data and compute the evaluation scores
    with gold_loc.open("r", encoding="utf8") as gold_file:
        gold_ud = conll17_ud_eval.load_conllu(gold_file)
        with sys_loc.open("r", encoding="utf8") as sys_file:
            sys_ud = conll17_ud_eval.load_conllu(sys_file)
        scores = conll17_ud_eval.evaluate(gold_ud, sys_ud)
    return scores
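A rough sketch of how this might be called; the model name and paths are hypothetical, and only the 'LAS' key is taken from the example above:
from pathlib import Path
import spacy  # assumed here only to obtain an `nlp` pipeline

# hypothetical model name and file locations
nlp = spacy.load('en_core_web_sm')
scores = evaluate(nlp, text_loc=Path('ud-test.txt'), gold_loc=Path('ud-test.conllu'), sys_loc=Path('system-output.conllu'))
print('LAS F1:', scores['LAS'].f1)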
Code example #3
File: run_eval.py Project: monasaad/CAPEsFinal
def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
    """" Fetch the txt files for all treebanks for a given set of languages """
    all_treebanks = dict()
    treebank_size = dict()
    for l in languages:
        all_treebanks[l] = []
        treebank_size[l] = 0

    for treebank_dir in ud_dir.iterdir():
        if treebank_dir.is_dir():
            for txt_path in treebank_dir.iterdir():
                if txt_path.name.endswith('-ud-' + corpus + '.txt'):
                    file_lang = txt_path.name.split('_')[0]
                    if file_lang in languages:
                        gold_path = treebank_dir / txt_path.name.replace('.txt', '.conllu')
                        stats_xml = treebank_dir / "stats.xml"
                        # ignore treebanks where the texts are not publicly available
                        if not _contains_blinded_text(stats_xml):
                            if not best_per_language:
                                all_treebanks[file_lang].append(txt_path)
                            # check the tokens in the gold annotation to keep only the biggest treebank per language
                            else:
                                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                                    gold_ud = conll17_ud_eval.load_conllu(gold_file)
                                    gold_tokens = len(gold_ud.tokens)
                                if treebank_size[file_lang] < gold_tokens:
                                    all_treebanks[file_lang] = [txt_path]
                                    treebank_size[file_lang] = gold_tokens

    return all_treebanks
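A hypothetical invocation, assuming a local copy of the Universal Dependencies treebanks; the directory name and language codes are illustrative only:
from pathlib import Path

# keep only the largest 'test' treebank per language (hypothetical UD directory)
treebanks = fetch_all_treebanks(Path('ud-treebanks-v2.3'), languages=['en', 'de'], corpus='test', best_per_language=True)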
Code example #4
File: run_eval.py Project: monasaad/CAPEsFinal
def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
    """" Run an evaluation for each language with its specified models and treebanks """
    print_header = True

    for tb_lang, treebank_list in treebanks.items():
        print()
        print("Language", tb_lang)
        for text_path in treebank_list:
            print(" Evaluating on", text_path)

            gold_path = text_path.parent / (text_path.stem + '.conllu')
            print("  Gold data from ", gold_path)

            # nested try blocks to ensure the code can continue with the next iteration after a failure
            try:
                with gold_path.open(mode='r', encoding='utf-8') as gold_file:
                    gold_ud = conll17_ud_eval.load_conllu(gold_file)

                for nlp, nlp_loading_time, nlp_name in models[tb_lang]:
                    try:
                        print("   Benchmarking", nlp_name)
                        tmp_output_path = text_path.parent / str('tmp_' + nlp_name + '.conllu')
                        run_single_eval(nlp, nlp_loading_time, nlp_name, text_path, gold_ud, tmp_output_path, out_file,
                                        print_header, check_parse, print_freq_tasks)
                        print_header = False
                    except Exception as e:
                        print("    Ran into trouble: ", str(e))
            except Exception as e:
                print("   Ran into trouble: ", str(e))
Code example #5
def benchmark_model(model_name, test_data_path, ner_test_data):
    with open(test_data_path) as f:
        data = conllu.parse(f.read())
        text = " ".join(d.metadata["text"] for d in data)

    load_model = getattr(importlib.import_module(model_name), "load")
    nlp = load_model()

    _parsed = StringIO(format_as_conllu(nlp(text), 1))
    parsed = conll17_ud_eval.load_conllu(_parsed)
    gold = conll17_ud_eval.load_conllu_file(test_data_path)

    results = pd.DataFrame({
        k: v.__dict__
        for k, v in conll17_ud_eval.evaluate(gold, parsed).items()
    }).T

    print(results)

    diterator = DataIterator()
    test_sents = list(
        itertools.islice(diterator.tagged_sentences(ner_test_data), None))
    scorer = Scorer()
    for sentence, annot in test_sents:
        doc_gold_text = nlp.make_doc(sentence)
        gold = GoldParse(doc_gold_text, entities=annot)
        predicted = nlp(sentence)
        scorer.score(predicted, gold)

    print(scorer.scores)
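A hypothetical call; the model is loaded via importlib, so the first argument must be an importable package exposing a load() function, and the file names are illustrative only:
# hypothetical package and data files
benchmark_model('my_spacy_model', 'ud-test.conllu', 'ner-test.txt')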
Code example #6
File: ud_train.py Project: ritchieleeann/spaCy
def evaluate(nlp, text_loc, gold_loc, sys_loc, limit=None):
    if text_loc.parts[-1].endswith(".conllu"):
        docs = []
        with text_loc.open() as file_:
            for conllu_doc in read_conllu(file_):
                for conllu_sent in conllu_doc:
                    words = [line[1] for line in conllu_sent]
                    docs.append(Doc(nlp.vocab, words=words))
        for name, component in nlp.pipeline:
            docs = list(component.pipe(docs))
    else:
        with text_loc.open("r", encoding="utf8") as text_file:
            texts = split_text(text_file.read())
            docs = list(nlp.pipe(texts))
    with sys_loc.open("w", encoding="utf8") as out_file:
        write_conllu(docs, out_file)
    with gold_loc.open("r", encoding="utf8") as gold_file:
        gold_ud = conll17_ud_eval.load_conllu(gold_file)
        with sys_loc.open("r", encoding="utf8") as sys_file:
            sys_ud = conll17_ud_eval.load_conllu(sys_file)
        scores = conll17_ud_eval.evaluate(gold_ud, sys_ud)
    return docs, scores
Code example #7
def benchmark_model(model_name, test_data_path):
    with open(test_data_path) as f:
        data = conllu.parse(f.read())
        text = " ".join(d.metadata["text"] for d in data)

    load_model = getattr(importlib.import_module(model_name), "load")
    nlp = load_model()

    _parsed = StringIO(format_as_conllu(nlp(text), 1))
    parsed = conll17_ud_eval.load_conllu(_parsed)
    gold = conll17_ud_eval.load_conllu_file(test_data_path)

    results = pd.DataFrame({
        k: v.__dict__
        for k, v in conll17_ud_eval.evaluate(gold, parsed).items()
    }).T

    print(results)
Code example #8
File: run_eval.py Project: monasaad/CAPEsFinal
def run_single_eval(nlp, loading_time, print_name, text_path, gold_ud, tmp_output_path, out_file, print_header,
                    check_parse, print_freq_tasks):
    """" Run an evaluation of a model nlp on a certain specified treebank """
    with text_path.open(mode='r', encoding='utf-8') as f:
        flat_text = f.read()

    # STEP 1: tokenize text
    tokenization_start = time.time()
    texts = split_text(flat_text)
    docs = list(nlp.pipe(texts))
    tokenization_end = time.time()
    tokenization_time = tokenization_end - tokenization_start

    # STEP 2: record stats and timings
    tokens_per_s = int(len(gold_ud.tokens) / tokenization_time)

    print_header_1 = ['date', 'text_path', 'gold_tokens', 'model', 'loading_time', 'tokenization_time', 'tokens_per_s']
    print_string_1 = [str(datetime.date.today()), text_path.name, len(gold_ud.tokens),
                      print_name, "%.2f" % loading_time, "%.2f" % tokenization_time, tokens_per_s]

    # STEP 3: evaluate predicted tokens and features
    with tmp_output_path.open(mode="w", encoding="utf8") as tmp_out_file:
        write_conllu(docs, tmp_out_file)
    with tmp_output_path.open(mode="r", encoding="utf8") as sys_file:
        sys_ud = conll17_ud_eval.load_conllu(sys_file, check_parse=check_parse)
    tmp_output_path.unlink()
    scores = conll17_ud_eval.evaluate(gold_ud, sys_ud, check_parse=check_parse)

    # STEP 4: format the scoring results
    eval_headers = EVAL_PARSE
    if not check_parse:
        eval_headers = EVAL_NO_PARSE

    for score_name in eval_headers:
        score = scores[score_name]
        print_string_1.extend(["%.2f" % score.precision,
                               "%.2f" % score.recall,
                               "%.2f" % score.f1])
        print_string_1.append("-" if score.aligned_accuracy is None else "%.2f" % score.aligned_accuracy)
        print_string_1.append("-" if score.undersegmented is None else "%.4f" % score.under_perc)
        print_string_1.append("-" if score.oversegmented is None else "%.4f" % score.over_perc)

        print_header_1.extend([score_name + '_p', score_name + '_r', score_name + '_F', score_name + '_acc',
                               score_name + '_under', score_name + '_over'])

        if score_name in print_freq_tasks:
            print_header_1.extend([score_name + '_word_under_ex', score_name + '_shape_under_ex',
                                   score_name + '_word_over_ex', score_name + '_shape_over_ex'])

            d_under_words = get_freq_tuples(score.undersegmented, PRINT_TOTAL)
            d_under_shapes = get_freq_tuples([word_shape(x) for x in score.undersegmented], PRINT_TOTAL)
            d_over_words = get_freq_tuples(score.oversegmented, PRINT_TOTAL)
            d_over_shapes = get_freq_tuples([word_shape(x) for x in score.oversegmented], PRINT_TOTAL)

            # the CSV uses ';' as separator, so mask any ';' occurring in the example output
            print_string_1.append(
                str({k: v for k, v in d_under_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_under_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
            print_string_1.append(
                str({k: v for k, v in d_over_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))

    # STEP 5: print the formatted results to CSV
    if print_header:
        out_file.write(';'.join(map(str, print_header_1)) + '\n')
    out_file.write(';'.join(map(str, print_string_1)) + '\n')