# Example #1
# Summary banner: precision/recall/F1 over the full dataset, top-15 keyphrases.
print("###    Obtained Scores    ###")
print("### (full dataset, top 15)###")
print("###")
for label, value in (("Precision", precision_top),
                     ("Recall   ", recall_top),
                     ("F1       ", f1_top)):
    print("### %s : %.4f" % (label, value))
print("###                       ###")

# SemEval-2017-only reporting: dump predictions in the annotator file format
# and score them with the task's official evaluation script, at three
# filtering levels (all predictions / POS-filtered / top 15).
if DATASET == Semeval2017:
    print("### SEMEVAL ANNOTATOR ###")
    print("###        All        ###")

    from eval import anno_generator

    # Write the unfiltered predictions as annotation files.
    anno_generator.write_anno("/tmp/simplernn", test_doc_str, obtained_words)
    # NOTE: rebinds the builtin name `eval` to the SemEval scorer module.
    from data.Semeval2017 import eval

    # NOTE(review): annotations were written to /tmp/simplernn but the scorer
    # reads /tmp/simplernn-all — presumably write_anno produces that suffixed
    # directory; confirm against eval.anno_generator.
    eval.calculateMeasures("data/Semeval2017/test",
                           "/tmp/simplernn-all",
                           remove_anno=["types"])

    print("###     Filtered      ###")
    # Score the POS-pattern-filtered predictions.
    anno_generator.write_anno("/tmp/simplernn-clean", test_doc_str,
                              clean_words)
    eval.calculateMeasures("data/Semeval2017/test",
                           "/tmp/simplernn-clean",
                           remove_anno=["types"])

    print("###      Top 15       ###")
    # NOTE(review): this call is truncated here — the remainder of the
    # argument list (and its calculateMeasures call) lies outside this chunk.
    anno_generator.write_anno("/tmp/simplernn-15", test_doc_str,
# Example #2
# Compute the Keras-style F1 and report all Keras metrics for the fixed dataset.
keras_f1 = keras_metrics.keras_f1(test_y, output)

print("###    Obtained Scores    ###")
print("###    (fixed dataset)    ###")
print("###")
for label, value in (("Precision", keras_precision),
                     ("Recall   ", keras_recall),
                     ("F1       ", keras_f1)):
    print("### %s : %.4f" % (label, value))
print("###                       ###")

# Keep only keyphrases matching valid POS patterns, then re-score.
clean_words = postprocessing.get_valid_patterns(obtained_words)

# NOTE(review): unlike the top-k scoring elsewhere in this script, these calls
# do not pass STEM_MODE — confirm whether stemming should apply here as well.
precision = metrics.precision(test_answer, clean_words)
recall = metrics.recall(test_answer, clean_words)
f1 = metrics.f1(precision, recall)

print("###    Obtained Scores    ###")
print("### (full dataset,        ###")
print("###  pos patterns filter) ###")
print("###")
for label, value in (("Precision", precision),
                     ("Recall   ", recall),
                     ("F1       ", f1)):
    print("### %s : %.4f" % (label, value))
print("###                       ###")

# SemEval 2017 only: dump the unfiltered predictions in annotator format and
# score them with the official evaluation script.
if DATASET == Semeval2017:
    from eval import anno_generator
    # NOTE: this import rebinds the builtin name `eval` to the scorer module.
    from data.Semeval2017 import eval

    anno_generator.write_anno("/tmp/simplernn", test_doc_str, obtained_words)
    eval.calculateMeasures("data/Semeval2017/test",
                           "/tmp/simplernn",
                           remove_anno=["types"])
# NOTE(review): this banner prints precision_top / recall_top / f1_top, but in
# the code visible here those names are only assigned *below* (by the top-15
# computation) — as ordered, this raises NameError unless an earlier top-10
# computation (not shown in this chunk) defines them. The header also says
# "top 10" while the following computation takes the top 15 — confirm against
# the full script.
print("###    Obtained Scores    ###")
print("### (full dataset, top 10)###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall    : %.4f" % recall_top)
print("### F1        : %.4f" % f1_top)
print("###                       ###")

# Score the 15 highest-ranked candidate keyphrases per document.
obtained_words_top = postprocessing.get_top_words(test_doc, output, 15)

precision_top = metrics.precision(test_answer, obtained_words_top, STEM_MODE)
recall_top = metrics.recall(test_answer, obtained_words_top, STEM_MODE)
f1_top = metrics.f1(precision_top, recall_top)

# Emit the whole banner as one write; the rendered stream is identical to
# printing each line separately.
banner = [
    "###    Obtained Scores    ###",
    "### (full dataset, top 15)###",
    "###",
    "### Precision : %.4f" % precision_top,
    "### Recall    : %.4f" % recall_top,
    "### F1        : %.4f" % f1_top,
    "###                       ###",
]
print("\n".join(banner))

# SemEval 2017 only: dump the POS-filtered keyphrases in annotator format and
# score them with the official evaluation script.
if DATASET == Semeval2017:
    from eval import anno_generator
    anno_generator.write_anno("/tmp/mergernn2", test_doc_str, clean_words)
    from data.Semeval2017 import eval
    # BUG FIX: the annotations are written to /tmp/mergernn2 above, but the
    # original code evaluated /tmp/simplernn (written earlier from the
    # *unfiltered* obtained_words), so the filtered results were never scored.
    # Point the evaluator at the directory that was just written.
    eval.calculateMeasures("data/Semeval2017/test",
                           "/tmp/mergernn2",
                           remove_anno=["types"])