Example 1
# Imports assumed for the old, Finder-era Haystack API; exact module paths vary by version
from haystack import Finder
from haystack.document_store.base import BaseDocumentStore
from haystack.retriever.sparse import ElasticsearchRetriever


def test_eval_finder(document_store: BaseDocumentStore, reader):
    retriever = ElasticsearchRetriever(document_store=document_store)
    finder = Finder(reader=reader, retriever=retriever)

    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json", doc_index="test_eval_document", label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2

    # eval finder
    results = finder.eval(label_index="test_feedback", doc_index="test_eval_document", top_k_retriever=1, top_k_reader=5)
    assert results["retriever_recall"] == 1.0
    assert results["retriever_map"] == 1.0
    assert abs(results["reader_topk_f1"] - 0.66666) < 0.001
    assert abs(results["reader_topk_em"] - 0.5) < 0.001
    assert abs(results["reader_topk_accuracy"] - 1) < 0.001
    assert results["reader_top1_f1"] <= results["reader_topk_f1"]
    assert results["reader_top1_em"] <= results["reader_topk_em"]
    assert results["reader_top1_accuracy"] <= results["reader_topk_accuracy"]

    # batch eval finder
    results_batch = finder.eval_batch(label_index="test_feedback", doc_index="test_eval_document", top_k_retriever=1, top_k_reader=5)
    assert results_batch["retriever_recall"] == 1.0
    assert results_batch["retriever_map"] == 1.0
    assert results_batch["reader_top1_f1"] == results["reader_top1_f1"]
    assert results_batch["reader_top1_em"] == results["reader_top1_em"]
    assert results_batch["reader_topk_accuracy"] == results["reader_topk_accuracy"]

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
Example 2
finder = Finder(reader, retriever)


## Evaluate Retriever on its own
if eval_retriever_only:
    retriever_eval_results = retriever.eval()
    ## Retriever Recall is the proportion of questions for which the correct document containing the answer is
    ## among the retrieved documents
    print("Retriever Recall:", retriever_eval_results["recall"])
    ## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank
    print("Retriever Mean Avg Precision:", retriever_eval_results["map"])

# Evaluate Reader on its own
if eval_reader_only:
    reader_eval_results = reader.eval(document_store=document_store, device=device)
    # Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
    #reader_eval_results = reader.eval_on_file("../data/natural_questions", "dev_subset.json", device=device)

    ## Reader Top-N-Recall is the proportion of questions for which at least one of the top N predicted answers overlaps with the correct answer
    print("Reader Top-N-Recall:", reader_eval_results["top_n_recall"])
    ## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer
    print("Reader Exact Match:", reader_eval_results["EM"])
    ## Reader F1-Score is the average overlap between the predicted answers and the correct answers
    print("Reader F1-Score:", reader_eval_results["f1"])


# Evaluate combination of Reader and Retriever through Finder
if eval_both:
    finder_eval_results = finder.eval(top_k_retriever=10, top_k_reader=10)
    finder.print_eval_results(finder_eval_results)
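
The "Exact Match" and "F1-Score" printed above are standard SQuAD-style answer metrics. As a rough illustration only (not Haystack's actual implementation, which also normalizes articles and punctuation), EM checks whether prediction and gold answer are identical, while F1 measures their token overlap:

def exact_match(prediction: str, truth: str) -> float:
    # 1.0 if the (case-insensitive) strings are identical, else 0.0
    return float(prediction.strip().lower() == truth.strip().lower())


def f1_score(prediction: str, truth: str) -> float:
    # Token-level F1: harmonic mean of precision and recall over shared tokens
    pred_tokens = prediction.lower().split()
    truth_tokens = truth.lower().split()
    common = set(pred_tokens) & set(truth_tokens)
    num_same = sum(min(pred_tokens.count(t), truth_tokens.count(t)) for t in common)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(truth_tokens)
    return 2 * precision * recall / (precision + recall)


print(exact_match("the Eiffel Tower", "Eiffel Tower"))         # 0.0
print(round(f1_score("the Eiffel Tower", "Eiffel Tower"), 2))  # 0.8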
Example 3
finder = Finder(reader, retriever)


## Evaluate Retriever on its own
if eval_retriever_only:
    retriever_eval_results = retriever.eval(top_k=1, label_index=label_index, doc_index=doc_index)
    ## Retriever Recall is the proportion of questions for which the correct document containing the answer is
    ## among the retrieved documents
    print("Retriever Recall:", retriever_eval_results["recall"])
    ## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank
    print("Retriever Mean Avg Precision:", retriever_eval_results["map"])

# Evaluate Reader on its own
if eval_reader_only:
    reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)
    # Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
    #reader_eval_results = reader.eval_on_file("../data/nq", "nq_dev_subset_v2.json", device=device)

    ## Reader Top-N-Accuracy is the proportion of questions for which at least one of the top N predicted answers matches the correct answer
    print("Reader Top-N-Accuracy:", reader_eval_results["top_n_accuracy"])
    ## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer
    print("Reader Exact Match:", reader_eval_results["EM"])
    ## Reader F1-Score is the average overlap between the predicted answers and the correct answers
    print("Reader F1-Score:", reader_eval_results["f1"])


# Evaluate combination of Reader and Retriever through Finder
if eval_both:
    finder_eval_results = finder.eval(top_k_retriever=1, top_k_reader=10, label_index=label_index, doc_index=doc_index)
    finder.print_eval_results(finder_eval_results)
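
This variant assumes that the doc_index and label_index used above have already been filled with evaluation data. A sketch of the usual setup step, mirroring the add_eval_data call shown in Example 1 (the index names and file path are placeholders):

doc_index = "eval_document"
label_index = "label"

# Make sure both indices are empty, then load a SQuAD-formatted eval set:
# documents go into doc_index, question/answer labels into label_index
document_store.delete_all_documents(index=doc_index)
document_store.delete_all_documents(index=label_index)
document_store.add_eval_data(filename="../data/nq/nq_dev_subset_v2.json", doc_index=doc_index, label_index=label_index)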
Example 4
print("Retriever Mean Avg Precision:", retriever_eval_results["map"])

# Evaluate Reader on its own
reader_eval_results = reader.eval(document_store=document_store, device=device)
# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
#reader_eval_results = reader.eval_on_file("../data/natural_questions", "dev_subset.json", device=device)

## Reader Top-N-Recall is the proportion of questions for which at least one of the top N predicted answers overlaps with the correct answer
print("Reader Top-N-Recall:", reader_eval_results["top_n_recall"])
## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer
print("Reader Exact Match:", reader_eval_results["EM"])
## Reader F1-Score is the average overlap between the predicted answers and the correct answers
print("Reader F1-Score:", reader_eval_results["f1"])

# Evaluate combination of Reader and Retriever through Finder
finder_eval_results = finder.eval()

print("\n___Retriever Metrics in Finder___")
print("Retriever Recall:", finder_eval_results["retriever_recall"])
print("Retriever Mean Avg Precision:", finder_eval_results["retriever_map"])

# Reader is only evaluated on those questions where the correct document is among the retrieved ones
print("\n___Reader Metrics in Finder___")
print("Reader Top-1 accuracy:", finder_eval_results["reader_top1_accuracy"])
print("Reader Top-1 accuracy (has answer):",
      finder_eval_results["reader_top1_accuracy_has_answer"])
print("Reader Top-k accuracy:", finder_eval_results["reader_top_k_accuracy"])
print("Reader Top-k accuracy (has answer):",
      finder_eval_results["reader_topk_accuracy_has_answer"])
print("Reader Top-1 EM:", finder_eval_results["reader_top1_em"])
print("Reader Top-1 EM (has answer):",