def test_eval_finder(document_store: BaseDocumentStore, reader):
    retriever = ElasticsearchRetriever(document_store=document_store)
    finder = Finder(reader=reader, retriever=retriever)

    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json", doc_index="test_eval_document", label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2

    # eval finder
    results = finder.eval(label_index="test_feedback", doc_index="test_eval_document", top_k_retriever=1, top_k_reader=5)
    assert results["retriever_recall"] == 1.0
    assert results["retriever_map"] == 1.0
    assert abs(results["reader_topk_f1"] - 0.66666) < 0.001
    assert abs(results["reader_topk_em"] - 0.5) < 0.001
    assert abs(results["reader_topk_accuracy"] - 1) < 0.001
    assert results["reader_top1_f1"] <= results["reader_topk_f1"]
    assert results["reader_top1_em"] <= results["reader_topk_em"]
    assert results["reader_top1_accuracy"] <= results["reader_topk_accuracy"]

    # batch eval finder
    results_batch = finder.eval_batch(label_index="test_feedback", doc_index="test_eval_document", top_k_retriever=1, top_k_reader=5)
    assert results_batch["retriever_recall"] == 1.0
    assert results_batch["retriever_map"] == 1.0
    assert results_batch["reader_top1_f1"] == results["reader_top1_f1"]
    assert results_batch["reader_top1_em"] == results["reader_top1_em"]
    assert results_batch["reader_topk_accuracy"] == results["reader_topk_accuracy"]

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
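# A minimal sketch of the pytest fixtures the test above assumes (the real project defines
# them in its conftest.py; the parameters shown here are illustrative, not the project's
# actual configuration). "document_store" yields an ElasticsearchDocumentStore pointed at a
# local instance and "reader" returns a FARMReader with the same model used in the snippets below.
import pytest

@pytest.fixture()
def document_store():
    document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document", create_index=False)
    yield document_store

@pytest.fixture()
def reader():
    return FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)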
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document", create_index=False) # Add evaluation data to Elasticsearch database if LAUNCH_ELASTICSEARCH: document_store.add_eval_data("../data/nq/nq_dev_subset.json") else: logger.warning("Since we already have a running ES instance we should not index the same documents again." "If you still want to do this call: 'document_store.add_eval_data('../data/nq/nq_dev_subset.json')' manually ") # Initialize Retriever retriever = ElasticsearchRetriever(document_store=document_store) # Initialize Reader reader = FARMReader("deepset/roberta-base-squad2") # Initialize Finder which sticks together Reader and Retriever finder = Finder(reader, retriever) ## Evaluate Retriever on its own if eval_retriever_only: retriever_eval_results = retriever.eval() ## Retriever Recall is the proportion of questions for which the correct document containing the answer is ## among the correct documents print("Retriever Recall:", retriever_eval_results["recall"]) ## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank print("Retriever Mean Avg Precision:", retriever_eval_results["map"]) # Evaluate Reader on its own if eval_reader_only: reader_eval_results = reader.eval(document_store=document_store, device=device) # Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
# Alternative: Evaluate DensePassageRetriever
# Note that DPR works best when you index short passages < 512 tokens, as only those tokens will be used for the embedding.
# Here, for nq_dev_subset_v2.json we have avg. num of tokens = 5220(!).
# DPR still outperforms Elastic's BM25 by a small margin here.
# from haystack.retriever.dense import DensePassageRetriever
# retriever = DensePassageRetriever(document_store=document_store, embedding_model="dpr-bert-base-nq", batch_size=32)
# document_store.update_embeddings(retriever, index="eval_document")

# Initialize Reader
reader = FARMReader("deepset/roberta-base-squad2")

# Initialize Finder which sticks together Reader and Retriever
finder = Finder(reader, retriever)

## Evaluate Retriever on its own
if eval_retriever_only:
    retriever_eval_results = retriever.eval(top_k=1, label_index=label_index, doc_index=doc_index)
    ## Retriever Recall is the proportion of questions for which the correct document containing the answer is
    ## among the retrieved documents
    print("Retriever Recall:", retriever_eval_results["recall"])
    ## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank
    print("Retriever Mean Avg Precision:", retriever_eval_results["map"])

# Evaluate Reader on its own
if eval_reader_only:
    reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)

    # Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
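# A sketch of evaluating Retriever and Reader together through the Finder. The "eval_both"
# flag is assumed here by analogy with the eval_retriever_only/eval_reader_only flags above;
# the metric keys mirror those asserted in the test at the top of this section.
if eval_both:
    finder_eval_results = finder.eval(label_index=label_index, doc_index=doc_index, top_k_retriever=10, top_k_reader=5)
    print("Retriever Recall:", finder_eval_results["retriever_recall"])
    print("Retriever Mean Avg Precision:", finder_eval_results["retriever_map"])
    print("Reader Top-k F1:", finder_eval_results["reader_topk_f1"])
    print("Reader Top-k Exact Match:", finder_eval_results["reader_topk_em"])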
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document", create_index=False) # Add evaluation data to Elasticsearch database document_store.add_eval_data("../data/nq/nq_dev_subset.json") # Initialize Retriever retriever = ElasticsearchRetriever(document_store=document_store) # Initialize Reader reader = FARMReader("deepset/roberta-base-squad2") # Initialize Finder which sticks together Reader and Retriever finder = Finder(reader, retriever) # Evaluate Retriever on its own retriever_eval_results = retriever.eval() ## Retriever Recall is the proportion of questions for which the correct document containing the answer is ## among the correct documents print("Retriever Recall:", retriever_eval_results["recall"]) ## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank print("Retriever Mean Avg Precision:", retriever_eval_results["map"]) # Evaluate Reader on its own reader_eval_results = reader.eval(document_store=document_store, device=device) # Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch #reader_eval_results = reader.eval_on_file("../data/natural_questions", "dev_subset.json", device=device) ## Reader Top-N-Recall is the proportion of predicted answers that overlap with their corresponding correct answer