def test_eval_pipeline(document_store: BaseDocumentStore, reader, retriever):
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )

    labels = document_store.get_all_labels_aggregated(
        index="haystack_test_feedback")
    q_to_l_dict = {l.question: {"retriever": l, "reader": l} for l in labels}

    eval_retriever = EvalRetriever()
    eval_reader = EvalReader()

    assert document_store.get_document_count(
        index="haystack_test_eval_document") == 2
    p = Pipeline()
    p.add_node(component=retriever, name="ESRetriever", inputs=["Query"])
    p.add_node(component=eval_retriever,
               name="EvalRetriever",
               inputs=["ESRetriever"])
    p.add_node(component=reader, name="QAReader", inputs=["EvalRetriever"])
    p.add_node(component=eval_reader, name="EvalReader", inputs=["QAReader"])
    for q, l in q_to_l_dict.items():
        res = p.run(
            query=q,
            top_k_retriever=10,
            labels=l,
            top_k_reader=10,
            index="haystack_test_eval_document",
        )
    assert eval_retriever.recall == 1.0
    assert round(eval_reader.top_k_f1, 4) == 0.8333
    assert eval_reader.top_k_em == 0.5
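
The snippets on this page come from the Haystack test suite and rely on pytest fixtures (document_store, reader, retriever) and imports that are not shown. Below is a minimal sketch of what they assume; the import paths follow the Haystack 0.x layout and, together with the fixture bodies and the model checkpoint, are assumptions that may need adjusting for your installed version.

import pytest

from haystack.document_store.base import BaseDocumentStore
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.eval import EvalAnswers, EvalDocuments  # older releases expose EvalRetriever / EvalReader instead
from haystack.finder import Finder
from haystack.pipeline import Pipeline
from haystack.reader.farm import FARMReader
from haystack.retriever.sparse import ElasticsearchRetriever


@pytest.fixture
def document_store():
    # Hypothetical fixture: assumes an Elasticsearch instance running on localhost:9200.
    return ElasticsearchDocumentStore(index="haystack_test_eval_document")


@pytest.fixture
def reader():
    # Any extractive QA checkpoint works; this one is only an example choice.
    return FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)


@pytest.fixture
def retriever(document_store):
    return ElasticsearchRetriever(document_store=document_store)
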
Example 2
def test_eval_finder(document_store: BaseDocumentStore, reader, retriever):
    finder = Finder(reader=reader, retriever=retriever)

    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2

    # eval finder
    results = finder.eval(
        label_index="haystack_test_feedback", doc_index="haystack_test_eval_document", top_k_retriever=1, top_k_reader=5
    )
    assert results["retriever_recall"] == 1.0
    assert results["retriever_map"] == 1.0
    assert abs(results["reader_topk_f1"] - 0.66666) < 0.001
    assert abs(results["reader_topk_em"] - 0.5) < 0.001
    assert abs(results["reader_topk_accuracy"] - 1) < 0.001
    assert results["reader_top1_f1"] <= results["reader_topk_f1"]
    assert results["reader_top1_em"] <= results["reader_topk_em"]
    assert results["reader_top1_accuracy"] <= results["reader_topk_accuracy"]

    # batch eval finder
    results_batch = finder.eval_batch(
        label_index="haystack_test_feedback", doc_index="haystack_test_eval_document", top_k_retriever=1, top_k_reader=5
    )
    assert results_batch["retriever_recall"] == 1.0
    assert results_batch["retriever_map"] == 1.0
    assert results_batch["reader_top1_f1"] == results["reader_top1_f1"]
    assert results_batch["reader_top1_em"] == results["reader_top1_em"]
    assert results_batch["reader_topk_accuracy"] == results["reader_topk_accuracy"]
Example 3
def test_eval_elastic_retriever(document_store: BaseDocumentStore, open_domain, retriever):
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2

    # eval retriever
    results = retriever.eval(
        top_k=1, label_index="haystack_test_feedback", doc_index="haystack_test_eval_document", open_domain=open_domain
    )
    assert results["recall"] == 1.0
    assert results["mrr"] == 1.0
    if not open_domain:
        assert results["map"] == 1.0
Example 4
def test_eval_reader(reader, document_store: BaseDocumentStore):
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2
    # eval reader
    reader_eval_results = reader.eval(
        document_store=document_store,
        label_index="haystack_test_feedback",
        doc_index="haystack_test_eval_document",
        device="cpu",
    )
    assert reader_eval_results["f1"] > 66.65
    assert reader_eval_results["f1"] < 66.67
    assert reader_eval_results["EM"] == 50
    assert reader_eval_results["top_n_accuracy"] == 100.0
Example 5
def test_eval_pipeline(document_store: BaseDocumentStore, reader, retriever):
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )

    labels = document_store.get_all_labels_aggregated(index="haystack_test_feedback")

    eval_retriever = EvalDocuments()
    eval_reader = EvalAnswers(sas_model="sentence-transformers/paraphrase-MiniLM-L3-v2", debug=True)
    eval_reader_cross = EvalAnswers(sas_model="cross-encoder/stsb-TinyBERT-L-4", debug=True)
    eval_reader_vanilla = EvalAnswers()

    assert document_store.get_document_count(index="haystack_test_eval_document") == 2
    p = Pipeline()
    p.add_node(component=retriever, name="ESRetriever", inputs=["Query"])
    p.add_node(component=eval_retriever, name="EvalDocuments", inputs=["ESRetriever"])
    p.add_node(component=reader, name="QAReader", inputs=["EvalDocuments"])
    p.add_node(component=eval_reader, name="EvalAnswers", inputs=["QAReader"])
    p.add_node(component=eval_reader_cross, name="EvalAnswers_cross", inputs=["QAReader"])
    p.add_node(component=eval_reader_vanilla, name="EvalAnswers_vanilla", inputs=["QAReader"])
    for l in labels:
        res = p.run(
            query=l.question,
            top_k_retriever=10,
            labels=l,
            top_k_reader=10,
            index="haystack_test_eval_document",
        )
    assert eval_retriever.recall == 1.0
    assert round(eval_reader.top_k_f1, 4) == 0.8333
    assert eval_reader.top_k_em == 0.5
    assert round(eval_reader.top_k_sas, 3) == 0.800
    assert round(eval_reader_cross.top_k_sas, 3) == 0.671
    assert eval_reader.top_k_em == eval_reader_vanilla.top_k_em
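
EvalAnswers with a sas_model additionally reports semantic answer similarity (SAS), which scores a predicted answer against the gold answer with a similarity model rather than string overlap. A rough sketch of the idea using sentence-transformers follows; it is an illustration, not Haystack's internal code, and assumes a recent sentence-transformers release.

from sentence_transformers import CrossEncoder, SentenceTransformer, util

prediction = "in the town of Paris"
gold_answer = "Paris"

# Bi-encoder: embed both texts separately and compare with cosine similarity.
bi_encoder = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L3-v2")
embeddings = bi_encoder.encode([prediction, gold_answer], convert_to_tensor=True)
sas_bi = util.cos_sim(embeddings[0], embeddings[1]).item()

# Cross-encoder: score the pair jointly, as with "cross-encoder/stsb-TinyBERT-L-4" above.
cross_encoder = CrossEncoder("cross-encoder/stsb-TinyBERT-L-4")
sas_cross = cross_encoder.predict([(prediction, gold_answer)])[0]

print(f"bi-encoder SAS: {sas_bi:.3f}, cross-encoder SAS: {sas_cross:.3f}")
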
Example 6
    def eval(
        self,
        document_store: BaseDocumentStore,
        device: str,
        label_index: str = "label",
        doc_index: str = "eval_document",
        label_origin: str = "gold_label",
    ):
        """
        Performs evaluation on evaluation documents in the DocumentStore.
        Returns a dict containing the following metrics:
              - "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
              - "f1": Average overlap between predicted answers and their corresponding correct answers
              - "top_n_accuracy": Proportion of predicted answers that overlap with correct answer

        :param document_store: DocumentStore containing the evaluation documents
        :param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
        :param label_index: Index/Table name where labeled questions are stored
        :param doc_index: Index/Table name where documents that are used for evaluation are stored
        :param label_origin: Only labels whose "origin" field matches this value are used (default: "gold_label")
        """

        if self.top_k_per_candidate != 4:
            logger.info(
                f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
                f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
                f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
            )

        # extract all questions for evaluation
        filters = {"origin": [label_origin]}

        labels = document_store.get_all_labels(index=label_index,
                                               filters=filters)

        # Aggregate all answer labels per question
        aggregated_per_doc = defaultdict(list)
        for label in labels:
            if not label.document_id:
                logger.error(f"Label does not contain a document_id")
                continue
            aggregated_per_doc[label.document_id].append(label)

        # Create squad style dicts
        d: Dict[str, Any] = {}
        all_doc_ids = [
            x.id for x in document_store.get_all_documents(doc_index)
        ]
        for doc_id in all_doc_ids:
            doc = document_store.get_document_by_id(doc_id, index=doc_index)
            if not doc:
                logger.error(
                    f"Document with the ID '{doc_id}' is not present in the document store."
                )
                continue
            d[str(doc_id)] = {"context": doc.text}
            # get all questions / answers
            aggregated_per_question: Dict[str, Any] = defaultdict(list)
            if doc_id in aggregated_per_doc:
                for label in aggregated_per_doc[doc_id]:
                    # add to existing answers
                    if label.question in aggregated_per_question:
                        # Hack to fix problem where duplicate questions are merged by doc_store
                        # processing, creating a QA example with 8 annotations > 6 annotation max
                        if len(aggregated_per_question[label.question]["answers"]) >= 6:
                            continue
                        aggregated_per_question[label.question]["answers"].append(
                            {"text": label.answer, "answer_start": label.offset_start_in_doc}
                        )
                    # create new one
                    else:
                        aggregated_per_question[label.question] = {
                            "id": str(hash(str(doc_id) + label.question)),
                            "question": label.question,
                            "answers": [{"text": label.answer, "answer_start": label.offset_start_in_doc}],
                        }
            # Get rid of the question key again (after we aggregated we don't need it anymore)
            d[str(doc_id)]["qas"] = [
                v for v in aggregated_per_question.values()
            ]

        # Convert input format for FARM
        farm_input = list(d.values())
        n_queries = sum(len(x["qas"]) for x in farm_input)

        # Create DataLoader that can be passed to the Evaluator
        tic = perf_counter()
        indices = range(len(farm_input))
        dataset, tensor_names = self.inferencer.processor.dataset_from_dicts(
            farm_input, indices=indices)
        data_loader = NamedDataLoader(dataset=dataset,
                                      batch_size=self.inferencer.batch_size,
                                      tensor_names=tensor_names)

        evaluator = Evaluator(data_loader=data_loader,
                              tasks=self.inferencer.processor.tasks,
                              device=device)

        eval_results = evaluator.eval(self.inferencer.model)
        toc = perf_counter()
        reader_time = toc - tic
        results = {
            "EM": eval_results[0]["EM"],
            "f1": eval_results[0]["f1"],
            "top_n_accuracy": eval_results[0]["top_n_accuracy"],
            "top_n": self.inferencer.model.prediction_heads[0].n_best,
            "reader_time": reader_time,
            "seconds_per_query": reader_time / n_queries
        }
        return results
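
For reference, the "EM" and "f1" numbers returned above follow the usual SQuAD definitions: exact match after answer normalization, and token-overlap F1. A minimal sketch of these two metrics for a single prediction/gold pair (illustrative, not the FARM Evaluator's code):

import re
import string
from collections import Counter


def normalize(text: str) -> str:
    # Lowercase, drop punctuation and articles, collapse whitespace (SQuAD-style normalization).
    text = text.lower()
    text = "".join(ch for ch in text if ch not in string.punctuation)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def exact_match(prediction: str, gold: str) -> float:
    return float(normalize(prediction) == normalize(gold))


def token_f1(prediction: str, gold: str) -> float:
    pred_tokens = normalize(prediction).split()
    gold_tokens = normalize(gold).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


assert exact_match("The Eiffel Tower", "Eiffel Tower") == 1.0
assert round(token_f1("in the city of Paris", "Paris"), 4) == 0.4
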
Example 7
def test_eval_reader(reader, document_store: BaseDocumentStore):
    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json",
                                 doc_index="test_eval_document",
                                 label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2
    # eval reader
    reader_eval_results = reader.eval(document_store=document_store,
                                      label_index="test_feedback",
                                      doc_index="test_eval_document",
                                      device="cpu")
    assert reader_eval_results["f1"] > 0.65
    assert reader_eval_results["f1"] < 0.67
    assert reader_eval_results["EM"] == 0.5
    assert reader_eval_results["top_n_accuracy"] == 1.0

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
Example 8
def test_eval_elastic_retriever(document_store: BaseDocumentStore,
                                open_domain):
    retriever = ElasticsearchRetriever(document_store=document_store)

    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json",
                                 doc_index="test_eval_document",
                                 label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2

    # eval retriever
    results = retriever.eval(top_k=1,
                             label_index="test_feedback",
                             doc_index="test_eval_document",
                             open_domain=open_domain)
    assert results["recall"] == 1.0
    assert results["map"] == 1.0

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")