Example No. 1
def test_eval_finder(document_store: BaseDocumentStore, reader):
    retriever = ElasticsearchRetriever(document_store=document_store)
    finder = Finder(reader=reader, retriever=retriever)

    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json", doc_index="test_eval_document", label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2

    # eval finder
    results = finder.eval(label_index="test_feedback", doc_index="test_eval_document", top_k_retriever=1, top_k_reader=5)
    assert results["retriever_recall"] == 1.0
    assert results["retriever_map"] == 1.0
    assert abs(results["reader_topk_f1"] - 0.66666) < 0.001
    assert abs(results["reader_topk_em"] - 0.5) < 0.001
    assert abs(results["reader_topk_accuracy"] - 1) < 0.001
    assert results["reader_top1_f1"] <= results["reader_topk_f1"]
    assert results["reader_top1_em"] <= results["reader_topk_em"]
    assert results["reader_top1_accuracy"] <= results["reader_topk_accuracy"]

    # batch eval finder
    results_batch = finder.eval_batch(label_index="test_feedback", doc_index="test_eval_document",
                                      top_k_retriever=1, top_k_reader=5)
    assert results_batch["retriever_recall"] == 1.0
    assert results_batch["retriever_map"] == 1.0
    assert results_batch["reader_top1_f1"] == results["reader_top1_f1"]
    assert results_batch["reader_top1_em"] == results["reader_top1_em"]
    assert results_batch["reader_topk_accuracy"] == results["reader_topk_accuracy"]

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
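
These tests take document_store and reader as pytest fixtures defined elsewhere in the suite. Below is a minimal sketch of what such fixtures might look like, assuming a local Elasticsearch instance and Haystack's ElasticsearchDocumentStore and FARMReader; the import paths and the model checkpoint are assumptions and may differ between Haystack versions.

import pytest

from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.reader.farm import FARMReader


@pytest.fixture()
def document_store():
    # Assumption: Elasticsearch is reachable on localhost with default credentials.
    return ElasticsearchDocumentStore(host="localhost", index="test_eval_document")


@pytest.fixture()
def reader():
    # Any extractive QA model works here; this checkpoint is only illustrative.
    return FARMReader(model_name_or_path="deepset/roberta-base-squad2", top_k_per_candidate=4)
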
Example No. 2
def test_eval_reader(reader, document_store: BaseDocumentStore):
    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json", doc_index="test_eval_document", label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2
    # eval reader
    reader_eval_results = reader.eval(document_store=document_store, label_index="test_feedback",
                                      doc_index="test_eval_document", device="cpu")
    assert reader_eval_results["f1"] > 0.65
    assert reader_eval_results["f1"] < 0.67
    assert reader_eval_results["EM"] == 0.5
    assert reader_eval_results["top_n_accuracy"] == 1.0

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
Example No. 3
def test_eval_elastic_retriever(document_store: BaseDocumentStore, open_domain):
    retriever = ElasticsearchRetriever(document_store=document_store)

    # add eval data (SQUAD format)
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
    document_store.add_eval_data(filename="samples/squad/tiny.json", doc_index="test_eval_document", label_index="test_feedback")
    assert document_store.get_document_count(index="test_eval_document") == 2

    # eval retriever
    results = retriever.eval(top_k=1, label_index="test_feedback", doc_index="test_eval_document", open_domain=open_domain)
    assert results["recall"] == 1.0
    assert results["map"] == 1.0

    # clean up
    document_store.delete_all_documents(index="test_eval_document")
    document_store.delete_all_documents(index="test_feedback")
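
The open_domain flag controls how the retriever is scored: with open_domain=True a retrieved document counts as a hit if it contains the answer string, while with open_domain=False it must be the exact document the label points to. In a test suite it would typically come from a parametrized fixture; a minimal sketch follows (the fixture setup is an assumption, not part of the original listing).

import pytest


@pytest.fixture(params=[True, False])
def open_domain(request):
    # Run the retriever eval once per matching mode.
    return request.param
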
Example No. 4
    def eval(
        self,
        document_store: BaseDocumentStore,
        device: str,
        label_index: str = "label",
        doc_index: str = "eval_document",
        label_origin: str = "gold_label",
    ):
        """
        Performs evaluation on evaluation documents in the DocumentStore.

        Returns a dict containing the following metrics:
            - "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
            - "f1": Average overlap between predicted answers and their corresponding correct answers
            - "top_n_accuracy": Proportion of predicted answers that match with correct answer

        :param document_store: DocumentStore containing the evaluation documents
        :param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
        :param label_index: Index/Table name where labeled questions are stored
        :param doc_index: Index/Table name where documents that are used for evaluation are stored
        :param label_origin: Origin of the labels used for evaluation (only labels whose "origin" field matches this value are used), e.g. "gold_label"
        """

        if self.top_k_per_candidate != 4:
            logger.info(
                f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
                f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
                f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
            )

        # extract all questions for evaluation
        filters = {"origin": [label_origin]}

        labels = document_store.get_all_labels(index=label_index,
                                               filters=filters)

        # Aggregate all answer labels per question
        aggregated_per_doc = defaultdict(list)
        for label in labels:
            if not label.document_id:
                logger.error(f"Label does not contain a document_id")
                continue
            aggregated_per_doc[label.document_id].append(label)

        # Create squad style dicts
        d: Dict[str, Any] = {}
        all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
        for doc_id in all_doc_ids:
            doc = document_store.get_document_by_id(doc_id, index=doc_index)
            if not doc:
                logger.error(
                    f"Document with the ID '{doc_id}' is not present in the document store."
                )
                continue
            d[str(doc_id)] = {"context": doc.text}
            # get all questions / answers
            aggregated_per_question: Dict[str, Any] = defaultdict(list)
            if doc_id in aggregated_per_doc:
                for label in aggregated_per_doc[doc_id]:
                    # add to existing answers
                    if label.question in aggregated_per_question.keys():
                        # Hack: doc_store processing can merge duplicate questions, creating a QA example with 8 annotations, above the 6-annotation maximum
                        if len(aggregated_per_question[label.question]["answers"]) >= 6:
                            continue
                        aggregated_per_question[label.question]["answers"].append(
                            {"text": label.answer, "answer_start": label.offset_start_in_doc}
                        )
                    # create new one
                    else:
                        aggregated_per_question[label.question] = {
                            "id": str(hash(str(doc_id) + label.question)),
                            "question": label.question,
                            "answers": [{"text": label.answer, "answer_start": label.offset_start_in_doc}],
                        }
            # Get rid of the question key again (after we aggregated we don't need it anymore)
            d[str(doc_id)]["qas"] = [
                v for v in aggregated_per_question.values()
            ]
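        # Illustrative shape of `d` after this loop (one entry per evaluation document; values are placeholders):
        # {
        #     "<doc_id>": {
        #         "context": "<full document text>",
        #         "qas": [
        #             {"id": "...", "question": "...", "answers": [{"text": "...", "answer_start": 0}]},
        #         ],
        #     },
        # }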

        # Convert input format for FARM
        farm_input = [v for v in d.values()]

        # Create DataLoader that can be passed to the Evaluator
        indices = range(len(farm_input))
        dataset, tensor_names = self.inferencer.processor.dataset_from_dicts(
            farm_input, indices=indices)
        data_loader = NamedDataLoader(dataset=dataset,
                                      batch_size=self.inferencer.batch_size,
                                      tensor_names=tensor_names)

        evaluator = Evaluator(data_loader=data_loader,
                              tasks=self.inferencer.processor.tasks,
                              device=device)

        eval_results = evaluator.eval(self.inferencer.model)
        results = {
            "EM": eval_results[0]["EM"],
            "f1": eval_results[0]["f1"],
            "top_n_accuracy": eval_results[0]["top_n_accuracy"]
        }
        return results
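
A minimal usage sketch of this method, mirroring the index names used in the tests above; the reader and document_store objects and the printed summary are illustrative, not part of the original snippet.

metrics = reader.eval(
    document_store=document_store,
    device="cpu",
    label_index="test_feedback",
    doc_index="test_eval_document",
    label_origin="gold_label",
)
print(f"EM: {metrics['EM']:.3f} | F1: {metrics['f1']:.3f} | top-n accuracy: {metrics['top_n_accuracy']:.3f}")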