Code Example #1
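# NOTE: this snippet uses names defined elsewhere in the original benchmark
# script (reader_models, reader_types, n_passages, data_dir, filename,
# doc_index, label_index, plus helpers such as get_document_store, get_reader,
# eval_data_from_file, and index_to_doc_store); it is not runnable on its own.
import pandas as pd
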
def benchmark_reader():
    reader_results = []
    doc_store = get_document_store("elasticsearch")
    docs, labels = eval_data_from_file(data_dir / filename)
    index_to_doc_store(doc_store, docs, None, labels)
    for reader_name in reader_models:
        for reader_type in reader_types:
            try:
                reader = get_reader(reader_name, reader_type)
                results = reader.eval(document_store=doc_store,
                                      doc_index=doc_index,
                                      label_index=label_index,
                                      device="cuda")
                # print(results)
                results["passages_per_second"] = n_passages / results[
                    "reader_time"]
                results["reader"] = reader_name
                results["error"] = ""
                reader_results.append(results)
            except Exception as e:
                results = {
                    'EM': 0.,
                    'f1': 0.,
                    'top_n_accuracy': 0.,
                    'top_n': 0,
                    'reader_time': 0.,
                    "passages_per_second": 0.,
                    "seconds_per_query": 0.,
                    'reader': reader_name,
                    "error": e
                }
                reader_results.append(results)
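            # Re-write the full results CSV after every run so partial results survive a crash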
            reader_df = pd.DataFrame.from_records(reader_results)
            reader_df.to_csv("reader_results.csv")
Code Example #2
File: retriever.py Project: shira07tech01/haystack
def prepare_data(data_dir, filename_gold, filename_negative, n_docs=None, n_queries=None, add_precomputed=False):
    """
    filename_gold points to a squad format file.
    filename_negative points to a csv file where the first column is doc_id and second is document text.
    If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document
    """

    gold_docs, labels = eval_data_from_file(data_dir / filename_gold)

    # Reduce number of docs
    gold_docs = gold_docs[:n_docs]

    # Remove labels whose gold docs have been removed
    doc_ids = [x.id for x in gold_docs]
    labels = [x for x in labels if x.document_id in doc_ids]

    # Filter labels down to n_queries
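    # NOTE: set() iteration order is arbitrary, so which queries survive the cut can vary between runs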
    selected_queries = list(set(f"{x.document_id} | {x.question}" for x in labels))
    selected_queries = selected_queries[:n_queries]
    labels = [x for x in labels if f"{x.document_id} | {x.question}" in selected_queries]

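    # NOTE: n_docs must be an int at this point; the default n_docs=None would raise a TypeError here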
    n_neg_docs = max(0, n_docs - len(gold_docs))
    neg_docs = prepare_negative_passages(data_dir, filename_negative, n_neg_docs)
    docs = gold_docs + neg_docs

    if add_precomputed:
        docs = add_precomputed_embeddings(data_dir / embeddings_dir, embeddings_filenames, docs)

    return docs, labels
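A minimal usage sketch for prepare_data; the directory and file names below are hypothetical placeholders, not values taken from the snippet:

from pathlib import Path

data_dir = Path("data/retriever")                        # hypothetical directory
docs, labels = prepare_data(data_dir,
                            filename_gold="gold.json",          # SQuAD-format file (hypothetical name)
                            filename_negative="negatives.csv",  # doc_id/text columns (hypothetical name)
                            n_docs=1000,
                            n_queries=100,
                            add_precomputed=False)
print(f"Prepared {len(docs)} docs and {len(labels)} labels")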
Code Example #3
File: reader.py Project: swayson/haystack
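# NOTE: as with Code Example #1, this function depends on module-level names
# from the original benchmark script (reader_models_ci, reader_models_full,
# n_total_passages, n_total_docs, data_dir, filename, doc_index, label_index,
# results_file, reader_types, logger) and on `import pandas as pd`.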
def benchmark_reader(ci=False,
                     update_json=False,
                     save_markdown=False,
                     **kwargs):
    if ci:
        reader_models = reader_models_ci
        max_docs = 100
        # heuristic to estimate num of passages for the reduced num of docs
        n_passages = n_total_passages * (max_docs / n_total_docs)
    else:
        reader_models = reader_models_full
        max_docs = None
        n_passages = n_total_passages
    reader_results = []
    doc_store = get_document_store("elasticsearch")
    # download squad data
    _download_extract_downstream_data(input_file=data_dir / filename)
    docs, labels = eval_data_from_file(data_dir / filename, max_docs)

    index_to_doc_store(doc_store, docs, None, labels)
    for reader_name in reader_models:
        for reader_type in reader_types:
            logger.info(
                f"##### Start reader run - model:{reader_name}, type: {reader_type} ##### "
            )
            try:
                reader = get_reader(reader_name, reader_type)
                results = reader.eval(document_store=doc_store,
                                      doc_index=doc_index,
                                      label_index=label_index,
                                      device="cuda")
                # results = reader.eval_on_file(data_dir, filename, device="cuda")
                print(results)
                results["passages_per_second"] = n_passages / results[
                    "reader_time"]
                results["reader"] = reader_name
                results["error"] = ""
                reader_results.append(results)
            except Exception as e:
                results = {
                    'EM': 0.,
                    'f1': 0.,
                    'top_n_accuracy': 0.,
                    'top_n': 0,
                    'reader_time': 0.,
                    "passages_per_second": 0.,
                    "seconds_per_query": 0.,
                    'reader': reader_name,
                    "error": e
                }
                reader_results.append(results)
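            # As in Code Example #1, re-write the results file after every run to checkpoint progress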
            reader_df = pd.DataFrame.from_records(reader_results)
            reader_df.to_csv(results_file)
            if save_markdown:
                md_file = results_file.replace(".csv", ".md")
                with open(md_file, "w") as f:
                    f.write(str(reader_df.to_markdown()))
    if update_json:
        populate_reader_json()
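A hedged invocation sketch: the call below runs the reduced CI benchmark on 100 docs and also writes a markdown copy of the results table, without regenerating the benchmark JSON:

benchmark_reader(ci=True, update_json=False, save_markdown=True)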
Code Example #4
    def add_eval_data(self, filename: str, doc_index: str = "eval_document", label_index: str = "label"):
        """
        Adds a SQuAD-formatted file to the DocumentStore so that evaluation can be performed on it.

        :param filename: Name of the file containing evaluation data
        :type filename: str
        :param doc_index: Elasticsearch index where evaluation documents should be stored
        :type doc_index: str
        :param label_index: Elasticsearch index where labeled questions should be stored
        :type label_index: str
        """

        docs, labels = eval_data_from_file(filename)
        self.write_documents(docs, index=doc_index)
        self.write_labels(labels, index=label_index)
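A usage sketch, assuming a haystack 0.x install with Elasticsearch running locally; the import path changed across haystack versions and the file path is a hypothetical placeholder:

from haystack.document_store.elasticsearch import ElasticsearchDocumentStore  # path varies by haystack version

doc_store = ElasticsearchDocumentStore(host="localhost", index="document")
doc_store.add_eval_data("data/squad_dev.json",   # hypothetical SQuAD-format file
                        doc_index="eval_document",
                        label_index="label")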