def main(config: configure_finetuning.FinetuningConfig, split, bioasq=False):
    """Evaluate SQuAD-style QA predictions for one split.

    Args:
      config: fine-tuning configuration providing the raw-data directory and
        the predictions-file path for each task.
      split: dataset split name (e.g. "train", "dev").
      bioasq: if True, evaluate against the BioASQ data instead of SQuAD v1.1.

    Returns:
      Whatever evaluate(dataset, predictions) returns (the eval metrics).
    """
    expected_version = '1.1'
    # Pick the task once up front instead of building the SQuAD paths and
    # then throwing them away in the bioasq case (as the original did).
    task = "bioasq" if bioasq else "squadv1"
    Args = collections.namedtuple("Args", ["dataset_file", "prediction_file"])
    args = Args(
        dataset_file=os.path.join(
            config.raw_data_dir(task),
            # NOTE(review): the original used a "0debug" suffix for bioasq but
            # "-debug" for squadv1; assuming "0debug" was a typo and the debug
            # files share the "-debug" naming — TODO confirm on-disk names.
            split + ("-debug" if config.debug else "") + ".json"),
        prediction_file=config.qa_preds_file(task))

    with tf.io.gfile.GFile(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        # Version mismatch is a warning, not a fatal error.
        if dataset_json['version'] != expected_version:
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    with tf.io.gfile.GFile(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    return evaluate(dataset, predictions)
# Ejemplo n.º 2
 def __init__(self, config: configure_finetuning.FinetuningConfig,
              tokenizer):
     """Build the SCOPe fold-classification task.

     Loads the category labels from <raw_data_dir("scopefold")>/categories.tsv
     and forwards them to the base classification-task constructor.
     """
     categories = read_tsv(
         os.path.join(config.raw_data_dir("scopefold"), "categories.tsv"))
     super(SCOPeFold, self).__init__(config, "scope", tokenizer, categories)
# Ejemplo n.º 3
def set_opts(config: configure_finetuning.FinetuningConfig, split):
    """Populate the module-level OPTS used by the SQuAD 2.0-style evaluator.

    Builds the data/prediction/output file paths for the "squad" task from
    the fine-tuning config and stores them in the global OPTS namedtuple.
    """
    global OPTS
    Options = collections.namedtuple("Options", [
        "data_file", "pred_file", "out_file", "na_prob_file", "na_prob_thresh",
        "out_image_dir", "verbose"
    ])
    # Debug runs read a smaller "-debug" variant of the split file.
    debug_suffix = "-debug" if config.debug else ""
    data_file = os.path.join(config.raw_data_dir("squad"),
                             split + debug_suffix + ".json")
    OPTS = Options(
        data_file=data_file,
        pred_file=config.qa_preds_file("squad"),
        out_file=config.qa_eval_file("squad"),
        na_prob_file=config.qa_na_file("squad"),
        na_prob_thresh=config.qa_na_threshold,
        out_image_dir=None,
        verbose=False,
    )
# Ejemplo n.º 4
def main(config: configure_finetuning.FinetuningConfig, split, task_name):
    """Evaluate QA predictions for task_name on the given split.

    Reads gold answers from <raw_data_dir>/<split>.json, loads the
    split-specific predictions, and returns the evaluation metrics; bad
    predictions are written to the task's pred_bad_file.
    """
    # Predictions and bad-prediction output are keyed by "<task>_<split>".
    tag = task_name + "_" + split
    answers_path = os.path.join(config.raw_data_dir(task_name),
                                split + ".json")
    answers, samples = read_answers(answers_path)
    predictions = read_predictions(config.qa_preds_file(tag))
    return evaluate(answers, predictions, samples, config.pred_bad_file(tag))
# Ejemplo n.º 5
def main(config: configure_finetuning.FinetuningConfig, split, task_name):
    """Evaluate QA predictions against the .jsonl answers for one split."""
    answers_file = os.path.join(config.raw_data_dir(task_name),
                                split + ".jsonl")
    gold = read_answers(answers_file)
    preds = read_predictions(config.qa_preds_file(task_name))
    return evaluate(gold, preds, True)