Example #1
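    # Excerpt from a test class; assumes logging, sys, unittest.mock.patch,
    # the run_qa example script, and a get_results helper are imported at module level.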
    def test_run_qa(self):
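        # Mirror logging output to stdout so the training run is visible in the test log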
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
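        # Build argv for run_qa.py: tiny SQuAD sample fixture, 3 epochs, v2 negatives enabled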
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
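            # main() parses the patched sys.argv exactly as a command-line run would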
            run_qa.main()
            result = get_results(tmp_dir)
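            # The fixture is tiny, so only sanity-check that the metrics clear a low bar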
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
Example #2
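# Flask view (route decorator not shown); assumes json, run_qa,
# and flask's request/render_template are imported at module level.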
def answer():
    question = request.form['question']
    # Convert the question into SQuAD-style JSON (BioASQ6b schema)
    x = {
        "version": "BioASQ6b",
        "data": [{
            "title": "BioASQ6b",
            "paragraphs": [{
                "context": question,
                "qas": [{
                    "question": question,
                    "id": "56c073fcef6e394741000020_000"
                }]
            }]
        }]
    }
    with open("./BIOASQ_DIR/Question.json", "w") as fo:
        fo.write(json.dumps(x))
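    # run_qa.main() presumably reads its arguments from sys.argv (configured at startup),
    # consuming Question.json and writing predictions to ./tmp/QA_output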
    run_qa.main()
    with open("./tmp/QA_output/predictions.json") as f:
        answer_mod = json.load(f)
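    # predictions.json maps question ids to predicted answers; keep the last (only) entry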
    for k in answer_mod:
        answer = answer_mod[k]

    return render_template('answ.html', question=question, answer=answer)