Example #1
import json

# import palm for easier NLP coding
from palm.toolkit.configure import PDConfig

from squad.evaluate_v1 import *


def do_eval(args):
    expected_version = "1.1"

    with open(args.evaluation_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        if dataset_json['version'] != expected_version:
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'])
        dataset = dataset_json['data']

        with open(args.output_prediction_file) as prediction_file:
            predictions = json.load(prediction_file)

        print(json.dumps(evaluate(dataset, predictions)))


if __name__ == "__main__":

    args = PDConfig(yaml_file="./data/config/squad1.yaml")
    args.build()
    args.Print()

    do_eval(args)
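
If squad.evaluate_v1 mirrors the official SQuAD v1.1 metric script, evaluate(dataset, predictions) walks articles, paragraphs and questions, looks each question id up in the predictions dict, and returns exact-match and F1 percentages. A minimal sketch with toy data (an illustration under that assumption, not part of the example above):

# Minimal sketch, assuming squad.evaluate_v1.evaluate matches the official
# SQuAD v1.1 evaluation script: dataset is a list of articles, predictions
# maps question ids to answer strings.
toy_dataset = [{
    "paragraphs": [{
        "qas": [{"id": "q1", "answers": [{"text": "Paris"}]}]
    }]
}]
toy_predictions = {"q1": "Paris"}

# Returns exact-match and F1 percentages, e.g. {'exact_match': 100.0, 'f1': 100.0}
print(evaluate(toy_dataset, toy_predictions))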
Example #2
import logging
import sys

import numpy as np
import paddle
import paddle.fluid as fluid

# import palm for easier NLP coding
from palm.toolkit.configure import PDConfig

# do_predict is assumed to come from predict (my_train is an alternative source)
# from my_train import do_predict
from predict import do_predict
from train import do_train
from inference_model import do_save_inference_model

if __name__ == "__main__":
    LOG_FORMAT = "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(stream=sys.stdout,
                        level=logging.DEBUG,
                        format=LOG_FORMAT)
    logging.getLogger().setLevel(logging.INFO)

    args = PDConfig(yaml_file="./transformer.yaml")
    args.build()
    args.Print()

    if args.do_train:
        do_train(args)

    if args.do_predict:
        do_predict(args)

    if args.do_save_inference_model:
        do_save_inference_model(args)
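
In both examples PDConfig loads the YAML file and exposes flags such as do_train, do_predict, and do_save_inference_model as attributes, so the run script is driven entirely by the config. Exporting an inference model in paddle.fluid's static-graph API typically comes down to fluid.io.save_inference_model; a minimal sketch of that pattern (an assumption for illustration, not the repo's actual inference_model module):

# Minimal sketch, assuming a paddle.fluid 1.x static-graph workflow; this is
# not the actual inference_model.do_save_inference_model used above.
import paddle.fluid as fluid

def save_toy_inference_model(dirname):
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Toy network: one feed variable and one fully connected layer.
        x = fluid.data(name="x", shape=[None, 8], dtype="float32")
        logits = fluid.layers.fc(input=x, size=2)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    # Prune the program to the feed/fetch targets and save it with parameters.
    fluid.io.save_inference_model(
        dirname=dirname,
        feeded_var_names=["x"],
        target_vars=[logits],
        executor=exe,
        main_program=main_prog)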