Example 1
import io
import os


def do_inference(args):
    # Prepare the output directory and pick the compute device(s).
    prepare_output_path(args.output_dir, args.overwrite_output_dir)
    device, n_gpus = setup_backend(args.no_cuda)
    # Resolve the GLUE task and scale the eval batch size by the GPU count.
    args.task_name = args.task_name.lower()
    task = get_glue_task(args.task_name, data_dir=args.data_dir)
    args.batch_size = args.per_gpu_eval_batch_size * max(1, n_gpus)
    # Load the pre-trained (optionally quantized) sequence classifier.
    classifier = TransformerSequenceClassifier.load_model(
        model_path=args.model_path,
        model_type=args.model_type,
        task_type=task.task_type,
        metric_fn=get_metric_fn(task.name),
        do_lower_case=args.do_lower_case,
        load_quantized=args.load_quantized_model,
    )
    classifier.to(device, n_gpus)
    # Use the dev split when evaluating, otherwise the test split.
    examples = (task.get_dev_examples()
                if args.evaluate else task.get_test_examples())
    preds = classifier.inference(examples,
                                 args.max_seq_length,
                                 args.batch_size,
                                 evaluate=args.evaluate)
    # Write one prediction per line to <output_dir>/output.txt.
    with io.open(os.path.join(args.output_dir, "output.txt"),
                 "w",
                 encoding="utf-8") as fw:
        for p in preds:
            fw.write("{}\n".format(p))
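
For context, a minimal sketch of how this routine could be driven from the command line is shown below. The flag names simply mirror the attributes that do_inference reads from args; the argument parser itself, its defaults, and the __main__ entry point are assumptions added for illustration and are not part of the original example.

import argparse


def build_arg_parser():
    # Hypothetical CLI wrapper; every flag corresponds to an args attribute
    # used by do_inference above.
    parser = argparse.ArgumentParser(description="Run GLUE inference")
    parser.add_argument("--task_name", required=True)
    parser.add_argument("--data_dir", required=True)
    parser.add_argument("--model_path", required=True)
    parser.add_argument("--model_type", required=True)
    parser.add_argument("--output_dir", required=True)
    parser.add_argument("--overwrite_output_dir", action="store_true")
    parser.add_argument("--max_seq_length", type=int, default=128)
    parser.add_argument("--per_gpu_eval_batch_size", type=int, default=8)
    parser.add_argument("--do_lower_case", action="store_true")
    parser.add_argument("--load_quantized_model", action="store_true")
    parser.add_argument("--evaluate", action="store_true")
    parser.add_argument("--no_cuda", action="store_true")
    return parser


if __name__ == "__main__":
    do_inference(build_arg_parser().parse_args())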
Example 2
import io
import os


def do_inference(args):
    prepare_output_path(args.output_dir, args.overwrite_output_dir)
    device, n_gpus = setup_backend(args.no_cuda)
    args.task_name = args.task_name.lower()
    task = get_glue_task(args.task_name, data_dir=args.data_dir)
    args.batch_size = args.per_gpu_eval_batch_size * max(1, n_gpus)
    classifier = TransformerSequenceClassifier.load_model(
        model_path=args.model_path, model_type=args.model_type)
    classifier.to(device, n_gpus)
    preds = classifier.inference(task.get_test_examples(), args.batch_size)
    with io.open(os.path.join(args.output_dir, "output.txt"),
                 "w",
                 encoding="utf-8") as fw:
        for p in preds:
            fw.write("{}\n".format(p))