Example #1
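Both examples in this listing are standalone main functions from ONNX Runtime's GPT-2 benchmarking tooling and rely on surrounding imports that the snippets do not show. A minimal sketch of those imports follows; the module names (gpt2_helper, quantize_helper, benchmark_helper) and the logger setup are assumptions based on that tooling, not part of the snippets themselves.

# Assumed imports for these snippets (module names are a best guess based on
# ONNX Runtime's transformers benchmarking tools, not taken from the snippet itself):
import csv
import logging
from datetime import datetime

import psutil
import torch
from transformers import AutoConfig

from benchmark_helper import (Precision, create_onnxruntime_session,
                              prepare_environment, setup_logger)
from gpt2_helper import DEFAULT_TOLERANCE, MODEL_CLASSES, Gpt2Helper
from quantize_helper import QuantizeHelper

logger = logging.getLogger(__name__)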
def main(args):
    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]
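    # Each MODEL_CLASSES entry is a tuple: index 0 is the model class used here, and index 2
    # (read below as use_padding) indicates whether position_ids/attention_mask inputs are exported.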

    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        torchscript=args.torchscript,
                                        cache_dir=cache_dir)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        config=config,
                                        cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    #if args.float16:
    #    model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)
    # TODO: find a way to check model size > 2GB; for now, assume models with more than 24 layers need the external data format.
    use_external_data_format = config.n_layer > 24
    onnx_model_paths = Gpt2Helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        has_past=True,
        new_folder=use_external_data_format)

    onnx_model_path = onnx_model_paths["raw"]
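    # "raw" is the directly exported, unoptimized FP32 model; the optimized fp32/fp16 and
    # quantized int8 variants each get their own path in onnx_model_paths.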
    use_padding = MODEL_CLASSES[args.model_class][2]
    Gpt2Helper.export_onnx(model,
                           device,
                           onnx_model_path,
                           args.verbose,
                           use_external_data_format,
                           has_position_ids=use_padding,
                           has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else 'fp32']
        Gpt2Helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size,
                                 use_external_data_format)
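        # optimize_onnx rewrites the raw export; the third argument enables fp16 conversion
        # when the target precision is FLOAT16.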

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
            QuantizeHelper.quantize_onnx_model(onnx_model_path,
                                               onnx_model_paths["int8"],
                                               use_external_data_format)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")
            onnx_model_path = onnx_model_paths["int8"]

    if args.torchscript:
        model = Gpt2Helper.torchscript(model,
                                       config,
                                       device,
                                       has_position_ids=use_padding,
                                       has_attention_mask=use_padding)

    session = create_onnxruntime_session(onnx_model_path,
                                         args.use_gpu,
                                         enable_all_optimization=False,
                                         num_threads=args.thread_num,
                                         verbose=args.verbose)
    if session is None:
        return

    # Allocate output buffers for IO Binding
    max_output_shapes = Gpt2Helper.get_output_shapes(
        max(args.batch_sizes), max(args.past_sequence_lengths),
        max(args.sequence_lengths), config, args.model_class)
    output_buffers = Gpt2Helper.get_output_buffers(
        max_output_shapes, device, args.precision == Precision.FLOAT16)
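    # The buffers are allocated once for the largest configuration and reused, together with
    # the per-run output_shapes, for every benchmark iteration below.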

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(
        datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer",
            "torchscript", "batch_size", "sequence_length",
            "past_sequence_length", "torch_latency", "onnxruntime_latency",
            "onnxruntime_io_binding_latency"
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()
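        # Sweep every combination of batch size, sequence length, and past sequence length.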

        for batch_size in args.batch_sizes:
            for sequence_length in args.sequence_lengths:
                for past_sequence_length in args.past_sequence_lengths:
                    assert batch_size > 0 and sequence_length > 0 and past_sequence_length >= 0
                    logger.debug(
                        f"Running test for batch_size={batch_size} sequence_length={sequence_length} past_sequence_length={past_sequence_length}..."
                    )
                    dummy_inputs = Gpt2Helper.get_dummy_inputs(
                        batch_size,
                        past_sequence_length,
                        sequence_length,
                        config.num_attention_heads,
                        config.hidden_size,
                        config.n_layer,
                        config.vocab_size,
                        device,
                        float16=(args.precision == Precision.FLOAT16),
                        has_position_ids=use_padding,
                        has_attention_mask=use_padding)
                    output_shapes = Gpt2Helper.get_output_shapes(
                        batch_size, past_sequence_length, sequence_length,
                        config, args.model_class)

                    try:
                        outputs, torch_latency = Gpt2Helper.pytorch_inference(
                            model, dummy_inputs, args.test_times)
                        ort_outputs, ort_latency = Gpt2Helper.onnxruntime_inference(
                            session, dummy_inputs, args.test_times)
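                        # The IO-binding path writes results directly into the pre-allocated
                        # output buffers instead of letting ONNX Runtime allocate outputs per run.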
                        ort_io_outputs, ort_io_latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                            session,
                            dummy_inputs,
                            output_buffers,
                            output_shapes,
                            args.test_times,
                            return_numpy=False,
                            include_copy_output_latency=args.include_copy_output_latency)

                        if args.validate_onnx:
                            if Gpt2Helper.compare_outputs(
                                    outputs,
                                    ort_outputs,
                                    rtol=DEFAULT_TOLERANCE[args.precision],
                                    atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f'Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                                )

                            # Results of IO binding might be in GPU. Copy outputs to CPU for comparison.
                            copy_outputs = []
                            for output in ort_io_outputs:
                                copy_outputs.append(output.cpu().numpy())

                            if Gpt2Helper.compare_outputs(
                                    outputs,
                                    copy_outputs,
                                    rtol=DEFAULT_TOLERANCE[args.precision],
                                    atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f'Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                                )

                        logger.info(
                            f"batch_size={batch_size}, sequence_length={sequence_length}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, onnxruntime_latency={ort_latency:.2f}, onnxruntime_io_binding_latency={ort_io_latency:.2f}"
                        )

                        row = {
                            "model_name": args.model_name_or_path,
                            "model_class": args.model_class,
                            "gpu": args.use_gpu,
                            "precision": args.precision,
                            "optimizer": args.optimize_onnx,
                            "torchscript": args.torchscript,
                            "batch_size": batch_size,
                            "sequence_length": sequence_length,
                            "past_sequence_length": past_sequence_length,
                            "torch_latency": f"{torch_latency:.2f}",
                            "onnxruntime_latency": f"{ort_latency:.2f}",
                            "onnxruntime_io_binding_latency": f"{ort_io_latency:.2f}"
                        }
                        csv_writer.writerow(row)
                    except Exception:
                        logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")
    return csv_filename
Example #2
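This variant appears to be an earlier revision of the same benchmark: it measures single-token decoding only (sequence_length is fixed to 1 below) and assumes the same imports as Example #1.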
def main():
    args = parse_arguments()
    setup_logger(args.verbose)

    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]

    config = AutoConfig.from_pretrained(args.model_name,
                                        torchscript=args.torchscript,
                                        cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
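        # return_tuple only exists in some transformers releases; when available, forcing it makes
        # the model return plain tuples, which the ONNX/TorchScript export paths expect.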
    model = model_class.from_pretrained(args.model_name,
                                        config=config,
                                        cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    #if args.float16:
    #    model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name,
                                                 args.model_class)

    onnx_model_path = onnx_model_paths["raw"]
    Gpt2Helper.export_onnx(model, device, onnx_model_path, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision)]
        Gpt2Helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size)

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
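            # Note: the source and destination paths are identical below, so the quantized model
            # overwrites the optimized ONNX file in place.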
            QuantizeHelper.quantize_onnx_model(onnx_model_path,
                                               onnx_model_path)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")

    if args.torchscript:
        model = Gpt2Helper.torchscript(model, config, device)

    session = create_onnxruntime_session(onnx_model_path,
                                         args.use_gpu,
                                         enable_all_optimization=False,
                                         num_threads=args.thread_num,
                                         verbose=args.verbose)
    if session is None:
        return

    # One word is generated for each inference. This length does not include that of past state.
    sequence_length = 1

    # Allocate output buffers for IO Binding
    max_output_shapes = Gpt2Helper.get_output_shapes(
        max(args.batch_sizes), max(args.past_sequence_lengths),
        sequence_length, config, args.model_class)
    output_buffers = Gpt2Helper.get_output_buffers(
        max_output_shapes, device, args.precision == Precision.FLOAT16)

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(
        datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer",
            "torchscript", "batch_size", "past_sequence_length",
            "torch_latency", "ort_latency", "ort_io_latency"
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for past_sequence_length in args.past_sequence_lengths:
                logger.debug(
                    f"Running test for batch_size={batch_size} past_sequence_length={past_sequence_length}..."
                )
                dummy_inputs = Gpt2Helper.get_dummy_inputs(
                    batch_size, past_sequence_length, sequence_length,
                    config.num_attention_heads, config.hidden_size,
                    config.n_layer, config.vocab_size, device,
                    args.precision == Precision.FLOAT16)
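                # The final positional argument is the float16 flag for the dummy inputs.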
                output_shapes = Gpt2Helper.get_output_shapes(
                    batch_size, past_sequence_length, sequence_length, config,
                    args.model_class)

                try:
                    outputs, torch_latency = Gpt2Helper.pytorch_inference(
                        model, dummy_inputs, args.test_times)
                    ort_outputs, ort_latency = Gpt2Helper.onnxruntime_inference(
                        session, dummy_inputs, args.test_times)
                    ort_io_outputs, ort_io_latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                        session, dummy_inputs, output_buffers, output_shapes,
                        args.test_times)
                    if args.validate_onnx:
                        if Gpt2Helper.compare_outputs(
                                outputs,
                                ort_outputs,
                                rtol=DEFAULT_TOLERANCE[args.precision],
                                atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )
                        if Gpt2Helper.compare_outputs(
                                outputs,
                                ort_io_outputs,
                                rtol=DEFAULT_TOLERANCE[args.precision],
                                atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )

                    logger.info(
                        f"batch_size={batch_size}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, ort_latency={ort_latency:.2f}, ort_io_latency={ort_io_latency:.2f}"
                    )

                    row = {
                        "model_name": args.model_name,
                        "model_class": args.model_class,
                        "gpu": args.use_gpu,
                        "precision": args.precision,
                        "optimizer": args.optimize_onnx,
                        "torchscript": args.torchscript,
                        "batch_size": batch_size,
                        "past_sequence_length": past_sequence_length,
                        "torch_latency": f"{torch_latency:.2f}",
                        "ort_latency": f"{ort_latency:.2f}",
                        "ort_io_latency": f"{ort_io_latency:.2f}"
                    }
                    csv_writer.writerow(row)
                except Exception:
                    logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")