Code example #1
def main():
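    # Entry point: parse arguments, prepare the cache and output directories,
    # validate the precision/GPU/optimization flags, then export the model
    # (encoder and decoder init merged) to ONNX under torch.no_grad().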
    args = parse_arguments()
    setup_logger(args.verbose)

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.optimize_onnx:
        logger.warning("Graph optimization for T5 is not implemented yet.")

    with torch.no_grad():
        merge_encoder_and_decoder_init = True  # Merging encoder and decoder initialization into one model is recommended.
        output_paths = export_onnx_models(args.model_name_or_path, cache_dir, output_dir, args.use_gpu,
                                          args.use_external_data_format, args.optimize_onnx, args.precision,
                                          args.verbose, args.use_decoder_start_token, merge_encoder_and_decoder_init,
                                          args.overwrite)

    logger.info(f"Done! Outputs: {output_paths}")
Code example #2
def main():
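    # Longformer benchmark driver: pick fp16/fp32 batch sizes based on the
    # first GPU's total memory, run the experiments for each batch size, and
    # write per-run detail CSVs plus one summary CSV per metric.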
    torch.multiprocessing.set_start_method("spawn")

    args = parse_arguments()

    benchmark_helper.setup_logger(args.verbose)

    if len(sys.argv) > 1:
        test_results = launch_test(args)
        time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        csv_filename = f"benchmark_detail_{time_stamp}.csv"
        output_details(test_results, csv_filename)
        return

    gpu_list = benchmark_helper.get_gpu_info()
    logger.info("GPU info: %s", gpu_list)
    fp16_batch_sizes = [16, 8, 4, 2, 1]
    fp32_batch_sizes = [4, 2, 1]
    if gpu_list and gpu_list[0]["total"] >= 32 * 1024 * 1024 * 1024:  # 32 GB
        fp16_batch_sizes = [64, 32, 16, 8, 4, 2, 1]
        fp32_batch_sizes = [16, 8, 4, 2, 1]

    gpu_name = re.sub(r"(?u)[^-\w.]", "_",
                      gpu_list[0]["name"]) if gpu_list else "gpu"
    is_baseline = os.environ.get("ORT_LONGFORMER_BASELINE", "0") == "1"
    experiment_name = f"longformer_base_{gpu_name}" + ("_baseline"
                                                       if is_baseline else "")
    logger.info(
        f"experiment_name={experiment_name}, fp16_batch_sizes={fp16_batch_sizes}, fp32_batch_sizes={fp32_batch_sizes}"
    )

    total_runs = 1
    all_results = []
    for _ in range(total_runs):
        for batch_size in fp16_batch_sizes:
            fp16_results = run_experiments(use_fp16=True,
                                           batch_size=batch_size,
                                           is_baseline=is_baseline)
            output_details(fp16_results, "longformer_base_fp16.csv")
            all_results += fp16_results
    for metric_name in [
            "average_latency_ms", "QPS", "memory", "diff_90_percentile"
    ]:
        output_summary(all_results, f"{experiment_name}_{metric_name}.csv",
                       metric_name)

    all_results = []
    for _ in range(total_runs):
        for batch_size in fp32_batch_sizes:
            fp32_results = run_experiments(use_fp16=False,
                                           batch_size=batch_size,
                                           is_baseline=is_baseline)
            output_details(fp32_results, "longformer_base_fp32.csv")
            all_results += fp32_results
    for metric_name in [
            "average_latency_ms", "QPS", "memory", "diff_90_percentile"
    ]:
        output_summary(all_results, f"{experiment_name}_{metric_name}.csv",
                       metric_name)
Code example #3
def main():
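    # Entry point: validate the precision/GPU/optimization flags, then call
    # export_onnx_models with the full option set (external data format,
    # decoder start token, merged encoder/decoder init, mixed precision,
    # input integer width, and model type).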
    args = parse_arguments()

    setup_logger(args.verbose)

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.optimize_onnx:
        logger.warning("Graph optimization for T5 is not implemented yet.")

    output_paths = export_onnx_models(
        args.model_name_or_path,
        cache_dir,
        output_dir,
        args.use_gpu,
        args.use_external_data_format,
        args.optimize_onnx,
        args.precision,
        args.verbose,
        args.use_decoder_start_token,
        not args.separate_encoder_and_decoder_init,
        args.overwrite,
        args.disable_auto_mixed_precision,
        not args.use_int64_inputs,
        args.model_type,
    )

    logger.info(f"Done! Outputs: {output_paths}")
Code example #4
File: benchmark_gpt2.py  Project: xxjohn/onnxruntime
                            args.precision,
                            "optimizer":
                            args.optimize_onnx,
                            "torchscript":
                            args.torchscript,
                            "batch_size":
                            batch_size,
                            "sequence_length":
                            sequence_length,
                            "past_sequence_length":
                            past_sequence_length,
                            "torch_latency":
                            f"{torch_latency:.2f}",
                            "onnxruntime_latency":
                            f"{ort_latency:.2f}",
                            "onnxruntime_io_binding_latency":
                            f"{ort_io_latency:.2f}"
                        }
                        csv_writer.writerow(row)
                    except Exception:
                        logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")
    return csv_filename


if __name__ == '__main__':
    args = parse_arguments()
    setup_logger(args.verbose)
    main(args)
Code example #5
def main():
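    # GPT-2 export entry point: load the PyTorch model, export a raw ONNX
    # graph, optionally optimize (fp16) or quantize (int8), then check parity
    # between ONNX Runtime and PyTorch within the configured tolerance.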
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name_or_path,
                                        config=config,
                                        cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir,
                                                 args.model_name_or_path,
                                                 args.model_class)
    raw_onnx_model = args.output if args.output.endswith(
        '.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (
        args.output.endswith('.onnx') or
        (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
    ) else onnx_model_paths[str(args.precision)]

    Gpt2Helper.export_onnx(model, device, raw_onnx_model, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=False,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session,
                               model,
                               device,
                               args.precision == Precision.FLOAT16,
                               rtol=args.tolerance,
                               atol=args.tolerance,
                               model_class=args.model_class)

    logger.info(f"Done. Output model: {output_path}")
Code example #6
File: benchmark.py  Project: petterinteon/onnxruntime
def main():
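    # Benchmark driver: for each thread count, run the selected engines
    # (torch, torchscript, tensorflow, onnxruntime), then write fusion
    # statistics, per-result details, and a summary to CSV files.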
    args = parse_arguments()

    setup_logger(args.verbose)

    if args.precision == Precision.FLOAT16 and not args.use_gpu:
        logger.error("fp16 is for GPU only")
        return

    if args.precision == Precision.INT8 and args.use_gpu:
        logger.error("int8 is for CPU only")
        return

    args.num_threads = sorted(
        set(cpu_count if x <= 0 else x for x in args.num_threads))

    logger.info(f"Arguments: {args}")

    if not os.path.exists(args.cache_dir):
        try:
            os.mkdir(args.cache_dir)
        except OSError:
            logger.error("Creation of the directory %s failed" %
                         args.cache_dir)

    enable_torch = "torch" in args.engines
    enable_torchscript = "torchscript" in args.engines
    enable_onnxruntime = "onnxruntime" in args.engines
    enable_tensorflow = "tensorflow" in args.engines

    results = []

    for num_threads in args.num_threads:
        torch.set_num_threads(num_threads)
        logger.debug(torch.__config__.parallel_info())
        if enable_torch or enable_torchscript:
            if args.input_counts != [1]:
                logger.warning(
                    "--input_counts is not implemented for torch or torchscript engine."
                )

            if enable_torchscript:
                results += run_pytorch(args.use_gpu, args.models,
                                       args.model_class, args.precision,
                                       num_threads, args.batch_sizes,
                                       args.sequence_lengths, args.test_times,
                                       True, args.cache_dir, args.verbose)

            if enable_torch:
                results += run_pytorch(args.use_gpu, args.models,
                                       args.model_class, args.precision,
                                       num_threads, args.batch_sizes,
                                       args.sequence_lengths, args.test_times,
                                       False, args.cache_dir, args.verbose)

        if enable_tensorflow:
            results += run_tensorflow(args.use_gpu, args.models,
                                      args.model_class, args.precision,
                                      num_threads, args.batch_sizes,
                                      args.sequence_lengths, args.test_times,
                                      args.cache_dir, args.verbose)

        model_fusion_statistics = {}
        if enable_onnxruntime:
            try:
                use_raw_attention_mask = True
                results += run_onnxruntime(
                    args.use_gpu, args.models, args.model_class,
                    args.precision, num_threads, args.batch_sizes,
                    args.sequence_lengths, args.test_times, args.input_counts,
                    args.optimize_onnx, args.validate_onnx, args.cache_dir,
                    args.onnx_dir, args.verbose, args.overwrite,
                    args.disable_ort_io_binding, use_raw_attention_mask,
                    model_fusion_statistics, args.model_source)
            except Exception:
                logger.error("Exception", exc_info=True)

    time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    if model_fusion_statistics:
        csv_filename = args.fusion_csv or f"benchmark_fusion_{time_stamp}.csv"
        output_fusion_statistics(model_fusion_statistics, csv_filename)

    if len(results) == 0:
        if args.batch_sizes != [0]:
            logger.warning("No any result avaiable.")
        return

    csv_filename = args.detail_csv or f"benchmark_detail_{time_stamp}.csv"
    output_details(results, csv_filename)

    csv_filename = args.result_csv or f"benchmark_summary_{time_stamp}.csv"
    output_summary(results, csv_filename, args)
Code example #7
def main():
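    # GPT-2 export with optional beam-search / one-step-search variants:
    # export to ONNX, optimize or quantize as requested, test parity against
    # PyTorch, and optionally run generation tests from a JSON input file.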
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith('.onnx'), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            batch_size=1,
                                            beam_size=args.beam_size,
                                            cache_dir=cache_dir)
    elif model_type == 'configurable_one_step_search':
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            batch_size=1,
                                            beam_size=args.beam_size,
                                            ignore_eos=args.ignore_eos,
                                            temperature=args.temperature,
                                            repetition_penalty=args.repetition_penalty,
                                            excluded_token_ids=args.excluded_token_ids,
                                            length_penalty=args.length_penalty,
                                            do_sample=args.do_sample,
                                            do_sample_top_p=args.do_sample_top_p,
                                            do_sample_top_k=args.do_sample_top_k,
                                            cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info(f"Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(output_dir,
                                                 args.model_name_or_path,
                                                 args.model_class,
                                                 new_folder=args.use_external_data_format)

    raw_onnx_model = onnx_model_paths["raw"]

    logger.info(f"Exporting ONNX model to {raw_onnx_model}")
    use_padding = MODEL_CLASSES[args.model_class][2]
    gpt2helper.export_onnx(model,
                           device,
                           raw_onnx_model,
                           args.verbose,
                           args.use_external_data_format,
                           has_position_ids=use_padding,
                           has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size,
                                 args.use_external_data_format)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, onnx_model_paths['int8'], args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith('.onnx') and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=True, verbose=args.verbose)
    if session is not None:
        gpt2helper.test_parity(session,
                               model,
                               device,
                               args.precision == Precision.FLOAT16,
                               rtol=args.tolerance,
                               atol=args.tolerance,
                               model_class=args.model_class,
                               has_position_ids=use_padding,
                               has_attention_mask=use_padding)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(numpy.asarray(data["input_ids"], dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                        attention_mask = torch.from_numpy(numpy.asarray(data["attention_mask"],
                                                                        dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (
                            input_ids !=
                            padding).type(torch.float16 if args.precision == Precision.FLOAT16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(numpy.asarray(data["position_ids"],
                                                                      dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {"input_ids": input_ids, "position_ids": position_ids, "attention_mask": attention_mask}
                else:
                    inputs = {"input_ids": input_ids}

                if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                    beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()

                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx": beam_select_idx,
                        "input_log_probs": input_log_probs,
                        "input_unfinished_sents": input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose,
                                   save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
Code example #8
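    # Profile the model with onnxruntime, parse the generated JSON profile,
    # and return per-node plus grouped timing lines above the threshold.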
    profile_file = run_profile(args.model, args.use_gpu,
                               args.basic_optimization, args.thread_num,
                               all_inputs)

    profile_records = load_profile_json(profile_file)

    lines = parse_profile_results(profile_records, args.kernel_time_only,
                                  args.threshold)

    lines.append("-" * 64)
    lines += group_profile_results(profile_records, args.kernel_time_only,
                                   args.threshold)

    return lines


if __name__ == '__main__':
    arguments = parse_arguments()
    print("Arguments", arguments)

    from benchmark_helper import setup_logger
    setup_logger(arguments.verbose)

    results = run(arguments)

    print("Results:")
    print("-" * 64)
    for line in results:
        print(line)
Code example #9
def main():
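    # GPT-2 export/parity flow, extended with an optional --input_test_file
    # loop that feeds JSON-encoded inputs to Gpt2Tester.test_generation.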
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name_or_path,
                                        config=config,
                                        cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir,
                                                 args.model_name_or_path,
                                                 args.model_class)
    raw_onnx_model = args.output if args.output.endswith(
        '.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (
        args.output.endswith('.onnx') or
        (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
    ) else onnx_model_paths[str(args.precision)]

    Gpt2Helper.export_onnx(model, device, raw_onnx_model, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=False,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session,
                               model,
                               device,
                               args.precision == Precision.FLOAT16,
                               rtol=args.tolerance,
                               atol=args.tolerance,
                               model_class=args.model_class)

    if args.input_test_file:
        test_inputs = []
        with open(args.input_test_file) as read_f:
            for i, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(
                    numpy.asarray(data["input_ids"],
                                  dtype=numpy.int64)).to(device)
                position_ids = torch.from_numpy(
                    numpy.asarray(data["position_ids"],
                                  dtype=numpy.int64)).to(device)
                numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                attention_mask = torch.from_numpy(
                    numpy.asarray(data["attention_mask"],
                                  dtype=numpy_float)).to(device)
                inputs = {
                    "input_ids": input_ids,
                    "position_ids": position_ids,
                    "attention_mask": attention_mask
                }
                test_inputs.append(inputs)
        Gpt2Tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose)

    logger.info(f"Done. Output model: {output_path}")
Code example #10
def main():
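    # GPT-2 latency benchmark: export and optionally optimize/quantize the
    # model, then for each batch size and past sequence length measure PyTorch,
    # ONNX Runtime, and IO-binding latencies and append a row to a CSV file.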
    args = parse_arguments()
    setup_logger(args.verbose)

    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(
        psutil.cpu_count(
            logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]

    config = AutoConfig.from_pretrained(args.model_name,
                                        torchscript=args.torchscript,
                                        cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name,
                                        config=config,
                                        cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    #if args.float16:
    #    model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)
    use_external_data_format = config.n_layer > 24  # TODO: find a way to check model size > 2GB
    onnx_model_paths = Gpt2Helper.get_onnx_paths(
        output_dir,
        args.model_name,
        args.model_class,
        has_past=True,
        new_folder=use_external_data_format)

    onnx_model_path = onnx_model_paths["raw"]
    Gpt2Helper.export_onnx(model, device, onnx_model_path, args.verbose,
                           use_external_data_format)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision)]
        Gpt2Helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size,
                                 use_external_data_format)

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
            QuantizeHelper.quantize_onnx_model(onnx_model_path,
                                               onnx_model_path,
                                               use_external_data_format)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")

    if args.torchscript:
        model = Gpt2Helper.torchscript(model, config, device)

    session = create_onnxruntime_session(onnx_model_path,
                                         args.use_gpu,
                                         enable_all_optimization=False,
                                         num_threads=args.thread_num,
                                         verbose=args.verbose)
    if session is None:
        return

    # One word is generated for each inference. This length does not include that of past state.
    sequence_length = 1

    # Allocate output buffers for IO Binding
    max_output_shapes = Gpt2Helper.get_output_shapes(
        max(args.batch_sizes), max(args.past_sequence_lengths),
        sequence_length, config, args.model_class)
    output_buffers = Gpt2Helper.get_output_buffers(
        max_output_shapes, device, args.precision == Precision.FLOAT16)

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(
        datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer",
            "torchscript", "batch_size", "past_sequence_length",
            "torch_latency", "ort_latency", "ort_io_latency"
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for past_sequence_length in args.past_sequence_lengths:
                logger.debug(
                    f"Running test for batch_size={batch_size} past_sequence_length={past_sequence_length}..."
                )
                dummy_inputs = Gpt2Helper.get_dummy_inputs(
                    batch_size, past_sequence_length, sequence_length,
                    config.num_attention_heads, config.hidden_size,
                    config.n_layer, config.vocab_size, device,
                    args.precision == Precision.FLOAT16)
                output_shapes = Gpt2Helper.get_output_shapes(
                    batch_size, past_sequence_length, sequence_length, config,
                    args.model_class)

                try:
                    outputs, torch_latency = Gpt2Helper.pytorch_inference(
                        model, dummy_inputs, args.test_times)
                    ort_outputs, ort_latency = Gpt2Helper.onnxruntime_inference(
                        session, dummy_inputs, args.test_times)
                    ort_io_outputs, ort_io_latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                        session,
                        dummy_inputs,
                        output_buffers,
                        output_shapes,
                        args.test_times,
                        return_numpy=False,
                        include_copy_output_latency=args.include_copy_output_latency)

                    if args.validate_onnx:
                        if Gpt2Helper.compare_outputs(
                                outputs,
                                ort_outputs,
                                rtol=DEFAULT_TOLERANCE[args.precision],
                                atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )

                        for i in ort_io_outputs:
                            ort_io_outputs[i] = ort_io_outputs[i].cpu().numpy()
                        if Gpt2Helper.compare_outputs(
                                outputs,
                                ort_io_outputs,
                                rtol=DEFAULT_TOLERANCE[args.precision],
                                atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )

                    logger.info(
                        f"batch_size={batch_size}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, ort_latency={ort_latency:.2f}, ort_io_latency={ort_io_latency:.2f}"
                    )

                    row = {
                        "model_name": args.model_name,
                        "model_class": args.model_class,
                        "gpu": args.use_gpu,
                        "precision": args.precision,
                        "optimizer": args.optimize_onnx,
                        "torchscript": args.torchscript,
                        "batch_size": batch_size,
                        "past_sequence_length": past_sequence_length,
                        "torch_latency": f"{torch_latency:.2f}",
                        "ort_latency": f"{ort_latency:.2f}",
                        "ort_io_latency": f"{ort_io_latency:.2f}"
                    }
                    csv_writer.writerow(row)
                except Exception:
                    logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")
Code example #11
def main(argv=None,
         experiment_name="",
         run_id=0,
         csv_filename="gpt2_parity_results.csv"):
    result = {}
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments(argv)
    setup_logger(args.verbose)

    if not experiment_name:
        import sys
        experiment_name = " ".join(argv if argv else sys.argv[1:])

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith(
            '.onnx'
        ), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    use_padding = MODEL_CLASSES[args.model_class][2]

    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            batch_size=1,
                                            beam_size=args.beam_size,
                                            cache_dir=cache_dir)
    elif model_type == 'configurable_one_step_search':
        model = model_class.from_pretrained(
            args.model_name_or_path,
            config=config,
            batch_size=1,
            beam_size=args.beam_size,
            ignore_eos=args.ignore_eos,
            temperature=args.temperature,
            repetition_penalty=args.repetition_penalty,
            excluded_token_ids=args.excluded_token_ids,
            length_penalty=args.length_penalty,
            do_sample=args.do_sample,
            do_sample_top_p=args.do_sample_top_p,
            do_sample_top_k=args.do_sample_top_k,
            cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info(f"Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        new_folder=args.use_external_data_format,
        remove_existing=[
            "fp32", "fp16", "int8"
        ])  # Do not remove raw model to save time in parity test

    raw_onnx_model = onnx_model_paths["raw"]

    if os.path.exists(raw_onnx_model):
        logger.warning(
            f"Skip exporting ONNX model since it existed: {raw_onnx_model}")
    else:
        logger.info(f"Exporting ONNX model to {raw_onnx_model}")
        gpt2helper.export_onnx(model,
                               device,
                               raw_onnx_model,
                               args.verbose,
                               args.use_external_data_format,
                               has_position_ids=use_padding,
                               has_attention_mask=use_padding,
                               input_ids_dtype=torch.int32
                               if args.use_int32_inputs else torch.int64,
                               position_ids_dtype=torch.int32
                               if args.use_int32_inputs else torch.int64,
                               attention_mask_dtype=torch.int32
                               if args.use_int32_inputs else torch.int64)

    fp16_params = {"keep_io_types": args.keep_io_types}
    if args.io_block_list:
        fp16_params["keep_io_types"] = args.io_block_list
    if args.node_block_list:
        fp16_params["node_block_list"] = args.node_block_list
    if args.op_block_list:
        fp16_params["op_block_list"] = args.op_block_list
    if args.force_fp16_initializers:
        fp16_params["force_fp16_initializers"] = args.force_fp16_initializers

    is_io_float16 = (args.precision == Precision.FLOAT16
                     and not args.keep_io_types)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[
            str(args.precision) if args.precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(
            raw_onnx_model,
            output_path,
            args.precision == Precision.FLOAT16,
            model.config.num_attention_heads,
            model.config.hidden_size,
            args.use_external_data_format,
            auto_mixed_precision=args.auto_mixed_precision,
            **fp16_params)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path,
                                           onnx_model_paths['int8'],
                                           args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith(
            '.onnx'
    ) and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")
    model_size_in_MB = int(
        get_onnx_model_size(output_path, args.use_external_data_format) /
        1024 / 1024)

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=True,
                                         verbose=args.verbose)
    if args.model_class == "GPT2LMHeadModel" and session is not None:
        parity_result = gpt2helper.test_parity(
            session,
            model,
            device,
            is_io_float16,
            rtol=args.tolerance,
            atol=args.tolerance,
            model_class=args.model_class,
            has_position_ids=use_padding,
            has_attention_mask=use_padding,
            input_ids_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            position_ids_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            attention_mask_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            test_cases_per_run=args.test_cases,
            total_runs=args.test_runs,
            verbose=args.verbose)

        latency = gpt2helper.test_performance(
            session,
            model,
            device,
            is_io_float16,
            total_runs=100,
            use_io_binding=True,
            model_class=args.model_class,
            has_position_ids=use_padding,
            has_attention_mask=use_padding,
            input_ids_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            position_ids_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            attention_mask_dtype=torch.int32
            if args.use_int32_inputs else torch.int64,
            batch_size=8,
            sequence_length=1,
            past_sequence_length=32)

        if args.precision == Precision.FLOAT16:
            logger.info(f"fp16 conversion parameters:{fp16_params}")

        # Write results to file
        import csv
        from onnxruntime import __version__ as ort_version
        latency_name = get_latency_name()
        csv_file_existed = os.path.exists(csv_filename)
        with open(csv_filename, mode="a", newline='') as csv_file:
            column_names = [
                "experiment", "run_id", "model_name", "model_class", "gpu",
                "precision", "optimizer", "test_cases", "runs",
                "keep_io_types", "io_block_list", "op_block_list",
                "node_block_list", "force_fp16_initializers",
                "auto_mixed_precision", "ORT_TRANSFORMER_OPTIONS",
                "ORT_CUDA_GEMM_OPTIONS", "onnxruntime", latency_name,
                "top1_match_rate", "onnx_size_in_MB", "diff_50_percentile",
                "diff_90_percentile", "diff_95_percentile",
                "diff_99_percentile", "diff_pass_rate", "nan_rate",
                "top1_match_rate_per_run"
            ]
            csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
            if not csv_file_existed:
                csv_writer.writeheader()
            row = {
                "experiment": experiment_name,
                "run_id": run_id,
                "model_name": args.model_name_or_path,
                "model_class": args.model_class,
                "gpu": args.use_gpu,
                "precision": args.precision,
                "optimizer": args.optimize_onnx,
                "test_cases": args.test_cases,
                "runs": args.test_runs,
                "keep_io_types": args.keep_io_types,
                "io_block_list": args.io_block_list,
                "op_block_list": args.op_block_list,
                "node_block_list": args.node_block_list,
                "force_fp16_initializers": args.force_fp16_initializers,
                "auto_mixed_precision": args.auto_mixed_precision,
                "ORT_TRANSFORMER_OPTIONS":
                os.getenv('ORT_TRANSFORMER_OPTIONS'),
                "ORT_CUDA_GEMM_OPTIONS": os.getenv('ORT_CUDA_GEMM_OPTIONS'),
                "onnxruntime": ort_version,
                latency_name: f"{latency:.2f}",
                "diff_50_percentile": parity_result["max_diff_percentile_50"],
                "diff_90_percentile": parity_result["max_diff_percentile_90"],
                "diff_95_percentile": parity_result["max_diff_percentile_95"],
                "diff_99_percentile": parity_result["max_diff_percentile_99"],
                "diff_pass_rate": parity_result["diff_pass_rate"],
                "nan_rate": parity_result["nan_rate"],
                "top1_match_rate": parity_result["top1_match_rate"],
                "top1_match_rate_per_run":
                parity_result["top1_match_rate_per_run"],
                "onnx_size_in_MB": "{}".format(model_size_in_MB),
            }
            logger.info(f"result: {row}")
            result.update(row)
            csv_writer.writerow(row)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(
                    numpy.asarray(data["input_ids"],
                                  dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if is_io_float16 else numpy.float32
                        attention_mask = torch.from_numpy(
                            numpy.asarray(data["attention_mask"],
                                          dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (input_ids != padding).type(
                            torch.float16 if is_io_float16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(
                            numpy.asarray(data["position_ids"],
                                          dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {
                        "input_ids":
                        input_ids.to(torch.int32)
                        if args.use_int32_inputs else input_ids,
                        "position_ids":
                        position_ids.to(torch.int32)
                        if args.use_int32_inputs else position_ids,
                        "attention_mask":
                        attention_mask.to(torch.int32)
                        if args.use_int32_inputs else attention_mask
                    }
                else:
                    inputs = {
                        "input_ids":
                        input_ids.to(torch.int32)
                        if args.use_int32_inputs else input_ids
                    }

                if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                    beam_select_idx = torch.zeros([1,
                                                   input_ids.shape[0]]).long()

                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones(
                        [input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx":
                        beam_select_idx,
                        "input_log_probs":
                        input_log_probs,
                        "input_unfinished_sents":
                        input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose,
                                   save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
    return result
Code example #12
def main():
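    # GPT-2 export/parity flow with position_ids/attention_mask support
    # (use_padding), external data format for models deeper than 24 layers,
    # and optional generation tests driven by --input_test_file.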
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        config=config,
                                        cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    use_external_data_format = config.n_layer > 24  # TODO: find a way to check model size > 2GB
    onnx_model_paths = Gpt2Helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        new_folder=use_external_data_format)
    raw_onnx_model = args.output if args.output.endswith(
        '.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (
        args.output.endswith('.onnx') or
        (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
    ) else onnx_model_paths[str(args.precision)]

    logger.info(f"Exporting ONNX model to {raw_onnx_model}")
    use_padding = MODEL_CLASSES[args.model_class][2]
    Gpt2Helper.export_onnx(model,
                           device,
                           raw_onnx_model,
                           args.verbose,
                           use_external_data_format,
                           has_position_ids=use_padding,
                           has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        logger.info(f"Optimizing model to {output_path}")
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=True,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session,
                               model,
                               device,
                               args.precision == Precision.FLOAT16,
                               rtol=args.tolerance,
                               atol=args.tolerance,
                               model_class=args.model_class,
                               has_position_ids=use_padding,
                               has_attention_mask=use_padding)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for i, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(
                    numpy.asarray(data["input_ids"],
                                  dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                        attention_mask = torch.from_numpy(
                            numpy.asarray(data["attention_mask"],
                                          dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (
                            input_ids !=
                            padding).type(torch.float16 if args.precision ==
                                          Precision.FLOAT16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(
                            numpy.asarray(data["position_ids"],
                                          dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {
                        "input_ids": input_ids,
                        "position_ids": position_ids,
                        "attention_mask": attention_mask
                    }
                else:
                    inputs = {"input_ids": input_ids}

                test_inputs.append(inputs)

        Gpt2Tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose)

    logger.info(f"Done. Output model: {output_path}")