def validate_and_optimize_onnx(model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu,
                               precision, optimize_onnx, validate_onnx, use_raw_attention_mask, overwrite, config,
                               model_fusion_statistics, onnx_model_path, example_inputs, example_outputs_flatten):
    is_valid_onnx_model = True
    if validate_onnx:
        is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
                                                  False)

    if optimize_onnx or precision == Precision.FLOAT16 or precision == Precision.INT8:
        # Use script (optimizer.py) to optimize
        optimized_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), True, use_gpu, precision,
                                                  False, use_external_data_format)
        optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, config.num_attention_heads,
                            config.hidden_size, use_gpu, precision, use_raw_attention_mask, overwrite,
                            model_fusion_statistics)

        onnx_model_path = optimized_model_path
        if validate_onnx:
            is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten,
                                                      use_gpu, precision == Precision.FLOAT16)

        if precision == Precision.INT8:
            logger.info(f"Quantizing model: {onnx_model_path}")
            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_path)
            logger.info(f"Finished quantizing model: {onnx_model_path}")
    else:
        # Use OnnxRuntime to optimize
        if is_valid_onnx_model:
            ort_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision,
                                                True, use_external_data_format)
            optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics)

    return onnx_model_path, is_valid_onnx_model, config.vocab_size
def main(args):
    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=args.torchscript, cache_dir=cache_dir)
    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    # if args.float16:
    #     model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)

    use_external_data_format = (config.n_layer > 24)  # TODO: find a way to check model size > 2GB
    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class,
                                                 has_past=True, new_folder=use_external_data_format)

    onnx_model_path = onnx_model_paths["raw"]
    use_padding = MODEL_CLASSES[args.model_class][2]
    Gpt2Helper.export_onnx(model, device, onnx_model_path, args.verbose, use_external_data_format,
                           has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else 'fp32']
        Gpt2Helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size, use_external_data_format)

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_paths["int8"], use_external_data_format)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")
            onnx_model_path = onnx_model_paths["int8"]

    if args.torchscript:
        model = Gpt2Helper.torchscript(model, config, device, has_position_ids=use_padding,
                                       has_attention_mask=use_padding)

    session = create_onnxruntime_session(onnx_model_path, args.use_gpu, enable_all_optimization=False,
                                         num_threads=args.thread_num, verbose=args.verbose)
    if session is None:
        return

    # Allocate output buffers for IO Binding
    max_output_shapes = Gpt2Helper.get_output_shapes(max(args.batch_sizes), max(args.past_sequence_lengths),
                                                     max(args.sequence_lengths), config, args.model_class)
    output_buffers = Gpt2Helper.get_output_buffers(max_output_shapes, device, args.precision == Precision.FLOAT16)

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer", "torchscript", "batch_size",
            "sequence_length", "past_sequence_length", "torch_latency", "onnxruntime_latency",
            "onnxruntime_io_binding_latency"
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for sequence_length in args.sequence_lengths:
                for past_sequence_length in args.past_sequence_lengths:
                    assert batch_size > 0 and sequence_length > 0 and past_sequence_length >= 0
                    logger.debug(
                        f"Running test for batch_size={batch_size} sequence_length={sequence_length} past_sequence_length={past_sequence_length}..."
                    )

                    dummy_inputs = Gpt2Helper.get_dummy_inputs(batch_size, past_sequence_length, sequence_length,
                                                               config.num_attention_heads, config.hidden_size,
                                                               config.n_layer, config.vocab_size, device,
                                                               float16=(args.precision == Precision.FLOAT16),
                                                               has_position_ids=use_padding,
                                                               has_attention_mask=use_padding)
                    output_shapes = Gpt2Helper.get_output_shapes(batch_size, past_sequence_length, sequence_length,
                                                                 config, args.model_class)

                    try:
                        outputs, torch_latency = Gpt2Helper.pytorch_inference(model, dummy_inputs, args.test_times)
                        ort_outputs, ort_latency = Gpt2Helper.onnxruntime_inference(session, dummy_inputs,
                                                                                    args.test_times)
                        ort_io_outputs, ort_io_latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                            session, dummy_inputs, output_buffers, output_shapes, args.test_times,
                            return_numpy=False, include_copy_output_latency=args.include_copy_output_latency)

                        if args.validate_onnx:
                            if Gpt2Helper.compare_outputs(outputs, ort_outputs,
                                                          rtol=DEFAULT_TOLERANCE[args.precision],
                                                          atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f'Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                                )

                            # Results of IO binding might be in GPU. Copy outputs to CPU for comparison.
                            copy_outputs = []
                            for output in ort_io_outputs:
                                copy_outputs.append(output.cpu().numpy())

                            if Gpt2Helper.compare_outputs(outputs, copy_outputs,
                                                          rtol=DEFAULT_TOLERANCE[args.precision],
                                                          atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f'Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                                )

                        logger.info(
                            f"batch_size={batch_size}, sequence_length={sequence_length}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, onnxruntime_latency={ort_latency:.2f}, onnxruntime_io_binding_latency={ort_io_latency:.2f}"
                        )

                        row = {
                            "model_name": args.model_name_or_path,
                            "model_class": args.model_class,
                            "gpu": args.use_gpu,
                            "precision": args.precision,
                            "optimizer": args.optimize_onnx,
                            "torchscript": args.torchscript,
                            "batch_size": batch_size,
                            "sequence_length": sequence_length,
                            "past_sequence_length": past_sequence_length,
                            "torch_latency": f"{torch_latency:.2f}",
                            "onnxruntime_latency": f"{ort_latency:.2f}",
                            "onnxruntime_io_binding_latency": f"{ort_io_latency:.2f}"
                        }
                        csv_writer.writerow(row)
                    except:
                        logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")
    return csv_filename
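# Hypothetical post-processing sketch, not part of the original scripts: it assumes only the CSV
# column names written by the benchmark loop above (e.g. "onnxruntime_io_binding_latency") and the
# helper name best_io_binding_row is made up for illustration. Because the result file is opened in
# append mode and a header row is written on every run, repeated header lines are skipped here.
import csv


def best_io_binding_row(csv_filename):
    with open(csv_filename, newline='') as f:
        rows = list(csv.DictReader(f))
    # Keep only rows whose latency column parses as a number (drops duplicated header rows).
    numeric_rows = [r for r in rows if r["onnxruntime_io_binding_latency"].replace('.', '', 1).isdigit()]
    return min(numeric_rows, key=lambda r: float(r["onnxruntime_io_binding_latency"]))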
def export_onnx_model(model_name, opset_version, use_external_data_format, model_type, model_class, cache_dir,
                      onnx_dir, input_names, use_gpu, precision, optimize_onnx, validate_onnx, use_raw_attention_mask,
                      overwrite, model_fusion_statistics):
    config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
    if hasattr(config, 'return_dict'):
        config.return_dict = False

    model = load_pretrained_model(model_name, config=config, cache_dir=cache_dir, custom_model_class=model_class)
    model.cpu()

    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)

    example_inputs = tokenizer.encode_plus("This is a sample input", return_tensors="pt")
    example_inputs = filter_inputs(example_inputs, input_names)

    example_outputs = model(**example_inputs)

    assert isinstance(example_outputs,
                      (list, tuple)), f"type of output is not list or tuple: {type(example_outputs)}"

    # Flatten is needed for gpt2 and distilgpt2.
    example_outputs_flatten = flatten(example_outputs)
    example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])

    onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision, False,
                                         use_external_data_format)

    if overwrite or not os.path.exists(onnx_model_path):
        logger.info("Exporting ONNX model to {}".format(onnx_model_path))

        dynamic_axes, output_names = build_dynamic_axes(example_inputs, example_outputs_flatten)

        replace_torch_functions()
        torch.onnx.export(model=model,
                          args=tuple(example_inputs.values()),
                          f=onnx_model_path,
                          input_names=list(example_inputs.keys()),
                          output_names=output_names,
                          example_outputs=example_outputs,
                          dynamic_axes=dynamic_axes,
                          do_constant_folding=True,
                          opset_version=opset_version,
                          use_external_data_format=use_external_data_format)
        restore_torch_functions()
    else:
        logger.info(f"Skip export since model existed: {onnx_model_path}")

    is_valid_onnx_model = True
    if validate_onnx:
        is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
                                                  False)

    if optimize_onnx or precision == Precision.FLOAT16 or precision == Precision.INT8:
        # Use script (optimizer.py) to optimize
        optimized_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), True, use_gpu, precision,
                                                  False, use_external_data_format)
        optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, config.num_attention_heads,
                            config.hidden_size, use_gpu, precision, use_raw_attention_mask, overwrite,
                            model_fusion_statistics)

        onnx_model_path = optimized_model_path
        if validate_onnx:
            is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten,
                                                      use_gpu, precision == Precision.FLOAT16)

        if precision == Precision.INT8:
            logger.info(f"Quantizing model: {onnx_model_path}")
            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_path)
            logger.info(f"Finished quantizing model: {onnx_model_path}")
    else:
        # Use OnnxRuntime to optimize
        if is_valid_onnx_model:
            ort_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision,
                                                True, use_external_data_format)
            optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics)

    max_input_size = tokenizer.max_model_input_sizes[
        model_name] if model_name in tokenizer.max_model_input_sizes else 1024

    return onnx_model_path, is_valid_onnx_model, config.vocab_size, max_input_size
def main():
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith('.onnx'), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, cache_dir=cache_dir)
    elif model_type == 'configurable_one_step_search':
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, ignore_eos=args.ignore_eos,
                                            temperature=args.temperature,
                                            repetition_penalty=args.repetition_penalty,
                                            excluded_token_ids=args.excluded_token_ids,
                                            length_penalty=args.length_penalty, do_sample=args.do_sample,
                                            do_sample_top_p=args.do_sample_top_p,
                                            do_sample_top_k=args.do_sample_top_k, cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info("Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class,
                                                 new_folder=args.use_external_data_format)

    raw_onnx_model = onnx_model_paths["raw"]

    logger.info(f"Exporting ONNX model to {raw_onnx_model}")
    use_padding = MODEL_CLASSES[args.model_class][2]
    gpt2helper.export_onnx(model, device, raw_onnx_model, args.verbose, args.use_external_data_format,
                           has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size,
                                 args.use_external_data_format)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, onnx_model_paths['int8'], args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith('.onnx') and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=True,
                                         verbose=args.verbose)
    if session is not None:
        gpt2helper.test_parity(session, model, device, args.precision == Precision.FLOAT16,
                               rtol=args.tolerance, atol=args.tolerance, model_class=args.model_class,
                               has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(numpy.asarray(data["input_ids"], dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                        attention_mask = torch.from_numpy(numpy.asarray(data["attention_mask"],
                                                                        dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (input_ids != padding).type(
                            torch.float16 if args.precision == Precision.FLOAT16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(numpy.asarray(data["position_ids"],
                                                                      dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {"input_ids": input_ids, "position_ids": position_ids, "attention_mask": attention_mask}
                else:
                    inputs = {"input_ids": input_ids}

                if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                    beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()
                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx": beam_select_idx,
                        "input_log_probs": input_log_probs,
                        "input_unfinished_sents": input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session, model, device, test_inputs, precision=args.precision,
                                   model_class=args.model_class, top_k=20, top_k_no_order=True, max_steps=24,
                                   max_inputs=0, verbose=args.verbose, save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
def main():
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class)

    raw_onnx_model = args.output if args.output.endswith('.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (args.output.endswith('.onnx') or
                                     (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
                                     ) else onnx_model_paths[str(args.precision)]

    Gpt2Helper.export_onnx(model, device, raw_onnx_model, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=False,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session, model, device, args.precision == Precision.FLOAT16,
                               rtol=args.tolerance, atol=args.tolerance, model_class=args.model_class)

    logger.info(f"Done. Output model: {output_path}")
def main():
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class)

    raw_onnx_model = args.output if args.output.endswith('.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (args.output.endswith('.onnx') or
                                     (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
                                     ) else onnx_model_paths[str(args.precision)]

    Gpt2Helper.export_onnx(model, device, raw_onnx_model, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=False,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session, model, device, args.precision == Precision.FLOAT16,
                               rtol=args.tolerance, atol=args.tolerance, model_class=args.model_class)

    if args.input_test_file:
        test_inputs = []
        with open(args.input_test_file) as read_f:
            for i, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(numpy.asarray(data["input_ids"], dtype=numpy.int64)).to(device)
                position_ids = torch.from_numpy(numpy.asarray(data["position_ids"], dtype=numpy.int64)).to(device)
                numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                attention_mask = torch.from_numpy(numpy.asarray(data["attention_mask"],
                                                                dtype=numpy_float)).to(device)
                inputs = {"input_ids": input_ids, "position_ids": position_ids, "attention_mask": attention_mask}
                test_inputs.append(inputs)

        Gpt2Tester.test_generation(session, model, device, test_inputs, precision=args.precision,
                                   model_class=args.model_class, top_k=20, top_k_no_order=True, max_steps=24,
                                   max_inputs=0, verbose=args.verbose)

    logger.info(f"Done. Output model: {output_path}")
def main(argv=None, experiment_name="", run_id=0, csv_filename="gpt2_parity_results.csv"):
    result = {}
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments(argv)
    setup_logger(args.verbose)

    if not experiment_name:
        import sys
        experiment_name = " ".join(argv if argv else sys.argv[1:])

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith('.onnx'), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    use_padding = MODEL_CLASSES[args.model_class][2]

    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, cache_dir=cache_dir)
    elif model_type == 'configurable_one_step_search':
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, ignore_eos=args.ignore_eos,
                                            temperature=args.temperature,
                                            repetition_penalty=args.repetition_penalty,
                                            excluded_token_ids=args.excluded_token_ids,
                                            length_penalty=args.length_penalty, do_sample=args.do_sample,
                                            do_sample_top_p=args.do_sample_top_p,
                                            do_sample_top_k=args.do_sample_top_k, cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info("Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        new_folder=args.use_external_data_format,
        remove_existing=["fp32", "fp16", "int8"])  # Do not remove raw model to save time in parity test

    raw_onnx_model = onnx_model_paths["raw"]

    if os.path.exists(raw_onnx_model):
        logger.warning(f"Skip exporting ONNX model since it existed: {raw_onnx_model}")
    else:
        logger.info(f"Exporting ONNX model to {raw_onnx_model}")
        gpt2helper.export_onnx(model, device, raw_onnx_model, args.verbose, args.use_external_data_format,
                               has_position_ids=use_padding, has_attention_mask=use_padding,
                               input_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
                               position_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
                               attention_mask_dtype=torch.int32 if args.use_int32_inputs else torch.int64)

    fp16_params = {"keep_io_types": args.keep_io_types}
    if args.io_block_list:
        fp16_params["keep_io_types"] = args.io_block_list
    if args.node_block_list:
        fp16_params["node_block_list"] = args.node_block_list
    if args.op_block_list:
        fp16_params["op_block_list"] = args.op_block_list
    if args.force_fp16_initializers:
        fp16_params["force_fp16_initializers"] = args.force_fp16_initializers

    is_io_float16 = (args.precision == Precision.FLOAT16 and not args.keep_io_types)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size,
                                 args.use_external_data_format,
                                 auto_mixed_precision=args.auto_mixed_precision,
                                 **fp16_params)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, onnx_model_paths['int8'], args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith('.onnx') and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")
    model_size_in_MB = int(get_onnx_model_size(output_path, args.use_external_data_format) / 1024 / 1024)

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=True,
                                         verbose=args.verbose)
    if args.model_class == "GPT2LMHeadModel" and session is not None:
        parity_result = gpt2helper.test_parity(
            session, model, device, is_io_float16, rtol=args.tolerance, atol=args.tolerance,
            model_class=args.model_class, has_position_ids=use_padding, has_attention_mask=use_padding,
            input_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            position_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            attention_mask_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            test_cases_per_run=args.test_cases, total_runs=args.test_runs, verbose=args.verbose)

        latency = gpt2helper.test_performance(
            session, model, device, is_io_float16, total_runs=100, use_io_binding=True,
            model_class=args.model_class, has_position_ids=use_padding, has_attention_mask=use_padding,
            input_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            position_ids_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            attention_mask_dtype=torch.int32 if args.use_int32_inputs else torch.int64,
            batch_size=8, sequence_length=1, past_sequence_length=32)

        if args.precision == Precision.FLOAT16:
            logger.info(f"fp16 conversion parameters:{fp16_params}")

        # Write results to file
        import csv
        from onnxruntime import __version__ as ort_version
        latency_name = get_latency_name()
        csv_file_existed = os.path.exists(csv_filename)
        with open(csv_filename, mode="a", newline='') as csv_file:
            column_names = [
                "experiment", "run_id", "model_name", "model_class", "gpu", "precision", "optimizer", "test_cases",
                "runs", "keep_io_types", "io_block_list", "op_block_list", "node_block_list",
                "force_fp16_initializers", "auto_mixed_precision", "ORT_TRANSFORMER_OPTIONS",
                "ORT_CUDA_GEMM_OPTIONS", "onnxruntime", latency_name, "top1_match_rate", "onnx_size_in_MB",
                "diff_50_percentile", "diff_90_percentile", "diff_95_percentile", "diff_99_percentile",
                "diff_pass_rate", "nan_rate", "top1_match_rate_per_run"
            ]
            csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
            if not csv_file_existed:
                csv_writer.writeheader()
            row = {
                "experiment": experiment_name,
                "run_id": run_id,
                "model_name": args.model_name_or_path,
                "model_class": args.model_class,
                "gpu": args.use_gpu,
                "precision": args.precision,
                "optimizer": args.optimize_onnx,
                "test_cases": args.test_cases,
                "runs": args.test_runs,
                "keep_io_types": args.keep_io_types,
                "io_block_list": args.io_block_list,
                "op_block_list": args.op_block_list,
                "node_block_list": args.node_block_list,
                "force_fp16_initializers": args.force_fp16_initializers,
                "auto_mixed_precision": args.auto_mixed_precision,
                "ORT_TRANSFORMER_OPTIONS": os.getenv('ORT_TRANSFORMER_OPTIONS'),
                "ORT_CUDA_GEMM_OPTIONS": os.getenv('ORT_CUDA_GEMM_OPTIONS'),
                "onnxruntime": ort_version,
                latency_name: f"{latency:.2f}",
                "diff_50_percentile": parity_result["max_diff_percentile_50"],
                "diff_90_percentile": parity_result["max_diff_percentile_90"],
                "diff_95_percentile": parity_result["max_diff_percentile_95"],
                "diff_99_percentile": parity_result["max_diff_percentile_99"],
                "diff_pass_rate": parity_result["diff_pass_rate"],
                "nan_rate": parity_result["nan_rate"],
                "top1_match_rate": parity_result["top1_match_rate"],
                "top1_match_rate_per_run": parity_result["top1_match_rate_per_run"],
                "onnx_size_in_MB": "{}".format(model_size_in_MB),
            }
            logger.info(f"result: {row}")
            result.update(row)
            csv_writer.writerow(row)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(numpy.asarray(data["input_ids"], dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if is_io_float16 else numpy.float32
                        attention_mask = torch.from_numpy(numpy.asarray(data["attention_mask"],
                                                                        dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (input_ids != padding).type(
                            torch.float16 if is_io_float16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(numpy.asarray(data["position_ids"],
                                                                      dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {
                        "input_ids": input_ids.to(torch.int32) if args.use_int32_inputs else input_ids,
                        "position_ids": position_ids.to(torch.int32) if args.use_int32_inputs else position_ids,
                        "attention_mask": attention_mask.to(torch.int32) if args.use_int32_inputs else attention_mask
                    }
                else:
                    inputs = {"input_ids": input_ids.to(torch.int32) if args.use_int32_inputs else input_ids}

                if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                    beam_select_idx = torch.zeros([1, input_ids.shape[0]]).long()
                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones([input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx": beam_select_idx,
                        "input_log_probs": input_log_probs,
                        "input_unfinished_sents": input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session, model, device, test_inputs, precision=args.precision,
                                   model_class=args.model_class, top_k=20, top_k_no_order=True, max_steps=24,
                                   max_inputs=0, verbose=args.verbose, save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
    return result
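# Hypothetical helper, not part of the original scripts: it assumes only column names written to
# gpt2_parity_results.csv above ("experiment", "top1_match_rate"), and the function name
# summarize_parity_results is made up for illustration. It averages the top1 match rate per
# experiment, which can be handy when comparing fp16 op/node block-list settings across runs.
import csv
from collections import defaultdict


def summarize_parity_results(csv_filename="gpt2_parity_results.csv"):
    rates = defaultdict(list)
    with open(csv_filename, newline='') as f:
        for row in csv.DictReader(f):
            rates[row["experiment"]].append(float(row["top1_match_rate"]))
    # Average the match rate of each experiment over all of its runs.
    return {name: sum(values) / len(values) for name, values in rates.items()}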
def main():
    args = parse_arguments()
    setup_logger(args.verbose)

    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name, torchscript=args.torchscript, cache_dir=cache_dir)
    if hasattr(config, 'return_tuple'):
        config.return_tuple = True
    model = model_class.from_pretrained(args.model_name, config=config, cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    # if args.float16:
    #     model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)

    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name, args.model_class)
    onnx_model_path = onnx_model_paths["raw"]
    Gpt2Helper.export_onnx(model, device, onnx_model_path, args.verbose)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision)]
        Gpt2Helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size)

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_path)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")

    if args.torchscript:
        model = Gpt2Helper.torchscript(model, config, device)

    session = create_onnxruntime_session(onnx_model_path, args.use_gpu, enable_all_optimization=False,
                                         num_threads=args.thread_num, verbose=args.verbose)
    if session is None:
        return

    # One word is generated for each inference. This length does not include that of past state.
    sequence_length = 1

    # Allocate output buffers for IO Binding
    max_output_shapes = Gpt2Helper.get_output_shapes(max(args.batch_sizes), max(args.past_sequence_lengths),
                                                     sequence_length, config, args.model_class)
    output_buffers = Gpt2Helper.get_output_buffers(max_output_shapes, device, args.precision == Precision.FLOAT16)

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer", "torchscript", "batch_size",
            "past_sequence_length", "torch_latency", "ort_latency", "ort_io_latency"
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for past_sequence_length in args.past_sequence_lengths:
                logger.debug(
                    f"Running test for batch_size={batch_size} past_sequence_length={past_sequence_length}...")

                dummy_inputs = Gpt2Helper.get_dummy_inputs(batch_size, past_sequence_length, sequence_length,
                                                           config.num_attention_heads, config.hidden_size,
                                                           config.n_layer, config.vocab_size, device,
                                                           args.precision == Precision.FLOAT16)
                output_shapes = Gpt2Helper.get_output_shapes(batch_size, past_sequence_length, sequence_length,
                                                             config, args.model_class)
                try:
                    outputs, torch_latency = Gpt2Helper.pytorch_inference(model, dummy_inputs, args.test_times)
                    ort_outputs, ort_latency = Gpt2Helper.onnxruntime_inference(session, dummy_inputs,
                                                                                args.test_times)
                    ort_io_outputs, ort_io_latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                        session, dummy_inputs, output_buffers, output_shapes, args.test_times)

                    if args.validate_onnx:
                        if Gpt2Helper.compare_outputs(outputs, ort_outputs,
                                                      rtol=DEFAULT_TOLERANCE[args.precision],
                                                      atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )

                        if Gpt2Helper.compare_outputs(outputs, ort_io_outputs,
                                                      rtol=DEFAULT_TOLERANCE[args.precision],
                                                      atol=DEFAULT_TOLERANCE[args.precision]):
                            logger.info(
                                f'Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]}).'
                            )

                    logger.info(
                        f"batch_size={batch_size}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, ort_latency={ort_latency:.2f}, ort_io_latency={ort_io_latency:.2f}"
                    )

                    row = {
                        "model_name": args.model_name,
                        "model_class": args.model_class,
                        "gpu": args.use_gpu,
                        "precision": args.precision,
                        "optimizer": args.optimize_onnx,
                        "torchscript": args.torchscript,
                        "batch_size": batch_size,
                        "past_sequence_length": past_sequence_length,
                        "torch_latency": f"{torch_latency:.2f}",
                        "ort_latency": f"{ort_latency:.2f}",
                        "ort_io_latency": f"{ort_io_latency:.2f}"
                    }
                    csv_writer.writerow(row)
                except:
                    logger.error("Exception", exc_info=True)

    logger.info(f"Results are saved to file {csv_filename}")
def main():
    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    model_class = MODEL_CLASSES[args.model_class][0]
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=cache_dir)
    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    use_external_data_format = (config.n_layer > 24)  # TODO: find a way to check model size > 2GB
    onnx_model_paths = Gpt2Helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class,
                                                 new_folder=use_external_data_format)

    raw_onnx_model = args.output if args.output.endswith('.onnx') else onnx_model_paths["raw"]
    output_path = raw_onnx_model if (args.output.endswith('.onnx') or
                                     (args.precision == Precision.FLOAT32 and not args.optimize_onnx)
                                     ) else onnx_model_paths[str(args.precision)]

    logger.info(f"Exporting ONNX model to {raw_onnx_model}")
    use_padding = MODEL_CLASSES[args.model_class][2]
    Gpt2Helper.export_onnx(model, device, raw_onnx_model, args.verbose, use_external_data_format,
                           has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        logger.info(f"Optimizing model to {output_path}")
        Gpt2Helper.optimize_onnx(raw_onnx_model, output_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size)

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path, output_path)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")

    session = create_onnxruntime_session(output_path, args.use_gpu, enable_all_optimization=True,
                                         verbose=args.verbose)
    if session is not None:
        Gpt2Helper.test_parity(session, model, device, args.precision == Precision.FLOAT16,
                               rtol=args.tolerance, atol=args.tolerance, model_class=args.model_class,
                               has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for i, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(numpy.asarray(data["input_ids"], dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                        attention_mask = torch.from_numpy(numpy.asarray(data["attention_mask"],
                                                                        dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (input_ids != padding).type(
                            torch.float16 if args.precision == Precision.FLOAT16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(numpy.asarray(data["position_ids"],
                                                                      dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {"input_ids": input_ids, "position_ids": position_ids, "attention_mask": attention_mask}
                else:
                    inputs = {"input_ids": input_ids}

                test_inputs.append(inputs)

        Gpt2Tester.test_generation(session, model, device, test_inputs, precision=args.precision,
                                   model_class=args.model_class, top_k=20, top_k_no_order=True, max_steps=24,
                                   max_inputs=0, verbose=args.verbose)

    logger.info(f"Done. Output model: {output_path}")
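# Hypothetical sketch, not part of the original scripts: it writes a small --input_test_file in the
# format the loop above reads, one JSON object per line with "input_ids" required and
# "position_ids"/"attention_mask" optional. The token ids and the output file name are made up
# purely for illustration.
import json

sample_records = [
    {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]},
    {"input_ids": [[50256, 464, 3139]], "position_ids": [[0, 1, 2]], "attention_mask": [[1.0, 1.0, 1.0]]},
]
with open("gpt2_test_inputs.jsonl", "w") as write_f:
    for record in sample_records:
        write_f.write(json.dumps(record) + "\n")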
def main(args):
    from transformers import __version__ as transformers_version

    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    logger.info(f"Arguments:{args}")
    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]

    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)

    config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=args.torchscript, cache_dir=cache_dir)
    if model_type == "beam_search_step":
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, cache_dir=cache_dir)
    elif model_type == "configurable_one_step_search":
        model = model_class.from_pretrained(args.model_name_or_path, config=config, batch_size=1,
                                            beam_size=args.beam_size, ignore_eos=args.ignore_eos,
                                            temperature=args.temperature,
                                            repetition_penalty=args.repetition_penalty,
                                            excluded_token_ids=args.excluded_token_ids,
                                            length_penalty=args.length_penalty, do_sample=args.do_sample,
                                            do_sample_top_p=args.do_sample_top_p,
                                            do_sample_top_k=args.do_sample_top_k, cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    # if args.float16:
    #     model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)

    use_external_data_format = config.n_layer > 24  # TODO: find a way to check model size > 2GB
    onnx_model_paths = gpt2helper.get_onnx_paths(output_dir, args.model_name_or_path, args.model_class,
                                                 has_past=True, new_folder=use_external_data_format)

    onnx_model_path = onnx_model_paths["raw"]
    use_padding = MODEL_CLASSES[args.model_class][2]
    gpt2helper.export_onnx(model, device, onnx_model_path, args.verbose, use_external_data_format,
                           has_position_ids=use_padding, has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else "fp32"]
        gpt2helper.optimize_onnx(onnx_model_paths["raw"], onnx_model_path, args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads, model.config.hidden_size,
                                 use_external_data_format, auto_mixed_precision=True)

        if args.precision == Precision.INT8:
            logger.info("quantizing model...")
            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_paths["int8"], use_external_data_format)
            model = QuantizeHelper.quantize_torch_model(model)
            logger.info("finished quantizing model")
            onnx_model_path = onnx_model_paths["int8"]

    if args.torchscript:
        model = gpt2helper.torchscript(model, config, device, has_position_ids=use_padding,
                                       has_attention_mask=use_padding)

    session = create_onnxruntime_session(onnx_model_path, args.use_gpu, enable_all_optimization=False,
                                         num_threads=args.thread_num, verbose=args.verbose)
    if session is None:
        return

    # Allocate output buffers for IO Binding
    if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
        max_output_shapes = gpt2helper.get_output_shapes(max(args.batch_sizes),
                                                         context_len=max(args.past_sequence_lengths),
                                                         past_sequence_length=max(args.past_sequence_lengths),
                                                         sequence_length=max(args.sequence_lengths),
                                                         beam_size=args.beam_size, step=0, config=config,
                                                         model_class=args.model_class)
        output_buffers = gpt2helper.get_output_buffers(max_output_shapes, device,
                                                       args.precision == Precision.FLOAT16)
    else:
        max_output_shapes = gpt2helper.get_output_shapes(max(args.batch_sizes), max(args.past_sequence_lengths),
                                                         max(args.sequence_lengths), config, args.model_class)
        output_buffers = gpt2helper.get_output_buffers(max_output_shapes, device,
                                                       args.precision == Precision.FLOAT16)

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline="") as csv_file:
        column_names = [
            "model_name", "model_class", "gpu", "precision", "optimizer", "torchscript", "batch_size",
            "sequence_length", "past_sequence_length", "torch_latency", "onnxruntime_latency",
            "onnxruntime_io_binding_latency",
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for sequence_length in args.sequence_lengths:
                for past_sequence_length in args.past_sequence_lengths:
                    assert batch_size > 0 and sequence_length > 0 and past_sequence_length >= 0
                    logger.debug(
                        f"Running test for batch_size={batch_size} sequence_length={sequence_length} past_sequence_length={past_sequence_length}..."
                    )

                    if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                        dummy_inputs = gpt2helper.get_dummy_inputs(batch_size, past_sequence_length, sequence_length,
                                                                   config.num_attention_heads, config.hidden_size,
                                                                   config.n_layer, config.vocab_size, device,
                                                                   float16=(args.precision == Precision.FLOAT16),
                                                                   has_position_ids=use_padding,
                                                                   has_attention_mask=use_padding)
                        output_shapes = gpt2helper.get_output_shapes(batch_size, past_sequence_length,
                                                                     past_sequence_length, sequence_length,
                                                                     args.beam_size, 0, config, args.model_class)
                    else:
                        dummy_inputs = gpt2helper.get_dummy_inputs(batch_size, past_sequence_length, sequence_length,
                                                                   config.num_attention_heads, config.hidden_size,
                                                                   config.n_layer, config.vocab_size, device,
                                                                   float16=(args.precision == Precision.FLOAT16),
                                                                   has_position_ids=use_padding,
                                                                   has_attention_mask=use_padding)
                        output_shapes = gpt2helper.get_output_shapes(batch_size, past_sequence_length,
                                                                     sequence_length, config, args.model_class)

                    try:
                        outputs, torch_latency = gpt2helper.pytorch_inference(model, dummy_inputs, args.test_times)

                        # Dump Torch output shape
                        for i, value in enumerate(outputs):
                            if isinstance(value, tuple):
                                logger.debug(
                                    f"torch output {i} is tuple of size {len(value)}, shape {value[0].shape}")
                            else:
                                logger.debug(f"torch output {i} shape {value.shape}")

                        ort_outputs, ort_latency = gpt2helper.onnxruntime_inference(session, dummy_inputs,
                                                                                    args.test_times)
                        ort_io_outputs, ort_io_latency = gpt2helper.onnxruntime_inference_with_binded_io(
                            session, dummy_inputs, output_buffers, output_shapes, args.test_times,
                            return_numpy=False, include_copy_output_latency=args.include_copy_output_latency)

                        if args.validate_onnx:
                            if gpt2helper.compare_outputs(outputs, ort_outputs, model_class=args.model_class,
                                                          rtol=DEFAULT_TOLERANCE[args.precision],
                                                          atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f"Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]})."
                                )

                            # Results of IO binding might be in GPU. Copy outputs to CPU for comparison.
                            copy_outputs = []
                            for output in ort_io_outputs:
                                copy_outputs.append(output.cpu().numpy())

                            if gpt2helper.compare_outputs(outputs, copy_outputs, model_class=args.model_class,
                                                          rtol=DEFAULT_TOLERANCE[args.precision],
                                                          atol=DEFAULT_TOLERANCE[args.precision]):
                                logger.info(
                                    f"Pytorch and ONNX Runtime IO Binding outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]})."
                                )

                        logger.info(
                            f"batch_size={batch_size}, sequence_length={sequence_length}, past_sequence_length={past_sequence_length}, torch_latency={torch_latency:.2f}, onnxruntime_latency={ort_latency:.2f}, onnxruntime_io_binding_latency={ort_io_latency:.2f}"
                        )

                        row = {
                            "model_name": args.model_name_or_path,
                            "model_class": args.model_class,
                            "gpu": args.use_gpu,
                            "precision": args.precision,
                            "optimizer": args.optimize_onnx,
                            "torchscript": args.torchscript,
                            "batch_size": batch_size,
                            "sequence_length": sequence_length,
                            "past_sequence_length": past_sequence_length,
                            "torch_latency": f"{torch_latency:.2f}",
                            "onnxruntime_latency": f"{ort_latency:.2f}",
                            "onnxruntime_io_binding_latency": f"{ort_io_latency:.2f}",
                        }
                        csv_writer.writerow(row)
                    except:
                        logger.error("Exception", exc_info=True)
                        return None

    logger.info(f"Results are saved to file {csv_filename}")
    return csv_filename