def main():
    args = get_args()

    if args.backend == "pytorch":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut()
    elif args.backend == "tf":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_SUT import get_tf_sut
        sut = get_tf_sut()
    elif args.backend == "tf_estimator":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_estimator_SUT import get_tf_estimator_sut
        sut = get_tf_estimator_sut()
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "bert", args.scenario)
    settings.FromConfig(args.user_conf, "bert", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running LoadGen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        cmd = "python3 accuracy-squad.py"
        subprocess.check_call(cmd, shell=True)

    print("Done!")

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
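The BERT runner above depends on a module-level get_args() and scenario_map that are defined outside this excerpt. Below is a minimal sketch of what that preamble could look like, reconstructed only from the attributes the function reads (backend, scenario, accuracy, quantized, profile, mlperf_conf, user_conf); the imports follow the standard mlperf_loadgen bindings, but the defaults and help strings are assumptions rather than the reference implementation.

# Sketch only: flag names are inferred from the attributes read by main() above;
# defaults and help strings are assumptions.
import argparse
import os
import subprocess

import mlperf_loadgen as lg


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", choices=["pytorch", "tf", "tf_estimator", "onnxruntime"], default="pytorch")
    parser.add_argument("--scenario", choices=["SingleStream", "Offline", "Server", "MultiStream"], default="Offline")
    parser.add_argument("--accuracy", action="store_true", help="Run AccuracyOnly mode and score with accuracy-squad.py")
    parser.add_argument("--quantized", action="store_true", help="Use the quantized model (onnxruntime backend only)")
    parser.add_argument("--profile", action="store_true", help="Enable profiling (onnxruntime backend only)")
    parser.add_argument("--mlperf_conf", default="build/mlperf.conf")
    parser.add_argument("--user_conf", default="user.conf")
    return parser.parse_args()


# Maps the --scenario string onto the LoadGen scenario enum.
scenario_map = {
    "SingleStream": lg.TestScenario.SingleStream,
    "Offline": lg.TestScenario.Offline,
    "Server": lg.TestScenario.Server,
    "MultiStream": lg.TestScenario.MultiStream,
}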
def main():
    args = get_args()

    if args.backend == "pytorch":
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut(args.model_dir, args.preprocessed_data_dir, args.performance_count)
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args.onnx_model, args.preprocessed_data_dir, args.performance_count)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
    settings.FromConfig(args.user_conf, "3d-unet", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running Loadgen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        print("Running accuracy script...")
        cmd = "python3 brats_eval.py"
        subprocess.check_call(cmd, shell=True)

    print("Done!")

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
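The 3D-UNet runner reads a different set of flags (model_dir, onnx_model, preprocessed_data_dir, performance_count). Here is a hypothetical get_args() for it, assuming the same imports and scenario_map as the sketch above, plus the usual __main__ entry point; the defaults are guesses, not the reference code.

# Sketch only: flag names mirror the attributes read by the 3D-UNet main() above;
# defaults and help strings are assumptions.
import argparse


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", choices=["pytorch", "onnxruntime"], default="pytorch")
    parser.add_argument("--scenario", choices=["SingleStream", "Offline", "Server", "MultiStream"], default="Offline")
    parser.add_argument("--accuracy", action="store_true", help="Run AccuracyOnly mode and score with brats_eval.py")
    parser.add_argument("--model_dir", default="build/result", help="Checkpoint directory for the pytorch backend")
    parser.add_argument("--onnx_model", default="build/model/3dunet.onnx", help="Model path for the onnxruntime backend")
    parser.add_argument("--preprocessed_data_dir", default="build/preprocessed_data")
    parser.add_argument("--performance_count", type=int, default=16, help="Number of samples the QSL keeps loaded")
    parser.add_argument("--mlperf_conf", default="build/mlperf.conf")
    parser.add_argument("--user_conf", default="user.conf")
    return parser.parse_args()


if __name__ == "__main__":
    main()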
def eval_func(model):
    args = get_args()

    # Build the SUT for the requested backend; the pytorch backend takes the
    # (possibly quantized) model object directly, the others load from args.model.
    if args.backend == "pytorch":
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut(model, args.preprocessed_data_dir, args.performance_count)
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args.model, args.preprocessed_data_dir, args.performance_count)
    elif args.backend == "tf":
        from tf_SUT import get_tf_sut
        sut = get_tf_sut(args.model, args.preprocessed_data_dir, args.performance_count)
    elif args.backend == "ov":
        from ov_SUT import get_ov_sut
        sut = get_ov_sut(args.model, args.preprocessed_data_dir, args.performance_count)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
    settings.FromConfig(args.user_conf, "3d-unet", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running Loadgen test...")
    if args.benchmark:
        start = time.time()
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)
    if args.benchmark:
        end = time.time()

    if args.accuracy:
        print("Running accuracy script...")
        # accuracy-brats.py is expected to print a human-readable summary on stdout
        # and the bare mean accuracy on stderr, which is parsed below via float(err).
        process = subprocess.Popen(['python3', 'accuracy-brats.py'],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = process.communicate()
        print(out)
        print("Done!", float(err))

    if args.benchmark:
        print('Batch size = 1')
        print('Latency: %.3f ms' % ((end - start) * 1000 / sut.qsl.count))
        print('Throughput: %.3f images/sec' % (sut.qsl.count / (end - start)))
        print('Accuracy: {mean:.5f}'.format(mean=float(err)))

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)

    # Assumes args.accuracy was set by the caller; otherwise err is never assigned.
    return float(err)
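eval_func converts whatever accuracy-brats.py writes to stderr into a float and returns it, so the accuracy script's only obligation to this caller is to print the bare mean score on stderr. Below is a minimal sketch of that assumed contract; mean_accuracy and how it is computed are placeholders, not the actual script.

# Hypothetical tail of accuracy-brats.py. eval_func above calls float(err) on the
# subprocess's stderr, so the script must emit the mean score, and nothing else, there.
import sys

mean_accuracy = 0.0  # placeholder: in the real script this comes from the LoadGen accuracy log
print("Accuracy: {:.5f}".format(mean_accuracy))           # human-readable summary on stdout
print("{:.5f}".format(mean_accuracy), file=sys.stderr)    # bare number on stderr for float(err)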