def test_quantization_saved(self):
    """Round-trip test: quantize (QAT then PTQ), save, reload, and benchmark.

    For each fake yaml config the model is quantized, written to ./saved,
    reloaded via lpot.utils.pytorch.load, evaluated, and then benchmarked;
    the run asserts the accuracy gap stays below 0.01.
    """
    from lpot.utils.pytorch import load
    # Work on a copy so self.model is untouched for other tests.
    model = copy.deepcopy(self.model)
    for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
        if fake_yaml == 'ptq_yaml.yaml':
            # PTQ requires an eval-mode, operator-fused model.
            model.eval().fuse_model()
        quantizer = Quantization(fake_yaml)
        # Synthetic dataset: 100 random 3x256x256 images with labels.
        dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
        quantizer.model = common.Model(model)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        if fake_yaml == 'qat_yaml.yaml':
            # QAT path needs a training function to drive fake-quant fine-tuning.
            quantizer.q_func = q_func
        q_model = quantizer()
        q_model.save('./saved')
        # Load configure and weights by lpot.utils
        saved_model = load("./saved", model)
        eval_func(saved_model)
        from lpot import Benchmark
        evaluator = Benchmark('ptq_yaml.yaml')
        # Load configure and weights by lpot.model
        evaluator.model = common.Model(model)
        evaluator.b_dataloader = common.DataLoader(dataset)
        results = evaluator()
        # NOTE(review): both benchmark passes wrap the same `model` object, so
        # `results` and `fp32_results` presumably measure the same fp32 graph —
        # verify whether the first pass was meant to use q_model/saved_model.
        evaluator.model = common.Model(model)
        fp32_results = evaluator()
        self.assertTrue(
            (fp32_results['accuracy'][0] - results['accuracy'][0]) < 0.01)
def test_tuning_ipex(self):
    """Tune a ResNet-18 with the IPEX backend, reload it, script/trace it, and benchmark.

    Fix over the original: the bare ``except:`` around ``torch.jit.script`` also
    swallowed ``KeyboardInterrupt``/``SystemExit``; it now catches only
    ``Exception`` before falling back to tracing.
    """
    from lpot import Quantization
    model = torchvision.models.resnet18()
    # Wrap in the LPOT IPEX model adapter.
    model = MODELS['pytorch_ipex'](model)
    quantizer = Quantization('ipex_yaml.yaml')
    # Synthetic dataset: 100 random 3x256x256 images with labels.
    dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
    quantizer.model = common.Model(model)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    lpot_model = quantizer()
    lpot_model.save("./saved")
    # Rebuild the model from the saved workspace to exercise the reload path.
    new_model = MODELS['pytorch_ipex'](model.model, {"workspace_path": "./saved"})
    new_model.model.to(ipex.DEVICE)
    try:
        script_model = torch.jit.script(new_model.model)
    except Exception:
        # Scripting can fail on unsupported constructs; fall back to tracing
        # with a representative dummy input.
        script_model = torch.jit.trace(
            new_model.model,
            torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
    from lpot import Benchmark
    evaluator = Benchmark('ipex_yaml.yaml')
    evaluator.model = common.Model(script_model)
    evaluator.b_dataloader = common.DataLoader(dataset)
    results = evaluator()
def run(self):
    """LPOT entry point covering both the tuning and the benchmark option."""
    if self.args.tune:
        from lpot import Quantization
        quantizer = Quantization(self.args.config)
        q_model = quantizer(self.args.input_graph)

        def save(model, path):
            # Serialize the quantized TF graph to a frozen .pb file.
            from tensorflow.python.platform import gfile
            f = gfile.GFile(path, 'wb')
            f.write(model.as_graph_def().SerializeToString())

        try:
            # NOTE(review): reads the output path from the module-level
            # `evaluate_opt_graph` instance rather than `self.args` —
            # presumably intentional (script-level singleton), but verify.
            save(q_model, evaluate_opt_graph.args.output_graph)
        except AttributeError as no_model:
            # quantizer() returns None when no config meets the accuracy goal.
            print("None of the quantized models fits the \
                  accuracy criteria: {0}".format(no_model))
        except Exception as exc:
            print("Unexpected error while saving the model: {0}".format(exc))

    if self.args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(self.args.config)
        results = evaluator(model=self.args.input_graph)
        for mode, result in results.items():
            acc, batch_size, result_list = result
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1./ latency))
def main():
    """Optionally quantize the frozen MobileNet-V1 graph, then optionally benchmark it."""
    parser = ArgumentParser(description='Parse args')
    parser.add_argument('--benchmark', action='store_true', help='run benchmark')
    parser.add_argument('--tune', action='store_true', help='run tuning')
    cli = parser.parse_args()

    if cli.tune:
        from lpot import Quantization
        # Tune the fp32 frozen graph and write the int8 result next to it.
        int8_graph = Quantization('./conf.yaml')("./mobilenet_v1_1.0_224_frozen.pb")
        tf.io.write_graph(graph_or_graph_def=int8_graph,
                          logdir='./',
                          name='int8.pb',
                          as_text=False)

    if cli.benchmark:
        from lpot import Benchmark
        batch_size = 1
        for mode, outcome in Benchmark('./conf.yaml')('./int8.pb').items():
            acc, batch_size, timings = outcome
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(timings).mean() / batch_size
            print('Accuracy is {:.3f}'.format(acc))
            print('Latency: {:.3f} ms'.format(latency * 1000))
def main(_):
    """Register the inception_v4 slim model, then tune and/or benchmark it per CLI flags."""
    arg_parser = ArgumentParser(description='Parse args')
    arg_parser.add_argument("--input-graph",
                            help='Specify the slim model',
                            dest='input_graph')
    arg_parser.add_argument("--output-graph",
                            help='Specify tune result model save dir',
                            dest='output_graph')
    arg_parser.add_argument("--config", default=None, help="tuning config")
    arg_parser.add_argument('--benchmark',
                            dest='benchmark',
                            action='store_true',
                            help='run benchmark')
    arg_parser.add_argument('--tune',
                            dest='tune',
                            action='store_true',
                            help='use lpot to tune.')
    args = arg_parser.parse_args()
    factory = TFSlimNetsFactory()
    # user specific model can register to slim net factory
    input_shape = [None, 299, 299, 3]
    factory.register('inception_v4', inception_v4, input_shape,
                     inception_v4_arg_scope)
    if args.tune:
        from lpot import Quantization
        quantizer = Quantization(args.config)
        quantizer.model = args.input_graph
        q_model = quantizer()
        q_model.save(args.output_graph)
    if args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(args.config)
        evaluator.model = args.input_graph
        results = evaluator()
        for mode, result in results.items():
            acc, batch_size, result_list = result
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1./ latency))
def benchmark_model(
    input_graph: str,
    config: str,
    benchmark_mode: str,
    framework: str,
    datatype: str = "",
) -> List[Dict[str, Any]]:
    """Execute benchmark.

    Runs the LPOT Benchmark for `input_graph` under `config`, logs the
    metrics for the requested `benchmark_mode`, and returns one result
    dict per matching mode.
    """
    from lpot import Benchmark, common

    # ONNX Runtime expects a loaded ModelProto rather than a file path.
    if framework == "onnxrt":
        import onnx
        input_graph = onnx.load(input_graph)

    evaluator = Benchmark(config)
    evaluator.model = common.Model(input_graph)

    benchmark_results = []
    for mode, result in evaluator().items():
        if mode != benchmark_mode:
            continue
        log.info(f"Mode: {mode}")
        acc, batch_size, result_list = result
        # Per-sample latency: mean per-batch time divided by batch size.
        latency = (sum(result_list) / len(result_list)) / batch_size
        log.info(f"Batch size: {batch_size}")
        if mode == "accuracy":
            log.info(f"Accuracy: {acc:.3f}")
        elif mode == "performance":
            log.info(f"Latency: {latency * 1000:.3f} ms")
            log.info(f"Throughput: {1. / latency:.3f} images/sec")
        benchmark_results.append({
            "precision": datatype,
            "mode": mode,
            "batch_size": batch_size,
            "accuracy": acc,
            "latency": latency * 1000,
            "throughput": 1.0 / latency,
        })
    return benchmark_results
def main():
    """Quantize the frozen MobileNet-V1 graph with LPOT, then benchmark the result."""
    import lpot
    from lpot import common

    # Post-training quantization driven by conf.yaml.
    quantizer = lpot.Quantization('./conf.yaml')
    quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb")
    int8_graph = quantizer()

    # Optional, run benchmark
    from lpot import Benchmark
    bench = Benchmark('./conf.yaml')
    bench.model = common.Model(int8_graph)
    batch_size = 1
    for mode, outcome in bench().items():
        acc, batch_size, timings = outcome
        # Per-sample latency: mean per-batch time divided by batch size.
        latency = np.array(timings).mean() / batch_size
        print('Accuracy is {:.3f}'.format(acc))
        print('Latency: {:.3f} ms'.format(latency * 1000))
def run(self):
    """Tune and/or benchmark the input graph according to the parsed CLI flags."""
    opts = self.args

    if opts.tune:
        from lpot import Quantization
        quantizer = Quantization(opts.config)
        quantizer.model = opts.input_graph
        # Tune, then persist the best model found.
        quantizer().save(opts.output_model)

    if opts.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(opts.config)
        evaluator.model = opts.input_graph
        for mode, outcome in evaluator().items():
            acc, batch_size, timings = outcome
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(timings).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1. / latency))
def main(_):
    """Benchmark or tune FLAGS.input_model, selected by FLAGS.mode."""
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)

    if FLAGS.mode == 'benchmark':
        from lpot import Benchmark
        bench = Benchmark(FLAGS.config)
        bench.model = FLAGS.input_model
        for mode, outcome in bench().items():
            acc, batch_size, timings = outcome
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(timings).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1./ latency))
    elif FLAGS.mode == 'tune':
        from lpot.quantization import Quantization
        tuner = Quantization(FLAGS.config)
        tuner.model = FLAGS.input_model
        # Tune, then persist the best model found.
        tuner().save(FLAGS.output_model)
def test_tuning_ipex(self):
    """Tune a ResNet-18 with the IPEX backend, script/trace it, and benchmark.

    Fix over the original: the bare ``except:`` around ``torch.jit.script``
    also swallowed ``KeyboardInterrupt``/``SystemExit``; it now catches only
    ``Exception`` before falling back to tracing.
    """
    from lpot import Quantization
    model = torchvision.models.resnet18()
    quantizer = Quantization('ipex_yaml.yaml')
    # Synthetic dataset: 100 random 3x256x256 images with labels.
    dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
    dataloader = quantizer.dataloader(dataset)
    quantizer(
        model,
        eval_dataloader=dataloader,
        q_dataloader=dataloader,
    )
    model.to(ipex.DEVICE)
    try:
        script_model = torch.jit.script(model)
    except Exception:
        # Scripting can fail on unsupported constructs; fall back to tracing
        # with a representative dummy input.
        script_model = torch.jit.trace(
            model,
            torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
    from lpot import Benchmark
    evaluator = Benchmark('ipex_yaml.yaml')
    results = evaluator(model=script_model, b_dataloader=dataloader)
def test_quantization_saved(self):
    """Quantize (QAT then PTQ), reload the saved checkpoint, evaluate, and benchmark."""
    from lpot import Quantization
    from lpot.utils.pytorch import load
    # Work on a copy so self.model is untouched for other tests.
    model = copy.deepcopy(self.model)
    for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
        if fake_yaml == 'ptq_yaml.yaml':
            # PTQ requires an eval-mode, operator-fused model.
            model.eval()
            model.fuse_model()
        quantizer = Quantization(fake_yaml)
        # Synthetic dataset: 100 random 3x256x256 images with labels.
        dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
        dataloader = quantizer.dataloader(dataset)
        # q_func drives QAT fine-tuning; PTQ passes None and calibrates instead.
        q_model = quantizer(
            model,
            q_func=q_func if fake_yaml == 'qat_yaml.yaml' else None,
            q_dataloader=dataloader,
            eval_dataloader=dataloader)
        # NOTE(review): nothing here writes './saved/checkpoint' explicitly —
        # presumably the quantizer saves it as configured in the yaml; verify.
        new_model = load('./saved/checkpoint', model)
        eval_func(new_model)
        from lpot import Benchmark
        evaluator = Benchmark('ptq_yaml.yaml')
        results = evaluator(model=new_model, b_dataloader=dataloader)
def run(self):
    """Drive LPOT tuning and/or benchmarking based on the parsed CLI options."""
    opts = self.args

    if opts.tune:
        from lpot import Quantization
        tuned = Quantization(opts.config)(opts.input_graph)
        try:
            # Persist the tuned graph as a frozen .pb.
            write_graph(tuned.as_graph_def(), opts.output_model)
        except Exception as err:
            print("Failed to save model due to {}".format(str(err)))

    if opts.benchmark:
        from lpot import Benchmark
        for mode, outcome in Benchmark(opts.config)(model=opts.input_graph).items():
            acc, batch_size, timings = outcome
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(timings).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1. / latency))
# NOTE(review): incomplete fragment — the opening `parser.add_argument(` that this
# leading `)` closes, and the body of the trailing `if args.tune:` import, lie
# outside this view; left byte-identical rather than reconstructed.
) parser.add_argument( '--tune', action='store_true', \ default=False, help="whether quantize the model" ) parser.add_argument('--config', type=str, help="config yaml path") parser.add_argument('--output_model', type=str, help="output model path") args = parser.parse_args() model = onnx.load(args.model_path) if args.benchmark: from lpot import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) results = evaluator() for mode, result in results.items(): acc, batch_size, result_list = result latency = np.array(result_list).mean() / batch_size print('\n{} mode benchmark result:'.format(mode)) print('Accuracy is {:.3f}'.format(acc)) print('Batch size = {}'.format(batch_size)) print('Latency: {:.3f} ms'.format(latency * 1000)) print('Throughput: {:.3f} images/sec'.format(batch_size * 1. / latency)) if args.tune: from lpot import Quantization, common
def test_register_metric_postprocess(self):
    """Register user postprocess/metric hooks on Benchmark and Quantization objects."""
    import PIL.Image
    image = np.array(PIL.Image.open(self.image_path))
    # Crude resize via np.resize (repeats/truncates data rather than interpolating).
    resize_image = np.resize(image, (224, 224, 3))
    # Subtract the standard ImageNet per-channel means.
    mean = [123.68, 116.78, 103.94]
    resize_image = resize_image - mean
    images = np.expand_dims(resize_image, axis=0)
    labels = [768]
    from lpot import Benchmark, Quantization
    from lpot.experimental.data.transforms.imagenet_transform import LabelShift
    from lpot.experimental.metric.metric import TensorflowTopK
    evaluator = Benchmark('fake_yaml.yaml')
    # Shift labels by one to map between 1000- and 1001-class ImageNet labelings.
    evaluator.postprocess('label_benchmark', LabelShift, label_shift=1)
    evaluator.metric('topk_benchmark', TensorflowTopK)
    # With multi-instance support the benchmark prints its result instead of
    # returning it, so the call's return value is not captured here.
    dataloader = evaluator.dataloader(dataset=list(zip(images, labels)))
    evaluator(self.pb_path, b_dataloader=dataloader)
    quantizer = Quantization('fake_yaml.yaml')
    quantizer.postprocess('label_quantize', LabelShift, label_shift=1)
    quantizer.metric('topk_quantize', TensorflowTopK)
    # A second Benchmark registering only a metric, to exercise re-registration.
    evaluator = Benchmark('fake_yaml.yaml')
    evaluator.metric('topk_second', TensorflowTopK)
    dataloader = evaluator.dataloader(dataset=list(zip(images, labels)))
    result = evaluator(self.pb_path, b_dataloader=dataloader)
def test_register_metric_postprocess(self):
    """Register user postprocess/metric hooks and assert the returned accuracy."""
    import PIL.Image
    image = np.array(PIL.Image.open(self.image_path))
    # Crude resize via np.resize (repeats/truncates data rather than interpolating).
    resize_image = np.resize(image, (224, 224, 3))
    # Subtract the standard ImageNet per-channel means.
    mean = [123.68, 116.78, 103.94]
    resize_image = resize_image - mean
    images = np.expand_dims(resize_image, axis=0)
    labels = [768]
    from lpot import Benchmark, Quantization
    from lpot.experimental.data.transforms.imagenet_transform import LabelShift
    from lpot.experimental.metric.metric import TensorflowTopK
    evaluator = Benchmark('fake_yaml.yaml')
    # Shift labels by one to map between 1000- and 1001-class ImageNet labelings.
    evaluator.postprocess('label_benchmark', LabelShift, label_shift=1)
    evaluator.metric('topk_benchmark', TensorflowTopK)
    dataloader = evaluator.dataloader(dataset=list(zip(images, labels)))
    result = evaluator(self.pb_path, b_dataloader=dataloader)
    acc, batch_size, result_list = result['accuracy']
    # The single (image, label) pair is expected to be misclassified by the
    # fake graph, so top-k accuracy is exactly 0.
    self.assertEqual(acc, 0.0)
    quantizer = Quantization('fake_yaml.yaml')
    quantizer.postprocess('label_quantize', LabelShift, label_shift=1)
    quantizer.metric('topk_quantize', TensorflowTopK)
    # A second Benchmark registering only a metric, to exercise re-registration.
    evaluator = Benchmark('fake_yaml.yaml')
    evaluator.metric('topk_second', TensorflowTopK)
    dataloader = evaluator.dataloader(dataset=list(zip(images, labels)))
    result = evaluator(self.pb_path, b_dataloader=dataloader)
    acc, batch_size, result_list = result['accuracy']
    self.assertEqual(acc, 0.0)
def main(_):
    """Tune and/or benchmark a slim model; .ckpt inputs are rebuilt into a frozen graph first."""
    arg_parser = ArgumentParser(description='Parse args')
    arg_parser.add_argument("--input-graph",
                            help='Specify the slim model',
                            dest='input_graph')
    arg_parser.add_argument("--output-graph",
                            help='Specify tune result model save dir',
                            dest='output_graph')
    arg_parser.add_argument("--config", default=None, help="tuning config")
    arg_parser.add_argument('--benchmark',
                            dest='benchmark',
                            action='store_true',
                            help='run benchmark')
    arg_parser.add_argument('--tune',
                            dest='tune',
                            action='store_true',
                            help='use lpot to tune.')
    args = arg_parser.parse_args()
    factory = TFSlimNetsFactory()
    # user specific model can register to slim net factory
    input_shape = [None, 299, 299, 3]
    factory.register('inception_v4', inception_v4, input_shape,
                     inception_v4_arg_scope)
    if args.input_graph.endswith('.ckpt'):
        # directly get the topology name from input_graph
        topology = args.input_graph.rsplit('/', 1)[-1].split('.', 1)[0]
        # get the model func from net factory
        assert topology in factory.default_slim_models, \
            'only support topology {}'.format(factory.default_slim_models)
        net = copy.deepcopy(factory.networks_map[topology])
        model_func = net.pop('model')
        arg_scope = net.pop('arg_scope')()
        inputs_shape = net.pop('input_shape')
        # Remaining entries are forwarded to the model builder as kwargs.
        kwargs = net
        images = tf.compat.v1.placeholder(name='input', dtype=tf.float32, \
                                          shape=inputs_shape)
        from lpot.adaptor.tf_utils.util import get_slim_graph
        # Build a frozen graph out of the checkpoint + slim model definition.
        model = get_slim_graph(args.input_graph, model_func, arg_scope,
                               images, **kwargs)
    else:
        # Non-checkpoint inputs (e.g. frozen .pb) are passed through as a path.
        model = args.input_graph
    if args.tune:
        from lpot import Quantization
        quantizer = Quantization(args.config)
        q_model = quantizer(model)
        save(q_model, args.output_graph)
    if args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(args.config)
        results = evaluator(model=model)
        for mode, result in results.items():
            acc, batch_size, result_list = result
            # Per-sample latency: mean per-batch time divided by batch size.
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1. / latency))
# NOTE(review): incomplete fragment — the `parser.add_argument(` call that these
# leading arguments belong to starts outside this view; left byte-identical
# rather than reconstructed.
'--config', type=str, help="config yaml path" ) parser.add_argument( '--output_model', type=str, help="output model path" ) args = parser.parse_args() model = onnx.load(args.model_path) if args.benchmark: from lpot import Benchmark evaluator = Benchmark(args.config) results = evaluator(model=model) for mode, result in results.items(): acc, batch_size, result_list = result latency = np.array(result_list).mean() / batch_size print('\n{} mode benchmark result:'.format(mode)) print('Accuracy is {:.3f}'.format(acc)) print('Batch size = {}'.format(batch_size)) print('Latency: {:.3f} ms'.format(latency * 1000)) print('Throughput: {:.3f} images/sec'.format(batch_size * 1./ latency)) if args.tune: from lpot.quantization import Quantization quantize = Quantization(args.config) q_model = quantize(model)
# NOTE(review): incomplete fragment — it references `input_shape`, `lows`, `highs`,
# `dtypes`, `args`, `model` and `reference` defined before this view, and the final
# `if args.tune:` body continues after it; left byte-identical rather than reconstructed.
input_shapes = input_shape.split(',') input_shapes = [input_shapes] if type(input_shapes)!=list else input_shapes input_shapes = [shape.split('x') for shape in input_shapes] shapes = [tuple([args.benchmark_nums] + [int(dim) for dim in shape]) for shape in input_shapes] from lpot.data.datasets.dummy_dataset import DummyDataset from lpot.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype=dtypes, label=True) dummy_dataloader = ONNXRTDataLoader(dummy_dataset, batch_size=args.eval_batch_size) def eval_func(model): return evaluate_onnxrt(model, dummy_dataloader, reference) if args.benchmark: from lpot import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator.b_dataloader = dummy_dataloader results = evaluator() for mode, result in results.items(): acc, batch_size, result_list = result latency = np.array(result_list).mean() / batch_size print('\n quantized model {} mode benchmark result:'.format(mode)) print('Accuracy is {:.3f}'.format(acc)) print('Batch size = {}'.format(batch_size)) print('Latency: {:.3f} ms'.format(latency * 1000)) print('Throughput: {:.3f} images/sec'.format(batch_size * 1./ latency)) if args.tune: