def test_tuning_ipex(self):
    from lpot import Quantization, common
    model = torchvision.models.resnet18()
    model = MODELS['pytorch_ipex'](model)
    quantizer = Quantization('ipex_yaml.yaml')
    dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
    quantizer.model = common.Model(model)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    lpot_model = quantizer()
    lpot_model.save("./saved")
    new_model = MODELS['pytorch_ipex'](model.model,
                                       {"workspace_path": "./saved"})
    new_model.model.to(ipex.DEVICE)
    # Prefer scripting; fall back to tracing if the model is not scriptable.
    try:
        script_model = torch.jit.script(new_model.model)
    except Exception:
        script_model = torch.jit.trace(
            new_model.model,
            torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
    from lpot import Benchmark
    evaluator = Benchmark('ipex_yaml.yaml')
    evaluator.model = common.Model(script_model)
    evaluator.b_dataloader = common.DataLoader(dataset)
    results = evaluator()

def test_dump_tensor_to_disk(self):
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 30, 30, 1), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph
    quantizer()

    with open(self.calibration_log_path) as f:
        data = f.readlines()

    # Scan the calibration log for the dumped min/max tensor records.
    found_min_str = False
    found_max_str = False
    for i in data:
        if i.find('__print__;__max') != -1:
            found_max_str = True
        if i.find('__print__;__min') != -1:
            found_min_str = True

    self.assertEqual(os.path.exists(self.calibration_log_path), True)
    self.assertGreater(len(data), 1)
    self.assertEqual(found_min_str, True)
    self.assertEqual(found_max_str, True)

def test_first_matmul_biasadd_relu_fusion(self):
    x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
    # np.float was an alias of the builtin float and was removed in NumPy 1.24.
    y_data = np.array([[1, 2], [3, 4]], dtype=float)
    x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
    y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
    z = tf.matmul(x, y)
    z = tf.nn.bias_add(z, [1, 2])
    z = tf.nn.relu(z, name='op_to_store')

    with tf.Session() as sess:
        sess.run(z, feed_dict={x: x_data, y: y_data})
        float_graph_def = sess.graph.as_graph_def()

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.model = float_graph_def
    output_graph = quantizer()

    found_quantized_matmul = False
    for i in output_graph.graph_def.node:
        if i.op == 'QuantizeV2' and i.name == 'MatMul_eightbit_quantize_x' \
                and i.attr["T"].type == dtypes.quint8:
            found_quantized_matmul = True
            break
    self.assertEqual(found_quantized_matmul, True)

def test_quantization_saved(self):
    from lpot.utils.pytorch import load

    model = copy.deepcopy(self.model)
    for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
        if fake_yaml == 'ptq_yaml.yaml':
            model.eval().fuse_model()
        quantizer = Quantization(fake_yaml)
        dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
        quantizer.model = common.Model(model)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        if fake_yaml == 'qat_yaml.yaml':
            quantizer.q_func = q_func
        q_model = quantizer()
        q_model.save('./saved')
        # Load configure and weights by lpot.utils
        saved_model = load("./saved", model)
        eval_func(saved_model)

    from lpot import Benchmark
    evaluator = Benchmark('ptq_yaml.yaml')
    # Load configure and weights by lpot.model
    evaluator.model = common.Model(model)
    evaluator.b_dataloader = common.DataLoader(dataset)
    results = evaluator()
    evaluator.model = common.Model(model)
    fp32_results = evaluator()
    self.assertTrue(
        (fp32_results['accuracy'][0] - results['accuracy'][0]) < 0.01)

def test_disable_matmul_fusion(self):
    g = tf.Graph()
    with g.as_default():
        x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
        # np.float was an alias of the builtin float and was removed in NumPy 1.24.
        y_data = np.array([[1, 2], [3, 4]], dtype=float)
        x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
        y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
        z = tf.matmul(x, y, name='no_quant_matmul')
        z = tf.nn.relu6(z, name='op_to_store')
        found_quantized_matmul = False

        with tf.Session() as sess:
            sess.run(z, feed_dict={x: x_data, y: y_data})
            float_graph_def = sess.graph.as_graph_def()

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.model = float_graph_def
    output_graph = quantizer()

    for i in output_graph.graph_def.node:
        if i.op == 'QuantizedMatMulWithBiasAndDequantize' and i.name == 'op_to_store':
            found_quantized_matmul = True
            break
    self.assertEqual(found_quantized_matmul, False)

def test_loss_calculation(self):
    from lpot.strategy.tpe import TpeTuneStrategy
    from lpot import Quantization, common

    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph

    testObject = TpeTuneStrategy(quantizer.model, quantizer.conf,
                                 quantizer.calib_dataloader)
    testObject._calculate_loss_function_scaling_components(
        0.01, 2, testObject.loss_function_config)
    # Check that the latency difference between min and max corresponds
    # to 10 points of the loss function.
    tmp_val = testObject.calculate_loss(0.01, 2, testObject.loss_function_config)
    tmp_val2 = testObject.calculate_loss(0.01, 1, testObject.loss_function_config)
    self.assertEqual(int(tmp_val2 - tmp_val), 10)
    # Check that a 1% accuracy difference corresponds to 10 points
    # of the loss function.
    tmp_val = testObject.calculate_loss(0.02, 2, testObject.loss_function_config)
    tmp_val2 = testObject.calculate_loss(0.03, 2, testObject.loss_function_config)
    self.assertEqual(int(tmp_val2 - tmp_val), 10)

def test_autodump(self):
    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml3.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 3, 3, 1), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph
    output_graph = quantizer()

def test_ru_mse_max_trials(self):
    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml2.yaml')
    dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph
    quantizer()

def main():
    class CalibrationDL():
        def __init__(self):
            path = os.path.abspath(
                os.path.expanduser('./brats_cal_images_list.txt'))
            with open(path, 'r') as f:
                self.preprocess_files = [line.rstrip() for line in f]
            self.loaded_files = {}
            self.batch_size = 1

        def __getitem__(self, sample_id):
            file_name = self.preprocess_files[sample_id]
            print("Loading file {:}".format(file_name))
            with open(
                    os.path.join('build/calib_preprocess/',
                                 "{:}.pkl".format(file_name)), "rb") as f:
                self.loaded_files[sample_id] = pickle.load(f)[0]
            # Return an (input, label) pair; labels are unused for calibration.
            return torch.from_numpy(
                self.loaded_files[sample_id][np.newaxis, ...]).float(), None

        def __len__(self):
            self.count = len(self.preprocess_files)
            return self.count

    args = get_args()
    assert args.backend == "pytorch"
    model_path = os.path.join(args.model_dir, "plans.pkl")
    assert os.path.isfile(
        model_path), "Cannot find the model file {:}!".format(model_path)
    trainer, params = load_model_and_checkpoint_files(
        args.model_dir, folds=1, fp16=False,
        checkpoint_name='model_final_checkpoint')
    trainer.load_checkpoint_ram(params[0], False)
    model = trainer.network

    if args.tune:
        quantizer = Quantization('conf.yaml')
        quantizer.model = common.Model(model)
        quantizer.eval_func = eval_func
        calib_dl = CalibrationDL()
        quantizer.calib_dataloader = calib_dl
        q_model = quantizer()
        q_model.save('./lpot_workspace')
        exit(0)

    if args.benchmark:
        model.eval()
        if args.int8:
            from lpot.utils.pytorch import load
            new_model = load(
                os.path.abspath(os.path.expanduser('./lpot_workspace')), model)
        else:
            new_model = model
        eval_func(new_model)

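# A note on the calibration dataloader contract (inferred from the direct
# assignment above, not from a documented API): CalibrationDL is handed to
# quantizer.calib_dataloader as-is, so a sequence-style object exposing
# __getitem__, __len__ and a batch_size attribute appears to suffice.
# A minimal stand-in under that assumption, with hypothetical random data
# of a hypothetical shape:
#
#   class RandomCalibDL:
#       def __init__(self, count=10):
#           self.batch_size = 1
#           self.count = count
#       def __getitem__(self, idx):
#           return torch.randn(1, 4, 224, 224, 160), None
#       def __len__(self):
#           return self.count
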
def test_tensorflow_graph_meta_pass(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    top_relu = tf.nn.relu(x)
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv = tf.nn.conv2d(top_relu, conv_weights,
                        strides=[1, 2, 2, 1], padding="VALID")
    normed = tf.compat.v1.layers.batch_normalization(conv)
    relu = tf.nn.relu(normed)
    sq = tf.squeeze(relu, [0])
    reshape = tf.reshape(sq, [1, 27, 27, 16])
    conv_weights2 = tf.compat.v1.get_variable(
        "weight2", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv2 = tf.nn.conv2d(reshape, conv_weights2,
                         strides=[1, 2, 2, 1], padding="VALID")
    normed2 = tf.compat.v1.layers.batch_normalization(conv2)
    relu6 = tf.nn.relu6(normed2, name='op_to_store')
    out_name = relu6.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    # The meta pass should leave exactly one QuantizeV2/Dequantize pair
    # around the squeeze/reshape sequence.
    quantize_count = 0
    dequantize_count = 0
    for i in output_graph.graph_def.node:
        if i.op == 'QuantizeV2':
            quantize_count += 1
        if i.op == 'Dequantize':
            dequantize_count += 1
    self.assertEqual(quantize_count, 1)
    self.assertEqual(dequantize_count, 1)

def test_conv_fusion_with_last_matmul(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    top_relu = tf.nn.relu(x)
    # paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    # x_pad = tf.pad(top_relu, paddings, "CONSTANT")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv = tf.nn.conv2d(top_relu, conv_weights,
                        strides=[1, 2, 2, 1], padding="VALID")
    normed = tf.compat.v1.layers.batch_normalization(conv)
    relu = tf.nn.relu(normed)
    pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1],
                             padding="SAME")
    reshape = tf.reshape(pooling, [-1, 3136])

    y_data = np.random.random([3136, 1])
    y = tf.constant(y_data, dtype=tf.float32, shape=[3136, 1])
    z = tf.matmul(reshape, y)
    y_data_1 = np.random.random([1, 1])
    y_1 = tf.constant(y_data_1, dtype=tf.float32, shape=[1, 1])
    z_2nd_matmul = tf.matmul(z, y_1)
    relu6 = tf.nn.relu6(z_2nd_matmul, name='op_to_store')
    out_name = relu6.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    quantize_v2_count = 0
    for i in output_graph.graph_def.node:
        if i.op == 'QuantizeV2':
            quantize_v2_count += 1
            break
    self.assertEqual(quantize_v2_count, 1)

def test_autosave(self):
    from lpot import Quantization, common
    from lpot.utils.utility import get_size

    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', (100, 256, 256, 1), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph
    quantizer()
    q_model = quantizer()
    quantizer.model = self.constant_graph_1
    q_model_1 = quantizer()
    self.assertGreater(get_size(q_model_1.sess.graph),
                       get_size(q_model.sess.graph))

def test_conv_biasadd_addv2_relu_fusion(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    top_relu = tf.nn.relu(x)
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(top_relu, paddings, "CONSTANT")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv = tf.nn.conv2d(x_pad, conv_weights,
                        strides=[1, 2, 2, 1], padding="VALID")
    normed = tf.compat.v1.layers.batch_normalization(conv)
    # relu = tf.nn.relu(normed)
    conv_weights2 = tf.compat.v1.get_variable(
        "weight2", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv2 = tf.nn.conv2d(top_relu, conv_weights2,
                         strides=[1, 2, 2, 1], padding="SAME")
    normed2 = tf.compat.v1.layers.batch_normalization(conv2)
    # relu2 = tf.nn.relu(normed2)
    add = tf.raw_ops.AddV2(x=normed, y=normed2, name='addv2')
    relu = tf.nn.relu(add)
    relu6 = tf.nn.relu6(relu, name='op_to_store')
    out_name = relu6.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    found_conv_fusion = False
    for i in output_graph.graph_def.node:
        if i.op == 'QuantizedConv2DWithBiasSignedSumAndReluAndRequantize':
            found_conv_fusion = True
            break
    self.assertEqual(found_conv_fusion, True)

def test_run_bayesian_max_trials(self):
    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml2.yaml')
    dataset = quantizer.dataset('dummy', shape=(1, 224, 224, 3), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = self.test_graph
    output_graph = quantizer()

def test_run_basic_one_trial(self):
    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', (1, 224, 224, 3), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = self.constant_graph
    quantizer()
    self.assertGreater(len(os.listdir("./runs/eval")), 2)

def test_disable_scale_propagation(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [2, 2, 1, 1],
        initializer=tf.compat.v1.random_normal_initializer())
    conv_bias = tf.compat.v1.get_variable(
        "bias", [1], initializer=tf.compat.v1.random_normal_initializer())
    x = tf.nn.relu(x)
    conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1],
                        padding="SAME", name='last')
    normed = tf.compat.v1.layers.batch_normalization(conv)
    relu = tf.nn.relu(normed)
    pool = tf.nn.avg_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
    conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1],
                         padding="SAME", name='last')
    conv_bias = tf.nn.bias_add(conv1, conv_bias)
    x = tf.nn.relu(conv_bias)
    final_node = tf.nn.relu(x, name='op_to_store')
    out_name = final_node.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml_disable_scale_propagation.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 30, 30, 1), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    # With scale propagation disabled, the two requantize ops should keep
    # distinct max_freezed_out inputs.
    max_freezed_out = []
    for i in output_graph.graph_def.node:
        if i.op == 'QuantizedConv2DWithBiasAndReluAndRequantize':
            max_freezed_out.append(i.input[-1])
    self.assertEqual(2, len(set(max_freezed_out)))

def test_bf16_rnn(self):
    os.environ['FORCE_BF16'] = '1'

    inp = tf.keras.layers.Input(shape=(None, 4))
    lstm_1 = tf.keras.layers.LSTM(units=10, return_sequences=True)(inp)
    dropout_1 = tf.keras.layers.Dropout(0.2)(lstm_1)
    lstm_2 = tf.keras.layers.LSTM(units=10, return_sequences=False)(dropout_1)
    dropout_2 = tf.keras.layers.Dropout(0.2)(lstm_2)
    out = tf.keras.layers.Dense(1)(dropout_2)
    model = tf.keras.models.Model(inputs=inp, outputs=out)
    model.compile(loss="mse", optimizer=tf.keras.optimizers.RMSprop())

    # input_names = [t.name.split(":")[0] for t in model.inputs]
    output_names = [t.name.split(":")[0] for t in model.outputs]

    q_data = np.random.randn(64, 10, 4)
    label = np.random.randn(64, 1)
    model.predict(q_data)

    sess = tf.keras.backend.get_session()
    graph = sess.graph
    from tensorflow.python.framework import graph_util
    graph_def = graph_util.convert_variables_to_constants(
        sess,
        graph.as_graph_def(),
        output_names,
    )
    quant_data = (q_data, label)
    evl_data = (q_data, label)

    from lpot import Quantization, common
    quantizer = Quantization('fake_bf16_rnn.yaml')
    quantizer.calib_dataloader = common.DataLoader(
        dataset=list(zip(quant_data[0], quant_data[1])))
    quantizer.eval_dataloader = common.DataLoader(
        dataset=list(zip(evl_data[0], evl_data[1])))
    quantizer.model = graph_def
    quantized_model = quantizer()

    convert_to_bf16_flag = False
    for i in quantized_model.graph_def.node:
        if i.name == 'lstm/while/MatMul_3' and \
                i.attr['T'].type == dtypes.bfloat16.as_datatype_enum:
            convert_to_bf16_flag = True
    self.assertEqual(convert_to_bf16_flag, True)

def test_fold_pad_conv2(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(x, paddings, "CONSTANT")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv = tf.nn.conv2d(x_pad, conv_weights,
                        strides=[1, 2, 2, 1], padding="VALID")
    normed = tf.compat.v1.layers.batch_normalization(conv)
    relu = tf.nn.relu(normed)

    paddings2 = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    x_pad2 = tf.pad(x, paddings2, "CONSTANT")
    conv_weights2 = tf.compat.v1.get_variable(
        "weight2", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv2 = tf.nn.conv2d(x_pad2, conv_weights2,
                         strides=[1, 2, 2, 1], padding="VALID")
    normed2 = tf.compat.v1.layers.batch_normalization(conv2)
    relu2 = tf.nn.relu(normed2)
    add = tf.math.add(relu, relu2, name='op_to_store')
    out_name = add.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    found_pad = False
    if tf.__version__ >= "2.0.0":
        for i in output_graph.graph_def.node:
            if i.op == 'Pad':
                found_pad = True
                break
        self.assertEqual(found_pad, True)

def test_quantize(self):
    from lpot import Quantization, common
    for fake_yaml in ["static_yaml.yaml", "dynamic_yaml.yaml"]:
        quantizer = Quantization(fake_yaml)
        dataset = quantizer.dataset("dummy", (100, 3, 224, 224),
                                    low=0., high=1., label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = common.Model(self.rn50_model)
        q_model = quantizer()
        eval_func(q_model)
    for fake_yaml in ["non_MSE_yaml.yaml"]:
        quantizer = Quantization(fake_yaml)
        dataset = quantizer.dataset("dummy", (100, 3, 224, 224),
                                    low=0., high=1., label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = common.Model(self.mb_v2_model)
        q_model = quantizer()
        eval_func(q_model)

def test_no_input_output_config(self):
    g = GraphAnalyzer()
    g.graph = self.input_graph
    g.parse_graph()
    float_graph_def = g.dump_graph()

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(20, 224, 224, 3), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.model = float_graph_def
    output_graph = quantizer()
    self.assertGreater(len(output_graph.graph_def.node), 0)

def test_tensor_dump(self):
    model = copy.deepcopy(self.lpot_model)
    model.model.eval().fuse_model()
    quantizer = Quantization('dump_yaml.yaml')
    dataset = quantizer.dataset('dummy', (100, 3, 256, 256), label=True)
    quantizer.model = common.Model(model.model)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_func = eval_func
    quantizer()
    self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer()
    self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))

def test_bf16_fallback(self):
    os.environ['FORCE_BF16'] = '1'

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml.yaml')
    dataset = quantizer.dataset('dummy', shape=(1, 224, 224, 3), label=True)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.model = self.test_graph
    output_graph = quantizer()

    cast_op_count = 0
    for node in output_graph.graph_def.node:
        if node.op == 'Cast':
            cast_op_count += 1
    self.assertGreaterEqual(cast_op_count, 1)

def auto_tune(self):
    """Run LPOT tuning to generate a quantized pb.

    Returns:
        graph: the quantized pb.
    """
    from lpot import Quantization

    infer_graph = load_graph(self.args.input_graph)
    quantizer = Quantization(self.args.config)
    if self.args.calib_data:
        quantizer.model = infer_graph
        quantizer.calib_dataloader = Dataloader(self.args.calib_data,
                                                self.args.batch_size)
        quantizer.eval_func = self.eval_inference
        q_model = quantizer()
        return q_model
    print("Please provide calibration dataset!")

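# A minimal usage sketch for auto_tune (hypothetical wiring; assumes the
# owning object was built with an `args` namespace carrying input_graph,
# config, calib_data and batch_size, the only fields used above):
#
#   runner = EvalInference(args)   # hypothetical owner class
#   q_model = runner.auto_tune()
#   if q_model:
#       q_model.save(args.output_graph)
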
def main(_):
    arg_parser = ArgumentParser(description='Parse args')
    arg_parser.add_argument("--input-graph",
                            help='Specify the slim model',
                            dest='input_graph')
    arg_parser.add_argument("--output-graph",
                            help='Specify tune result model save dir',
                            dest='output_graph')
    arg_parser.add_argument("--config", default=None, help="tuning config")
    arg_parser.add_argument('--benchmark', dest='benchmark',
                            action='store_true', help='run benchmark')
    arg_parser.add_argument('--tune', dest='tune',
                            action='store_true', help='use lpot to tune')
    args = arg_parser.parse_args()

    factory = TFSlimNetsFactory()
    # A user-specific model can be registered to the slim nets factory.
    input_shape = [None, 299, 299, 3]
    factory.register('inception_v4', inception_v4, input_shape,
                     inception_v4_arg_scope)

    if args.tune:
        from lpot import Quantization
        quantizer = Quantization(args.config)
        quantizer.model = args.input_graph
        q_model = quantizer()
        q_model.save(args.output_graph)

    if args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(args.config)
        evaluator.model = args.input_graph
        results = evaluator()
        for mode, result in results.items():
            acc, batch_size, result_list = result
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1. / latency))

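# The benchmark arithmetic above in numbers: result_list holds per-batch
# latencies in seconds, so mean()/batch_size is seconds per image. For
# example, a 0.080 s mean at batch_size=4 gives 0.020 s per image, printed
# as 20.000 ms latency and 50.000 images/sec throughput (1/0.020).
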
def test_invalid_input_output_config(self):
    g = GraphAnalyzer()
    g.graph = self.input_graph
    g.parse_graph()
    float_graph_def = g.dump_graph()

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml_2.yaml')
    dataset = quantizer.dataset('dummy', shape=(20, 224, 224, 3), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
    quantizer.model = float_graph_def
    model = quantizer()
    # The quantizer should detect the right inputs/outputs itself,
    # overriding the invalid ones from the config.
    self.assertNotEqual(model.input_node_names, ['x'])
    self.assertNotEqual(model.output_node_names, ['op_to_store'])

def test_enable_first_quantization(self):
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    top_relu = tf.nn.relu(x)
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(top_relu, paddings, "CONSTANT")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16],
        initializer=tf.compat.v1.random_normal_initializer())
    conv = tf.nn.conv2d(x_pad, conv_weights,
                        strides=[1, 2, 2, 1], padding="VALID")
    normed = tf.compat.v1.layers.batch_normalization(conv)
    relu = tf.nn.relu(normed)
    relu6 = tf.nn.relu6(relu, name='op_to_store')
    out_name = relu6.name.split(':')[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])

    from lpot import Quantization, common
    quantizer = Quantization('fake_yaml_enable_first_quantization.yaml')
    dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = output_graph_def
    output_graph = quantizer()

    # With first-op quantization enabled, no fp32 Conv2D should remain.
    found_fp32_conv = False
    for i in output_graph.graph_def.node:
        if i.op == 'Conv2D':
            found_fp32_conv = True
            break
    self.assertEqual(found_fp32_conv, False)

def tune_model(
    input_graph: str,
    output_graph: str,
    config: str,
    framework: str,
) -> None:
    """Execute tuning."""
    from lpot import Quantization, common

    if framework == "onnxrt":
        import onnx
        input_graph = onnx.load(input_graph)
    quantizer = Quantization(config)
    quantizer.model = common.Model(input_graph)
    quantized_model = quantizer()
    quantized_model.save(output_graph)

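# Example invocation (hypothetical paths; any framework value other than
# "onnxrt" passes the input path straight through to common.Model):
#
#   tune_model("resnet50.onnx", "resnet50_int8.onnx", "conf.yaml", "onnxrt")
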
def main(_):
    graph = load_graph(FLAGS.input_graph)
    if FLAGS.mode == 'tune':
        from lpot import Quantization, common
        quantizer = Quantization(FLAGS.config)
        ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file)
        quantizer.calib_dataloader = common.DataLoader(
            ds, collate_fn=collate_fn, batch_size=FLAGS.batch_size)
        quantizer.model = common.Model(graph)
        quantizer.eval_func = eval_func
        q_model = quantizer()
        try:
            q_model.save(FLAGS.output_model)
        except Exception as e:
            print("Failed to save model due to {}".format(str(e)))
    elif FLAGS.mode == 'benchmark':
        eval_func(graph, FLAGS.iters)
    elif FLAGS.mode == 'accuracy':
        eval_func(graph, -1)

def test_matmul_biasadd_requantize_dequantize_fusion_with_softmax(self):
    g = tf.Graph()
    with g.as_default():
        x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
        # np.float was an alias of the builtin float and was removed in NumPy 1.24.
        y_data = np.array([[1, 2], [3, 4]], dtype=float)
        x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
        y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
        z = tf.matmul(x, y)
        biasadd = tf.nn.bias_add(z, [1, 2])
        biasadd1 = tf.nn.bias_add(biasadd, [1, 1])
        y1 = tf.constant(x_data, dtype=tf.float32, shape=[2, 2])
        matmul1 = tf.matmul(biasadd1, y1)
        biasadd2 = tf.nn.bias_add(matmul1, [1, 1])
        z = tf.nn.softmax(biasadd2, name='op_to_store')

        found_quantized_matmul = False
        if tf.version.VERSION < "2.2.0":
            found_quantized_matmul = False
        else:
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()

            from lpot import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
            quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
            quantizer.model = float_graph_def
            output_graph = quantizer()

            count = 0
            for i in output_graph.model.node:
                if i.op == 'QuantizedMatMulWithBiasAndDequantize':
                    count += 1
            found_quantized_matmul = bool(count > 1)
    self.assertEqual(found_quantized_matmul, False)

def run(self):
    if self.args.tune:
        from lpot import Quantization
        quantizer = Quantization(self.args.config)
        quantizer.model = self.args.input_graph
        q_model = quantizer()
        q_model.save(self.args.output_model)

    if self.args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(self.args.config)
        evaluator.model = self.args.input_graph
        results = evaluator()
        for mode, result in results.items():
            acc, batch_size, result_list = result
            latency = np.array(result_list).mean() / batch_size
            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1. / latency))