Example #1
0
class TestTFQueryYaml(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        self.tf_yaml_path = os.path.join(os.getcwd() + "/../lpot/adaptor/tensorflow.yaml")

        with open(self.tf_yaml_path) as f:
            self.content = yaml.safe_load(f)
        self.query_handler = TensorflowQuery(local_config_file=self.tf_yaml_path)

    def test_unique_version(self):
        registered_version_name = [i['version']['name'] for i in self.content]

        self.assertEqual(len(registered_version_name), len(set(registered_version_name)))

    def test_int8_sequences(self):
        patterns = self.query_handler.get_eightbit_patterns()

        has_conv2d = bool('Conv2D' in patterns)
        has_matmul = bool('MatMul' in patterns)
        self.assertEqual(has_conv2d, True)
        self.assertEqual(has_matmul, True)
        self.assertGreaterEqual(len(patterns['Conv2D']), 13)
        self.assertGreaterEqual(len(patterns['MatMul']), 3)
        self.assertEqual(len(patterns['ConcatV2']), 1)
        self.assertEqual(len(patterns['MaxPool']), 1)
        self.assertEqual(len(patterns['AvgPool']), 1)

    def test_convert_internal_patterns(self):
        internal_patterns = self.query_handler.generate_internal_patterns()
        self.assertEqual([['MaxPool']] in internal_patterns, True)
        self.assertEqual([['ConcatV2']] in internal_patterns, True)
        self.assertEqual([['AvgPool']] in internal_patterns, True)
        self.assertEqual([['MatMul'], ('BiasAdd',), ('Relu',)] in internal_patterns, True)
Example #2
0
    def setUpClass(self):
        """Load the TF adaptor YAML once and build the query handler.

        NOTE(review): declared with ``self`` though it is presumably
        registered as a ``classmethod`` (the decorator line is outside
        this chunk) — the attributes are stored on the class object.
        """
        # Join the path components instead of feeding os.path.join one
        # pre-concatenated string; keeps the separator portable.
        self.tf_yaml_path = os.path.join(os.getcwd(), "..", "lpot",
                                         "adaptor", "tensorflow.yaml")

        with open(self.tf_yaml_path) as f:
            self.content = yaml.safe_load(f)
        self.query_handler = TensorflowQuery(
            local_config_file=self.tf_yaml_path)
    def test_conv_biasadd_relu_fusion(self):
        """Quantize the prepared graph and check the sum-fusion decisions.

        The conv in resnet_v13 should fuse to
        ``QuantizedConv2DWithBiasSumAndRelu`` while the conv in
        resnet_v17 must remain a plain ``QuantizedConv2DWithBias``.
        """
        tf.compat.v1.disable_eager_execution()

        # Pre-process the graph in the order the quantizer expects:
        # drop training nodes, strip unused nodes, fold batch norms.
        self._tmp_graph_def = graph_util.remove_training_nodes(
            self.input_graph, self.outputs)

        self._tmp_graph_def = StripUnusedNodesOptimizer(
            self._tmp_graph_def, self.inputs,
            self.outputs).do_transformation()

        self._tmp_graph_def = FoldBatchNormNodesOptimizer(
            self._tmp_graph_def).do_transformation()

        yaml_path = os.path.join(os.path.dirname(__file__),
                                 "../lpot/adaptor/tensorflow.yaml")
        int8_sequences = TensorflowQuery(
            local_config_file=yaml_path).get_eightbit_patterns()

        output_graph = QuantizeGraphForIntel(self._tmp_graph_def, self.outputs,
                                             self.op_wise_config,
                                             int8_sequences,
                                             'cpu').do_transform()

        # Map every node in the quantized result to its op type.
        op_by_name = {node.name: node.op for node in output_graph.node}

        disable_name = 'v0/resnet_v17/conv27/conv2d/Conv2D_eightbit_quantized_conv'
        enable_name = 'v0/resnet_v13/conv11/conv2d/Conv2D_eightbit_quantized_conv'
        should_disable_sum_flag = (
            op_by_name.get(disable_name) == 'QuantizedConv2DWithBias')
        should_enable_sum_flag = (
            op_by_name.get(enable_name) == 'QuantizedConv2DWithBiasSumAndRelu')
        self.assertEqual(should_enable_sum_flag, True)
        self.assertEqual(should_disable_sum_flag, True)
Example #4
0
 def setUpClass(self):
     """Download the test model once and cache the int8 fusion patterns.

     NOTE(review): declared with ``self`` but presumably registered as a
     ``classmethod`` (decorator outside this chunk).
     """
     # Download with urllib instead of shelling out to ``wget`` through
     # os.system: no dependency on an external binary, and no breakage
     # when the URL or target path contains spaces/shell metacharacters.
     import urllib.request
     urllib.request.urlretrieve(self.mb_model_url, self.pb_path)
     self.op_wise_sequences = TensorflowQuery(
         local_config_file=os.path.join(
             os.path.dirname(__file__),
             "../lpot/adaptor/tensorflow.yaml")).get_eightbit_patterns()
     build_fake_yaml()
    def test_conv_add_relu(self):
        """Conv2D + Add + Relu must fuse into QuantizedConv2DWithBiasAndRelu."""
        import re

        x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3],
                                     name="input")

        # Compare versions numerically: the original lexicographic string
        # comparison (VERSION <= '2.1.0') misorders multi-digit components
        # such as '10.0.0' or '2.10.0'.
        version_nums = tuple(
            int(part) for part in re.findall(r'\d+', tf.version.VERSION)[:3])
        if version_nums <= (2, 1, 0):
            x = tf.nn.relu(x)
        conv_weights = tf.compat.v1.get_variable(
            "weight", [3, 3, 3, 32],
            initializer=tf.compat.v1.random_normal_initializer())
        conv_bias = tf.compat.v1.get_variable(
            "bias", [32], initializer=tf.compat.v1.random_normal_initializer())
        conv1 = tf.nn.conv2d(x,
                             conv_weights,
                             strides=[1, 1, 1, 1],
                             padding="SAME")
        # Rebinding conv_bias to the Add output mirrors the original code.
        conv_bias = tf.math.add(conv1, conv_bias)
        relu = tf.nn.relu(conv_bias, name='Relu_1')
        op_wise_sequences = TensorflowQuery(local_config_file=os.path.join(
            os.path.dirname(__file__),
            "../lpot/adaptor/tensorflow.yaml")).get_eightbit_patterns()
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess,
                input_graph_def=sess.graph_def,
                output_node_names=[relu.name.split(':')[0]])
            output_graph_def = QuantizeGraphHelper.remove_training_nodes(
                output_graph_def, protected_nodes=[relu.name.split(':')[0]])

            outputs = [relu.name.split(':')[0]]
            op_wise_config = {
                "Conv2D": (False, 'minmax', False, 7.0),
            }

            fold_graph_def = QuantizeGraphForIntel(output_graph_def, outputs,
                                                   op_wise_config,
                                                   op_wise_sequences,
                                                   'cpu').do_transform()
            # The fused op must appear somewhere in the quantized graph.
            found = any(node.op == 'QuantizedConv2DWithBiasAndRelu'
                        for node in fold_graph_def.node)
            self.assertEqual(found, True)
Example #6
0
    def test_tensorflow_graph_library_detection(self):
        """Conversion must leave the graph's function library untouched."""
        tf.compat.v1.disable_eager_execution()

        yaml_file = os.path.join(os.path.dirname(__file__),
                                 "../lpot/adaptor/tensorflow.yaml")
        int8_sequences = TensorflowQuery(
            local_config_file=yaml_file).get_eightbit_patterns()

        original_graphdef = read_graph(self.pb_path)
        converted_graph = GraphConverter(
            self.pb_path,
            "/tmp/test.pb",
            inputs=['input_tensor'],
            outputs=['softmax_tensor'],
            int8_sequences=int8_sequences,
            qt_config={'calib_iteration': 1, 'op_wise_config': {}}).convert()

        # The function library must survive the int8 conversion intact.
        self.assertEqual(converted_graph.as_graph_def().library,
                         original_graphdef.library)
    def test_tensorflow_graph_library_detection(self):
        """Library-preservation check, driven through a TensorflowModel."""
        tf.compat.v1.disable_eager_execution()

        int8_sequences = TensorflowQuery(local_config_file=os.path.join(
            os.path.dirname(__file__),
            "../lpot/adaptor/tensorflow.yaml")).get_eightbit_patterns()

        original_graphdef = read_graph(self.pb_path)
        framework_info = dict(
            name='test',
            input_tensor_names='input_tensor',
            output_tensor_names='softmax_tensor',
            workspace_path="/tmp/test.pb",
        )
        wrapped_model = TensorflowModel(self.pb_path, framework_info)
        converted_graph = GraphConverter(
            wrapped_model,
            int8_sequences=int8_sequences,
            qt_config={'calib_iteration': 1, 'op_wise_config': {}}).convert()

        # The function library must survive the int8 conversion intact.
        self.assertEqual(converted_graph.graph_def.library,
                         original_graphdef.library)
 def setUpClass(self):
     """Build the fake YAML and cache the int8 fusion patterns once."""
     build_fake_yaml()
     config_path = os.path.join(os.path.dirname(__file__),
                                "../lpot/adaptor/tensorflow.yaml")
     self.op_wise_sequences = TensorflowQuery(
         local_config_file=config_path).get_eightbit_patterns()
Example #9
0
class TestTFQueryYaml(unittest.TestCase):
    """Checks tensorflow.yaml registration plus grappler-driven quantization.

    Besides the YAML sanity checks (unique version names, expected int8
    fusion patterns), ``test_grappler_cfg`` builds a small conv graph,
    quantizes it through the fake grappler YAML, and verifies that the
    grappler arithmetic optimizer was kept disabled.
    """

    @classmethod
    def setUpClass(cls):
        # Join the path components instead of concatenating with a
        # hard-coded "/" — the original passed one pre-joined string to
        # os.path.join, defeating its purpose.
        cls.tf_yaml_path = os.path.join(os.getcwd(), "..", "lpot",
                                        "adaptor", "tensorflow.yaml")

        with open(cls.tf_yaml_path) as f:
            cls.content = yaml.safe_load(f)
        cls.query_handler = TensorflowQuery(
            local_config_file=cls.tf_yaml_path)
        build_fake_yaml_on_grappler()

    @classmethod
    def tearDownClass(cls):
        # Remove the YAML created by build_fake_yaml_on_grappler().
        os.remove('fake_yaml_grappler.yaml')

    def test_unique_version(self):
        """Every entry in the YAML must declare a distinct version name."""
        registered_version_name = [i['version']['name'] for i in self.content]

        self.assertEqual(len(registered_version_name),
                         len(set(registered_version_name)))

    def test_int8_sequences(self):
        """The eight-bit fusion patterns must cover the core quantizable ops."""
        patterns = self.query_handler.get_eightbit_patterns()

        self.assertIn('Conv2D', patterns)
        self.assertIn('MatMul', patterns)
        self.assertGreaterEqual(len(patterns['Conv2D']), 13)
        self.assertGreaterEqual(len(patterns['MatMul']), 3)
        self.assertEqual(len(patterns['ConcatV2']), 1)
        self.assertEqual(len(patterns['MaxPool']), 1)
        self.assertEqual(len(patterns['AvgPool']), 1)

    def test_convert_internal_patterns(self):
        """Internal pattern conversion must emit the expected op sequences."""
        internal_patterns = self.query_handler.generate_internal_patterns()
        self.assertIn([['MaxPool']], internal_patterns)
        self.assertIn([['ConcatV2']], internal_patterns)
        self.assertIn([['AvgPool']], internal_patterns)
        self.assertIn([['MatMul'], ('BiasAdd', ), ('Relu', )],
                      internal_patterns)

    @disable_random()
    def test_grappler_cfg(self):
        """Quantize a small conv net and confirm arithmetic opt stayed off."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input")
        conv_weights = tf.compat.v1.get_variable(
            "weight", [2, 2, 1, 1],
            initializer=tf.compat.v1.random_normal_initializer())
        conv_bias = tf.compat.v1.get_variable(
            "bias", [1], initializer=tf.compat.v1.random_normal_initializer())

        x = tf.nn.relu(x)
        # NOTE(review): both conv ops request name='last'; TF uniquifies
        # the second — presumably intentional, confirm against the YAML.
        conv = tf.nn.conv2d(x,
                            conv_weights,
                            strides=[1, 2, 2, 1],
                            padding="SAME",
                            name='last')
        normed = tf.compat.v1.layers.batch_normalization(conv)

        # The back-to-back Relus are deliberate: an enabled arithmetic
        # optimizer would collapse them, which is what this test detects.
        relu = tf.nn.relu(normed)
        relu2 = tf.nn.relu(relu)
        pool = tf.nn.max_pool(relu2,
                              ksize=1,
                              strides=[1, 2, 2, 1],
                              name='maxpool',
                              padding="SAME")
        conv1 = tf.nn.conv2d(pool,
                             conv_weights,
                             strides=[1, 2, 2, 1],
                             padding="SAME",
                             name='last')
        conv_bias = tf.nn.bias_add(conv1, conv_bias)
        x = tf.nn.relu(conv_bias)
        final_node = tf.nn.relu(x, name='op_to_store')

        out_name = final_node.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess,
                input_graph_def=sess.graph_def,
                output_node_names=[out_name])
            from lpot.experimental import Quantization, common

            quantizer = Quantization('fake_yaml_grappler.yaml')
            dataset = quantizer.dataset('dummy',
                                        shape=(100, 30, 30, 1),
                                        label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.model = output_graph_def
            output_graph = quantizer()

            # The quantized max-pool should still read from 'Relu_2'; if
            # grappler's arithmetic pass had run, the duplicate Relu
            # would have been removed and this input rewired.
            disable_arithmetic = False
            for node in output_graph.graph_def.node:
                if (node.name == 'maxpool_eightbit_quantize_Relu_2'
                        and node.input[0] == 'Relu_2'):
                    disable_arithmetic = True
            # A version-dependent expectation (>= 2.3.0 expecting False)
            # was left commented out in the original; the current
            # expectation is unconditionally True.
            self.assertEqual(True, disable_arithmetic)