コード例 #1 (Code example 1)
    def test_tensorflow_gpu_conversion(self):
        """Quantize one conv for the GPU target and verify its weight range nodes.

        After conversion the targeted Conv2D must be rewritten into its
        eight-bit quantized counterpart, and the two trailing inputs of that
        node (the frozen weight min/max) must be emitted as ``HostConst`` ops.
        """
        input_graph_def = read_graph(self.pb_path)
        output_node_names = ['MobilenetV1/Predictions/Reshape_1']
        # Per-op quantization config for the single conv under test.
        # NOTE(review): tuple presumably means (per_channel, algorithm, bf16) —
        # confirm against QuantizeGraphForIntel's expected format.
        op_wise_config = {
            'MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D': (False, 'minmax', False)}
        tf.compat.v1.disable_eager_execution()

        converter = QuantizeGraphForIntel(
            input_graph_def, output_node_names, op_wise_config, self.op_wise_sequences, 'gpu')
        converted_pb = converter.do_transform()

        target_node_name = 'MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D_eightbit_quantized_conv'

        # Index every node of the converted graph by name for O(1) lookup.
        node_details = {node.name: node for node in converted_pb.node}

        # The conv must have been rewritten into its quantized counterpart.
        self.assertIn(target_node_name, node_details)

        target_node = node_details[target_node_name]
        # The last two inputs of the quantized conv carry the frozen
        # weight min / max range values.
        weights_min_node = node_details[target_node.input[-2]]
        weights_max_node = node_details[target_node.input[-1]]

        # On the GPU path the frozen ranges must be HostConst, not Const.
        self.assertEqual(weights_max_node.op, "HostConst")
        self.assertEqual(weights_min_node.op, "HostConst")
コード例 #2 (Code example 2)
 def test_parse_pb_contains_share_nodes(self):
     """Splitting shared inputs grows the graph and matches the legacy helper."""
     pb_file = os.path.join(self.unzipped_folder_name,
                            "frozen_inference_graph.pb")
     original_graphdef = read_graph(pb_file)
     # Deep-copy first: both implementations mutate/consume their input graph.
     copied_graphdef = copy.deepcopy(original_graphdef)

     parsed_graphdef = SplitSharedInputOptimizer(
         original_graphdef).do_transformation()
     legacy_graphdef = QuantizeGraphHelper.split_shared_inputs(
         copied_graphdef)

     # Splitting shared inputs can only add nodes, never remove them.
     self.assertGreater(len(parsed_graphdef.node),
                        len(original_graphdef.node))
     # New optimizer and legacy helper must agree on the resulting size.
     self.assertEqual(len(legacy_graphdef.node), len(parsed_graphdef.node))
コード例 #3 (Code example 3)
    def test_tensorflow_graph_library_detection(self):
        """convert() must carry the original graph's function library through."""
        tf.compat.v1.disable_eager_execution()

        original_graphdef = read_graph(self.pb_path)

        qt_config = {'calib_iteration': 1, 'op_wise_config': {}}
        converter = GraphConverter(self.pb_path,
                                   "/tmp/test.pb",
                                   inputs=['input_tensor'],
                                   outputs=['softmax_tensor'],
                                   qt_config=qt_config)
        converted_graph = converter.convert()

        # Conversion must not drop or alter the graph's function library.
        self.assertEqual(converted_graph.as_graph_def().library,
                         original_graphdef.library)
コード例 #4 (Code example 4)
    def test_tensorflow_graph_library_detection(self):
        """convert() with explicit int8 sequences preserves the function library."""
        tf.compat.v1.disable_eager_execution()

        # Load the eight-bit fusion patterns from the adaptor's YAML config.
        yaml_path = os.path.join(os.path.dirname(__file__),
                                 "../lpot/adaptor/tensorflow.yaml")
        op_wise_sequences = TensorflowQuery(
            local_config_file=yaml_path).get_eightbit_patterns()

        original_graphdef = read_graph(self.pb_path)
        converter = GraphConverter(self.pb_path,
                                   "/tmp/test.pb",
                                   inputs=['input_tensor'],
                                   outputs=['softmax_tensor'],
                                   int8_sequences=op_wise_sequences,
                                   qt_config={'calib_iteration': 1,
                                              'op_wise_config': {}})
        converted_graph = converter.convert()

        # The function library must survive the conversion unchanged.
        self.assertEqual(converted_graph.as_graph_def().library,
                         original_graphdef.library)
コード例 #5 (Code example 5)
    def test_tensorflow_concat_quantization(self):
        """Quantize a graph with a concat and check its inputs share one range.

        A quantized ConcatV2 requires all of its inputs to be requantized to a
        single common scale, so every conv feeding the concat must end up with
        identical frozen min/max output values.
        """
        output_graph_def = read_graph(self.pb_path)
        from lpot import Quantization

        quantizer = Quantization('fake_yaml.yaml')
        dataset = quantizer.dataset('dummy',
                                    shape=(100, 299, 299, 3),
                                    label=True)
        dataloader = quantizer.dataloader(dataset)
        output_graph = quantizer(output_graph_def,
                                 q_dataloader=dataloader,
                                 eval_dataloader=dataloader)

        target_concat_node_name = 'v0/cg/incept_v3_a0/concat_eightbit_quantized_concatv2'
        from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphAnalyzer
        cur_graph = GraphAnalyzer()
        cur_graph.graph = output_graph.as_graph_def()
        graph_info = cur_graph.parse_graph()

        # The concat node itself must have been quantized.
        self.assertIn(target_concat_node_name, graph_info)

        # Collect the frozen output ranges of the four convs feeding the concat.
        # Each conv's last two inputs are its frozen min / max output values.
        min_out, max_out = [], []
        for input_conv_name in graph_info[
                target_concat_node_name].node.input[:4]:
            min_freezed_out_name = graph_info[input_conv_name].node.input[-2]
            max_freezed_out_name = graph_info[input_conv_name].node.input[-1]
            min_freezed_out_value = (graph_info[min_freezed_out_name].node.
                                     attr['value'].tensor.float_val)[0]
            max_freezed_out_value = (graph_info[max_freezed_out_name].node.
                                     attr['value'].tensor.float_val)[0]
            min_out.append(min_freezed_out_value)
            max_out.append(max_freezed_out_value)

        # All inputs must have been requantized to one common range.
        self.assertEqual(len(set(min_out)), 1)
        self.assertEqual(len(set(max_out)), 1)
コード例 #6 (Code example 6)
    def read(self, model_path: str) -> Graph:
        """Read a frozen graph from *model_path* and build a Graph model of it.

        Nodes matching the hide predicate are registered as hidden and
        excluded from the result, along with any edges touching them.
        """
        self.ensure_model_readable(model_path)

        graph = Graph()
        for node_def in read_graph(model_path).node:
            # Hidden nodes are tracked but never rendered.
            if self._should_hide_node(node_def):
                self._hide_node(node_def)
                continue

            node_id = node_def.name
            node = Node(
                id=node_id,
                label=node_def.op,
                properties={
                    "name": node_def.name,
                    "type": node_def.op,
                },
                attributes=self._convert_attributes(node_def),
            )
            graph.add_node(node)

            # Wire incoming edges, skipping any endpoint that is hidden.
            for input_name in node_def.input:
                endpoint_hidden = (self._is_node_id_hidden(input_name)
                                   or self._is_node_id_hidden(node_id))
                if endpoint_hidden:
                    continue
                graph.add_edge(source_id=input_name, target_id=node_id)

        return graph
コード例 #7 (Code example 7)
    def test_tensorflow_graph_library_detection(self):
        """Converting via TensorflowModel must preserve the function library."""
        tf.compat.v1.disable_eager_execution()

        # Load the eight-bit fusion patterns from the adaptor's YAML config.
        config_path = os.path.join(os.path.dirname(__file__),
                                   "../lpot/adaptor/tensorflow.yaml")
        op_wise_sequences = TensorflowQuery(
            local_config_file=config_path).get_eightbit_patterns()

        original_graphdef = read_graph(self.pb_path)

        # Wrap the frozen pb in the framework-level model abstraction.
        framework_info = {
            'name': 'test',
            'input_tensor_names': 'input_tensor',
            'output_tensor_names': 'softmax_tensor',
            'workspace_path': "/tmp/test.pb",
        }
        model = TensorflowModel(self.pb_path, framework_info)

        converter = GraphConverter(model,
                                   int8_sequences=op_wise_sequences,
                                   qt_config={'calib_iteration': 1,
                                              'op_wise_config': {}})
        converted_graph = converter.convert()

        # The function library must survive the conversion unchanged.
        self.assertEqual(converted_graph.graph_def.library,
                         original_graphdef.library)