Code Example #1
File: onnx_models.py  Project: celidos/TensorRT_study
def check_list(aclist, exlist):
    G_LOGGER.debug(
        "Actual node list: {:}\n\nExpected node list: {:}".
        format(aclist, exlist))
    assert len(aclist) == len(exlist)
    for acnode, exnode in zip(aclist, exlist):
        assert acnode == exnode
Code Example #2
    def cleanup(self, remove_unused_node_outputs=False):
        """
        Removes unused nodes and tensors from the graph.
        A node or tensor is considered unused if it does not contribute to any of the graph outputs.

        Additionally, any producer nodes of graph input tensors are removed from the graph.

        *Note: This function will never modify graph output tensors.*

        Args:
            remove_unused_node_outputs (bool): Whether to remove unused output tensors of nodes. This will never remove
                empty-tensor (i.e. optional, but omitted) outputs. Defaults to False.

        Returns:
            self
        """
        with self.node_ids():
            # Graph inputs cannot have producers
            for inp in self.inputs:
                inp.inputs.clear()

            used_node_ids, used_tensors = self._get_used_node_ids()

            inputs = []
            for inp in self.inputs:
                if inp in used_tensors:
                    inputs.append(inp)
                else:
                    G_LOGGER.debug("Removing unused input: {:}".format(inp))
            self.inputs = inputs

            nodes = []
            for node in self.nodes:
                if self._get_node_id(node) in used_node_ids:
                    nodes.append(node)
                else:
                    node.inputs.clear()
                    node.outputs.clear()
                    G_LOGGER.verbose("Removing unused node: {:}".format(node))

            # Last pass to remove any hanging tensors - tensors without outputs
            if remove_unused_node_outputs:
                graph_output_names = set(
                    [tensor.name for tensor in self.outputs])
                for node in nodes:

                    def is_hanging_tensor(tensor):
                        return not tensor.is_empty() and len(
                            tensor.outputs
                        ) == 0 and tensor.name not in graph_output_names

                    to_remove = [
                        out for out in node.outputs if is_hanging_tensor(out)
                    ]
                    for out in to_remove:
                        if out in node.outputs:
                            node.outputs.remove(out)

            self.nodes = nodes
            return self
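A minimal usage sketch of cleanup(), assuming the graph class above is exposed through the public onnx-graphsurgeon entry points (import_onnx/export_onnx); the exact import path inside the TensorRT source tree may differ.

import onnx
import onnx_graphsurgeon as gs  # assumed package name for the module above

graph = gs.import_onnx(onnx.load("model.onnx"))

# Drop nodes and tensors that do not contribute to any graph output, and also
# prune node outputs that nothing consumes (graph outputs are never modified).
graph.cleanup(remove_unused_node_outputs=True)

onnx.save(gs.export_onnx(graph), "model_cleaned.onnx")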
Code Example #3
def check_tensor(name: str):
    if name not in tensor_map:
        if name:
            G_LOGGER.debug(
                "Tensor: {:} was not generated during shape inference, or shape inference was not run on this model. Creating a new Tensor."
                .format(name))
            tensor_map[name] = Variable(name)
        else:
            # Empty tensors are not tracked by the graph, as these represent optional inputs/outputs that have been omitted.
            G_LOGGER.verbose("Generating empty tensor")
            return Variable.empty()
    return tensor_map[name]
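To illustrate the empty-tensor convention that check_tensor() relies on, here is a short sketch, assuming the Variable class above matches the public onnx-graphsurgeon API:

import onnx_graphsurgeon as gs  # assumed import path

opt = gs.Variable.empty()       # stands in for an omitted optional input/output
print(opt.is_empty())           # True: the name is "", so the graph does not track it
named = gs.Variable("scores")   # a normal symbolic tensor, created on demand
print(named.is_empty())         # False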
Code Example #4
File: onnx_models.py  Project: celidos/TensorRT_study
            def check_tensor_io(actensor, extensor):
                def check_list(aclist, exlist):
                    G_LOGGER.debug(
                        "Actual node list: {:}\n\nExpected node list: {:}".
                        format(aclist, exlist))
                    assert len(aclist) == len(exlist)
                    for acnode, exnode in zip(aclist, exlist):
                        assert acnode == exnode

                G_LOGGER.debug("Checking tensor: {:} inputs".format(
                    actensor.name))
                check_list(actensor.inputs, extensor.inputs)
                G_LOGGER.debug("Checking tensor: {:} outputs".format(
                    actensor.name))
                check_list(actensor.outputs, extensor.outputs)
Code Example #5
File: graph.py  Project: npanpaliya/TensorRT-1
    def cleanup(self, remove_unused_node_outputs=True):
        """
        Removes unused nodes and tensors from the graph.
        A node or tensor is considered unused if it does not contribute to any of the graph outputs.

        Note: This function will never modify graph output tensors.

        Optional Args:
            remove_unused_node_outputs (bool): Whether to remove unused output tensors of nodes. This will never remove
                empty tensor outputs. If this is set to False, outputs of nodes kept in the graph will not be modified.

        Returns:
            self
        """
        with self.node_ids():
            used_node_ids, used_tensors = self._get_used_node_ids()

            inputs = []
            for inp in self.inputs:
                if inp in used_tensors:
                    inputs.append(inp)
                else:
                    G_LOGGER.debug("Removing unused input: {:}".format(inp))
            self.inputs = inputs

            nodes = []
            for node in self.nodes:
                if self._get_node_id(node) in used_node_ids:
                    nodes.append(node)
                else:
                    node.inputs.clear()
                    node.outputs.clear()
                    G_LOGGER.verbose("Removing unused node: {:}".format(node))

            # Last pass to remove any hanging tensors - tensors without outputs
            if remove_unused_node_outputs:
                graph_output_names = set([tensor.name for tensor in self.outputs])
                for node in nodes:
                    def is_hanging_tensor(tensor):
                        return not tensor.is_empty() and len(tensor.outputs) == 0 and tensor.name not in graph_output_names

                    # Collect first, then remove: mutating node.outputs while iterating over it would skip elements.
                    to_remove = [out for out in node.outputs if is_hanging_tensor(out)]
                    for out in to_remove:
                        node.outputs.remove(out)

            self.nodes = nodes
            return self
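Compared with Code Example #2, this older revision defaults remove_unused_node_outputs to True, so dangling node outputs are pruned unless the caller opts out, whereas the newer revision leaves node outputs untouched unless the caller explicitly opts in. It also does not clear the producers of graph input tensors before computing the used-node set, a step the newer revision performs up front.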
Code Example #6
File: onnx_models.py  Project: celidos/TensorRT_study
    def assert_equal(self, graph: Graph):
        assert graph.inputs == self.inputs
        G_LOGGER.debug("Graph inputs matched")

        # Break down fields to make debugging failures easier.
        for actual, expected in zip(graph.nodes, self.nodes):

            def check_tensor_io(actensor, extensor):
                def check_list(aclist, exlist):
                    G_LOGGER.debug(
                        "Actual node list: {:}\n\nExpected node list: {:}".
                        format(aclist, exlist))
                    assert len(aclist) == len(exlist)
                    for acnode, exnode in zip(aclist, exlist):
                        assert acnode == exnode

                G_LOGGER.debug("Checking tensor: {:} inputs".format(
                    actensor.name))
                check_list(actensor.inputs, extensor.inputs)
                G_LOGGER.debug("Checking tensor: {:} outputs".format(
                    actensor.name))
                check_list(actensor.outputs, extensor.outputs)

            G_LOGGER.debug("Actual Node: {:}\n\nExpected Node: {:}".format(
                actual, expected))
            assert actual.op == expected.op
            assert actual.inputs == expected.inputs
            # Check I/O of input tensors
            for acinp, exinp in zip(actual.inputs, expected.inputs):
                check_tensor_io(acinp, exinp)

            assert actual.outputs == expected.outputs
            # Check I/O of output tensors
            for acout, exout in zip(actual.outputs, expected.outputs):
                check_tensor_io(acout, exout)

            assert actual.name == expected.name
            assert len(actual.attrs) == len(expected.attrs)
            for (ackey, acval), (exkey, exval) in zip(actual.attrs.items(),
                                                      expected.attrs.items()):
                assert ackey == exkey
                assert acval == exval
            assert actual == expected
        G_LOGGER.debug("Graph nodes matched")

        assert graph.outputs == self.outputs
        G_LOGGER.debug("Graph outputs matched")
Code Example #7
    def assert_equal(self, graph: Graph):

        assert graph.inputs == self.inputs
        G_LOGGER.debug("Graph inputs matched")

        for actual, expected in zip(graph.nodes, self.nodes):
            G_LOGGER.debug("Actual Node: {:}.\n\nExpected Node: {:}".format(
                actual, expected))
            # Break down fields to make debugging failures easier.
            assert actual.op == expected.op
            assert actual.inputs == expected.inputs
            assert actual.outputs == expected.outputs
            assert actual.name == expected.name
            for (akey, aval), (ekey, exval) in zip(actual.attrs.items(),
                                                   expected.attrs.items()):
                assert akey == ekey
                assert aval == exval
            assert actual == expected
        G_LOGGER.debug("Graph nodes matched")

        assert graph.outputs == self.outputs
        G_LOGGER.debug("Graph outputs matched")
Code Example #8
    def import_graph(onnx_graph: onnx.GraphProto,
                     tensor_map: "OrderedDict[str, Tensor]" = None,
                     opset=None) -> Graph:
        """
        Imports a Graph from an ONNX Graph.

        Args:
            onnx_graph (onnx.GraphProto): The ONNX graph to import.

            tensor_map (OrderedDict[str, Tensor]): A mapping of tensor names to Tensors. This is generally only useful for subgraph import.
            opset (int): The ONNX opset to use for this graph.
        """
        tensor_map = copy.copy(misc.default_value(
            tensor_map, OrderedDict()))  # Outer graph tensors, read-only
        subgraph_tensor_map = OrderedDict()  # Tensors in this subgraph

        # Retrieves a Tensor from subgraph_tensor_map or the outer graph (tensor_map) if present, otherwise imports the tensor
        # If overwrite=True, this function will overwrite previously imported tensors
        # if the new tensor has more information available.
        def get_tensor(onnx_tensor: Union[onnx.ValueInfoProto,
                                          onnx.TensorProto],
                       overwrite=False,
                       check_outer_graph=True) -> Tensor:
            # Prioritize the subgraph even if check_outer_graph is set
            if onnx_tensor.name in subgraph_tensor_map:
                if overwrite:
                    tensor = OnnxImporter.import_tensor(onnx_tensor)
                    if isinstance(subgraph_tensor_map[onnx_tensor.name],
                                  Variable):
                        subgraph_tensor_map[
                            onnx_tensor.name].dtype = subgraph_tensor_map[
                                onnx_tensor.name].dtype or tensor.dtype
                        subgraph_tensor_map[
                            onnx_tensor.name].shape = subgraph_tensor_map[
                                onnx_tensor.name].shape or tensor.shape
                return subgraph_tensor_map[onnx_tensor.name]

            if check_outer_graph and onnx_tensor.name in tensor_map:
                return tensor_map[onnx_tensor.name]

            subgraph_tensor_map[onnx_tensor.name] = OnnxImporter.import_tensor(
                onnx_tensor)
            return subgraph_tensor_map[onnx_tensor.name]

        # Import initializers contents into Constants.
        G_LOGGER.debug("Importing initializers")
        for initializer in onnx_graph.initializer:
            get_tensor(initializer)

        # Import all tensors whose shapes are known. Tensors may be repeated, and some of these
        # duplicates may not include shape/dtype information, so overwrite is set to True
        # so that we can capture all the information available about the tensor
        G_LOGGER.debug("Importing tensors with known shapes")
        for tensor in onnx_graph.value_info:
            get_tensor(tensor, overwrite=True)

        # Import graph inputs and outputs. Initializers are not considered to be inputs.
        # Graph inputs and outputs can never come from the outer graph!
        initializer_names = set(
            [tensor.name for tensor in onnx_graph.initializer])
        G_LOGGER.debug("Importing graph inputs")
        graph_inputs = []  # List[Tensor]
        for inp in onnx_graph.input:
            if inp.name not in initializer_names:
                tensor = get_tensor(inp, check_outer_graph=False)
                graph_inputs.append(tensor)

        G_LOGGER.debug("Importing graph outputs")
        graph_outputs = []  # List[Tensor]
        for out in onnx_graph.output:
            tensor = get_tensor(out, check_outer_graph=False)
            graph_outputs.append(tensor)

        G_LOGGER.debug("Importing nodes")
        nodes = []  # List[Node]
        for onnx_node in onnx_graph.node:
            node = OnnxImporter.import_node(onnx_node, tensor_map,
                                            subgraph_tensor_map)
            nodes.append(node)

        return Graph(nodes=nodes,
                     inputs=graph_inputs,
                     outputs=graph_outputs,
                     name=onnx_graph.name,
                     doc_string=onnx_graph.doc_string,
                     opset=opset)
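A hedged driver sketch for the importer, assuming the module is packaged as onnx-graphsurgeon, where import_onnx() wraps OnnxImporter.import_graph():

import onnx
import onnx_graphsurgeon as gs  # assumed package name

model = onnx.load("model.onnx")
graph = gs.import_onnx(model)   # builds a Graph from model.graph using the model's opset

# Initializers are imported as constants and excluded from graph.inputs.
print([inp.name for inp in graph.inputs])
print(len(graph.nodes), "nodes imported")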