Example #1
def build_basic_graph():
    inputs = [Variable(name="x")]
    outputs = [Variable(name="y")]
    nodes = [
        Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
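These fixtures use the Variable/Node/Graph classes directly. For reference, the same kind of minimal graph can be built and written out through the public onnx_graphsurgeon package; a minimal sketch, assuming the usual gs alias and that onnx is installed (the file name is arbitrary):

import numpy as np
import onnx
import onnx_graphsurgeon as gs

# One input, one output, connected by a single node.
x = gs.Variable(name="x", dtype=np.float32, shape=(1, 3))
y = gs.Variable(name="y", dtype=np.float32, shape=(1, 3))
node = gs.Node(op="Identity", inputs=[x], outputs=[y])

graph = gs.Graph(nodes=[node], inputs=[x], outputs=[y])
# export_onnx() produces an onnx.ModelProto that can be saved normally.
onnx.save(gs.export_onnx(graph), "basic.onnx")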
Example #2
def make_graph():
    graph, _ = toposort_multi_tier_output_graph()
    graph.outputs.pop()
    # Deep copy should work with empty tensors
    graph.nodes[0].inputs.append(Variable.empty())
    graph.nodes[0].outputs.append(Variable.empty())
    return graph
Example #3
def ext_weights():
    path = os.path.join(TEST_ROOT, "models", "ext_weights.onnx")
    model = onnx.load(path)

    inputs = [Variable("input", shape=(1, 3), dtype=np.float32)]
    outputs = [Variable("output", shape=(1, 3), dtype=np.float32)]

    a = Constant("a", values=np.ones((1, 3), dtype=np.float32))
    b = Constant("b", values=np.ones((1, 3), dtype=np.float32))
    d = Constant("d", values=np.ones((1, 3), dtype=np.float32))

    c = Variable("c")
    e = Variable("e")

    nodes = [
        Node(op="Add", inputs=[a, b], outputs=[c]),
        Node(op="Add", inputs=[c, d], outputs=[e]),
        Node(op="Add", inputs=[inputs[0], e], outputs=outputs),
    ]

    return Model(
        path,
        inputs=inputs,
        outputs=outputs,
        nodes=nodes,
        opset=OnnxImporter.get_opset(model),
    )
Example #4
def nested_dup_names():
    path = os.path.join(TEST_ROOT, "models", "nested_dup_names.onnx")
    model = onnx.load(path)

    # Inner
    subgraph_inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    subgraph_outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]

    subgraph_node = Node(op="Identity",
                         inputs=subgraph_inputs,
                         outputs=subgraph_outputs)
    subgraph = Graph(nodes=[subgraph_node],
                     inputs=subgraph_inputs,
                     outputs=subgraph_outputs)

    # Outer - problem happens if outer node has same I/O names as subgraph
    inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]

    node = Node(op="Nested",
                inputs=inputs,
                outputs=outputs,
                attrs={"body": subgraph})
    return Model(
        path,
        inputs=inputs,
        outputs=outputs,
        nodes=[node],
        opset=OnnxImporter.get_opset(model),
    )
Example #5
    def test_fold_constants_one_hop(self):
        # Graph:
        # c = (a + b)
        # e = (c + d)
        # output = input + e
        # Should fold to:
        # output = input + e
        inp = Variable("input", shape=(1, 3), dtype=np.float32)
        a = Constant("a", values=np.ones(shape=(1, 3), dtype=np.float32))
        b = Constant("b", values=np.ones(shape=(1, 3), dtype=np.float32))
        c = Variable("c", shape=(1, 3), dtype=np.float32)
        d = Constant("d", values=np.ones(shape=(1, 3), dtype=np.float32))
        e = Variable("e", shape=(1, 3), dtype=np.float32)
        out = Variable("output", shape=(1, 3), dtype=np.float32)

        nodes = [
            Node("Add", inputs=[a, b], outputs=[c]),
            Node("Add", inputs=[c, d], outputs=[e]),
            Node("Add", inputs=[inp, e], outputs=[out]),
        ]

        graph = Graph(nodes=nodes, inputs=[inp], outputs=[out])

        graph.fold_constants().cleanup()

        # Extra nodes should be removed
        assert len(graph.nodes) == 1
        assert graph.nodes[0].inputs[0] == inp
        assert graph.nodes[0].inputs[1] == e
        # Value should be computed correctly
        assert np.all(graph.nodes[0].inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 3)
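In practice the same fold-then-cleanup pattern is applied to imported models and the folded graph is re-exported; a sketch, assuming the gs alias, placeholder file paths, and that ONNX Runtime is available to evaluate the constant subgraphs:

import onnx
import onnx_graphsurgeon as gs

graph = gs.import_onnx(onnx.load("model.onnx"))  # placeholder path
graph.fold_constants().cleanup()                 # fold, then drop dead nodes
onnx.save(gs.export_onnx(graph), "model_folded.onnx")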
Example #6
def setup_method(self):
    self.input_tensor = Variable(name="x")
    self.output_tensor = Variable(name="y")
    self.node = Node(op="Add",
                     name="Test",
                     inputs=[self.input_tensor],
                     outputs=[self.output_tensor])
Example #7
def test_o_multiple_outputs(self):
    intermediate_tensor = Variable(name="intermediate")
    intermediate_tensor2 = Variable(name="intermediate2")
    input_node = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[intermediate_tensor])
    output_node = Node(op="Add", name="Out", inputs=[intermediate_tensor], outputs=[self.output_tensor])
    output_node2 = Node(op="Add", name="Input2", inputs=[intermediate_tensor], outputs=[intermediate_tensor2])
    assert input_node.o() == output_node
    assert input_node.o(1) == output_node2
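Node.o(i) returns the i-th consumer of the node's first output tensor; the symmetric Node.i() walks to producers, so in the graph above the relationship can also be checked in reverse (a small sketch, not part of the original test):

assert output_node.i() == input_node    # producer of output_node's first input
assert output_node2.i() == input_node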
Example #8
def build_two_layer_graph_multiple_io():
    inputs = [Variable(name="x0"), Variable(name="x1")]
    intermediate_tensor = Variable(name="intermediate")
    outputs = [Variable(name="y0"), Variable(name="y1")]
    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate_tensor]),
        Node(op="Add", name="Test1", inputs=[intermediate_tensor], outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
Example #9
    def test_tensors_with_duplicates_check_disabled(self):
        inputs = [Variable(name="x")]
        outputs = [Variable(name="x")]  # Distinct tensors with the same name
        nodes = [
            Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
        ]
        graph = Graph(nodes=nodes, inputs=inputs, outputs=outputs)

        # This should *not* throw
        graph.tensors(check_duplicates=False)
Example #10
    def test_tensors_check_duplicates(self):
        inputs = [Variable(name="x")]
        outputs = [Variable(name="x")] # Distinct tensors with the same name
        nodes = [
            Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
        ]
        graph = Graph(nodes=nodes, inputs=inputs, outputs=outputs)

        with pytest.raises(OnnxGraphSurgeonException):
            graph.tensors(check_duplicates=True)
Example #11
def toposort_multi_tier_output_graph():
    inputs = [Variable(name="x")]
    outputs = [Variable(name="out0"), Variable(name="out1"), Variable(name="out2")]
    out0, out1, out2 = outputs
    nodes = [
        Node(op="Add", name="Test2", inputs=[out1], outputs=[out2]),
        Node(op="Add", name="Test0", inputs=inputs, outputs=[out0]),
        Node(op="Add", name="Test1", inputs=[out0], outputs=[out1]),
    ]
    expected_node_order = [nodes[1], nodes[2], nodes[0]]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
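The fixture returns the expected post-sort order so callers can verify Graph.toposort(); a usage sketch:

graph, expected_node_order = toposort_multi_tier_output_graph()
graph.toposort()
assert graph.nodes == expected_node_order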
Example #12
    def test_io_cannot_be_sync_list_on_init(self):
        inp = Variable("input0", shape=(1, 3), dtype=np.float32)
        out = Variable("input1", shape=(1, 3), dtype=np.float32)

        node = Node("Add", inputs=[inp], outputs=[out])
        assert isinstance(node.inputs, SynchronizedList)
        assert isinstance(node.outputs, SynchronizedList)

        graph = Graph(nodes=[node], inputs=node.inputs, outputs=node.outputs)
        assert not isinstance(graph.inputs, SynchronizedList)
        assert not isinstance(graph.outputs, SynchronizedList)
Example #13
    def test_copy_with_subgraph_dup_tensors(self):
        inp = Variable("input", dtype=np.float32, shape=(4, 5))
        graph = Graph(inputs=[inp])

        # We'll use shape to distinguish inner/outer tensor
        subgraph_inp = Variable("input", dtype=np.float32, shape=(1, 2))
        subgraph = Graph(inputs=[subgraph_inp])

        graph.outputs = [graph.nested(inp, subgraph)]

        graph_copy = graph.copy()
        assert graph_copy.nodes[0].attrs["body"].inputs[0].shape == (1, 2)
Example #14
def check_tensor(name: str):
    if name not in tensor_map:
        if name:
            G_LOGGER.debug(
                "Tensor: {:} was not generated during shape inference, or shape inference was not run on this model. Creating a new Tensor."
                .format(name))
            tensor_map[name] = Variable(name)
        else:
            # Empty tensors are not tracked by the graph, as these represent optional inputs/outputs that have been omitted.
            G_LOGGER.verbose("Generating empty tensor")
            return Variable.empty()
    return tensor_map[name]
Example #15
    def test_layer_with_tensors(self):
        x0 = Variable("x0")
        x1 = Variable("x1")
        y0 = Variable("y0")
        y1 = Variable("y1")
        graph = Graph()

        outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
        assert outputs == [y0, y1]
        assert len(graph.nodes) == 1
        assert graph.nodes[-1].inputs == [x0, x1]
        assert graph.nodes[-1].outputs == outputs
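layer() is also the building block behind Graph.register(), which turns helpers like this into graph methods; a short sketch of that pattern, assuming the gs alias (the helper name add and the output name add_out are arbitrary):

import onnx_graphsurgeon as gs

@gs.Graph.register()
def add(self, a, b):
    # Strings passed as outputs become new, uniquely named Variables.
    return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])

graph = gs.Graph()
out = graph.add(gs.Variable("x"), gs.Variable("y"))[0]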
Example #16
    def test_export_node(self):
        name = "TestNode"
        op = "Test"
        inputs = [Variable(name="input")]
        outputs = [Variable(name="output")]
        attrs = OrderedDict()
        attrs["float_attr"] = 4.0
        attrs["int_attr"] = 10
        attrs["str_attr"] = "constant"
        attrs["tensor_attr"] = Constant(
            "testTensor", np.ones(shape=(1, 2, 3, 4), dtype=np.float32))
        attrs["floats_attr"] = [1.0, 2.0, 3.0, 4.0]
        attrs["ints_attr"] = [4, 3, 2, 1]
        attrs["strings_attr"] = ["constant", "and", "variable"]
        node = Node(op=op,
                    name=name,
                    inputs=inputs,
                    outputs=outputs,
                    attrs=attrs)

        onnx_node = OnnxExporter.export_node(node)
        assert onnx_node.name == name
        assert onnx_node.op_type == op
        assert onnx_node.input == ["input"]
        assert onnx_node.output == ["output"]
        for onnx_attr, (name, attr) in zip(onnx_node.attribute, attrs.items()):
            assert onnx_attr.name == name
            if isinstance(attr, float):
                assert onnx_attr.f == attr
            elif isinstance(attr, int):
                assert onnx_attr.i == attr
            elif isinstance(attr, str):
                assert onnx_attr.s.decode() == attr
            elif isinstance(attr, Tensor):
                assert onnx_attr.t.SerializeToString(
                ) == OnnxExporter.export_tensor_proto(
                    attr).SerializeToString()
            elif isinstance(attr, list):
                if isinstance(attr[0], float):
                    assert onnx_attr.floats == attr
                elif isinstance(attr[0], int):
                    assert onnx_attr.ints == attr
                elif isinstance(attr[0], str):
                    assert [s.decode() for s in onnx_attr.strings] == attr
                else:
                    raise AssertionError(
                        "Unrecognized list attribute: ({:}: {:}) of type: {:}".
                        format(name, attr, type(attr)))
            else:
                raise AssertionError(
                    "Unrecognized attribute: ({:}: {:}) of type: {:}".format(
                        name, attr, type(attr)))
Example #17
    def test_no_foldable_constants(self):
        inp0 = Variable("input0", shape=(1, 3), dtype=np.float32)
        inp1 = Variable("input1", shape=(1, 3), dtype=np.float32)
        out = Variable("output", shape=(1, 3), dtype=np.float32)

        nodes = [Node("Add", inputs=[inp0, inp1], outputs=[out])]

        graph = Graph(nodes=nodes, inputs=[inp0, inp1], outputs=[out])

        graph.fold_constants().cleanup()

        assert len(graph.nodes) == 1
        assert graph.nodes[0].inputs == [inp0, inp1]
Example #18
def identity_model():
    path = os.path.join(TEST_ROOT, "models", "identity.onnx")
    model = onnx.load(path)

    x = Variable(name="x", dtype=np.float32, shape=(1, 1, 2, 2))
    y = Variable(name="y", dtype=np.float32, shape=(1, 1, 2, 2))
    node = Node(op="Identity", inputs=[x], outputs=[y])

    return Model(path,
                 inputs=[x],
                 outputs=[y],
                 nodes=[node],
                 opset=OnnxImporter.get_opset(model))
Example #19
def dim_param_model():
    path = os.path.join(TEST_ROOT, "models", "dim_param.onnx")
    model = onnx.load(path)

    x = Variable(name="Input:0", dtype=np.float32, shape=("dim0", 16, 128))
    y = Variable(name="Output:0", dtype=np.float32, shape=("dim0", 16, 128))
    node = Node(op="Identity", inputs=[x], outputs=[y])

    return Model(path,
                 inputs=[x],
                 outputs=[y],
                 nodes=[node],
                 opset=OnnxImporter.get_opset(model))
Example #20
    def test_cleanup_independent_path(self):
        graph, _ = toposort_linear_graph()
        # Build out a path totally unrelated to rest of the graph
        indep0 = Variable(name="indep0")
        indep1 = Variable(name="indep1")
        node = Node(op="IndepTest", inputs=[indep0], outputs=[indep1])
        graph.inputs.append(indep0) # Unused inputs should be removed as well
        graph.nodes.append(node)
        graph.cleanup()
        assert indep0 not in graph.inputs
        assert node not in graph.nodes

        tensor_map = graph.tensors()
        assert indep0.name not in tensor_map
        assert indep1.name not in tensor_map
Example #21
def toposort_linear_graph():
    inputs = [Variable(name="x")]
    intermediate0 = Variable(name="intermediate0")
    intermediate1 = Variable(name="intermediate1")
    intermediate2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]
    # Nodes are NOT in topo order.
    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
        Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
        Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
        Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
    ]
    expected_node_order = [nodes[0], nodes[3], nodes[1], nodes[2]]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
Example #22
    def test_independent_path(self, remove_unused_graph_inputs):
        graph, _ = toposort_linear_graph()
        # Build out a path totally unrelated to rest of the graph
        indep0 = Variable(name="indep0")
        indep1 = Variable(name="indep1")
        node = Node(op="IndepTest", inputs=[indep0], outputs=[indep1])
        graph.nodes.append(node)
        graph.inputs.append(indep0)
        graph.cleanup(remove_unused_graph_inputs=remove_unused_graph_inputs)
        assert indep0 not in graph.inputs or not remove_unused_graph_inputs
        assert node not in graph.nodes or not remove_unused_graph_inputs

        tensor_map = graph.tensors()
        assert indep0.name not in tensor_map or not remove_unused_graph_inputs
        assert indep1.name not in tensor_map or not remove_unused_graph_inputs
Example #23
def setup_method(self, field_names):
    self.tensors = [
        Variable(name="test_tensor_{:}".format(i),
                 dtype=np.float32,
                 shape=(1, 3, 224, 224)) for i in range(10)
    ]
    self.node = Node(op="Dummy")
Example #24
File: graph.py  Project: ztt-21/TensorRT
def process_io(io):
    new_io = []
    for elem in io:
        if isinstance(elem, Tensor):
            new_io.append(elem)
        elif isinstance(elem, str):
            tensor = Variable(name=self._generate_name(elem))
            new_io.append(tensor)
        elif isinstance(elem, np.ndarray):
            new_io.append(
                Constant(name=self._generate_name(
                    "onnx_graphsurgeon_constant"),
                         values=elem))
        elif isinstance(elem, list) or isinstance(elem, tuple):
            dtype = np.float32 if any(
                [isinstance(x, float) for x in elem]) else np.int64
            arr = np.array(elem, dtype=dtype)
            new_io.append(
                Constant(name=self._generate_name(
                    "onnx_graphsurgeon_lst_constant"),
                         values=arr))
        else:
            G_LOGGER.critical(
                "Unrecognized type passed to Graph.layer: {:}.\n"
                "\tHint: Did you forget to unpack a list with `*`?\n"
                "\tPlease use Tensors, strings, or NumPy arrays.".
                format(elem))
    return new_io
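Because of this coercion, Graph.layer() accepts a mix of Tensor objects, plain strings, and NumPy arrays or Python lists; a sketch of what that allows, assuming the gs alias (tensor and output names here are illustrative):

import numpy as np
import onnx_graphsurgeon as gs

graph = gs.Graph()
x = gs.Variable("x", dtype=np.float32, shape=(1, 3))

# The Tensor is used as-is, the array becomes a Constant,
# and the string becomes a new Variable with a generated name.
outputs = graph.layer(op="Concat",
                      inputs=[x, np.ones((1, 3), dtype=np.float32), "extra"],
                      outputs=["concat_out"],
                      attrs={"axis": 0})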
Example #25
    def test_shape_gather(self, shape, indices):
        indices = np.array(indices)

        inp = Variable("input", dtype=np.float32, shape=shape)
        graph = Graph(inputs=[inp])

        inp_shape = graph.shape(inp)
        shape_part = graph.gather(inp_shape, indices=indices)
        graph.outputs = [
            graph.add(shape_part, shape_part),
            graph.gather(inp_shape, indices=[0]),
            graph.gather(inp_shape, indices=np.array(0)),
        ]

        graph.fold_constants()

        if shape is not None:
            assert isinstance(graph.outputs[0], Constant)
            expected_shape = np.array(shape)[indices].astype(np.int64) * 2
            assert np.all(graph.outputs[0].values == expected_shape)
        else:
            assert isinstance(graph.outputs[0], Variable)

        assert isinstance(graph.outputs[1], Variable)
        assert isinstance(graph.outputs[2], Variable)
Example #26
    def test_equal_outputs_unequal(self):
        g0 = make_nested_graph()
        g1 = make_nested_graph()

        g0.outputs.append(Variable("test"))

        assert not (g0 == g1)
Example #27
        def get_tensor(name: str, check_outer_graph=True):
            # Prioritize the subgraph even if check_outer_graph is set
            if name in subgraph_tensor_map:
                return subgraph_tensor_map[name]

            if check_outer_graph and name in tensor_map:
                return tensor_map[name]

            if not name:
                # Empty tensors are not tracked by the graph, as these represent optional inputs/outputs that have been omitted.
                G_LOGGER.verbose("Generating empty tensor")
                return Variable.empty()

            G_LOGGER.verbose("Tensor: {:} was not generated during shape inference, or shape inference was not run on this model. Creating a new Tensor.".format(name))
            subgraph_tensor_map[name] = Variable(name)
            return subgraph_tensor_map[name]
Example #28
def tensors_linear_graph():
    inputs = [Variable(name="x")]
    intermediate0 = Variable(name="intermediate0")
    intermediate1 = Variable(name="intermediate1")
    intermediate2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]

    tensors = inputs + [intermediate0, intermediate1, intermediate2] + outputs
    tensors = {tensor.name: tensor for tensor in tensors}
    # Nodes are NOT in topo order.
    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
        Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
        Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
        Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), nodes, tensors
Example #29
def test_setitem(self, field_names):
    nlist, tensor_field = self.get_lists(field_names)
    nlist.append(self.tensors[0])
    new_tensor = Variable("new_tensor")
    nlist[0] = new_tensor
    assert nlist[0] == new_tensor
    assert len(getattr(self.tensors[0], tensor_field)) == 0
    assert getattr(new_tensor, tensor_field)[0] == self.node
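Written out concretely for the inputs field, this is the synchronization the parametrized test checks: reassigning an element of node.inputs also updates the affected tensors' consumer lists.

node = Node(op="Identity")
old_tensor = Variable("old")
new_tensor = Variable("new")

node.inputs.append(old_tensor)
assert node in old_tensor.outputs   # old_tensor now lists node as a consumer

node.inputs[0] = new_tensor
assert not old_tensor.outputs       # the back-reference is removed...
assert node in new_tensor.outputs   # ...and transferred to the new tensor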
Example #30
    def test_export_variable_tensor_empty_shape(self):
        shape = None

        tensor = Variable(dtype=np.float32,
                          shape=shape,
                          name="variable_tensor")
        onnx_tensor = OnnxExporter.export_value_info_proto(tensor,
                                                           do_type_check=True)
        assert not onnx_tensor.type.tensor_type.HasField("shape")