def make_nested_graph():
    """Return a Graph containing a "Nested" node whose subgraph also reads an outer-graph tensor."""
    inp = Variable("input")
    id_out = Variable("id_out")
    identity = Node(op="Identity", inputs=[inp], outputs=[id_out])

    # Subgraph outputs come from the parent node, but nodes in the subgraph
    # can use tensors from the outer graphs too.
    inner_inputs = [Variable("subgraph_inp")]
    inner_id_out = Variable("subgraph_id_out")
    inner_outputs = [Variable("subgraph_out")]

    # The first inner Identity deliberately consumes `id_out`, which lives in the outer graph.
    inner_nodes = [
        Node(op="Identity", inputs=[id_out], outputs=[inner_id_out]),
        Node(op="Identity", inputs=[inner_id_out], outputs=inner_outputs),
    ]
    subgraph = Graph(nodes=inner_nodes, inputs=inner_inputs, outputs=inner_outputs)

    nested_out = Variable("nested_out")
    nested_node = Node(op="Nested", attrs={"body": subgraph}, inputs=[inp], outputs=[nested_out])
    return Graph(nodes=[identity, nested_node], inputs=[inp], outputs=[nested_out])
def setup_method(self):
    """Create a Constant tensor plus a producer and a consumer node shared by this class's tests."""
    self.tensor = Constant(name="test_tensor", values=np.ones((1, 3, 5, 5), dtype=np.float64))
    # A node *producing* a Constant doesn't make sense, but it is needed to make the base tests happy.
    self.input_node = Node(op="Add", outputs=[self.tensor])
    self.output_node = Node(op="Add", inputs=[self.tensor])
def test_fold_constants_one_hop(self):
    """Folding should collapse the purely-constant subchain into a single precomputed tensor.

    Graph:
        c = (a + b)
        e = (c + d)
        output = input + e
    Should fold to:
        output = input + e   (with `e` now holding a computed value)
    """

    def ones():
        return np.ones(shape=(1, 3), dtype=np.float32)

    inp = Variable("input", shape=(1, 3), dtype=np.float32)
    a = Constant("a", values=ones())
    b = Constant("b", values=ones())
    c = Variable("c", shape=(1, 3), dtype=np.float32)
    d = Constant("d", values=ones())
    e = Variable("e", shape=(1, 3), dtype=np.float32)
    out = Variable("output", shape=(1, 3), dtype=np.float32)

    graph = Graph(
        nodes=[
            Node("Add", inputs=[a, b], outputs=[c]),
            Node("Add", inputs=[c, d], outputs=[e]),
            Node("Add", inputs=[inp, e], outputs=[out]),
        ],
        inputs=[inp],
        outputs=[out],
    )
    graph.fold_constants().cleanup()

    # Extra nodes should be removed; only the final Add remains.
    assert len(graph.nodes) == 1
    surviving = graph.nodes[0]
    assert surviving.inputs[0] == inp
    assert surviving.inputs[1] == e
    # Value should be computed correctly: (1 + 1) + 1 == 3 elementwise.
    assert np.all(surviving.inputs[1].values == ones() * 3)
def ext_weights():
    """Fixture: model with three chained Adds, matching models/ext_weights.onnx."""
    path = os.path.join(TEST_ROOT, "models", "ext_weights.onnx")
    model = onnx.load(path)

    inputs = [Variable("input", shape=(1, 3), dtype=np.float32)]
    outputs = [Variable("output", shape=(1, 3), dtype=np.float32)]

    a = Constant("a", values=np.ones((1, 3), dtype=np.float32))
    b = Constant("b", values=np.ones((1, 3), dtype=np.float32))
    d = Constant("d", values=np.ones((1, 3), dtype=np.float32))
    c = Variable("c")
    e = Variable("e")

    nodes = [
        Node(op="Add", inputs=[a, b], outputs=[c]),
        Node(op="Add", inputs=[c, d], outputs=[e]),
        Node(op="Add", inputs=[inputs[0], e], outputs=outputs),
    ]
    return Model(path, inputs=inputs, outputs=outputs, nodes=nodes, opset=OnnxImporter.get_opset(model))
def nested_dup_names():
    """Fixture: nested model where the outer node's I/O names collide with the subgraph's."""
    path = os.path.join(TEST_ROOT, "models", "nested_dup_names.onnx")
    model = onnx.load(path)

    # Inner graph: a single Identity from "X" to "Y".
    inner_inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    inner_outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
    inner_node = Node(op="Identity", inputs=inner_inputs, outputs=inner_outputs)
    subgraph = Graph(nodes=[inner_node], inputs=inner_inputs, outputs=inner_outputs)

    # Outer graph - the problem happens when the outer node has the same I/O names as the subgraph.
    inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
    node = Node(op="Nested", inputs=inputs, outputs=outputs, attrs={"body": subgraph})

    return Model(
        path,
        inputs=inputs,
        outputs=outputs,
        nodes=[node],
        opset=OnnxImporter.get_opset(model),
    )
def test_o_multiple_outputs(self):
    """Tensor.o(i) should walk to the output tensor of the i-th consumer node."""
    x = Variable(name="x")
    y = Variable(name="y")
    y2 = Variable(name="y2")
    # Constructing the Nodes links them to `x`; the local names themselves are otherwise unused.
    consumer0 = Node(op="Add", name="Input", inputs=[x], outputs=[y])
    consumer1 = Node(op="Add", name="Input", inputs=[x], outputs=[y2])
    assert x.o() == y
    assert x.o(1) == y2
def build_two_layer_graph_multiple_io():
    """Two-node graph with two inputs and two outputs joined by a single intermediate tensor."""
    inputs = [Variable(name="x0"), Variable(name="x1")]
    outputs = [Variable(name="y0"), Variable(name="y1")]
    middle = Variable(name="intermediate")
    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[middle]),
        Node(op="Add", name="Test1", inputs=[middle], outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
def test_o(self):
    """Node.o() should return the node consuming this node's output tensor."""
    middle = Variable(name="intermediate")
    producer = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[middle])
    consumer = Node(op="Add", name="Out", inputs=[middle], outputs=[self.output_tensor])
    assert producer.o() == consumer
def toposort_multi_tier_output_graph():
    """Graph whose outputs chain off one another; nodes deliberately listed out of topo order."""
    inputs = [Variable(name="x")]
    out0 = Variable(name="out0")
    out1 = Variable(name="out1")
    out2 = Variable(name="out2")
    outputs = [out0, out1, out2]

    last = Node(op="Add", name="Test2", inputs=[out1], outputs=[out2])
    first = Node(op="Add", name="Test0", inputs=inputs, outputs=[out0])
    second = Node(op="Add", name="Test1", inputs=[out0], outputs=[out1])

    nodes = [last, first, second]
    expected_node_order = [first, second, last]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def toposort_multi_tier_input_graph():
    """Graph with four inputs feeding a chain of intermediates; nodes listed out of topo order."""
    x0 = Variable(name="x0")
    x1 = Variable(name="x1")
    x2 = Variable(name="x2")
    x3 = Variable(name="x3")
    inputs = [x0, x1, x2, x3]

    int0 = Variable(name="intermediate0")
    int1 = Variable(name="intermediate1")
    outputs = [Variable(name="out")]

    final = Node(op="Add", name="Test2", inputs=[int1, x3], outputs=outputs)
    first = Node(op="Add", name="Test0", inputs=[x2, x1], outputs=[int0])
    second = Node(op="Add", name="Test1", inputs=[int0, x0], outputs=[int1])

    nodes = [final, first, second]
    expected_node_order = [first, second, final]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def toposort_linear_graph():
    """Linear four-node chain whose node list is deliberately NOT in topological order."""
    inputs = [Variable(name="x")]
    mid0 = Variable(name="intermediate0")
    mid1 = Variable(name="intermediate1")
    mid2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]

    n0 = Node(op="Add", name="Test0", inputs=inputs, outputs=[mid0])
    n1 = Node(op="Add", name="Test1", inputs=[mid0], outputs=[mid1])
    n2 = Node(op="Add", name="Test2", inputs=[mid1], outputs=[mid2])
    n3 = Node(op="Add", name="Test3", inputs=[mid2], outputs=outputs)

    # Scrambled on purpose so toposort has real work to do.
    nodes = [n0, n2, n3, n1]
    expected_node_order = [n0, n1, n2, n3]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def build_basic_graph():
    """Smallest possible graph: one Add node from a single input to a single output."""
    inputs = [Variable(name="x")]
    outputs = [Variable(name="y")]
    only_node = Node(op="Add", name="Test", inputs=inputs, outputs=outputs)
    return Graph(nodes=[only_node], inputs=inputs, outputs=outputs)
def setup_method(self, field_names):
    """Create ten uniform test tensors and an I/O-less dummy node for each test."""
    self.tensors = [
        Variable(name="test_tensor_{:}".format(idx), dtype=np.float32, shape=(1, 3, 224, 224))
        for idx in range(10)
    ]
    self.node = Node(op="Dummy")
def setup_method(self):
    """Create a single Add node with one input and one output tensor for each test."""
    self.input_tensor = Variable(name="x")
    self.output_tensor = Variable(name="y")
    self.node = Node(
        op="Add",
        name="Test",
        inputs=[self.input_tensor],
        outputs=[self.output_tensor],
    )
def test_i_multiple_inputs(self):
    """Tensor.i(i) should walk to the i-th input tensor of the producer node."""
    x = Variable(name="x")
    x2 = Variable(name="x2")
    y = Variable(name="y")
    # Constructing the Node links it to `y`; the local name itself is otherwise unused.
    producer = Node(op="Add", name="Input", inputs=[x, x2], outputs=[y])
    assert y.i() == x
    assert y.i(1) == x2
def tensors_linear_graph():
    """Linear chain graph; also returns its node list and a name -> tensor mapping."""
    inputs = [Variable(name="x")]
    mid0 = Variable(name="intermediate0")
    mid1 = Variable(name="intermediate1")
    mid2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]

    all_tensors = inputs + [mid0, mid1, mid2] + outputs
    tensors = {t.name: t for t in all_tensors}

    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[mid0]),
        Node(op="Add", name="Test1", inputs=[mid0], outputs=[mid1]),
        Node(op="Add", name="Test2", inputs=[mid1], outputs=[mid2]),
        Node(op="Add", name="Test3", inputs=[mid2], outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), nodes, tensors
def layer(self, inputs=None, outputs=None, *args, **kwargs):
    """
    Creates a node, adds it to this graph, and optionally creates its input and output tensors.

    The input and output lists can include various different types:

        - ``Tensor``:
                Any Tensors provided will be used as-is in the inputs/outputs of the node created.
        - ``str``:
                If a string is provided, this function will generate a new tensor using
                the string to generate a name. It will append an index to the end of the provided
                string to attempt to avoid duplicate tensor names, but since this doesn't guarantee
                that the name will be unique, you should try to ensure that the string provided is
                as unique as possible.
        - ``numpy.ndarray``:
                If a NumPy array is provided, this function will generate a Constant tensor
                using the name prefix: "onnx_graphsurgeon_constant"
        - ``Union[List[Number], Tuple[Number]]``:
                If a list or tuple of numbers (int or float) is provided, this function will
                generate a Constant tensor using the name prefix: "onnx_graphsurgeon_lst_constant"

    Args:
        inputs (List[Union[Tensor, str, numpy.ndarray]]): The list of inputs
        outputs (List[Union[Tensor, str, numpy.ndarray]]): The list of outputs
        args/kwargs: These are passed directly to the constructor of Node

    Returns:
        List[Tensor]: The output tensors of the node
    """
    # Use None sentinels instead of mutable `[]` defaults so a single shared list object is
    # never reused across calls.
    inputs = [] if inputs is None else inputs
    outputs = [] if outputs is None else outputs

    def process_io(io):
        # Normalize each element into a Tensor; see the docstring for the accepted types.
        new_io = []
        for elem in io:
            if isinstance(elem, Tensor):
                new_io.append(elem)
            elif isinstance(elem, str):
                new_io.append(Variable(name=self._generate_name(elem)))
            elif isinstance(elem, np.ndarray):
                new_io.append(Constant(name=self._generate_name("onnx_graphsurgeon_constant"), values=elem))
            elif isinstance(elem, (list, tuple)):
                # Any float in the sequence promotes the whole constant to float32; otherwise int64.
                dtype = np.float32 if any(isinstance(x, float) for x in elem) else np.int64
                arr = np.array(elem, dtype=dtype)
                new_io.append(Constant(name=self._generate_name("onnx_graphsurgeon_lst_constant"), values=arr))
            else:
                G_LOGGER.critical("Unrecognized type passed to Graph.layer: {:}.\n"
                                  "\tHint: Did you forget to unpack a list with `*`?\n"
                                  "\tPlease use Tensors, strings, or NumPy arrays.".format(elem))
        return new_io

    inputs = process_io(inputs)
    outputs = process_io(outputs)

    if "name" not in kwargs:
        kwargs["name"] = self._generate_name("onnx_graphsurgeon_node")

    node = Node(*args, **kwargs, inputs=inputs, outputs=outputs)
    self.nodes.append(node)
    return node.outputs
def test_tensors_with_duplicates_check_disabled(self):
    """With check_duplicates=False, duplicate tensor names must not raise."""
    inputs = [Variable(name="x")]
    # Distinct tensor objects that share the same name.
    outputs = [Variable(name="x")]
    graph = Graph(
        nodes=[Node(op="Add", name="Test", inputs=inputs, outputs=outputs)],
        inputs=inputs,
        outputs=outputs,
    )
    # This should *not* throw.
    graph.tensors(check_duplicates=False)
def test_tensors_check_duplicates(self):
    """With check_duplicates=True, duplicate tensor names must raise."""
    inputs = [Variable(name="x")]
    # Distinct tensor objects that share the same name.
    outputs = [Variable(name="x")]
    graph = Graph(
        nodes=[Node(op="Add", name="Test", inputs=inputs, outputs=outputs)],
        inputs=inputs,
        outputs=outputs,
    )
    with pytest.raises(OnnxGraphSurgeonException):
        graph.tensors(check_duplicates=True)
def test_io_cannot_be_sync_list_on_init(self):
    """Graph I/O lists must be plain lists even when initialized from a node's SynchronizedLists."""
    inp = Variable("input0", shape=(1, 3), dtype=np.float32)
    out = Variable("input1", shape=(1, 3), dtype=np.float32)
    node = Node("Add", inputs=[inp], outputs=[out])

    # The node side *is* synchronized...
    assert isinstance(node.inputs, SynchronizedList)
    assert isinstance(node.outputs, SynchronizedList)

    # ...but the graph must copy them into ordinary lists.
    graph = Graph(nodes=[node], inputs=node.inputs, outputs=node.outputs)
    assert not isinstance(graph.inputs, SynchronizedList)
    assert not isinstance(graph.outputs, SynchronizedList)
def scan_model():
    """Fixture: Scan node with a running-sum body graph, matching models/scan.onnx."""
    path = os.path.join(TEST_ROOT, "models", "scan.onnx")
    model = onnx.load(path)

    # Body graph: sum_out = sum_in + next; scan_out mirrors sum_out.
    sum_in = Variable(name="sum_in", dtype=np.float32, shape=(2, ))
    # Local renamed from `next` to avoid shadowing the builtin; the tensor name is unchanged.
    next_in = Variable(name="next", dtype=np.float32, shape=(2, ))
    sum_out = Variable(name="sum_out", dtype=np.float32, shape=(2, ))
    scan_out = Variable(name="scan_out", dtype=np.float32, shape=(2, ))
    body_nodes = [
        Node(op="Add", inputs=[sum_in, next_in], outputs=[sum_out]),
        Node(op="Identity", inputs=[sum_out], outputs=[scan_out]),
    ]
    body_graph = Graph(nodes=body_nodes, inputs=[sum_in, next_in], outputs=[sum_out, scan_out], name="scan_body")

    # Outer graph
    inputs = [
        Variable(name="initial", dtype=np.float32, shape=(2, )),
        Variable(name="x", dtype=np.float32, shape=(3, 2)),
    ]
    outputs = [
        Variable(name="y", dtype=np.float32, shape=(2, )),
        Variable(name="z", dtype=np.float32, shape=(3, 2)),
    ]
    attrs = OrderedDict()
    attrs["body"] = body_graph
    attrs["num_scan_inputs"] = 1
    scan_node = Node(op="Scan", inputs=inputs, outputs=outputs, attrs=attrs)

    return Model(
        path,
        inputs=inputs,
        outputs=outputs,
        nodes=[scan_node],
        opset=OnnxImporter.get_opset(model),
    )
def test_export_node(self):
    """Exporting a Node must preserve its name, op, I/O names, and every attribute kind."""
    name = "TestNode"
    op = "Test"
    inputs = [Variable(name="input")]
    outputs = [Variable(name="output")]

    attrs = OrderedDict(
        [
            ("float_attr", 4.0),
            ("int_attr", 10),
            ("str_attr", "constant"),
            ("tensor_attr", Constant("testTensor", np.ones(shape=(1, 2, 3, 4), dtype=np.float32))),
            ("floats_attr", [1.0, 2.0, 3.0, 4.0]),
            ("ints_attr", [4, 3, 2, 1]),
            ("strings_attr", ["constant", "and", "variable"]),
        ]
    )
    node = Node(op=op, name=name, inputs=inputs, outputs=outputs, attrs=attrs)

    onnx_node = OnnxExporter.export_node(node)
    assert onnx_node.name == name
    assert onnx_node.op_type == op
    assert onnx_node.input == ["input"]
    assert onnx_node.output == ["output"]

    for onnx_attr, (attr_name, attr) in zip(onnx_node.attribute, attrs.items()):
        assert onnx_attr.name == attr_name
        if isinstance(attr, float):
            assert onnx_attr.f == attr
        elif isinstance(attr, int):
            assert onnx_attr.i == attr
        elif isinstance(attr, str):
            assert onnx_attr.s.decode() == attr
        elif isinstance(attr, Tensor):
            # Compare serialized protos to check tensor attributes for equality.
            assert onnx_attr.t.SerializeToString() == OnnxExporter.export_tensor_proto(attr).SerializeToString()
        elif isinstance(attr, list):
            if isinstance(attr[0], float):
                assert onnx_attr.floats == attr
            elif isinstance(attr[0], int):
                assert onnx_attr.ints == attr
            elif isinstance(attr[0], str):
                assert [s.decode() for s in onnx_attr.strings] == attr
            else:
                raise AssertionError(
                    "Unrecognized list attribute: ({:}: {:}) of type: {:}".format(attr_name, attr, type(attr))
                )
        else:
            raise AssertionError(
                "Unrecognized attribute: ({:}: {:}) of type: {:}".format(attr_name, attr, type(attr))
            )
def identity_model():
    """Fixture: single-Identity model matching models/identity.onnx."""
    path = os.path.join(TEST_ROOT, "models", "identity.onnx")
    model = onnx.load(path)

    x = Variable(name="x", dtype=np.float32, shape=(1, 1, 2, 2))
    y = Variable(name="y", dtype=np.float32, shape=(1, 1, 2, 2))
    identity = Node(op="Identity", inputs=[x], outputs=[y])
    return Model(path, inputs=[x], outputs=[y], nodes=[identity], opset=OnnxImporter.get_opset(model))
def test_no_foldable_constants(self):
    """fold_constants must be a no-op on a graph whose only node has variable inputs."""
    inp0 = Variable("input0", shape=(1, 3), dtype=np.float32)
    inp1 = Variable("input1", shape=(1, 3), dtype=np.float32)
    out = Variable("output", shape=(1, 3), dtype=np.float32)

    graph = Graph(
        nodes=[Node("Add", inputs=[inp0, inp1], outputs=[out])],
        inputs=[inp0, inp1],
        outputs=[out],
    )
    graph.fold_constants().cleanup()

    # Nothing is constant, so the node and both inputs must survive untouched.
    assert len(graph.nodes) == 1
    assert graph.nodes[0].inputs == [inp0, inp1]
def dim_param_model():
    """Fixture: Identity model with a named (string) batch dimension, matching models/dim_param.onnx."""
    path = os.path.join(TEST_ROOT, "models", "dim_param.onnx")
    model = onnx.load(path)

    # "dim0" is a dim_param: a symbolic, string-named dimension.
    x = Variable(name="Input:0", dtype=np.float32, shape=("dim0", 16, 128))
    y = Variable(name="Output:0", dtype=np.float32, shape=("dim0", 16, 128))
    identity = Node(op="Identity", inputs=[x], outputs=[y])
    return Model(path, inputs=[x], outputs=[y], nodes=[identity], opset=OnnxImporter.get_opset(model))
def test_o_multiple_outputs(self):
    """Node.o(i) should index over the consumers of this node's output tensor."""
    middle = Variable(name="intermediate")
    middle2 = Variable(name="intermediate2")
    producer = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[middle])
    consumer0 = Node(op="Add", name="Out", inputs=[middle], outputs=[self.output_tensor])
    consumer1 = Node(op="Add", name="Input2", inputs=[middle], outputs=[middle2])
    assert producer.o() == consumer0
    assert producer.o(1) == consumer1
def test_toposort_nested(self, toposort_test_case):
    """toposort(recurse_subgraphs=True) must sort a subgraph that references outer-graph tensors."""
    subgraph, expected_node_order = toposort_test_case()
    assert subgraph.nodes != expected_node_order

    # Wrap the graph within a subgraph.
    inp = Variable("input")
    id_out = Variable("id_out")
    identity = Node(op="Identity", inputs=[inp], outputs=[id_out])

    # Make the subgraph take an input from the outer graph node.
    # If toposort tries to take the node id, it'll fault.
    subgraph.nodes[0].inputs.append(id_out)

    out = Variable("output")
    nested = Node(op="Nested", inputs=[id_out], outputs=[out], attrs={"subgraph": subgraph})
    graph = Graph(nodes=[identity, nested], inputs=[inp], outputs=[out])

    graph.toposort(recurse_subgraphs=True)
    assert subgraph.nodes == expected_node_order
def test_cleanup_independent_path(self):
    """cleanup() must remove a node, its tensors, and the graph input of a dead side path."""
    graph, _ = toposort_linear_graph()

    # Build out a path totally unrelated to the rest of the graph.
    dead_in = Variable(name="indep0")
    dead_out = Variable(name="indep1")
    dead_node = Node(op="IndepTest", inputs=[dead_in], outputs=[dead_out])
    graph.inputs.append(dead_in)  # Unused inputs should be removed as well.
    graph.nodes.append(dead_node)

    graph.cleanup()

    assert dead_in not in graph.inputs
    assert dead_node not in graph.nodes
    tensor_map = graph.tensors()
    assert dead_in.name not in tensor_map
    assert dead_out.name not in tensor_map
def test_get_used_node_ids(self, graph):
    """_get_used_node_ids must exclude nodes/tensors that don't contribute to the outputs."""
    original_nodes = copy.copy(graph.nodes)
    original_tensors = copy.copy(list(graph.tensors().values()))

    # Attach a dead-end node that reads a graph input but feeds nothing.
    unused_tensor = Variable(name="Unused")
    unused_node = Node(op="Unused", inputs=[graph.inputs[0]], outputs=[unused_tensor])
    graph.nodes.append(unused_node)

    with graph.node_ids():
        used_node_ids, used_tensors = graph._get_used_node_ids()

        # Everything except the dead-end node should be marked used.
        assert len(used_node_ids) == len(graph.nodes) - 1
        assert all(node.id in used_node_ids for node in original_nodes)
        assert unused_node.id not in used_node_ids
        assert unused_tensor not in used_tensors
        assert all(tensor in used_tensors for tensor in original_tensors)
def test_independent_path(self, remove_unused_graph_inputs):
    """A dead side path is removed iff cleanup is told to drop unused graph inputs."""
    graph, _ = toposort_linear_graph()

    # Build out a path totally unrelated to the rest of the graph.
    dead_in = Variable(name="indep0")
    dead_out = Variable(name="indep1")
    dead_node = Node(op="IndepTest", inputs=[dead_in], outputs=[dead_out])
    graph.nodes.append(dead_node)
    graph.inputs.append(dead_in)

    graph.cleanup(remove_unused_graph_inputs=remove_unused_graph_inputs)

    # When removal is disabled, each of these checks is vacuously satisfied.
    assert dead_in not in graph.inputs or not remove_unused_graph_inputs
    assert dead_node not in graph.nodes or not remove_unused_graph_inputs
    tensor_map = graph.tensors()
    assert dead_in.name not in tensor_map or not remove_unused_graph_inputs
    assert dead_out.name not in tensor_map or not remove_unused_graph_inputs