def test_layer_with_attrs(self):
    """layer() should forward the op, name, and attrs onto the node it creates."""
    graph = Graph()
    outputs = graph.layer(op="Add", name="node", attrs={"fake_attr": 0})
    assert len(graph.nodes) == 1
    created = graph.nodes[-1]
    assert created.op == "Add"
    assert created.name == "node"
    assert created.attrs["fake_attr"] == 0
def test_fold_constants_one_hop(self):
    """Constant folding should collapse a chain of constant-only Adds.

    Original graph:
        c = (a + b)
        e = (c + d)
        output = input + e
    After folding, only `output = input + e` remains, with e now a Constant.
    """
    inp = Variable("input", shape=(1, 3), dtype=np.float32)
    const_a = Constant("a", values=np.ones(shape=(1, 3), dtype=np.float32))
    const_b = Constant("b", values=np.ones(shape=(1, 3), dtype=np.float32))
    var_c = Variable("c", shape=(1, 3), dtype=np.float32)
    const_d = Constant("d", values=np.ones(shape=(1, 3), dtype=np.float32))
    var_e = Variable("e", shape=(1, 3), dtype=np.float32)
    out = Variable("output", shape=(1, 3), dtype=np.float32)

    graph = Graph(
        nodes=[
            Node("Add", inputs=[const_a, const_b], outputs=[var_c]),
            Node("Add", inputs=[var_c, const_d], outputs=[var_e]),
            Node("Add", inputs=[inp, var_e], outputs=[out]),
        ],
        inputs=[inp],
        outputs=[out],
    )
    graph.fold_constants().cleanup()

    # Extra nodes should be removed; only the final Add survives.
    assert len(graph.nodes) == 1
    remaining = graph.nodes[0]
    assert remaining.inputs[0] == inp
    assert remaining.inputs[1] == var_e
    # Value should be computed correctly: 1 + 1 + 1 == 3 elementwise.
    assert np.all(remaining.inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 3)
def make_nested_graph():
    """Build a graph holding a Nested node whose subgraph reads a tensor from the outer graph."""
    inp = Variable("input")
    id_out = Variable("id_out")
    outer_identity = Node(op="Identity", inputs=[inp], outputs=[id_out])

    # Subgraph outputs come from the parent node, but nodes in the subgraph
    # can use tensors from the outer graphs too (inner0 reads id_out).
    subgraph_inputs = [Variable("subgraph_inp")]
    subgraph_id_out = Variable("subgraph_id_out")
    subgraph_outputs = [Variable("subgraph_out")]
    inner0 = Node(op="Identity", inputs=[id_out], outputs=[subgraph_id_out])
    inner1 = Node(op="Identity", inputs=[subgraph_id_out], outputs=subgraph_outputs)
    subgraph = Graph(nodes=[inner0, inner1], inputs=subgraph_inputs, outputs=subgraph_outputs)

    nested_out = Variable("nested_out")
    nested_node = Node(op="Nested", attrs={"body": subgraph}, inputs=[inp], outputs=[nested_out])
    return Graph(nodes=[outer_identity, nested_node], inputs=[inp], outputs=[nested_out])
def test_shape_gather(self, shape, indices):
    """Shape->Gather chains fold only for static shapes and array indices."""
    indices = np.array(indices)
    inp = Variable("input", dtype=np.float32, shape=shape)
    graph = Graph(inputs=[inp])

    inp_shape = graph.shape(inp)
    shape_part = graph.gather(inp_shape, indices=indices)
    graph.outputs = [
        graph.add(shape_part, shape_part),
        graph.gather(inp_shape, indices=[0]),
        graph.gather(inp_shape, indices=np.array(0)),
    ]

    graph.fold_constants()

    if shape is not None:
        # Static input shape: the whole Shape/Gather/Add chain folds.
        assert isinstance(graph.outputs[0], Constant)
        expected_shape = np.array(shape)[indices].astype(np.int64) * 2
        assert np.all(graph.outputs[0].values == expected_shape)
    else:
        assert isinstance(graph.outputs[0], Variable)
    # Gathers with list or 0-d indices must never be folded.
    assert isinstance(graph.outputs[1], Variable)
    assert isinstance(graph.outputs[2], Variable)
def test_generate_name(self):
    """_generate_name() must yield a fresh name on every call."""
    graph = Graph()
    names = set()
    num_names = 100
    # This function should not return the same name more than once
    for _ in range(num_names):
        names.add(graph._generate_name("name"))
    # Fix: compare against num_names rather than repeating the magic constant
    # 100, so the assertion stays correct if the loop count is ever changed.
    assert len(names) == num_names
def test_register(self):
    """Graph.register() should expose the decorated function as a Graph method."""
    @Graph.register()
    def add(self, a, b):
        return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])

    graph = Graph()
    [output] = graph.add("a", "b")
    assert len(graph.nodes) == 1
    created = graph.nodes[-1]
    assert created.op == "Add"
    assert "add_out" in output.name
def test_tensors_check_duplicates(self):
    """tensors(check_duplicates=True) must reject distinct tensors sharing a name."""
    dup_inputs = [Variable(name="x")]
    dup_outputs = [Variable(name="x")]  # Distinct tensor object with the same name
    graph = Graph(
        nodes=[Node(op="Add", name="Test", inputs=dup_inputs, outputs=dup_outputs)],
        inputs=dup_inputs,
        outputs=dup_outputs,
    )
    with pytest.raises(OnnxGraphSurgeonException):
        graph.tensors(check_duplicates=True)
def test_tensors_with_duplicates_check_disabled(self):
    """With check_duplicates=False, duplicate tensor names are tolerated."""
    dup_inputs = [Variable(name="x")]
    dup_outputs = [Variable(name="x")]  # Distinct tensor object with the same name
    graph = Graph(
        nodes=[Node(op="Add", name="Test", inputs=dup_inputs, outputs=dup_outputs)],
        inputs=dup_inputs,
        outputs=dup_outputs,
    )
    # This should *not* throw
    graph.tensors(check_duplicates=False)
def test_layer_with_tensors(self):
    """layer() should wire pre-built tensors straight into the new node."""
    ins = [Variable("x0"), Variable("x1")]
    outs = [Variable("y0"), Variable("y1")]
    graph = Graph()
    returned = graph.layer(op="Fake", inputs=ins, outputs=outs)
    assert returned == outs
    assert len(graph.nodes) == 1
    assert graph.nodes[-1].inputs == ins
    assert graph.nodes[-1].outputs == returned
def test_layer_with_strings(self):
    """layer() should create tensors whose names start with the given string prefixes."""
    x0 = "x0"
    x1 = "x1"
    y0 = "y0"
    y1 = "y1"
    graph = Graph()
    outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])

    assert len(graph.nodes) == 1
    # Fix: the original asserted a list comprehension, which is always truthy
    # when non-empty, so the prefix checks could never fail. all() actually
    # verifies each generated tensor name.
    assert all(prefix in tensor.name for prefix, tensor in zip([x0, x1], graph.nodes[-1].inputs))
    assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
    assert graph.nodes[-1].outputs == outputs
def test_no_foldable_constants(self):
    """fold_constants() is a no-op when every node depends on a graph input."""
    inp0 = Variable("input0", shape=(1, 3), dtype=np.float32)
    inp1 = Variable("input1", shape=(1, 3), dtype=np.float32)
    out = Variable("output", shape=(1, 3), dtype=np.float32)
    graph = Graph(
        nodes=[Node("Add", inputs=[inp0, inp1], outputs=[out])],
        inputs=[inp0, inp1],
        outputs=[out],
    )

    graph.fold_constants().cleanup()

    assert len(graph.nodes) == 1
    assert graph.nodes[0].inputs == [inp0, inp1]
def test_layer_with_arrays(self):
    """layer() should wrap numpy-array inputs in Constant tensors."""
    x0 = np.array([1])
    x1 = np.array([1])
    y0 = "y0"
    y1 = "y1"
    graph = Graph()
    outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])

    assert len(graph.nodes) == 1
    # Fix: the original asserted a (non-empty, hence always-truthy) list
    # comprehension, so the name-prefix checks could never fail; all() actually
    # evaluates each check.
    assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
    assert graph.nodes[-1].inputs[0].values == x0
    assert graph.nodes[-1].inputs[1].values == x1
    assert graph.nodes[-1].outputs == outputs
def test_io_cannot_be_sync_list_on_assign(self):
    """Assigning a node's I/O lists to a graph must convert them to plain lists."""
    inp = Variable("input0", shape=(1, 3), dtype=np.float32)
    out = Variable("input1", shape=(1, 3), dtype=np.float32)
    node = Node("Add", inputs=[inp], outputs=[out])
    # The node's own lists are synchronized with the tensors...
    assert isinstance(node.inputs, SynchronizedList)
    assert isinstance(node.outputs, SynchronizedList)

    graph = Graph(nodes=[node], inputs=[], outputs=[])
    graph.inputs = node.inputs
    graph.outputs = node.outputs
    # ...but the graph must not adopt them as SynchronizedLists.
    assert not isinstance(graph.inputs, SynchronizedList)
    assert not isinstance(graph.outputs, SynchronizedList)
def export_graph(graph: Graph, do_type_check=True) -> onnx.GraphProto:
    """
    Export an onnx-graphsurgeon Graph to an ONNX GraphProto.

    Args:
        graph (Graph): The graph to export.
        do_type_check (bool): Whether to check that input and output tensors have data types defined, and fail if not.
    """
    tensor_map = graph.tensors()
    # Every Constant in the graph becomes an initializer.
    initializer = [
        OnnxExporter.export_tensor_proto(tensor)
        for tensor in tensor_map.values()
        if isinstance(tensor, Constant)
    ]

    nodes = [OnnxExporter.export_node(node, do_type_check) for node in graph.nodes]
    inputs = [OnnxExporter.export_value_info_proto(inp, do_type_check) for inp in graph.inputs]
    outputs = [OnnxExporter.export_value_info_proto(out, do_type_check) for out in graph.outputs]

    # Graph inputs/outputs are exported separately above, so drop them from the
    # map before building value_info.
    for tensor in graph.inputs + graph.outputs:
        if tensor.name in tensor_map:
            del tensor_map[tensor.name]

    # Omit tensors from value_info if we don't know their shape/dtype
    def has_value_info(tensor):
        return isinstance(tensor, Variable) and (tensor.dtype is not None or tensor.shape is not None)

    value_info = [
        OnnxExporter.export_value_info_proto(tensor, do_type_check)
        for tensor in tensor_map.values()
        if has_value_info(tensor)
    ]

    return onnx.helper.make_graph(
        nodes=nodes,
        name=graph.name,
        inputs=inputs,
        outputs=outputs,
        initializer=initializer,
        doc_string=graph.doc_string,
        value_info=value_info,
    )
def build_basic_graph():
    """Build the smallest useful graph: a single Add mapping x -> y."""
    ins = [Variable(name="x")]
    outs = [Variable(name="y")]
    add_node = Node(op="Add", name="Test", inputs=ins, outputs=outs)
    return Graph(nodes=[add_node], inputs=ins, outputs=outs)
def nested_dup_names():
    """Build a Model whose Nested node shares its I/O names ("X"/"Y") with its subgraph."""
    path = os.path.join(TEST_ROOT, "models", "nested_dup_names.onnx")
    model = onnx.load(path)

    # Inner
    subgraph_inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    subgraph_outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
    subgraph_node = Node(op="Identity", inputs=subgraph_inputs, outputs=subgraph_outputs)
    subgraph = Graph(nodes=[subgraph_node], inputs=subgraph_inputs, outputs=subgraph_outputs)

    # Outer - problem happens if outer node has same I/O names as subgraph
    outer_inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
    outer_outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
    nested = Node(op="Nested", inputs=outer_inputs, outputs=outer_outputs, attrs={"body": subgraph})

    return Model(
        path,
        inputs=outer_inputs,
        outputs=outer_outputs,
        nodes=[nested],
        opset=OnnxImporter.get_opset(model),
    )
def build_two_layer_graph_multiple_io():
    """Build two chained Adds with multiple graph inputs and outputs."""
    ins = [Variable(name="x0"), Variable(name="x1")]
    mid = Variable(name="intermediate")
    outs = [Variable(name="y0"), Variable(name="y1")]
    first = Node(op="Add", name="Test0", inputs=ins, outputs=[mid])
    second = Node(op="Add", name="Test1", inputs=[mid], outputs=outs)
    return Graph(nodes=[first, second], inputs=ins, outputs=outs)
def test_copy_with_subgraph_dup_const_tensors(self):
    """copy() must keep inner/outer constants with duplicate names distinct."""
    outer_const = Constant("input", values=np.ones(dtype=np.float32, shape=(4, 5)))
    graph = Graph()

    # We'll use shape to distinguish inner/outer tensor
    inner_const = Constant("input", values=np.ones(dtype=np.float32, shape=(1, 2)))
    subgraph = Graph()
    subgraph.outputs = [subgraph.identity(inner_const)]

    graph.outputs = [graph.nested(outer_const, subgraph)]

    graph_copy = graph.copy()
    copied_inner = graph_copy.nodes[0].attrs["body"].nodes[0].inputs[0]
    assert copied_inner.shape == (1, 2)
def test_basic(self):
    """copy() should preserve every graph-level field."""
    graph = Graph(
        nodes=[Node(op="Test")],
        inputs=[Variable("test")],
        outputs=[Variable("test")],
        name="test-name",
        doc_string="test-docstring",
        import_domains=["fake-import-domain"],
        opset=-1,
    )
    new_graph = graph.copy()

    assert new_graph == graph
    # Every field must match between the copy and the original.
    for field in ("nodes", "inputs", "outputs", "name", "doc_string", "import_domains", "opset"):
        assert getattr(new_graph, field) == getattr(graph, field)
def toposort_multi_tier_output_graph():
    """Build the chain x -> out0 -> out1 -> out2 with nodes deliberately out of order."""
    inputs = [Variable(name="x")]
    out0, out1, out2 = Variable(name="out0"), Variable(name="out1"), Variable(name="out2")
    outputs = [out0, out1, out2]
    nodes = [
        Node(op="Add", name="Test2", inputs=[out1], outputs=[out2]),
        Node(op="Add", name="Test0", inputs=inputs, outputs=[out0]),
        Node(op="Add", name="Test1", inputs=[out0], outputs=[out1]),
    ]
    # Sorted order is Test0, Test1, Test2.
    expected_node_order = [nodes[1], nodes[2], nodes[0]]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def toposort_multi_tier_input_graph():
    """Build two Adds feeding a third, with nodes deliberately out of topological order."""
    x0, x1, x2, x3 = (Variable(name="x0"), Variable(name="x1"), Variable(name="x2"), Variable(name="x3"))
    inputs = [x0, x1, x2, x3]
    int0 = Variable(name="intermediate0")
    int1 = Variable(name="intermediate1")
    outputs = [Variable(name="out")]
    nodes = [
        Node(op="Add", name="Test2", inputs=[int1, x3], outputs=outputs),
        Node(op="Add", name="Test0", inputs=[x2, x1], outputs=[int0]),
        Node(op="Add", name="Test1", inputs=[int0, x0], outputs=[int1]),
    ]
    # Sorted order is Test0, Test1, Test2.
    expected_node_order = [nodes[1], nodes[2], nodes[0]]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def test_shape_of_constant_node(self):
    """Shape of a Constant-backed tensor should fold away entirely."""
    graph = Graph()
    values = np.ones((1, 3, 3), dtype=np.int64)
    const = graph.constant(values=values)
    graph.outputs = [graph.shape(const)]

    graph.fold_constants().cleanup()

    assert not graph.nodes
    folded = graph.outputs[0]
    assert isinstance(folded, Constant)
    assert np.all(folded.values == (1, 3, 3))
def test_shape_of_variable_tensor_static_shape(self):
    """Shape of a statically-shaped variable folds to a constant."""
    var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
    # Fix: the original passed inputs=[var] to the constructor and then
    # redundantly re-assigned graph.inputs = [var]; one is enough.
    graph = Graph(inputs=[var])
    graph.outputs = [graph.shape(var)]

    graph.fold_constants().cleanup()

    assert not graph.nodes
    assert isinstance(graph.outputs[0], Constant)
    assert np.all(graph.outputs[0].values == (1, 3, 4))
def test_shape_of_variable_tensor_static_shape_no_fold(self):
    """With fold_shapes=False, Shape nodes must be left intact."""
    graph = Graph()
    var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
    graph.inputs = [var]
    graph.outputs = [graph.shape(var)]

    graph.fold_constants(fold_shapes=False).cleanup()

    assert len(graph.nodes) == 1
    assert graph.nodes[0].op == "Shape"
    assert isinstance(graph.outputs[0], Variable)
def test_shape_of_variable_tensor_multiple_shapes(self):
    """Shape folding removes Shape nodes but leaves other ops untouched."""
    graph = Graph()
    var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
    scalar_var = Variable("var2", dtype=np.float32, shape=tuple())  # Scalar
    graph.inputs = [var, scalar_var]
    graph.outputs = [graph.shape(var), graph.identity(var), graph.shape(scalar_var)]

    graph.fold_constants().cleanup()

    # Only the Identity should survive.
    assert len(graph.nodes) == 1
    assert graph.nodes[0].op == "Identity"
    assert isinstance(graph.outputs[0], Constant)
    assert np.all(graph.outputs[0].values == (1, 3, 4))
    assert isinstance(graph.outputs[2], Constant)
    assert np.all(graph.outputs[2].values == tuple())
def test_toposort_nested(self, toposort_test_case):
    """toposort(recurse_subgraphs=True) must also sort nodes inside subgraphs."""
    subgraph, expected_node_order = toposort_test_case()
    assert subgraph.nodes != expected_node_order

    # Wrap the graph within a subgraph
    inp = Variable("input")
    id_out = Variable("id_out")
    outer_identity = Node(op="Identity", inputs=[inp], outputs=[id_out])

    # Make the subgraph take an input from the outer graph node
    # If toposort tries to take the node id, it'll fault.
    subgraph.nodes[0].inputs.append(id_out)

    out = Variable("output")
    nested_node = Node(op="Nested", inputs=[id_out], outputs=[out], attrs={"subgraph": subgraph})

    graph = Graph(nodes=[outer_identity, nested_node], inputs=[inp], outputs=[out])
    graph.toposort(recurse_subgraphs=True)

    assert subgraph.nodes == expected_node_order
def toposort_linear_graph():
    """Build a four-node linear chain, listed out of topological order."""
    inputs = [Variable(name="x")]
    mid0 = Variable(name="intermediate0")
    mid1 = Variable(name="intermediate1")
    mid2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]
    # Nodes are NOT in topo order.
    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[mid0]),
        Node(op="Add", name="Test2", inputs=[mid1], outputs=[mid2]),
        Node(op="Add", name="Test3", inputs=[mid2], outputs=outputs),
        Node(op="Add", name="Test1", inputs=[mid0], outputs=[mid1]),
    ]
    # Sorted order is Test0, Test1, Test2, Test3.
    expected_node_order = [nodes[0], nodes[3], nodes[1], nodes[2]]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
def test_shape_of_variable_tensor_dynamic_shape(self):
    """Shape of a dynamically-shaped variable must not be folded."""
    var = Variable("var", dtype=np.float32, shape=("", -1, 0, 4))
    graph = Graph(inputs=[var])
    graph.outputs = [graph.shape(var)]

    graph.fold_constants().cleanup()

    assert len(graph.nodes) == 1
    assert graph.nodes[0].op == "Shape"
    assert isinstance(graph.outputs[0], Variable)
def tensors_linear_graph():
    """Build a linear chain of Adds; returns the graph, its nodes, and a name->tensor map."""
    inputs = [Variable(name="x")]
    mid0 = Variable(name="intermediate0")
    mid1 = Variable(name="intermediate1")
    mid2 = Variable(name="intermediate2")
    outputs = [Variable(name="y")]

    tensors = {t.name: t for t in inputs + [mid0, mid1, mid2] + outputs}

    nodes = [
        Node(op="Add", name="Test0", inputs=inputs, outputs=[mid0]),
        Node(op="Add", name="Test1", inputs=[mid0], outputs=[mid1]),
        Node(op="Add", name="Test2", inputs=[mid1], outputs=[mid2]),
        Node(op="Add", name="Test3", inputs=[mid2], outputs=outputs),
    ]
    return Graph(nodes=nodes, inputs=inputs, outputs=outputs), nodes, tensors
def test_const_node(self):
    """Folding a lone Constant op yields a Constant tensor and removes the node."""
    graph = Graph()
    values = np.ones((1, 3, 3), dtype=np.int64)
    graph.outputs = [graph.constant(values=values)]
    # Before folding, the output is still a Variable produced by a Constant op.
    assert isinstance(graph.outputs[0], Variable)

    graph.fold_constants().cleanup()

    assert isinstance(graph.outputs[0], Constant)
    assert np.all(graph.outputs[0].values == values)
    assert not graph.nodes