def process_io(io):
    """Normalize each element of ``io`` into a Tensor.

    Tensors pass through untouched; strings become named Variables; NumPy
    arrays become Constants; lists/tuples of numbers become float32 or
    int64 Constants. Anything else is reported as a fatal error.
    """
    processed = []
    for item in io:
        if isinstance(item, Tensor):
            processed.append(item)
        elif isinstance(item, str):
            # Strings create a fresh Variable named after the string.
            processed.append(Variable(name=self._generate_name(item)))
        elif isinstance(item, np.ndarray):
            processed.append(
                Constant(name=self._generate_name("onnx_graphsurgeon_constant"),
                         values=item))
        elif isinstance(item, (list, tuple)):
            # Promote to float32 if any element is a float; otherwise int64.
            item_dtype = np.float32 if any(
                isinstance(x, float) for x in item) else np.int64
            processed.append(
                Constant(name=self._generate_name("onnx_graphsurgeon_lst_constant"),
                         values=np.array(item, dtype=item_dtype)))
        else:
            G_LOGGER.critical(
                "Unrecognized type passed to Graph.layer: {:}.\n"
                "\tHint: Did you forget to unpack a list with `*`?\n"
                "\tPlease use Tensors, strings, or NumPy arrays.".
                format(item))
    return processed
def ext_weights():
    """Build the expected in-memory model for ``ext_weights.onnx``.

    Graph structure: c = a + b; e = c + d; output = input + e, where
    a, b, and d are (1, 3) all-ones constants.
    """
    path = os.path.join(TEST_ROOT, "models", "ext_weights.onnx")
    model = onnx.load(path)

    graph_input = Variable("input", shape=(1, 3), dtype=np.float32)
    graph_output = Variable("output", shape=(1, 3), dtype=np.float32)

    def ones_const(name):
        # Every constant weight in this model is a (1, 3) tensor of ones.
        return Constant(name, values=np.ones((1, 3), dtype=np.float32))

    a, b, d = ones_const("a"), ones_const("b"), ones_const("d")
    c, e = Variable("c"), Variable("e")

    nodes = [
        Node(op="Add", inputs=[a, b], outputs=[c]),
        Node(op="Add", inputs=[c, d], outputs=[e]),
        Node(op="Add", inputs=[graph_input, e], outputs=[graph_output]),
    ]
    return Model(
        path,
        inputs=[graph_input],
        outputs=[graph_output],
        nodes=nodes,
        opset=OnnxImporter.get_opset(model),
    )
def test_fold_constants_one_hop(self):
    """fold_constants collapses a two-hop chain of constant-only Adds.

    Graph:
        c = (a + b)
        e = (c + d)
        output = input + e
    Should fold to:
        output = input + e   (with `e` replaced by a folded Constant)
    """
    def const_ones(name):
        return Constant(name, values=np.ones(shape=(1, 3), dtype=np.float32))

    inp = Variable("input", shape=(1, 3), dtype=np.float32)
    a, b, d = const_ones("a"), const_ones("b"), const_ones("d")
    c = Variable("c", shape=(1, 3), dtype=np.float32)
    e = Variable("e", shape=(1, 3), dtype=np.float32)
    out = Variable("output", shape=(1, 3), dtype=np.float32)

    graph = Graph(
        nodes=[
            Node("Add", inputs=[a, b], outputs=[c]),
            Node("Add", inputs=[c, d], outputs=[e]),
            Node("Add", inputs=[inp, e], outputs=[out]),
        ],
        inputs=[inp],
        outputs=[out],
    )
    graph.fold_constants().cleanup()

    # Only the final (non-constant) Add should survive.
    assert len(graph.nodes) == 1
    surviving = graph.nodes[0]
    assert surviving.inputs[0] == inp
    assert surviving.inputs[1] == e
    # `e` should now carry the folded value: (1 + 1) + 1 == 3 everywhere.
    assert np.all(surviving.inputs[1].values ==
                  np.ones(shape=(1, 3), dtype=np.float32) * 3)
def test_copy_with_subgraph_dup_const_tensors(self):
    """Copying a graph keeps same-named inner/outer constants distinct."""
    outer_inp = Constant("input", values=np.ones(dtype=np.float32, shape=(4, 5)))
    outer = Graph()

    # The inner constant deliberately shares the outer one's name; its
    # (1, 2) shape is what lets us tell the two apart after the copy.
    inner_inp = Constant("input", values=np.ones(dtype=np.float32, shape=(1, 2)))
    inner = Graph()
    inner.outputs = [inner.identity(inner_inp)]

    outer.outputs = [outer.nested(outer_inp, inner)]

    copied = outer.copy()
    body = copied.nodes[0].attrs["body"]
    assert body.nodes[0].inputs[0].shape == (1, 2)
def setup_method(self):
    """Create a Constant plus a producer and a consumer node for it."""
    values = np.ones((1, 3, 5, 5), dtype=np.float64)
    self.tensor = Constant(name="test_tensor", values=values)
    # A producer doesn't make sense for Constants, but the shared base
    # tests expect every tensor to have one.
    self.input_node = Node(op="Add", outputs=[self.tensor])
    self.output_node = Node(op="Add", inputs=[self.tensor])
def import_tensor(
        onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> Tensor:
    """Convert an ONNX tensor proto into a Constant (if it carries data)
    or a Variable (if conversion to an array fails)."""
    try:
        # TensorProtos with embedded data convert cleanly to an array.
        return Constant(name=onnx_tensor.name,
                        values=onnx.numpy_helper.to_array(onnx_tensor))
    except ValueError:
        # No usable data: fall back to a Variable carrying whatever
        # dtype/shape metadata the proto provides.
        return Variable(name=onnx_tensor.name,
                        dtype=get_onnx_tensor_dtype(onnx_tensor),
                        shape=get_onnx_tensor_shape(onnx_tensor))
def test_shape_of_constant_tensor(self):
    """Shape-of-a-Constant folds away completely into a constant output."""
    graph = Graph()
    const = Constant("const", values=np.ones((1, 3, 3), dtype=np.int64))
    graph.outputs = [graph.shape(const)]

    graph.fold_constants().cleanup()

    # No nodes remain; the output itself is now the folded shape.
    assert not graph.nodes
    folded = graph.outputs[0]
    assert isinstance(folded, Constant)
    assert np.all(folded.values == (1, 3, 3))
def test_export_constant_tensor_to_tensor_proto(self):
    """A Constant exports to a TensorProto with name, values, dtype, and dims intact."""
    shape = (3, 224, 224)
    values = np.random.random_sample(size=shape).astype(np.float32)
    tensor = Constant(name="constant_tensor", values=values)

    proto = OnnxExporter.export_tensor_proto(tensor)

    assert proto.name == "constant_tensor"
    assert np.all(onnx.numpy_helper.to_array(proto) == values)
    assert proto.data_type == onnx.TensorProto.FLOAT
    assert tuple(proto.dims) == shape
def initializer_is_output_model():
    """Expected model for ``initializer_is_output.onnx``.

    The graph has no nodes and no inputs: its sole output is an
    initializer (a (64, 64) tensor of ones).
    """
    path = os.path.join(TEST_ROOT, "models", "initializer_is_output.onnx")
    model = onnx.load(path)
    X = Constant(name="X", values=np.ones((64, 64), dtype=np.float32))
    return Model(path,
                 inputs=[],
                 outputs=[X],
                 nodes=[],
                 opset=OnnxImporter.get_opset(model))
def test_export_constant_tensor_lazy_values_to_tensor_proto(self):
    """Exporting a Constant backed by LazyValues must not force a load."""
    proto = onnx.numpy_helper.from_array(
        np.ones(shape=(3, 3, 3), dtype=np.float32))
    tensor = Constant(name="constant_tensor", values=LazyValues(proto))

    OnnxExporter.export_tensor_proto(tensor)

    # The exporter should *not* have loaded LazyValues into a numpy array.
    assert isinstance(tensor._values, LazyValues)
def test_export_node(self):
    """export_node preserves name, op, I/O names, and every supported attribute type."""
    inputs = [Variable(name="input")]
    outputs = [Variable(name="output")]

    attrs = OrderedDict()
    attrs["float_attr"] = 4.0
    attrs["int_attr"] = 10
    attrs["str_attr"] = "constant"
    attrs["tensor_attr"] = Constant(
        "testTensor", np.ones(shape=(1, 2, 3, 4), dtype=np.float32))
    attrs["floats_attr"] = [1.0, 2.0, 3.0, 4.0]
    attrs["ints_attr"] = [4, 3, 2, 1]
    attrs["strings_attr"] = ["constant", "and", "variable"]

    node = Node(op="Test",
                name="TestNode",
                inputs=inputs,
                outputs=outputs,
                attrs=attrs)
    onnx_node = OnnxExporter.export_node(node)

    assert onnx_node.name == "TestNode"
    assert onnx_node.op_type == "Test"
    assert onnx_node.input == ["input"]
    assert onnx_node.output == ["output"]

    # Attributes export in insertion order; check each against its source.
    for onnx_attr, (attr_name, attr) in zip(onnx_node.attribute, attrs.items()):
        assert onnx_attr.name == attr_name
        if isinstance(attr, float):
            assert onnx_attr.f == attr
        elif isinstance(attr, int):
            assert onnx_attr.i == attr
        elif isinstance(attr, str):
            assert onnx_attr.s.decode() == attr
        elif isinstance(attr, Tensor):
            # Compare serialized protos so the nested tensor data is checked too.
            expected = OnnxExporter.export_tensor_proto(attr).SerializeToString()
            assert onnx_attr.t.SerializeToString() == expected
        elif isinstance(attr, list):
            if isinstance(attr[0], float):
                assert onnx_attr.floats == attr
            elif isinstance(attr[0], int):
                assert onnx_attr.ints == attr
            elif isinstance(attr[0], str):
                assert [s.decode() for s in onnx_attr.strings] == attr
            else:
                raise AssertionError(
                    "Unrecognized list attribute: ({:}: {:}) of type: {:}".
                    format(attr_name, attr, type(attr)))
        else:
            raise AssertionError(
                "Unrecognized attribute: ({:}: {:}) of type: {:}".format(
                    attr_name, attr, type(attr)))
def process_io(io):
    """Normalize ``io`` entries into Tensors.

    Tensors pass through; strings become named Variables; NumPy arrays
    become Constants. Anything else is reported as a fatal error.
    """
    result = []
    for entry in io:
        if isinstance(entry, Tensor):
            result.append(entry)
        elif isinstance(entry, str):
            # Strings create a fresh Variable named after the string.
            result.append(Variable(name=self._generate_name(entry)))
        elif isinstance(entry, np.ndarray):
            result.append(
                Constant(name=self._generate_name("onnx_graphsurgeon_constant"),
                         values=entry))
        else:
            G_LOGGER.critical("Unrecognized type passed to Graph.layer: {:}.\n\tHint: Did you forget to unpack a list with `*`?\n\tPlease use Tensors, strings, or NumPy arrays.".format(entry))
    return result
def lstm_model():
    """Expected model for ``lstm.onnx``: one LSTM node whose weight inputs
    come from the ONNX graph's initializers."""
    path = os.path.join(TEST_ROOT, "models", "lstm.onnx")
    model = onnx.load(path)
    onnx_graph = model.graph

    def init_constant(name, index):
        # Pull the initializer's data out of the loaded ONNX graph.
        return Constant(
            name=name,
            values=onnx.numpy_helper.to_array(onnx_graph.initializer[index]))

    X = Variable(name="X", dtype=np.float32, shape=(4, 3, 6))
    W = init_constant("W", 0)
    R = init_constant("R", 1)
    B = init_constant("B", 2)
    initial_c = init_constant("initial_c", 3)

    Y = Variable(name="Y", dtype=np.float32, shape=(4, 1, 3, 5))
    Y_h = Variable(name="Y_h", dtype=np.float32, shape=(1, 3, 5))
    Y_c = Variable(name="Y_c", dtype=np.float32, shape=(1, 3, 5))

    node = Node(
        op="LSTM",
        attrs=OrderedDict([("direction", "forward"), ("hidden_size", 5)]),
        # Optional inputs are represented by empty tensors.
        inputs=[X, W, R, B, Variable.empty(), Variable.empty(), initial_c],
        outputs=[Y, Y_h, Y_c],
    )
    # Initializers will not be included in the graph inputs.
    return Model(
        path,
        inputs=[X],
        outputs=[Y, Y_h, Y_c],
        nodes=[node],
        opset=OnnxImporter.get_opset(model),
    )
def import_tensor(
        onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> Tensor:
    """Import a TensorProto as a lazily-loaded Constant, or anything else
    (i.e. a ValueInfoProto) as a Variable."""
    if not isinstance(onnx_tensor, onnx.TensorProto):
        return Variable(name=onnx_tensor.name,
                        dtype=get_onnx_tensor_dtype(onnx_tensor),
                        shape=get_onnx_tensor_shape(onnx_tensor))

    # Preserve the optional data_location field (e.g. externally-stored
    # weights) only when it is actually set on the proto.
    data_location = None
    if onnx_tensor.HasField("data_location"):
        data_location = int(onnx_tensor.data_location)
    return Constant(name=onnx_tensor.name,
                    values=LazyValues(onnx_tensor),
                    data_location=data_location)
def test_export_constant_tensor_to_value_info_proto(self):
    """A Constant exports to a ValueInfoProto carrying its name, dtype, and shape."""
    shape = (3, 224, 224)
    tensor = Constant(
        name="constant_tensor",
        values=np.random.random_sample(size=shape).astype(np.float32))

    proto = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)

    assert proto.name == "constant_tensor"
    assert proto.type.tensor_type.elem_type == onnx.TensorProto.FLOAT
    exported_shape = tuple(
        dim.dim_value for dim in proto.type.tensor_type.shape.dim)
    assert exported_shape == shape
def test_tensors_includes_non_node_tensors(self):
    """Graph.tensors() must surface tensors reachable only via graph outputs."""
    X = Constant("X", values=np.ones(shape=(64, 64), dtype=np.float32))
    graph = Graph(inputs=[], outputs=[X])

    lookup = graph.tensors()
    assert "X" in lookup
    assert lookup["X"] == X
def import_tensor(onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> Tensor:
    """TensorProtos become lazily-loaded Constants; anything else becomes a Variable."""
    if isinstance(onnx_tensor, onnx.TensorProto):
        # Defer decoding the raw tensor data until it is actually needed.
        return Constant(name=onnx_tensor.name, values=LazyValues(onnx_tensor))
    return Variable(name=onnx_tensor.name,
                    dtype=get_onnx_tensor_dtype(onnx_tensor),
                    shape=get_onnx_tensor_shape(onnx_tensor))
def constant(self, values):
    """Insert a Constant node wrapping ``values`` and return its output tensor."""
    value_attr = {"value": Constant("values", values)}
    return self.layer(op="Constant",
                      inputs=[],
                      outputs=["constant_out"],
                      attrs=value_attr)[0]