def _create_model(vocab, merges, padding_length, domain='ai.onnx.contrib', opset=None):
    """Build an ONNX model containing a single GPT2Tokenizer custom node.

    :param vocab: vocabulary content passed to the tokenizer node
    :param merges: BPE merges content passed to the tokenizer node
    :param padding_length: padding length attribute of the node
    :param domain: custom operator domain the node is registered under
    :param opset: main ('') opset version; defaults to the minimum of
        ``__max_supported_opset__`` and the installed onnx opset
    :return: ONNX ModelProto
    """
    nodes = []
    mkv = helper.make_tensor_value_info
    nodes.append(
        helper.make_node(
            'GPT2Tokenizer',
            inputs=['inputs'],
            outputs=['input_ids', 'attention_mask'],
            vocab=vocab,
            merges=merges,
            padding_length=padding_length,
            name='GPT2TokenizerName',
            # bug fix: use the *domain* parameter instead of a hard-coded
            # string so the node lives in the same domain registered in
            # opset_import below
            domain=domain))
    inputs = [mkv('inputs', TensorProto.STRING, [None])]
    graph = helper.make_graph(
        nodes, 'GPT2TokenizerTransformer', inputs, [
            mkv('input_ids', TensorProto.INT64, [None, None]),
            mkv('attention_mask', TensorProto.INT64, [None, None])])
    if opset is None:
        opset = min(__max_supported_opset__, onnx_opset_version())
    model = helper.make_model(
        graph, opset_imports=[helper.make_operatorsetid('', opset)])
    # register the custom domain (version 1) alongside the main opset
    model.opset_import.extend([helper.make_operatorsetid(domain, 1)])
    return model
def test_graph_distance_bigger(self):
    """Distance between a one-node and a two-node Neg graph stays below 1."""
    from mlstatpy.graph.graphviz_helper import draw_graph_graphviz
    inp = helper.make_tensor_value_info('X', TensorProto.FLOAT, None)  # pylint: disable=E1101
    out = helper.make_tensor_value_info('Z', TensorProto.FLOAT, None)  # pylint: disable=E1101

    def build(nodes):
        # wrap a node list into a full model pinned at opset 13
        g = helper.make_graph(nodes, 'test-model', [inp], [out])
        return helper.make_model(
            g, producer_name='mlprodict', ir_version=7,
            producer_version='0.1',
            opset_imports=[helper.make_operatorsetid('', 13)])

    single = build([helper.make_node('Neg', ['X'], ['Z'], name='A')])
    double = build([helper.make_node('Neg', ['X'], ['Y'], name='A'),
                    helper.make_node('Neg', ['Y'], ['Z'], name='B')])
    d, graph = onnx_graph_distance(single, double)
    self.assertLess(d, 1)
    vertices, edges = graph.draw_vertices_edges()
    gv = draw_graph_graphviz(vertices, edges)
    self.assertIn("->", gv)
def _create_model(model_b64, domain='ai.onnx.contrib', opset=None):
    """Build an ONNX model containing a single SentencepieceTokenizer node.

    :param model_b64: serialized sentencepiece model passed as node attribute
    :param domain: custom operator domain the node is registered under
    :param opset: main ('') opset version; defaults to the minimum of
        ``__max_supported_opset__`` and the installed onnx opset
    :return: ONNX ModelProto
    """
    nodes = []
    mkv = helper.make_tensor_value_info
    nodes.append(
        helper.make_node(
            'SentencepieceTokenizer',
            inputs=[
                'inputs', 'nbest_size', 'alpha', 'add_bos', 'add_eos',
                'reverse'
            ],
            outputs=['out0', 'out1'],
            model=model_b64,
            name='SentencepieceTokenizeOpName',
            # bug fix: use the *domain* parameter instead of a hard-coded
            # string so the node lives in the same domain registered in
            # opset_import below
            domain=domain))
    inputs = [
        mkv('inputs', TensorProto.STRING, [None]),
        mkv('nbest_size', TensorProto.INT64, [None]),
        mkv('alpha', TensorProto.FLOAT, [None]),
        mkv('add_bos', TensorProto.BOOL, [None]),
        mkv('add_eos', TensorProto.BOOL, [None]),
        mkv('reverse', TensorProto.BOOL, [None])
    ]
    graph = helper.make_graph(
        nodes, 'SentencePieceTokenizerTransformer', inputs, [
            mkv('out0', TensorProto.INT32, [None]),
            mkv('out1', TensorProto.INT64, [None])])
    if opset is None:
        opset = min(__max_supported_opset__, onnx_opset_version())
    model = helper.make_model(
        graph, opset_imports=[helper.make_operatorsetid('', opset)])
    # register the custom domain (version 1) alongside the main opset
    model.opset_import.extend([helper.make_operatorsetid(domain, 1)])
    return model
def create_model(model_name):
    """Build a one-node TopK model with symbolic dims, check it, save to *model_name*."""
    topk = helper.make_node(
        op_type="TopK",
        inputs=["X", "K"],
        outputs=["Values", "Indices"],
        name="topk",
        # attributes are also key-value pairs using the attribute name and appropriate type
        largest=1,
    )
    # create inputs with symbolic dims so we can use any input sizes
    graph_inputs = [
        helper.make_tensor_value_info("X", TensorProto.FLOAT, ["batch", "items"]),
        helper.make_tensor_value_info("K", TensorProto.INT64, [1]),
    ]
    graph_outputs = [
        helper.make_tensor_value_info("Values", TensorProto.FLOAT, ["batch", "k"]),
        helper.make_tensor_value_info("Indices", TensorProto.INT64, ["batch", "k"]),
    ]
    graph_def = helper.make_graph(
        nodes=[topk],
        name="test-model",
        inputs=graph_inputs,
        outputs=graph_outputs,
        initializer=[],
    )
    model = helper.make_model(
        graph_def, opset_imports=[helper.make_operatorsetid("", 11)])
    onnx.checker.check_model(model)
    onnx.save_model(model, model_name)
def onnx_linear_regression(coefs, intercept):
    """Return an ONNX model computing ``Y = X @ coefs + intercept``."""
    if len(coefs.shape) == 1:
        coefs = coefs.reshape((1, -1))
    coefs = coefs.T
    n_in, n_out = coefs.shape
    # graph input and output
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, n_in])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, n_out])
    # inference nodes
    nodes = [
        helper.make_node('MatMul', ['X', 'coefs'], ['y1'], name='N1'),
        helper.make_node('Add', ['y1', 'intercept'], ['Y'], name='N2'),
    ]
    # weights stored as initializers
    inits = [numpy_helper.from_array(coefs, name="coefs"),
             numpy_helper.from_array(intercept, name="intercept")]
    graph_def = helper.make_graph(nodes, 'lr', [X], [Y], inits)
    return helper.make_model(
        graph_def, producer_name='orttrainer', ir_version=7,
        producer_version=ort_version,
        opset_imports=[helper.make_operatorsetid('', 14)])
def test_constant_9_8(self):  # type: () -> None
    """Constant with a UINT64 value converts down from opset 9 to 8."""
    data_type = TensorProto.UINT64
    shape = [2, 3, 4]
    const_node = helper.make_node(
        "Constant", inputs=[], outputs=["Y"],
        value=helper.make_tensor("", data_type, shape, np.arange(24)))
    graph = helper.make_graph(
        [const_node], "test_constant", [],
        [onnx.helper.make_tensor_value_info("Y", data_type, shape)])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 9), 8)
    # op type, output element type and target opset must be preserved
    assert converted_model.graph.node[0].op_type == "Constant"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == 8
def test_less_9_8(self):  # type: () -> None
    """Less on UINT64 inputs converts down from opset 9 to 8."""
    data_type = TensorProto.UINT64
    shape = [2, 3, 4]
    less = onnx.helper.make_node("Less", inputs=["X1", "X2"], outputs=["Y"])
    graph = helper.make_graph(
        [less], "test_less",
        [onnx.helper.make_tensor_value_info("X1", data_type, shape),
         onnx.helper.make_tensor_value_info("X2", data_type, shape)],
        [onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, shape)])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 9), 8)
    # the Less node is expected at index 2 after conversion
    # (presumably two nodes are inserted before it — see converter)
    assert converted_model.graph.node[2].op_type == "Less"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
    assert converted_model.opset_import[0].version == 8
def _make_einsum_model(equation, opset=__max_supported_opset__):
    """Build a single-node Einsum model with one float input per equation term."""
    from skl2onnx.common._topology import OPSET_TO_IR_VERSION  # pylint: disable=E0611,E0001
    n_inputs = len(equation.split('->')[0].split(','))
    graph = helper.make_graph(
        name='einsum_test',
        inputs=[
            helper.make_tensor_value_info("X%d" % i, TensorProto.FLOAT, None)  # pylint: disable=E1101
            for i in range(n_inputs)],
        outputs=[
            helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)],  # pylint: disable=E1101
        nodes=[
            helper.make_node(
                "Einsum", ["X%d" % i for i in range(n_inputs)], ["Y"],
                equation=equation)])
    return helper.make_model(
        graph=graph,
        opset_imports=[helper.make_operatorsetid('', opset)],
        ir_version=OPSET_TO_IR_VERSION.get(opset, 7),
        producer_name='mlprodict',
        producer_version='0.1')
def test_upsample_8_9(self):  # type: () -> None
    """Upsample with a scales attribute converts from opset 8 to 9."""
    dtype = TensorProto.FLOAT
    upsample = onnx.helper.make_node(
        "Upsample", inputs=["X"], outputs=["Y"],
        mode="nearest", scales=[1.0, 1.0, 2.0, 3.0])
    graph = helper.make_graph(
        [upsample], "test_upsample_8_9",
        [onnx.helper.make_tensor_value_info("X", dtype, [1, 1, 2, 2])],
        [onnx.helper.make_tensor_value_info("Y", dtype, [1, 1, 4, 6])])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 8), 9)
    converted = converted_model.graph.node
    assert len(converted) == 1
    assert converted[0].op_type == "Upsample"
    # only the 'mode' attribute should remain after conversion
    assert len(converted[0].attribute) == 1
    assert converted[0].attribute[0].name == "mode"
    assert converted_model.opset_import[0].version == 9
def test_cast_8_9(self):  # type: () -> None
    """Cast FLOAT -> UINT32 converts from opset 8 to 9."""
    src_type = TensorProto.FLOAT
    dst_type = TensorProto.UINT32
    cast = onnx.helper.make_node(
        "Cast", inputs=["X"], outputs=["Y"], to=TensorProto.UINT32)
    graph = helper.make_graph(
        [cast], "test_cast",
        [onnx.helper.make_tensor_value_info("X", src_type, [2, 3])],
        [onnx.helper.make_tensor_value_info("Y", dst_type, [2, 3])])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 8), 9)
    assert converted_model.graph.node[0].op_type == "Cast"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == dst_type
    assert converted_model.opset_import[0].version == 9
def _create_test_model(**kwargs):
    """Build a one-node GPT2Tokenizer test model.

    Expected keyword arguments: ``vocab_file``, ``merges_file``, ``max_length``.
    """
    tok_node = helper.make_node(
        'GPT2Tokenizer', ['string_input'],
        ['input_ids', 'attention_mask'],
        vocab=_get_file_content(kwargs["vocab_file"]),
        merges=_get_file_content(kwargs["merges_file"]),
        name='bpetok',
        padding_length=kwargs["max_length"],
        domain='ai.onnx.contrib')
    str_in = helper.make_tensor_value_info(
        'string_input', onnx_proto.TensorProto.STRING, [None])
    ids_out = helper.make_tensor_value_info(
        'input_ids', onnx_proto.TensorProto.INT64, [None, None])
    mask_out = helper.make_tensor_value_info(
        'attention_mask', onnx_proto.TensorProto.INT64, [None, None])
    graph = helper.make_graph(
        [tok_node], 'test0', [str_in], [ids_out, mask_out])
    return helper.make_model(
        graph, opset_imports=[helper.make_operatorsetid('', 12)])
def test():  # type: () -> None
    """Cos converts down from opset 8 to 6 without error.

    NOTE(review): uses *self* from the enclosing scope — presumably a
    nested helper inside a test method; verify against the caller.
    """
    cos = helper.make_node('Cos', ["X"], ["Y"])
    graph = helper.make_graph(
        [cos], "test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, ))],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, ))])
    self._converted(graph, helper.make_operatorsetid("", 8), 6)
def helper_upsample_with_initializer(self, raw_scale=False):  # type: (bool) -> None
    """Check Upsample(9) with a Scales initializer folds into Upsample(8) attributes.

    :param raw_scale: store the initializer as raw bytes instead of floats
    """
    dtype = TensorProto.FLOAT
    upsample = onnx.helper.make_node(
        "Upsample", inputs=["X", "Scales"], outputs=["Y"], mode="nearest")
    scale_value = [1.0, 1.0, 2.0, 3.0]
    if raw_scale:
        payload = bytes(struct.pack("4f", *scale_value))
    else:
        payload = scale_value
    scale_tensor = onnx.helper.make_tensor(
        "Scales", onnx.TensorProto.FLOAT, [4], payload, raw_scale)
    graph = helper.make_graph(
        [upsample], "test_upsample",
        [onnx.helper.make_tensor_value_info("X", dtype, [1, 1, 2, 2]),
         onnx.helper.make_tensor_value_info("Scales", dtype, [4])],
        [onnx.helper.make_tensor_value_info("Y", dtype, [1, 1, 4, 6])],
        [scale_tensor])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 9), 8)
    node0 = converted_model.graph.node[0]
    assert node0.op_type == "Upsample"
    # the initializer must disappear, folded into a 'scales' attribute
    assert len(converted_model.graph.initializer) == 0
    assert len(node0.attribute) == 2
    assert node0.attribute[1].name == "scales"
    assert converted_model.opset_import[0].version == 8
def test_onnx_inference_so(self):
    """Run a two-Add model through OnnxInference with explicit SessionOptions."""
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
    Z = helper.make_tensor_value_info('Z', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
    add1 = helper.make_node('Add', ['X', 'Y'], ['Zt'], name='Zt')
    add2 = helper.make_node('Add', ['X', 'Zt'], ['Z'], name='Z')
    graph_def = helper.make_graph([add1, add2], 'test-model', [X, Y], [Z])
    model_def = helper.make_model(
        graph_def, producer_name='mlprodict', ir_version=6,
        producer_version='0.1',
        opset_imports=[helper.make_operatorsetid('', TARGET_OPSET)])
    for rt in ['onnxruntime1', 'onnxruntime2']:
        with self.subTest(runtime=rt):
            so = SessionOptions()
            oinf = OnnxInference(
                model_def, runtime_options={'session_options': so},
                runtime=rt)
            xv = numpy.random.randn(4, 2).astype(  # pylint: disable=E1101
                numpy.float32)  # pylint: disable=E1101
            yv = numpy.random.randn(4, 2).astype(  # pylint: disable=E1101
                numpy.float32)  # pylint: disable=E1101
            # Z = (X + Y) + X = 2 * X + Y
            exp = (xv * 2 + yv).astype(numpy.float32)
            res = oinf.run({'X': xv, 'Y': yv})
            self.assertEqualArray(exp, res['Z'], decimal=6)
def test_onnx_micro_runtime_exc2(self):
    "test OnnxMicroRuntime"
    opset = self.config.opset
    x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
    inits = [
        numpy_helper.from_array(np.array([1], dtype=np.float32), name="C1"),
        numpy_helper.from_array(np.array([2], dtype=np.float32), name="C2"),
    ]
    graph = helper.make_graph(
        name='einsum',
        inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, None)],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)],
        initializer=inits,
        nodes=[helper.make_node('Add', ["X", "C1"], ["temp"]),
               helper.make_node('Pow', ["temp", "C2"], ["Y"])])
    model_def = helper.make_model(
        graph=graph,
        opset_imports=[helper.make_operatorsetid('', opset)],
        ir_version=constants.OPSET_TO_IR_VERSION[opset],
        producer_name='tf2onnx',
        producer_version='0.0.1')
    rt = OnnxMicroRuntime(model_def)
    # running this model must raise NotImplementedError
    with self.assertRaises(NotImplementedError):
        rt.run({'X': x})
    # inputs must be passed as a dictionary, not a bare array
    with self.assertRaises(TypeError):
        rt.run(x)
def test_batchnormalization_9_8(self):  # type: () -> None
    """BatchNormalization converts down from opset 9 to 8."""
    dtype = TensorProto.FLOAT
    bn = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['X', 'scale', 'B', 'mean', 'var'],
        outputs=['Y'],
    )
    shape = (2, 3, 4, 5)
    per_channel = [shape[1]]
    graph_inputs = [onnx.helper.make_tensor_value_info("X", dtype, shape)]
    # scale/B/mean/var are all one value per channel
    for name in ("scale", "B", "mean", "var"):
        graph_inputs.append(
            onnx.helper.make_tensor_value_info(name, dtype, per_channel))
    y = onnx.helper.make_tensor_value_info("Y", dtype, shape)
    graph = onnx.helper.make_graph(
        [bn], "test_batchnormalization", graph_inputs, [y])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 9), 8)
    assert converted_model.graph.node[0].op_type == "BatchNormalization"
    assert converted_model.opset_import[0].version == 8
def test_onnx_micro_runtime_shape(self):
    "test OnnxMicroRuntime"
    opset = self.config.opset
    x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
    graph = helper.make_graph(
        name='einsum',
        inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, None)],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, None)],
        nodes=[helper.make_node('Shape', ["X"], ["Y"])])
    model_def = helper.make_model(
        graph=graph,
        opset_imports=[helper.make_operatorsetid('', opset)],
        ir_version=constants.OPSET_TO_IR_VERSION[opset],
        producer_name='tf2onnx',
        producer_version='0.0.1')
    out = OnnxMicroRuntime(model_def).run({'X': x})
    # the Shape node must return the input shape as int64
    assert_almost_equal(np.array(x.shape, dtype=np.int64), out['Y'])
def test_batch_normalization_8_9(self):  # type: () -> None
    """BatchNormalization converts up from opset 8 to 9."""
    dtype = TensorProto.FLOAT
    bn = helper.make_node(
        'BatchNormalization',
        inputs=["x", "s", "bias", "mean", "var"],
        outputs=["y"])
    shape = (1, 2, 1, 3)
    per_channel = [shape[1]]
    graph_inputs = [helper.make_tensor_value_info("x", dtype, shape)]
    # s/bias/mean/var are all one value per channel
    for name in ("s", "bias", "mean", "var"):
        graph_inputs.append(
            helper.make_tensor_value_info(name, dtype, per_channel))
    y = helper.make_tensor_value_info("y", dtype, shape)
    graph = helper.make_graph(
        [bn], "test_batchnormalization_8_9", graph_inputs, [y])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 8), 9)
    assert converted_model.graph.node[0].op_type == "BatchNormalization"
    assert converted_model.opset_import[0].version == 9
def expect(node, inputs, outputs, name):
    """Build a model around *node*, run it and compare the output sequence.

    NOTE(review): uses *self* from the enclosing scope — presumably a nested
    helper inside a test method; verify against the caller.

    :param node: NodeProto whose first two inputs are sequences and optional
        third input is an INT64 tensor
    :param inputs: values fed to the node, matched positionally to node.input
    :param outputs: expected elements of the produced output sequence
    :param name: graph name
    """
    # first two inputs are declared as float sequences
    ginputs = [
        make_sequence_value_info(node.input[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
        make_sequence_value_info(node.input[1], TensorProto.FLOAT, []),  # pylint: disable=E1101,
    ]
    # optional third input is a plain int64 tensor
    if len(node.input) > 2:
        ginputs.append(
            make_tensor_value_info(node.input[2], TensorProto.INT64, []),  # pylint: disable=E1101
        )
    goutputs = [
        make_sequence_value_info(node.output[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
    ]
    model_def = make_model(
        opset_imports=[make_operatorsetid('', TARGET_OPSET)],
        graph=make_graph(name=name, inputs=ginputs, outputs=goutputs,
                         nodes=[node]))
    oinf = OnnxInference(model_def)
    # feed inputs by the node's own input names
    got = oinf.run({n: v for n, v in zip(node.input, inputs)})
    self.assertEqual(len(got), 1)
    oseq = got['output_sequence']
    self.assertEqual(len(oseq), len(outputs))
    for e, g in zip(outputs, oseq):
        self.assertEqualArray(e, g)
    # rewrite the opset/ir version in place after the run
    # NOTE(review): purpose unclear from here — presumably the caller
    # reuses model_def at opset 15 / ir_version 8; confirm before changing
    del model_def.opset_import[:]  # pylint: disable=E1101
    op_set = model_def.opset_import.add()  # pylint: disable=E1101
    op_set.domain = ''
    op_set.version = 15
    model_def.ir_version = 8
def test_add_5_8(self):  # type: () -> None
    """Add with broadcastable shapes converts from opset 5 to 8."""
    add = helper.make_node('Add', ["X1", "X2"], ["Y"])
    graph = helper.make_graph(
        [add], "test",
        [helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5, )),
         helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1, ))],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, ))])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 5), 8)
    # Assert equality of graph and converted_model
    assert converted_model.graph.node[0].op_type == "Add"
    assert converted_model.opset_import[0].version == 8
def test_reshape_4_6(self):  # type: () -> None
    """Reshape with a shape attribute converts from opset 4 to 6."""
    reshape = helper.make_node('Reshape', ["X"], ["Y"], shape=[5])
    graph = helper.make_graph(
        [reshape], "test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, ))],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, ))])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 4), 6)
    # Assert equality of graph and converted_model
    assert converted_model.graph.node[0].op_type == "Reshape"
    assert converted_model.opset_import[0].version == 6
def _ensure_opset_domain(model):
    """Make sure *model* imports the default custom opset domain.

    Appends an opset_import entry (version 1) for the domain returned by
    ``default_opset_domain()`` when it is not already present.

    :param model: ONNX ModelProto, modified in place
    :return: the same model
    """
    op_domain_name = default_opset_domain()
    # idiom: any() replaces the manual flag-setting loop and stops at the
    # first match instead of scanning every entry
    if not any(oi_.domain == op_domain_name for oi_ in model.opset_import):
        model.opset_import.extend(
            [helper.make_operatorsetid(op_domain_name, 1)])
    return model
def test():  # type: () -> None
    """Reshape whose shape is a computed tensor converts from opset 8 to 2.

    NOTE(review): uses *self* from the enclosing scope — presumably a
    nested helper inside a test method; verify against the caller.
    """
    nodes = [
        helper.make_node('Add', ["W", "Z"], ["shape"]),
        helper.make_node('Reshape', ["X", "shape"], ["A"]),
        helper.make_node('Add', ["A", "W"], ["Y"]),
    ]
    graph_inputs = [
        helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, )),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, (1, )),
        helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, )),
    ]
    graph = helper.make_graph(
        nodes, "test", graph_inputs,
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, ))])
    self._converted(graph, helper.make_operatorsetid("", 8), 2)
def test_gemm_down(self):  # type: () -> None
    """Gemm converts down from opset 8 to 1."""
    gemm = helper.make_node('Gemm', ["A", "B", "C"], ["Y"])
    mat = (5, 5,)
    graph = helper.make_graph(
        [gemm], "test",
        [helper.make_tensor_value_info("A", TensorProto.FLOAT, mat),
         helper.make_tensor_value_info("B", TensorProto.FLOAT, mat),
         helper.make_tensor_value_info("C", TensorProto.FLOAT, mat)],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, mat)])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 8), 1)
    # Assert equality of graph and converted_model
    assert converted_model.graph.node[0].op_type == "Gemm"
    assert converted_model.opset_import[0].version == 1
def test_scan_8_9(self):  # type: () -> None
    """Scan with an empty optional sequence-lengths input converts from opset 8 to 9."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT
    # Scan body: running sum, echoed at every step
    node1 = onnx.helper.make_node(
        "Add",
        inputs=["sum_in", "next"],
        outputs=["sum_out"],
    )
    node2 = onnx.helper.make_node(
        "Identity",
        inputs=["sum_out"],
        outputs=["scan_out"],
    )
    g = onnx.helper.make_graph([node1, node2], "scan_body", [
        onnx.helper.make_tensor_value_info("sum_in", data_type, [2]),
        onnx.helper.make_tensor_value_info("next", data_type, [2])
    ], [
        onnx.helper.make_tensor_value_info("sum_out", data_type, [2]),
        onnx.helper.make_tensor_value_info("scan_out", data_type, [2])
    ])
    # first Scan input (sequence lengths) is omitted via an empty name
    nodes = [
        onnx.helper.make_node(
            "Scan",
            inputs=["", "initial", "x"],
            outputs=["y", "z"],
            body=g,
            num_scan_inputs=1,
        )
    ]
    # placeholder value info for the omitted optional input
    # NOTE(review): the name is a single space, not empty — confirm this is
    # what the converter expects before changing it
    seq_lens = onnx.helper.make_empty_tensor_value_info(" ")
    initial = onnx.helper.make_tensor_value_info("initial", data_type,
                                                 [1, 2])
    x = onnx.helper.make_tensor_value_info("x", data_type, [1, 3, 2])
    y = onnx.helper.make_tensor_value_info("y", data_type, [1, 2])
    z = onnx.helper.make_tensor_value_info("z", data_type, [1, 3, 2])
    graph = onnx.helper.make_graph(nodes, "test_scan_8_9",
                                   [seq_lens, initial, x], [y, z])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)
    assert converted_model.graph.node[0].op_type == "Scan"
    assert converted_model.opset_import[0].version == to_opset
def convert(self, explicit_layouts):
    """Parse the source model, convert every graph, and assemble the ONNX model."""
    self.parse()
    logger.debug("Converting...")
    for graph in self.graphes:
        graph.convert(explicit_layouts)
    # ONNXRuntime restrictions: pin opset 11 and IR version 6
    self.onnx = helper.make_model(
        self.graphes[0].onnx,
        producer_name='tflite2onnx',
        ir_version=6,
        opset_imports=[helper.make_operatorsetid(onnx.defs.ONNX_DOMAIN, 11)])
    self.setConverted()
def test_bind_input_types(self):
    """Check IOBinding round-trips every numeric dtype through an Identity model.

    Each dtype is bound twice: once as an OrtValue and once via a raw
    data pointer; both runs must return the original array unchanged.
    """
    opset = onnx_opset_version()
    # always test CPU; add CUDA when the provider is available
    devices = [(C_OrtDevice(C_OrtDevice.cpu(), C_OrtDevice.default_memory(), 0), ['CPUExecutionProvider'])]
    if "CUDAExecutionProvider" in onnxrt.get_all_providers():
        devices.append((C_OrtDevice(C_OrtDevice.cuda(), C_OrtDevice.default_memory(), 0), ['CUDAExecutionProvider']))
    for device, provider in devices:
        for dtype in [np.float32, np.float64, np.int32, np.uint32, np.int64, np.uint64,
                      np.int16, np.uint16, np.int8, np.uint8, np.float16, np.bool_]:
            with self.subTest(dtype=dtype, device=str(device)):
                x = np.arange(8).reshape((-1, 2)).astype(dtype)
                proto_dtype = NP_TYPE_TO_TENSOR_TYPE[x.dtype]
                X = helper.make_tensor_value_info('X', proto_dtype, [None, x.shape[1]])
                Y = helper.make_tensor_value_info('Y', proto_dtype, [None, x.shape[1]])
                # inference: a single Identity node
                node_add = helper.make_node('Identity', ['X'], ['Y'])
                # graph
                graph_def = helper.make_graph([node_add], 'lr', [X], [Y], [])
                model_def = helper.make_model(
                    graph_def, producer_name='dummy', ir_version=7,
                    producer_version="0",
                    opset_imports=[helper.make_operatorsetid('', opset)])
                sess = onnxrt.InferenceSession(model_def.SerializeToString(), providers=provider)
                # first pass: bind the input as an OrtValue
                bind = SessionIOBinding(sess._sess)
                ort_value = C_OrtValue.ortvalue_from_numpy(x, device)
                bind.bind_ortvalue_input('X', ort_value)
                bind.bind_output('Y', device)
                sess._sess.run_with_iobinding(bind, None)
                ortvalue = bind.get_outputs()[0]
                y = ortvalue.numpy()
                assert_almost_equal(x, y)
                # second pass: bind the same buffer through its raw data pointer
                bind = SessionIOBinding(sess._sess)
                bind.bind_input('X', device, dtype, x.shape, ort_value.data_ptr())
                bind.bind_output('Y', device)
                sess._sess.run_with_iobinding(bind, None)
                ortvalue = bind.get_outputs()[0]
                y = ortvalue.numpy()
                assert_almost_equal(x, y)
def onnx_linear_regression_training(coefs, intercept):
    """Return a training model: prediction ``Y = X @ coefs + intercept`` plus a
    squared-error loss ``loss = ReduceSum((Y - label)^2)``."""
    if len(coefs.shape) == 1:
        coefs = coefs.reshape((1, -1))
    coefs = coefs.T
    n_in, n_out = coefs.shape
    # graph inputs: features and expected labels
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, n_in])
    label = helper.make_tensor_value_info(
        'label', TensorProto.FLOAT, [None, n_out])
    # graph outputs: prediction and scalar loss
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, n_out])
    loss = helper.make_tensor_value_info('loss', TensorProto.FLOAT, [])
    nodes = [
        # inference
        helper.make_node('MatMul', ['X', 'coefs'], ['y1'], name='N1'),
        helper.make_node('Add', ['y1', 'intercept'], ['Y'], name='N2'),
        # loss
        helper.make_node('Sub', ['Y', 'label'], ['diff'], name='L1'),
        helper.make_node('Mul', ['diff', 'diff'], ['diff2'], name='L2'),
        helper.make_node('ReduceSum', ['diff2'], ['loss'], name='L3'),
    ]
    # weights stored as initializers
    inits = [numpy_helper.from_array(coefs, name="coefs"),
             numpy_helper.from_array(intercept, name="intercept")]
    graph_def = helper.make_graph(
        nodes, 'lrt', [X, label], [loss, Y], inits)
    return helper.make_model(
        graph_def, producer_name='orttrainer', ir_version=7,
        producer_version=ort_version,
        opset_imports=[helper.make_operatorsetid('', 14)])
def test_batch_normalization_5_8(self):  # type: () -> None
    """BatchNormalization converts from opset 5 to 8."""
    bn = helper.make_node(
        'BatchNormalization', ["X", "scale", "B", "mean", "var"], ["Y"])
    graph_inputs = [
        helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, ))]
    # scale/B/mean/var are single-element tensors
    for name in ("scale", "B", "mean", "var"):
        graph_inputs.append(
            helper.make_tensor_value_info(name, TensorProto.FLOAT, (1, )))
    graph = helper.make_graph(
        [bn], "test", graph_inputs,
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, ))])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 5), 8)
    # Assert equality of graph and converted_model
    assert converted_model.graph.node[0].op_type == "BatchNormalization"
    assert converted_model.opset_import[0].version == 8
def test_dropout_down(self):  # type: () -> None
    """Dropout converts down from opset 8 to 1."""
    dropout = helper.make_node('Dropout', ["data"], ["output"])
    shape = (5, 5,)
    graph = helper.make_graph(
        [dropout], "test",
        [helper.make_tensor_value_info("data", TensorProto.FLOAT, shape)],
        [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape)])
    converted_model = self._converted(
        graph, helper.make_operatorsetid("", 8), 1)
    # Assert equality of graph and converted_model
    assert converted_model.graph.node[0].op_type == "Dropout"
    assert converted_model.opset_import[0].version == 1