def build_ort_where_add(op_version=12):
    # Computes z = x * cond - y * (cond - 1) = x * cond + y * (1 - cond),
    # i.e. an ONNX equivalent of numpy.where(cond, x, y) for a 0/1 mask.
    node = OnnxSub(
        OnnxMul('x', 'cond', op_version=op_version),
        OnnxMul('y',
                OnnxSub('cond', numpy.array([1], dtype=numpy.float32),
                        op_version=op_version),
                op_version=op_version),
        op_version=op_version, output_names=['z'])
    onx = node.to_onnx(inputs=[('cond', FloatTensorType()),
                               ('x', FloatTensorType()),
                               ('y', FloatTensorType())],
                       target_opset=op_version)
    sess = InferenceSession(onx.SerializeToString())
    return lambda cond, x, y: sess.run(None, {'cond': cond, 'x': x, 'y': y})

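# A minimal usage sketch for the builder above, assuming numpy, the
# skl2onnx.algebra.onnx_ops operators and onnxruntime.InferenceSession
# are imported as in the snippet. cond is a 0/1 float mask.
cond = numpy.array([1, 0, 1], dtype=numpy.float32)
x = numpy.array([10, 20, 30], dtype=numpy.float32)
y = numpy.array([-1, -2, -3], dtype=numpy.float32)
where_add = build_ort_where_add()
print(where_add(cond, x, y))  # -> [array([10., -2., 30.], dtype=float32)]
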
def test_if2(self):
    opv = TARGET_OPSET
    x1 = numpy.array([[0, 3], [7, 0]], dtype=numpy.float32)
    x2 = numpy.array([[1, 0], [2, 0]], dtype=numpy.float32)
    node = OnnxAdd('x1', 'x2', output_names=['absxythen'], op_version=opv)
    then_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxythen', FloatTensorType())])
    node = OnnxSub('x1', 'x2', output_names=['absxyelse'], op_version=opv)
    else_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxyelse', FloatTensorType())])
    del else_body.graph.input[:]
    del then_body.graph.input[:]
    cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                       OnnxReduceSum('x2', op_version=opv),
                       op_version=opv)
    ifnode = OnnxIf(cond, then_branch=then_body.graph,
                    else_branch=else_body.graph,
                    op_version=opv, output_names=['y'])
    model_def = ifnode.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('y', FloatTensorType())])
    oinf = OnnxInference(model_def)
    dot = oinf.to_dot()
    self.assertIn("Gr_Greater -> Gr_C0;", dot)

def decorrelate_transformer_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    dtype = guess_numpy_type(X.type)
    options = container.get_options(op, dict(use_gemm=False))
    use_gemm = options['use_gemm']
    print('conversion: use_gemm=', use_gemm)
    if use_gemm:
        Y = OnnxGemm(X, op.coef_.astype(dtype),
                     (- op.mean_ @ op.coef_).astype(dtype),
                     op_version=opv, alpha=1., beta=1.,
                     output_names=out[:1])
    else:
        Y = OnnxMatMul(
            OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
            op.coef_.astype(dtype),
            op_version=opv, output_names=out[:1])
    Y.add_to(scope, container)

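# A hedged sketch of wiring the 'use_gemm' option: the converter above is
# registered with the allowed option values, then the option is chosen at
# conversion time. DecorrelateTransformer, its shape calculator and a
# fitted instance `dec` are assumed to be defined elsewhere.
from skl2onnx import to_onnx, update_registered_converter

update_registered_converter(
    DecorrelateTransformer, "SklearnDecorrelateTransformer",
    decorrelate_transformer_shape_calculator,
    decorrelate_transformer_converter,
    options={'use_gemm': [True, False]})
onx = to_onnx(dec, X.astype(numpy.float32),
              options={'use_gemm': True})
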
def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_
    S = self.S_
    return OnnxDiv(OnnxSub(i0, W), S, output_names=outputs)

def test_onnx_simple_text_plot_toy(self):
    x = numpy.random.randn(10, 3).astype(numpy.float32)
    node1 = OnnxAdd('X', x, op_version=15)
    node2 = OnnxSub('X', x, op_version=15)
    node3 = OnnxAbs(node1, op_version=15)
    node4 = OnnxAbs(node2, op_version=15)
    node5 = OnnxDiv(node3, node4, op_version=15)
    node6 = OnnxAbs(node5, output_names=['Y'], op_version=15)
    onx = node6.to_onnx({'X': x.astype(numpy.float32)},
                        outputs={'Y': x}, target_opset=15)
    text = onnx_simple_text_plot(onx, verbose=False)
    expected = textwrap.dedent("""
    Add(X, Ad_Addcst) -> Ad_C0
    Abs(Ad_C0) -> Ab_Y0
    Identity(Ad_Addcst) -> Su_Subcst
    Sub(X, Su_Subcst) -> Su_C0
    Abs(Su_C0) -> Ab_Y02
    Div(Ab_Y0, Ab_Y02) -> Di_C0
    Abs(Di_C0) -> Y
    """).strip(" \n")
    self.assertIn(expected, text)
    text2, out, err = self.capture(
        lambda: onnx_simple_text_plot(onx, verbose=True))
    self.assertEqual(text, text2)
    self.assertIn('BEST:', out)
    self.assertEmpty(err)

def test_onnx_if_algebra_direct(self):
    opv = TARGET_OPSET
    x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
    x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)
    node = OnnxAdd('x1', 'x2', output_names=['absxythen'], op_version=opv)
    then_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxythen', FloatTensorType())])
    node = OnnxSub('x1', 'x2', output_names=['absxyelse'], op_version=opv)
    else_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxyelse', FloatTensorType())])
    del else_body.graph.input[:]
    del then_body.graph.input[:]
    cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                       OnnxReduceSum('x2', op_version=opv),
                       op_version=opv)
    ifnode = OnnxIf(cond, then_branch=then_body.graph,
                    else_branch=else_body.graph,
                    op_version=opv, output_names=['y'])
    model_def = ifnode.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('y', FloatTensorType())])
    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'x1': x1, 'x2': x2})
    assert_almost_equal(x1 + x2, res[0])

def squareform_pdist(X, **kwargs):
    opv = TARGET_OPSET
    # The scan body computes the squared euclidean distance between
    # 'next' (the current row) and every row of 'next_in'.
    diff = OnnxSub('next_in', 'next', output_names=['diff'],
                   op_version=opv)
    id_next = OnnxIdentity('next_in', output_names=['next_out'],
                           op_version=opv)
    norm = OnnxReduceSumSquare(diff, output_names=['norm'], axes=[1],
                               op_version=opv)
    flat = OnnxSqueezeApi11(norm, output_names=['scan_out'], axes=[1],
                            op_version=opv)
    scan_body = id_next.to_onnx(
        OrderedDict([('next_in', FloatTensorType()),
                     ('next', FloatTensorType())]),
        outputs=[('next_out', FloatTensorType([None, None])),
                 ('scan_out', FloatTensorType([None]))],
        other_outputs=[flat])
    node = OnnxScan(X, X, output_names=['scan0_{idself}', 'scan1_{idself}'],
                    num_scan_inputs=1, body=scan_body.graph,
                    op_version=opv, **kwargs)
    return node[1]

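# A hedged usage sketch for squareform_pdist above: the selected scan
# output is wrapped in an Identity node to give it a fixed name before
# building the model. Imports are assumed to match the other snippets
# (numpy, skl2onnx.algebra.onnx_ops, onnxruntime.InferenceSession).
x = numpy.array([1, 2, 4, 5, 5, 4], dtype=numpy.float32).reshape((3, 2))
dist = OnnxIdentity(squareform_pdist('x'), output_names=['dist'],
                    op_version=TARGET_OPSET)
model = dist.to_onnx({'x': x})
sess = InferenceSession(model.SerializeToString())
print(sess.run(None, {'x': x})[0])  # 3x3 squared-euclidean distance matrix
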
def test_onnx_rename_names_type(self):
    rows = []

    def flog(*s):
        rows.append(" ".join(map(str, s)))

    dtype = numpy.float32
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(
        numpy.float32).reshape((3, 2))
    cop = OnnxAdd('X', numpy.array([1], dtype=dtype),
                  op_version=TARGET_OPSET)
    cop2 = OnnxAdd('X', numpy.array([1], dtype=dtype),
                   op_version=TARGET_OPSET)
    cop3 = OnnxAdd('X', numpy.array([2], dtype=dtype),
                   op_version=TARGET_OPSET, output_names=['inter'])
    cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                   cop2, output_names=['final'],
                   op_version=TARGET_OPSET)
    model_def = cop4.to_onnx({'X': x})
    oinf1 = OnnxInference(model_def)
    new_model = onnx_rename_names(model_def, verbose=1, fLOG=flog,
                                  strategy='type')
    total = "\n".join(rows)
    self.assertIn("'Ad_Addcst' -> 'i_05'", total)
    oinf2 = OnnxInference(new_model)
    y1 = oinf1.run({'X': x})
    y2 = oinf2.run({'X': x})
    self.assertEqualArray(y1['final'], y2['final'])

def test_onnx_if_algebra_indirect_unnamed_clear_input(self):
    opv = TARGET_OPSET
    x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
    x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)
    node_xy = OnnxMul('x1', 'x2', op_version=opv)
    node_then = OnnxAdd('x1', 'xy', output_names=['absxythen'],
                        op_version=opv)
    then_body = node_then.to_onnx(
        {'x1': x1, 'xy': x2}, target_opset=opv,
        outputs=[('absxythen', FloatTensorType())])
    node_else = OnnxSub('x1', 'x2', output_names=['absxyelse'],
                        op_version=opv)
    else_body = node_else.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxyelse', FloatTensorType())])
    cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                       OnnxReduceSum('x2', op_version=opv),
                       op_version=opv)
    ifnode = OnnxIf(cond, then_branch=then_body.graph,
                    else_branch=else_body.graph,
                    op_version=opv, output_names=['y'],
                    global_context={'xy': node_xy},
                    clear_subgraph_inputs=True)
    model_def = ifnode.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('y', FloatTensorType())])
    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'x1': x1, 'x2': x2})
    assert_almost_equal(x1 + x1 * x2, res[0])

def conv(scope, operator, container):
    W = operator.raw_operator.W
    S = operator.raw_operator.S
    X = operator.inputs[0]
    out = operator.outputs
    op = OnnxDiv(OnnxSub(X, W), S, output_names=out)
    op.add_to(scope, container)

def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    # case if there are multiple output nodes
    return OnnxDiv(OnnxSub(i0, W, op_version=self.op_version),
                   S, output_names=outputs,
                   op_version=self.op_version)

def test_onnx_example_pdist(self):
    x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
    opv = _TARGET_OPSET_
    diff = OnnxSub('next_in', 'next', output_names=['diff'],
                   op_version=opv)
    id_next = OnnxIdentity('next_in', output_names=['next_out'],
                           op_version=opv)
    norm = OnnxReduceSumSquare(diff, output_names=['norm'], axes=[1],
                               op_version=opv)
    flat = OnnxSqueezeApi11(norm, output_names=['scan_out'], axes=[1],
                            op_version=opv)
    scan_body = id_next.to_onnx(
        OrderedDict([('next_in', x), ('next', FloatTensorType())]),
        outputs=[('next_out', FloatTensorType([3, 2])),
                 ('scan_out', FloatTensorType([3]))],
        other_outputs=[flat], target_opset=opv)
    sess = InferenceSession(scan_body.SerializeToString())
    res = sess.run(None, {'next_in': x, 'next': x[:1]})
    assert_almost_equal(x, res[0])
    exp = np.array([0., 18., 20.], dtype=np.float32)
    assert_almost_equal(exp, res[1])
    node = OnnxScan('x', 'x', output_names=['y', 'z'],
                    num_scan_inputs=1, body=scan_body.graph,
                    op_version=opv)
    model_def = node.to_onnx({'x': x},
                             outputs=[('y', FloatTensorType([3, 2])),
                                      ('z', FloatTensorType([3, 3]))])
    try:
        onnx.checker.check_model(model_def)
    except ValidationError as e:
        if StrictVersion(onnx__version__) <= StrictVersion("1.5.0"):
            warnings.warn(e)
        else:
            raise e
    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'x': x})
    exp = squareform(pdist(x, metric="sqeuclidean"))
    assert_almost_equal(x, res[0])
    assert_almost_equal(exp, res[1])

def conv(scope, operator, container):
    W = operator.raw_operator.W
    S = operator.raw_operator.S
    X = operator.inputs[0]
    out = operator.outputs
    op = OnnxDiv(
        OnnxSub(X, W, op_version=container.target_opset),
        S, output_names=out,
        op_version=container.target_opset)
    op.add_to(scope, container)

def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("Parameter inputs should contain at least "
                           "one name.")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    return OnnxDiv(OnnxSub(i0, W, op_version=12), S,
                   output_names=outputs, op_version=12)

def test_onnx_text_plot(self):
    idi = numpy.identity(2).astype(numpy.float32)
    opv = TARGET_OPSET
    A = OnnxAdd('X', idi, op_version=opv)
    B = OnnxSub(A, 'W', output_names=['Y'], op_version=opv)
    onx = B.to_onnx({'X': idi.astype(numpy.float32),
                     'W': idi.astype(numpy.float32)})
    res = onnx_text_plot(onx)
    self.assertIn("Init", res)

def decorrelate_transformer_converter2(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    dtype = guess_numpy_type(X.type)
    m = OnnxMatMul(
        OnnxSub(X, op.pca_.mean_.astype(dtype), op_version=opv),
        op.pca_.components_.T.astype(dtype), op_version=opv)
    Y = OnnxIdentity(m, op_version=opv, output_names=out[:1])
    Y.add_to(scope, container)

def _onnx_grad_loss_absolute_error(target_opset=None,
                                   dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert` or
    :math:`Y = f(X1, X2) = \\lVert (X1 - X2)w \\rVert` if
    *weight_name* is not None, and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)
        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxReduceSum, OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset,
                        output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                       resh, op_version=target_opset,
                       output_names=['Y_grad'])
    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y', var_type()),
                                      ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx

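# A minimal sketch of calling the graph builder above, assuming numpy and
# onnxruntime.InferenceSession are imported; target_opset=14 is an
# arbitrary choice for illustration.
onx = _onnx_grad_loss_absolute_error(target_opset=14)
sess = InferenceSession(onx.SerializeToString())
x1 = numpy.array([[1., 2.], [3., 4.]], dtype=numpy.float32)
x2 = numpy.array([[1., 1.], [1., 1.]], dtype=numpy.float32)
loss, grad = sess.run(None, {'X1': x1, 'X2': x2})
# loss = sum |X1 - X2| = 0 + 1 + 2 + 3 = 6, grad = sign(X1 - X2)
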
def test_algebra_converter(self):
    coef = numpy.array([[1, 2], [3, 4]], dtype=numpy.float64)
    intercept = 1
    X_test = numpy.array([[1, -2], [3, -4]], dtype=numpy.float64)
    onnx_fct = OnnxSub(OnnxMatMul('X', coef),
                       numpy.array([intercept], dtype=numpy.float64),
                       output_names=['Y'])
    onnx_model = onnx_fct.to_onnx({'X': X_test}, dtype=numpy.float64)
    sess = InferenceSession(onnx_model.SerializeToString())
    ort_pred = sess.run(None, {'X': X_test})[0]
    assert_almost_equal(ort_pred,
                        numpy.array([[-6., -7.], [-10., -11.]]))

def to_onnx_operator(self, inputs=None, outputs=('Y', ),
                     target_opset=None, **kwargs):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    return OnnxDiv(OnnxSub(i0, W, op_version=self.op_version),
                   S, output_names=outputs,
                   op_version=self.op_version)

def test_onnx_remove_unused_outputs_new(self):
    dtype = numpy.float32
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(
        numpy.float32).reshape((3, 2))
    cop = OnnxAdd('X', numpy.array([1], dtype=dtype),
                  op_version=TARGET_OPSET)
    cop2 = OnnxAdd('X', numpy.array([1], dtype=dtype),
                   op_version=TARGET_OPSET)
    cop3 = OnnxAdd('X', numpy.array([2], dtype=dtype),
                   op_version=TARGET_OPSET, output_names=['inter'])
    cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                   cop2, output_names=['final'],
                   op_version=TARGET_OPSET)
    model_def0 = cop4.to_onnx({'X': x})
    model_def = select_model_inputs_outputs(
        model_def0, "inter", infer_shapes=True, remove_unused=False)
    stats = onnx_statistics(model_def, optim=True)
    c1 = model_def.SerializeToString()
    new_model = select_model_inputs_outputs(
        model_def0, "inter", infer_shapes=True)
    # model_def must not be modified in place by the extraction above
    c2 = model_def.SerializeToString()
    self.assertEqual(c1, c2)
    stats2 = onnx_statistics(model_def, optim=True)
    stats3 = onnx_statistics(new_model, optim=False)
    self.assertEqual(stats['ninits'], 2)
    self.assertEqual(stats2['ninits'], 2)
    self.assertEqual(stats3['ninits'], 1)
    self.assertEqual(stats2['nnodes'], 1)
    self.assertEqual(stats3['nnodes'], 1)
    oinf1 = OnnxInference(model_def)
    y1 = oinf1.run({'X': x})
    oinf2 = OnnxInference(new_model)
    y2 = oinf2.run({'X': x})
    self.assertNotIn('final', y1)
    self.assertNotIn('final', y2)
    self.assertIn('inter', y1)
    self.assertIn('inter', y2)
    self.assertEqualArray(y1['inter'], y2['inter'])

def _onnx_grad_square_error(target_opset=None, dtype=numpy.float32,
                            weight_name=None):
    """
    Returns the ONNX graph for the gradient of function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2 w` if
    *weight_name* is not None.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_square_error')
        oinf = OnnxInference(model_onnx, inplace=False)
        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxMul, OnnxReshape
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxMul(diff, numpy.array([-2], dtype=dtype),
                      op_version=target_opset, output_names=['Y_grad'])
    else:
        res = OnnxMul(
            OnnxMul(diff, numpy.array([-2], dtype=dtype),
                    op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            op_version=target_opset, output_names=['Y_grad'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y_grad', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx

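# A short check of the gradient builder above (a sketch, assuming numpy
# and onnxruntime.InferenceSession are available): as built by the graph,
# Y_grad = -2 * (X1 - X2).
onx = _onnx_grad_square_error(target_opset=14)
sess = InferenceSession(onx.SerializeToString())
x1 = numpy.array([[1., 2.]], dtype=numpy.float32)
x2 = numpy.array([[0., 4.]], dtype=numpy.float32)
print(sess.run(None, {'X1': x1, 'X2': x2})[0])  # -> [[-2., 4.]]
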
def test_pipe_graph_display_text(self):
    idi = numpy.identity(2).astype(numpy.float32)
    opv = TARGET_OPSET
    A = OnnxAdd('X', idi, op_version=opv)
    B = OnnxSub(A, 'W', output_names=['Y'], op_version=opv)
    onx = B.to_onnx({'X': idi.astype(numpy.float32),
                     'W': idi.astype(numpy.float32)})
    bigraph = onnx2bigraph(onx)
    graph = bigraph.display_structure()
    text = graph.to_text()
    for c in ['Input-1', 'Input-0', 'Output-0', 'W', 'W',
              'I0', 'I1', 'inout', 'O0 I0', 'A S']:
        self.assertIn(c, text)

def test_onnx_simple_text_plot_if(self):
    opv = TARGET_OPSET
    x1 = numpy.array([[0, 3], [7, 0]], dtype=numpy.float32)
    x2 = numpy.array([[1, 0], [2, 0]], dtype=numpy.float32)
    node = OnnxAdd('x1', 'x2', output_names=['absxythen'],
                   op_version=opv)
    then_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxythen', FloatTensorType())])
    node = OnnxSub('x1', 'x2', output_names=['absxyelse'],
                   op_version=opv)
    else_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxyelse', FloatTensorType())])
    del else_body.graph.input[:]
    del then_body.graph.input[:]
    cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                       OnnxReduceSum('x2', op_version=opv),
                       op_version=opv)
    ifnode = OnnxIf(cond, then_branch=then_body.graph,
                    else_branch=else_body.graph,
                    op_version=opv, output_names=['y'])
    model_def = ifnode.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('y', FloatTensorType())])
    text = onnx_simple_text_plot(model_def)
    expected = textwrap.dedent("""
    input:
    """).strip(" \n")
    self.assertIn(expected, text)
    self.assertIn("If(Gr_C0) -> y", text)
    oinf = OnnxInference(model_def)
    text2 = oinf.to_text(kind="seq")
    self.assertEqual(text, text2)

def conv(scope, operator, container):
    W = operator.raw_operator.W
    op = OnnxSub(operator.inputs[0], W, output_names=operator.outputs)
    op.add_to(scope, container)
    text = str(container)
    if 'name:"Sub"' not in text:
        raise AssertionError("Unnamed operator:\n{}".format(text))
    nin = list(op.enumerate_initial_types())
    nno = list(op.enumerate_nodes())
    nva = list(op.enumerate_variables())
    assert len(nin) == 1
    assert nin[0][0] == 'input'
    assert nin[0][1].shape == [1, 2]
    assert len(nno) == 1
    assert nno[0].output_names == ['variable']
    assert len(nva) == 1
    assert isinstance(nva[0], tuple)
    assert nva[0][1] == 0

def test_onnx_remove_two_outputs(self):
    dtype = numpy.float32
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(
        numpy.float32).reshape((3, 2))
    cop = OnnxAdd('X', numpy.array([1], dtype=dtype),
                  op_version=get_opset_number_from_onnx())
    cop2 = OnnxAdd('X', numpy.array([1], dtype=dtype),
                   output_names=['keep'],
                   op_version=get_opset_number_from_onnx())
    cop3 = OnnxAdd('X', numpy.array([2], dtype=dtype),
                   op_version=get_opset_number_from_onnx())
    cop4 = OnnxSub(
        OnnxMul(cop, cop3, op_version=get_opset_number_from_onnx()),
        cop2, output_names=['final'],
        op_version=get_opset_number_from_onnx())
    model_def = cop4.to_onnx(
        {'X': x}, outputs=[('keep', FloatTensorType([None, 2])),
                           ('final', FloatTensorType([None, 2]))])
    c1 = model_def.SerializeToString()
    self.assertEqual(len(model_def.graph.output), 2)
    c2 = model_def.SerializeToString()
    self.assertEqual(c1, c2)
    stats = onnx_statistics(model_def, optim=True)
    new_model = onnx_remove_node_redundant(model_def, max_hash_size=10)
    stats2 = onnx_statistics(model_def, optim=True)
    stats3 = onnx_statistics(new_model, optim=False)
    self.assertEqual(stats['ninits'], 2)
    self.assertEqual(stats2['ninits'], 2)
    self.assertEqual(stats3['ninits'], 2)
    self.assertEqual(stats2['nnodes'], 6)
    self.assertEqual(stats3['nnodes'], 6)
    oinf1 = OnnxInference(model_def)
    y1 = oinf1.run({'X': x})
    oinf2 = OnnxInference(new_model)
    y2 = oinf2.run({'X': x})
    self.assertEqualArray(y1['final'], y2['final'])
    self.assertEqualArray(y1['keep'], y2['keep'])

def test_onnx_example_pdist(self):
    x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
    diff = OnnxSub('next_in', 'next', output_names=['diff'])
    id_next = OnnxIdentity('next_in', output_names=['next_out'])
    norm = OnnxReduceSumSquare(diff, output_names=['norm'], axes=[1])
    flat = OnnxSqueeze(norm, output_names=['scan_out'], axes=[1])
    scan_body = id_next.to_onnx(
        OrderedDict([('next_in', x), ('next', FloatTensorType())]),
        outputs=[('next_out', FloatTensorType([3, 2])),
                 ('scan_out', FloatTensorType([3]))],
        other_outputs=[flat])
    sess = InferenceSession(scan_body.SerializeToString())
    res = sess.run(None, {'next_in': x, 'next': x[:1]})
    assert_almost_equal(x, res[0])
    exp = np.array([0., 18., 20.], dtype=np.float32)
    assert_almost_equal(exp, res[1])
    node = OnnxScan('x', 'x', output_names=['y', 'z'],
                    num_scan_inputs=1, body=scan_body.graph)
    model_def = node.to_onnx({'x': x},
                             outputs=[('y', FloatTensorType([3, 2])),
                                      ('z', FloatTensorType([3, 3]))])
    try:
        onnx.checker.check_model(model_def)
    except ValidationError as e:
        if sys.platform.startswith("win"):
            # schema information in onnx is incomplete on Windows
            warnings.warn(e)
        else:
            raise e
    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'x': x})
    exp = squareform(pdist(x, metric="sqeuclidean"))
    assert_almost_equal(x, res[0])
    assert_almost_equal(exp, res[1])

def decorrelate_transformer_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    dtype = guess_numpy_type(X.type)
    Y1 = OnnxMatMul(
        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
        op.coef_.astype(dtype),
        op_version=opv, output_names=out[:1])
    Y2 = OnnxGemm(X, op.coef_.astype(dtype),
                  (- op.mean_ @ op.coef_).astype(dtype),
                  op_version=opv, alpha=1., beta=1.,
                  output_names=out[1:2])
    Y1.add_to(scope, container)
    Y2.add_to(scope, container)

def decorrelate_transformer_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # In most cases, computation happens with floats,
    # but it might be with doubles. ONNX is very strict
    # about types: every constant must have the same
    # type as the input.
    dtype = guess_numpy_type(X.type)

    # We tell in ONNX language how to compute the unique output.
    # op_version=opv tells which opset is requested.
    Y = OnnxMatMul(
        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
        op.coef_.astype(dtype),
        op_version=opv, output_names=out[:1])
    Y.add_to(scope, container)

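# A minimal end-to-end sketch for the tutorial converter above, assuming
# DecorrelateTransformer and its shape calculator are registered with
# skl2onnx.update_registered_converter and `X` is any float matrix.
from skl2onnx import to_onnx

dec = DecorrelateTransformer()
dec.fit(X)
onx = to_onnx(dec, X.astype(numpy.float32))
sess = InferenceSession(onx.SerializeToString())
got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
print(numpy.abs(dec.transform(X) - got).max())  # should be ~0
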
def test_onnx_rename_names_exc(self):
    dtype = numpy.float32
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(
        numpy.float32).reshape((3, 2))
    cop = OnnxAdd('X', numpy.array([1], dtype=dtype),
                  op_version=TARGET_OPSET)
    cop2 = OnnxAdd('X', numpy.array([1], dtype=dtype),
                   op_version=TARGET_OPSET)
    cop3 = OnnxAdd('X', numpy.array([2], dtype=dtype),
                   op_version=TARGET_OPSET, output_names=['inter'])
    cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                   cop2, output_names=['final'],
                   op_version=TARGET_OPSET)
    model_def = cop4.to_onnx({'X': x})
    self.assertRaise(
        lambda: onnx_rename_names(model_def, strategy="none"),
        ValueError)

def _onnx_update_penalty_elastic_error(target_opset=None,
                                       dtype=numpy.float32,
                                       l1=1e-4, l2=1e-4):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = W - 2 \\beta W - \\alpha sign(W)`.
    *l1* is :math:`\\alpha` and *l2* is :math:`\\beta`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'update_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)
        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxMul, OnnxSign

    # Y = X * (1 - 2 * l2) - l1 * sign(X)
    res = OnnxSub(OnnxMul('X', numpy.array([1 - 2 * l2], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(OnnxSign('X', op_version=target_opset),
                          numpy.array([l1], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset,
                  output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(varsx, outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx

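# A minimal sketch of the penalty update above, assuming numpy and
# onnxruntime.InferenceSession are available: with the default
# l1 = l2 = 1e-4, Y = X * (1 - 2e-4) - 1e-4 * sign(X).
onx = _onnx_update_penalty_elastic_error(target_opset=14)
sess = InferenceSession(onx.SerializeToString())
w = numpy.array([[-1., 0., 2.]], dtype=numpy.float32)
print(sess.run(None, {'X': w})[0])
# -> approximately [[-0.9997, 0., 1.9995]]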