def test_onnx_ml(self):
    """Checks that a graph mixing the main ONNX domain with the
    ai.onnx.ml domain (LinearRegressor) converts, loads and runs for
    several target opsets, and that shape inference on every
    intermediate node yields the expected output names."""

    def generate_onnx_graph(opv):
        # Add -> LinearRegressor (ai.onnx.ml, always op_version=1)
        # -> Identity('Y'); returns the model plus the three nodes
        # so the caller can run shape inference on each of them.
        node = OnnxAdd(('X1', FloatTensorType()),
                       np.array([0.1], dtype=np.float32),
                       op_version=opv)
        out = OnnxLinearRegressor(
            node, coefficients=[0.3, 0.3, 0.4, 0.5, 0.6],
            intercepts=[-50.], op_version=1)
        last = OnnxIdentity(out, output_names=['Y'], op_version=opv)
        onx = last.to_onnx([('X1', FloatTensorType((None, 5)))],
                           outputs=[('Y', FloatTensorType())],
                           target_opset=opv)
        return onx, (node, out, last)

    # opv is either a dict {domain: version} ('' is the main ONNX
    # domain) or a plain integer opset.
    for opv in ({'': 10}, 9, 10, 11, 12, TARGET_OPSET):
        if isinstance(opv, dict):
            if opv[''] > get_latest_tested_opset_version():
                continue
        elif opv is not None and opv > get_latest_tested_opset_version():
            continue
        for i, nbnode in enumerate((1, 2, 3, 100)):
            onx, nodes = generate_onnx_graph(opv=opv)
            # The produced model must not import an opset higher
            # than the requested target.
            if opv == {'': 10}:
                for im in onx.opset_import:
                    if im.version > 10:
                        raise AssertionError(
                            "Wrong final opset\nopv={}\n{}".format(
                                opv, onx))
            else:
                for im in onx.opset_import:
                    if im.version > opv:
                        raise AssertionError(
                            "Wrong final opset\nopv={}\n{}".format(
                                opv, onx))
            as_string = onx.SerializeToString()
            try:
                ort = InferenceSession(as_string)
            except (InvalidGraph, InvalidArgument) as e:
                # onnxruntime may not support opsets newer than the
                # installed onnx package; those are skipped.
                if (isinstance(opv, dict)
                        and opv[''] >= onnx_opset_version()):
                    continue
                if (isinstance(opv, int)
                        and opv >= onnx_opset_version()):
                    continue
                raise AssertionError(
                    "Unable to load opv={}\n---\n{}\n---".format(
                        opv, onx)) from e
            X = (np.ones((1, 5)) * nbnode).astype(np.float32)
            res_out = ort.run(None, {'X1': X})
            assert len(res_out) == 1
            res = res_out[0]
            self.assertEqual(res.shape, (1, 1))
            # Shape inference: each node must expose the expected
            # output variable; the result of one node feeds the
            # inference of the next.
            inputs = None
            expected = [[('Ad_C0', FloatTensorType(shape=[]))],
                        [('Li_Y0', FloatTensorType(shape=[]))],
                        [('Y', FloatTensorType(shape=[]))]]
            for i, node in enumerate(nodes):
                shape = node.get_output_type_inference(inputs)
                self.assertEqual(str(expected[i]), str(shape))
                inputs = shape
def test_cascade_add(self):
    """Builds a cascade of *nbnode* chained Add operators, converts
    it for several target opsets and checks that onnxruntime computes
    the expected sums.

    Fix: the ``except InvalidGraph`` handler compared *opv* directly
    with the installed opset even when *opv* is a dict such as
    ``{'': 10}``; ``dict >= int`` raises ``TypeError`` in Python 3
    and masked the real load failure.  The handler now checks the
    type of *opv* first, mirroring ``test_onnx_ml``.
    """

    def generate_onnx_graph(dim, nbnode, input_name='X1', opv=None):
        # Chain nbnode Add operators; the last one names its
        # output 'Y'.
        i1 = input_name
        for i in range(nbnode - 1):
            i2 = (np.ones((1, dim)) * nbnode * 10).astype(np.float32)
            node = OnnxAdd(i1, i2, op_version=opv)
            i1 = node
        i2 = (np.ones((1, dim)) * nbnode * 10).astype(np.float32)
        node = OnnxAdd(i1, i2, output_names=['Y'], op_version=opv)
        onx = node.to_onnx([(input_name, FloatTensorType((None, dim)))],
                           outputs=[('Y', FloatTensorType())],
                           target_opset=opv)
        return onx

    # Expected outputs for nbnode in (1, 2, 3, 100):
    # X = nbnode, result = nbnode + nbnode * (nbnode * 10).
    exp = [
        np.array([[11., 11., 11., 11., 11.]]),
        np.array([[42., 42., 42., 42., 42.]]),
        np.array([[93., 93., 93., 93., 93.]]),
        np.array([[100100., 100100., 100100., 100100., 100100.]])
    ]
    for opv in ({'': 10}, 9, 10, 11, 12, onnx_opset_version()):
        if isinstance(opv, dict):
            if opv[''] > get_latest_tested_opset_version():
                continue
        elif opv is not None and opv > get_latest_tested_opset_version():
            continue
        for i, nbnode in enumerate((1, 2, 3, 100)):
            onx = generate_onnx_graph(5, nbnode, opv=opv)
            as_string = onx.SerializeToString()
            try:
                ort = InferenceSession(as_string)
            except InvalidGraph as e:
                # opv may be a dict {domain: version}; comparing the
                # dict itself with an int would raise TypeError and
                # hide the original error.
                if (isinstance(opv, dict)
                        and opv[''] >= onnx_opset_version()):
                    continue
                if (isinstance(opv, int)
                        and opv >= onnx_opset_version()):
                    continue
                raise AssertionError(
                    "Unable to load opv={}\n---\n{}\n---".format(
                        opv, onx)) from e
            X = (np.ones((1, 5)) * nbnode).astype(np.float32)
            res_out = ort.run(None, {'X1': X})
            assert len(res_out) == 1
            res = res_out[0]
            assert_almost_equal(exp[i], res)

    # Larger graph: 300 chained additions on dimension 10.
    dim = 10
    onx = generate_onnx_graph(dim, 300, opv=11)
    as_string = onx.SerializeToString()
    ort = InferenceSession(as_string)
    # NOTE: nbnode is the last value (100) left over from the loop
    # above; kept as-is for backward-compatible behaviour.
    X = (np.ones((1, dim)) * nbnode).astype(np.float32)
    res_out = ort.run(None, {'X1': X})
    assert len(res_out) == 1
    res = res_out[0]
    assert res.shape[1] == dim
def test_cascade_scaler(self):
    """Chains *nbnode* Scaler operators (ai.onnx.ml) and checks
    that onnxruntime evaluates the whole cascade to zeros: with
    offset=1 and scale=1, (X - 1) stays constant per stage and the
    input X = nbnode cancels out after the chain."""

    def generate_onnx_graph(dim, nbnode, input_name='X1', opv=1):
        # identical offset/scale for every stage of the cascade
        scale = list(np.ones((1, dim)).ravel())
        offset = list(
            map(float, np.ones((1, dim)).astype(np.float32).ravel()))
        current = input_name
        for _ in range(nbnode - 1):
            current = OnnxScaler(current, offset=offset, scale=scale,
                                 op_version=opv)
        last = OnnxScaler(current, offset=offset, scale=scale,
                          output_names=['Y'], op_version=opv)
        return last.to_onnx(
            [(input_name, FloatTensorType((None, dim)))],
            outputs=[('Y', FloatTensorType((None, dim)))],
            target_opset=TARGET_OPSET)

    # every cascade length is expected to produce zeros
    exp = [np.zeros((1, 5)) for _ in range(4)]
    for opv in (1, 2, 3):
        if opv > get_latest_tested_opset_version():
            continue
        for idx, nbnode in enumerate((1, 2, 3, 100)):
            onx = generate_onnx_graph(5, nbnode, opv=opv)
            serialized = onx.SerializeToString()
            try:
                sess = InferenceSession(serialized)
            except InvalidGraph as e:
                # opset 3 of ai.onnx.ml may not be supported yet
                if opv in (3, ):
                    continue
                if opv >= onnx_opset_version():
                    continue
                raise AssertionError(
                    "Unable to load opv={}\n---\n{}\n---".format(
                        opv, onx)) from e
            X = (np.ones((1, 5)) * nbnode).astype(np.float32)
            got = sess.run(None, {'X1': X})
            assert len(got) == 1
            assert_almost_equal(exp[idx], got[0])

    # larger cascade: 300 scalers on dimension 10 (default opv=1);
    # nbnode still holds the last loop value (100)
    dim = 10
    onx = generate_onnx_graph(dim, 300)
    sess = InferenceSession(onx.SerializeToString())
    X = (np.ones((1, dim)) * nbnode).astype(np.float32)
    got = sess.run(None, {'X1': X})
    assert len(got) == 1
    assert got[0].shape[1] == dim
def test_scaler_converted(self):
    """Converts a fitted StandardScaler for several target opsets,
    both as a plain integer opset and as a per-domain dict
    ({'ai.onnx.ml': v}), and checks onnxruntime reproduces the
    sklearn transform."""
    st = StandardScaler()
    X = np.array([[0, 1.5], [6.1, 2.3]])
    st.fit(X)
    exp = st.transform(X)

    def check_model(onx, opv):
        # Load the serialized model and compare with sklearn;
        # opsets newer than the installed onnx package are skipped.
        try:
            sess = InferenceSession(onx.SerializeToString())
        except InvalidGraph as e:
            if opv > onnx_opset_version():
                return
            raise AssertionError(
                "Unable to load opv={}\n---\n{}\n---".format(
                    opv, onx)) from e
        got = sess.run(None, {'X': X.astype(np.float32)})
        assert len(got) == 1
        assert_almost_equal(exp, got[0])

    for opv in (1, 2, 10, 11, 12, onnx_opset_version()):
        if opv > get_latest_tested_opset_version():
            continue
        try:
            onx = to_onnx(st, X.astype(np.float32), target_opset=opv)
        except RuntimeError as e:
            # the converter refuses opsets above the installed onnx
            if ("is higher than the number of the "
                    "installed onnx package") in str(e):
                continue
            raise e
        check_model(onx, opv)

    # same check with the opset pinned per domain
    for opv in (1, 2, 10, 11, 12, onnx_opset_version()):
        onx = to_onnx(st, X.astype(np.float32),
                      target_opset={'ai.onnx.ml': opv})
        check_model(onx, opv)
def test_model_mlp_regressor_default(self):
    """Converts an MLPRegressor for many target opsets; each
    converted model must load in onnxruntime and reproduce the
    sklearn predictions up to 4 decimals."""
    model, X_test = fit_regression_model(MLPRegressor(random_state=42))
    expected = model.predict(X_test)
    for opv in (1, 2, 7, 8, 9, 10, 11, 12, onnx_opset_version()):
        if opv is not None and opv > get_latest_tested_opset_version():
            continue
        try:
            onx = convert_sklearn(
                model, "scikit-learn MLPRegressor",
                [("input", FloatTensorType([None, X_test.shape[1]]))],
                target_opset=opv)
        except RuntimeError as e:
            # the converter refuses opsets above the installed onnx
            if ("is higher than the number of the "
                    "installed onnx package") in str(e):
                continue
            raise e
        serialized = onx.SerializeToString()
        try:
            sess = InferenceSession(serialized)
        except (RuntimeError, InvalidGraph, Fail) as e:
            # very old opsets and opsets newer than the installed
            # onnx package are expected to fail to load
            if opv in (None, 1, 2):
                continue
            if opv >= onnx_opset_version():
                continue
            if ("No suitable kernel definition found for "
                    "op Cast(9)") in str(e):
                # too old onnxruntime
                continue
            raise AssertionError(
                "Unable to load opv={}\n---\n{}\n---".format(
                    opv, onx)) from e
        got = sess.run(None, {'input': X_test})
        assert len(got) == 1
        assert_almost_equal(expected.ravel(), got[0].ravel(), decimal=4)
) from sklearn.model_selection import train_test_split from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType from skl2onnx import to_onnx from skl2onnx.proto import get_latest_tested_opset_version from skl2onnx.operator_converters.gaussian_process import (convert_kernel, convert_kernel_diag) from onnxruntime import InferenceSession, SessionOptions try: from onnxruntime import GraphOptimizationLevel except ImportError: GraphOptimizationLevel = None from onnxruntime import __version__ as ort_version from test_utils import dump_data_and_model, fit_regression_model, TARGET_OPSET _TARGET_OPSET_ = min(get_latest_tested_opset_version(), TARGET_OPSET) Xtrain_ = pd.read_csv(StringIO(""" 1.000000000000000000e+02,1.158972369426435591e+02,5.667579938823991137e-01,2.264397682069040421e-02,1.182166076334919581e-02,2.600819340784729095e-01 1.000000000000000000e+02,8.493978168996618194e+01,2.775702708579337874e-01,1.887456201351307358e-02,2.912599235354124821e-02,2.327206144705836199e-01 1.000000000000000000e+02,8.395765637241281354e+01,7.760226193410907358e-01,2.139558949508506974e-02,1.944769253403489523e-02,5.462612465817335838e-01 1.000000000000000000e+02,1.251224039142802411e+02,1.085922727328213266e+00,1.650449428041057126e-02,2.006508371199252141e-02,3.925044939686896939e-01 1.000000000000000000e+02,7.292655293041464404e+01,1.310113459857209950e+00,2.422656953481223258e-02,3.328909433367271964e-02,4.321979372794531593e-01 1.000000000000000000e+02,1.002649729946309094e+02,1.105327461462607630e+00,2.148827969317553335e-02,3.148001380372193736e-02,1.684894130082370545e-01 1.000000000000000000e+02,9.628657457451673451e+01,3.460979367851939603e-01,1.538570748635538499e-02,3.597376501128631693e-02,5.345963757636325031e-01 1.000000000000000000e+02,8.121250906502669409e+01,1.865077048426986073e+00,2.182149790268794742e-02,4.300530595437276893e-02,5.083327963416256479e-01 
1.000000000000000000e+02,8.612638714481262525e+01,2.717895097207565502e-01,2.029318789405683970e-02,2.387016690377936207e-02,1.889736980423707968e-01 1.000000000000000000e+02,7.377491009582655579e+01,7.210994150180145557e-01,2.239484250704669444e-02,1.642684033674572316e-02,4.341188586319142395e-01 """.strip("\n\r ")), header=None).values
def common_test_sub_graph(self, first_input, model, options=None,
                          cls_type=FloatTensorType, start=9):
    """Shared helper: embeds a fitted *model* as a sub-estimator
    (OnnxSubEstimator) between an Add node and an Identity node,
    then converts and runs the graph for every opset from *start*
    up to TARGET_OPSET (plus the dict form {'': TARGET_OPSET}).

    :param first_input: first input of the graph
    :param model: scikit-learn estimator class to embed
    :param options: converter options forwarded to OnnxSubEstimator
    :param cls_type: FloatTensorType or DoubleTensorType
    :param start: first integer opset to test
    """

    def generate_onnx_graph(opv):
        # dtype follows the requested tensor type
        dtype = np.float32 if cls_type == FloatTensorType else np.float64
        node = OnnxAdd(first_input, np.array([0.1], dtype=dtype),
                       op_version=opv)
        lr = model()
        lr.fit(np.ones([10, 5]), np.arange(0, 10) % 3)
        out = OnnxSubEstimator(lr, node, op_version=1, options=options)
        if model == LogisticRegression:
            # classifier sub-estimators expose two outputs; keep
            # the second one (out[1]) only
            last = OnnxIdentity(out[1], output_names=['Y'], op_version=opv)
        else:
            last = OnnxIdentity(out, output_names=['Y'], op_version=opv)
        onx = last.to_onnx([('X1', cls_type((None, 5)))],
                           outputs=[('Y', cls_type())],
                           target_opset=opv)
        return onx

    dtype = np.float32 if cls_type == FloatTensorType else np.float64
    opsets = list(range(start, TARGET_OPSET + 1))
    # first entry exercises the dict form {domain: version}
    for opv in [{'': TARGET_OPSET}] + opsets:
        with self.subTest(opv=opv):
            if isinstance(opv, dict):
                if opv[''] > get_latest_tested_opset_version():
                    continue
            elif (opv is not None and
                    opv > get_latest_tested_opset_version()):
                continue
            for i, nbnode in enumerate((1, 2, 3, 100)):
                onx = generate_onnx_graph(opv=opv)
                # the converted model must not import an opset
                # above the requested target
                if opv == {'': TARGET_OPSET}:
                    for im in onx.opset_import:
                        if im.version > TARGET_OPSET:
                            raise AssertionError(
                                "Wrong final opset\nopv={}\n{}".format(
                                    opv, onx))
                else:
                    for im in onx.opset_import:
                        if im.version > opv:
                            raise AssertionError(
                                "Wrong final opset\nopv={}\n{}".format(
                                    opv, onx))
                self.assertNotIn('zipmap', str(onx).lower())
                as_string = onx.SerializeToString()
                try:
                    ort = InferenceSession(as_string)
                # NOTE(review): NotImplemented here is presumably an
                # exception class imported from onnxruntime at the
                # top of the file, not the builtin singleton --
                # confirm against the file's imports.
                except (InvalidGraph, InvalidArgument, Fail,
                        NotImplemented) as e:
                    # opsets newer than the installed onnx package
                    # are expected to fail to load
                    if (isinstance(opv, dict) and
                            opv[''] >= onnx_opset_version()):
                        continue
                    if (isinstance(opv, int) and
                            opv >= onnx_opset_version()):
                        continue
                    raise AssertionError(
                        "Unable to load opv={}\n---\n{}\n---".format(
                            opv, onx)) from e
                X = (np.ones((1, 5)) * nbnode).astype(dtype)
                res_out = ort.run(None, {'X1': X})
                assert len(res_out) == 1
                res = res_out[0]
                if model == LogisticRegression:
                    self.assertEqual(res.shape, (1, 3))
                else:
                    self.assertEqual(res.shape, (1, 1))