Example #1
0
    def test_onnxt_idi(self):
        """OnnxInference must accept a ModelProto, raw bytes, a stream
        and a filename as model source and build the same graph."""
        ident = numpy.identity(2)
        node = OnnxAdd('X', ident, output_names=['Y'],
                       op_version=get_opset_number_from_onnx())
        model_def = node.to_onnx({'X': ident.astype(numpy.float32)},
                                 target_opset=get_opset_number_from_onnx())

        def check(source):
            # whatever the source, the loaded graph must contain the Add node
            self.assertIn('op_type: "Add"', str(OnnxInference(source)))

        check(model_def)
        check(model_def.SerializeToString())
        check(BytesIO(model_def.SerializeToString()))

        # last case: load from a file on disk
        temp = get_temp_folder(__file__, "temp_onnxrt_idi")
        name = os.path.join(temp, "m.onnx")
        with open(name, "wb") as f:
            f.write(model_def.SerializeToString())
        check(name)
Example #2
0
    def _setup_onnx(self):
        """Converts the estimator into ONNX and instantiates the runtimes.

        Sets ``estimator_onnx_ort`` (:epkg:`onnxruntime` session) and
        ``estimator_onnx_pyrt`` (mlprodict ``OnnxInference``); each one
        is set to ``None`` whenever the corresponding step fails.
        """
        try:
            self._setup_to_onnx()
        except RuntimeError:
            # conversion failed: no runtime can be built
            self.estimator_onnx = None
        if self.estimator_onnx is not None:
            from onnxruntime import InferenceSession
            try:
                self.estimator_onnx_ort = InferenceSession(
                    self.estimator_onnx.SerializeToString())
            except RuntimeError:
                self.estimator_onnx_ort = None

            try:
                from mlprodict.onnxrt import OnnxInference
            except ImportError:
                # mlprodict is optional: skip the python runtime
                self.estimator_onnx_pyrt = None
                return
            try:
                self.estimator_onnx_pyrt = OnnxInference(
                    self.estimator_onnx, runtime="python_compiled")
            except RuntimeError:
                self.estimator_onnx_pyrt = None
        else:
            self.estimator_onnx_ort = None
            self.estimator_onnx_pyrt = None
        def expect(node, inputs, outputs, name):
            """Wraps *node* into a one-node model, runs it with *inputs*
            and compares the produced sequence with *outputs*.

            NOTE(review): the statements after the loop rewrite the opset
            imports and ir_version of ``model_def``; presumably the model
            is reused by the caller afterwards -- confirm.
            """
            # first two inputs are float sequences, optional third is int64
            ginputs = [
                make_sequence_value_info(node.input[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
                make_sequence_value_info(node.input[1], TensorProto.FLOAT, []),  # pylint: disable=E1101,
            ]
            if len(node.input) > 2:
                ginputs.append(
                    make_tensor_value_info(node.input[2], TensorProto.INT64,
                                           []),  # pylint: disable=E1101
                )
            goutputs = [
                make_sequence_value_info(node.output[0], TensorProto.FLOAT,
                                         []),  # pylint: disable=E1101,
            ]
            model_def = make_model(
                opset_imports=[make_operatorsetid('', TARGET_OPSET)],
                graph=make_graph(name=name,
                                 inputs=ginputs,
                                 outputs=goutputs,
                                 nodes=[node]))
            oinf = OnnxInference(model_def)
            # feed inputs by the node's own input names
            got = oinf.run({n: v for n, v in zip(node.input, inputs)})
            self.assertEqual(len(got), 1)
            oseq = got['output_sequence']
            self.assertEqual(len(oseq), len(outputs))
            for e, g in zip(outputs, oseq):
                self.assertEqualArray(e, g)

            # pin the opset and ir_version on the model
            del model_def.opset_import[:]  # pylint: disable=E1101
            op_set = model_def.opset_import.add()  # pylint: disable=E1101
            op_set.domain = ''
            op_set.version = 15
            model_def.ir_version = 8
Example #4
0
 def test_onnx_inference_verbose_intermediate(self):
     """Checks the verbose log produced by ``run(..., intermediate=True)``
     and the shape/type accessors of OnnxInference."""
     iris = load_iris()
     X, y = iris.data, iris.target
     X_train, X_test, __, _ = train_test_split(X, y, random_state=11)
     clr = KMeans()
     clr.fit(X_train)
     model_def = to_onnx(clr, X_train.astype(numpy.float32))
     for runtime in ['python', 'python_compiled']:
         with self.subTest(runtime=runtime):
             # NOTE(review): *runtime* is not forwarded to OnnxInference,
             # both iterations exercise the default runtime -- confirm.
             oinf = OnnxInference(model_def, inplace=False)
             buf = BufferedPrint()
             got = oinf.run({'X': X_test.astype(numpy.float32)},
                            verbose=15,
                            fLOG=buf.fprint,
                            intermediate=True)
             self.assertIsInstance(got, dict)
             log = str(buf)
             # the verbose output must mention kernels and statistics
             for fragment in ('+kr', '+ki', 'Onnx-Gemm',
                              'min=', 'max=', 'dtype='):
                 self.assertIn(fragment, log)
             # every shape accessor returns a list
             for attr in ('input_names_shapes',
                          'input_names_shapes_types',
                          'output_names_shapes',
                          'output_names_shapes_types'):
                 self.assertIsInstance(getattr(oinf, attr), list)
    def __init__(self, model, dataset, norm):
        """Fits the scikit-learn model on *dataset* and prepares the
        converted ONNX model plus three inference runtimes.

        :param model: model name, resolved with ``get_model``
        :param dataset: key of ``common_datasets``
        :param norm: if True, prepends a scaler to the model
        """
        BenchPerfTest.__init__(self)
        self.model_name = model
        self.dataset_name = dataset
        self.datas = common_datasets[dataset]
        skl_model = get_model(model)
        if norm:
            # 'NB' models presumably need non-negative inputs,
            # hence MinMaxScaler instead of StandardScaler -- confirm
            scaler = MinMaxScaler() if 'NB' in model else StandardScaler()
            self.model = make_pipeline(scaler, skl_model)
        else:
            self.model = skl_model
        self.model.fit(self.datas[0], self.datas[2])
        self.data_test = self.datas[1]

        # a '-cdist' suffix in the name enables the CDist optimisation
        if '-cdist' in model:
            options = {id(skl_model): {'optim': 'cdist'}}
        else:
            options = None
        self.onx = to_onnx(self.model,
                           self.datas[0].astype(numpy.float32),
                           options=options,
                           target_opset=__max_supported_opset__)
        self.onx.ir_version = get_ir_version(__max_supported_opset__)
        # silence skl2onnx logging while benchmarking
        logger = getLogger("skl2onnx")
        logger.propagate = False
        logger.disabled = True
        self.ort = InferenceSession(self.onx.SerializeToString())
        self.oinf = OnnxInference(self.onx, runtime='python')
        self.oinfc = OnnxInference(self.onx, runtime='python_compiled')
        self.output_name = self.oinf.output_names[-1]
        self.input_name = self.ort.get_inputs()[0].name
        self.model_info = analyze_model(self.model)
Example #6
0
    def test_mock_lightgbm(self):
        """Conversion of a mocked lightgbm booster.

        The conversion must not alter the wrapped tree structure and the
        converted model must return one label and two probabilities per row.
        """
        tree = copy.deepcopy(tree2)
        nb1 = sum(count_nodes(t['tree_structure']) for t in tree['tree_info'])
        model = MockWrappedLightGbmBoosterClassifier(tree)
        nb2 = sum(count_nodes(t['tree_structure']) for t in tree['tree_info'])
        self.assertEqual(nb1, nb2)
        self.assertEqual(nb1, 16)
        onx = to_onnx(model, initial_types=[('x', FloatTensorType([None, 4]))])
        self.assertTrue(model.visited)

        # count the nodes of the converted ensemble; initialized to None so
        # a missing TreeEnsembleClassifier fails the assertion below instead
        # of raising UnboundLocalError
        nbnodes = None
        for n in onx.graph.node:
            if n.op_type != 'TreeEnsembleClassifier':
                continue
            att = n.attribute
            for k in att:
                if k.name != 'nodes_modes':
                    continue
                values = k.strings
                nbnodes = len(values)
        self.assertEqual(nbnodes, 18)

        iris = load_iris()
        X = iris.data
        X = (X * 10).astype(numpy.int32)

        oif = OnnxInference(onx)
        pred = oif.run({'x': X})
        label = pred["output_label"]
        self.assertEqual(label.shape, (X.shape[0], ))
        prob = DataFrame(pred["output_probability"]).values
        self.assertEqual(prob.shape, (X.shape[0], 2))
Example #7
0
    def test_onnxt_lrc_iris_run_node_time(self):
        """``run(..., node_time=True)`` must return one measure
        dictionary per executed node, with and without verbose logging."""
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        clr = LogisticRegression(solver="liblinear")
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(numpy.float32),
                            target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)

        def check_timings(measures):
            # a list of per-node dictionaries is expected
            self.assertIsInstance(measures, list)
            self.assertGreater(len(measures), 1)
            self.assertIsInstance(measures[0], dict)

        _, mt = oinf.run({'X': X_test}, node_time=True)
        check_timings(mt)

        rows = []

        def myprint(*args):
            rows.append(' '.join(map(str, args)))

        # same run with verbose logging enabled
        _, mt = oinf.run({'X': X_test}, node_time=True,
                         verbose=1, fLOG=myprint)
        check_timings(mt)
Example #8
0
    def test_export_sklearn_kernel_dot_product_default(self):
        """Translates the DotProduct kernel into ONNX and compares the
        graph with scikit-learn on an input and its transpose."""
        def kernel_call_ynone(X, sigma_0=2.):
            t_sigma_0 = py_make_float_array(py_pow(sigma_0, 2))
            K = X @ numpy.transpose(X, axes=[1, 0]) + t_sigma_0
            return K

        x = numpy.array([[1, 2], [3, 4], [5, 6]], dtype=float)
        kernel = DotProduct(sigma_0=2.)
        exp = kernel(x, None)
        # the numpy implementation must agree with scikit-learn first
        self.assertEqualArray(exp, kernel_call_ynone(x, sigma_0=2.))

        fct = translate_fct2onnx(kernel_call_ynone,
                                 cpl=True,
                                 output_names=['Z'])

        r = fct('X', op_version=TARGET_OPSET)
        self.assertIsInstance(r, OnnxIdentity)
        inputs = {'X': x.astype(numpy.float32)}
        oinf = OnnxInference(r.to_onnx(inputs))
        self.assertEqualArray(exp, oinf.run(inputs)['Z'])

        # the very same graph must handle the transposed input as well
        exp = kernel(x.T, None)
        self.assertEqualArray(exp, kernel_call_ynone(x.T))
        inputs = {'X': x.T.astype(numpy.float32)}
        self.assertEqualArray(exp, oinf.run(inputs)['Z'])
Example #9
0
    def test_pipeline_add(self):
        """An OnnxTransformer wrapping a handmade Add node can be the
        first step of a scikit-learn pipeline and be converted back."""
        iris = load_iris()
        X, y = iris.data, iris.target
        pca = PCA(n_components=2)
        pca.fit(X)

        ones = numpy.full((1, X.shape[1]), 1, dtype=numpy.float32)
        add = OnnxAdd('X', ones, output_names=['Yadd'])
        onx = add.to_onnx(
            inputs=[('X', FloatTensorType((None, X.shape[1])))],
            outputs=[('Yadd', FloatTensorType((None, X.shape[1])))])

        tr = OnnxTransformer(onx)
        tr.fit()

        pipe = make_pipeline(tr, LogisticRegression())
        pipe.fit(X, y)
        self.assertEqual(pipe.predict(X).shape, (150, ))
        model_onnx = to_onnx(pipe, X.astype(numpy.float32))

        # the converted pipeline must reproduce labels and probabilities
        oinf = OnnxInference(model_onnx)
        expected_label = pipe.predict(X)
        got = oinf.run({'X': X.astype(numpy.float32)})
        self.assertEqual(list(got), ['output_label', 'output_probability'])
        self.assertEqualArray(expected_label, got['output_label'])
        expected_proba = pipe.predict_proba(X)
        probas = DataFrame(list(got['output_probability'])).values
        self.assertEqualArray(expected_proba, probas, decimal=5)
Example #10
0
    def test_onnxt_runtime_cdist(self):
        """Python runtime of the custom CDist operator for both metrics."""
        for metric in ['sqeuclidean', 'euclidean']:
            with self.subTest(metric=metric):
                X = numpy.array([[2, 1], [0, 1]], dtype=float)
                Y = numpy.array([[2, 1, 5], [0, 1, 3]], dtype=float).T
                expected = cdist(X, Y, metric=metric)

                node = OnnxCDist('X', 'Y',
                                 output_names=['Z'],
                                 metric=metric,
                                 op_version=TARGET_OPSET)
                model_def = node.to_onnx(
                    {'X': X.astype(numpy.float32),
                     'Y': Y.astype(numpy.float32)},
                    outputs={'Z': expected.astype(numpy.float32)},
                    target_opset=TARGET_OPSET)
                # the metric must be serialized as a string attribute
                self.assertIn('s: "%s"' % metric, str(model_def))
                sess = OnnxInference(model_def)
                got = sess.run({'X': X, 'Y': Y})
                self.assertEqual(sorted(got), ['Z'])
                self.assertEqualArray(expected, got['Z'], decimal=6)

                # validate the pure python implementation too
                oinfpy = OnnxInference(model_def,
                                       runtime="python",
                                       inplace=True)
                validate_python_inference(
                    oinfpy,
                    {'X': X.astype(numpy.float32),
                     'Y': Y.astype(numpy.float32)},
                    tolerance=1e-6)
        python_tested.append(OnnxCDist)
Example #11
0
    def test_onnxrt_python_DecisionTreeRegressor(self):
        """Python runtime of a converted DecisionTreeRegressor on single
        rows, pairs of rows, and the whole test set."""
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        clr = DecisionTreeRegressor()
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(numpy.float32))
        oinf = OnnxInference(model_def)
        text = "\n".join(str(node.ops_) for node in oinf.sequence_)
        self.assertIn("TreeEnsembleRegressor", text)

        def check_batch(batch):
            # runtime predictions must match scikit-learn exactly
            got = oinf.run({'X': batch.astype(numpy.float32)})
            self.assertEqual(list(sorted(got)), ['variable'])
            expected = clr.predict(batch)
            self.assertEqual(expected.shape, got['variable'].shape)
            self.assertEqualArray(expected, got['variable'])

        for i in range(0, 20):
            check_batch(X_test[i:i + 1])
        for i in range(0, 20):
            check_batch(X_test[i:i + 2])
        check_batch(X_test)
Example #12
0
    def test_model_knn_regressor_equal____(self):
        """KNN regressor on integer features.

        The conversion is allowed to diverge on rows whose neighbours
        sit at exactly the same distance, hence the loose comparison."""
        X, y = make_regression(  # pylint: disable=W0632
            n_samples=1000, n_features=100, random_state=42)
        X = X.astype(numpy.int64)
        X_train, X_test, y_train, _ = train_test_split(
            X, y, test_size=0.5, random_state=42)
        model = KNeighborsRegressor(
            algorithm='brute', metric='manhattan').fit(X_train, y_train)
        model_onnx = convert_sklearn(
            model, 'knn',
            [('input', Int64TensorType([None, X_test.shape[1]]))])
        exp = model.predict(X_test)

        sess = OnnxInference(model_onnx)
        res = sess.run({'input': numpy.array(X_test)})['variable']

        # The conversion has discrepencies when
        # neighbours are at the exact same distance.
        maxd = 1000
        accb = numpy.abs(exp.ravel() - res.ravel()) > maxd
        ind = [i for i, flag in enumerate(accb) if flag == 1]
        self.assertEqual(len(ind), 0)

        accp = numpy.abs(exp - res) < maxd
        ratio = numpy.sum(accp) * 1.0 / res.shape[0]
        self.assertGreater(ratio, 0.7)
        # Explainable discrepencies.
        # self.assertEqualArray(exp, res)
        self.assertEqual(numpy.squeeze(exp).shape, numpy.squeeze(res).shape)
Example #13
0
    def test_onnxt_runtime_solve(self):
        """Python runtime of the custom Solve operator for both values
        of *transposed*."""
        for transposed in [False, True]:
            with self.subTest(transposed=transposed):
                A = numpy.array([[2, 1], [0, 1]], dtype=float)
                Y = numpy.array([2, 1], dtype=float)
                X = solve(A, Y, transposed=transposed)

                onx = OnnxSolve('A', 'Y',
                                output_names=['X'],
                                transposed=transposed,
                                op_version=TARGET_OPSET)
                model_def = onx.to_onnx(
                    {
                        'A': A.astype(numpy.float32),
                        'Y': Y.astype(numpy.float32)
                    },
                    outputs={'X': X.astype(numpy.float32)},
                    target_opset=TARGET_OPSET)
                oinf = OnnxInference(model_def)
                got = oinf.run({'A': A, 'Y': Y})
                self.assertEqual(list(sorted(got)), ['X'])
                self.assertEqualArray(X, got['X'], decimal=6)

                # validate the pure python implementation too
                oinfpy = OnnxInference(model_def,
                                       runtime="python",
                                       inplace=True)
                validate_python_inference(oinfpy, {
                    'A': A.astype(numpy.float32),
                    'Y': Y.astype(numpy.float32)
                })
        # register the operator as covered once, outside the loop;
        # the previous version also appended OnnxCDist here by mistake
        # (copy-paste from test_onnxt_runtime_cdist) and appended on
        # every iteration.
        python_tested.append(OnnxSolve)
Example #14
0
    def test_onnx_test_knn_transform(self):
        """Conversion of NearestNeighbors across several opsets."""
        iris = load_iris()
        X, _ = iris.data, iris.target

        X_train, X_test = train_test_split(X, random_state=11)
        clr = NearestNeighbors(n_neighbors=3)
        clr.fit(X_train)

        for to in (10, 11, 12):
            if to > get_opset_number_from_onnx():
                # opsets are tried in increasing order
                break
            try:
                model_def = to_onnx(
                    clr,
                    X_train.astype(numpy.float32),
                    rewrite_ops=True,
                    options={NearestNeighbors: {
                        'largest0': False
                    }},
                    target_opset=to)
            except NameError as e:
                if "Option 'largest0' not in" in str(e):
                    # option unavailable for this opset: nothing to test
                    continue
                # previously the exception was silently swallowed and the
                # loop went on with an undefined ``model_def``
                raise
            oinf = OnnxInference(model_def, runtime='python')

            X_test = X_test[:3]
            y = oinf.run({'X': X_test.astype(numpy.float32)})
            dist, ind = clr.kneighbors(X_test)

            self.assertEqual(list(sorted(y)), ['distance', 'index'])
            self.assertEqualArray(ind, y['index'])
            self.assertEqualArray(dist,
                                  DataFrame(y['distance']).values,
                                  decimal=5)
Example #15
0
    def test_change_input_first_dimension(self):
        """``change_input_first_dimension`` must fix or free the batch
        dimension without modifying the original model."""
        iris = load_iris()
        X, _ = iris.data, iris.target
        clr = KMeans()
        clr.fit(X)

        model_onnx = to_onnx(clr, X.astype(numpy.float32))
        oinf0 = OnnxInference(model_onnx, runtime='onnxruntime1')

        def first_dims(model):
            # first dimension of every graph input
            return [inp.type.tensor_type.shape.dim[0].dim_value
                    for inp in model.graph.input]

        # the converted model keeps a dynamic (0) first dimension
        for dim in first_dims(model_onnx):
            self.assertEqual(dim, 0)
        new_model = change_input_first_dimension(model_onnx, 2)
        # the source model must not be modified in place
        for dim in first_dims(model_onnx):
            self.assertEqual(dim, 0)
        for dim in first_dims(new_model):
            self.assertEqual(dim, 2)

        oinf = OnnxInference(new_model, runtime='onnxruntime1')
        # the full dataset no longer matches the fixed dimension
        self.assertRaise(lambda: oinf.run({'X': X.astype(numpy.float32)}),
                         InvalidArgument)
        res0 = oinf0.run({'X': X[:2].astype(numpy.float32)})
        res = oinf.run({'X': X[:2].astype(numpy.float32)})
        for k, v in res.items():
            self.assertEqual(v.shape[0], 2)
            self.assertEqualArray(res0[k], v)

        # passing 0 restores a dynamic first dimension
        new_model = change_input_first_dimension(new_model, 0)
        oinf = OnnxInference(new_model, runtime='onnxruntime1')
        res = oinf.run({'X': X[:3].astype(numpy.float32)})
        for k, v in res.items():
            self.assertEqual(v.shape[0], 3)
Example #16
0
    def test_onnx_inference_so(self):
        """SessionOptions can be forwarded through ``runtime_options``."""
        X = helper.make_tensor_value_info(
            'X', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Y = helper.make_tensor_value_info(
            'Y', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Z = helper.make_tensor_value_info(
            'Z', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        # Zt = X + Y, Z = X + Zt, hence Z = 2 * X + Y
        add1 = helper.make_node('Add', ['X', 'Y'], ['Zt'], name='Zt')
        add2 = helper.make_node('Add', ['X', 'Zt'], ['Z'], name='Z')
        graph_def = helper.make_graph(
            [add1, add2], 'test-model', [X, Y], [Z])
        model_def = helper.make_model(
            graph_def,
            producer_name='mlprodict',
            ir_version=6,
            producer_version='0.1',
            opset_imports=[helper.make_operatorsetid('', TARGET_OPSET)])

        for rt in ['onnxruntime1', 'onnxruntime2']:
            with self.subTest(runtime=rt):
                so = SessionOptions()
                oinf = OnnxInference(
                    model_def,
                    runtime_options={'session_options': so},
                    runtime=rt)
                X = numpy.random.randn(4, 2).astype(  # pylint: disable=E1101
                    numpy.float32)  # pylint: disable=E1101
                Y = numpy.random.randn(4, 2).astype(  # pylint: disable=E1101
                    numpy.float32)  # pylint: disable=E1101
                exp = (X * 2 + Y).astype(numpy.float32)
                got = oinf.run({'X': X, 'Y': Y})['Z']
                self.assertEqualArray(exp, got, decimal=6)
Example #17
0
    def test_validate_GradientBoostingClassifier_custom(self):
        """Compares the probabilities of a converted
        GradientBoostingClassifier with scikit-learn on a multi-class
        problem and inspects the largest discrepancies row by row."""
        mcl = _problems['m-cl']()
        (X, y, init_types, _, __, ___) = mcl
        X_train, X_test, y_train, _ = train_test_split(
            X, y, shuffle=True, random_state=2)
        cl = GradientBoostingClassifier(n_estimators=20)
        cl.fit(X_train, y_train)
        pred_skl = cl.predict_proba(X_test)

        model_onnx = to_onnx(cl, init_types[0][1])
        oinf = OnnxInference(model_onnx, runtime='python')
        pred_onx = oinf.run({'X': X_test.astype(numpy.float32)})
        diff = numpy.max(
            numpy.abs(pred_skl - pred_onx['output_probability'].values).ravel())
        if diff >= 1e-5:
            # rank rows by their largest absolute probability difference
            dd = [(numpy.max(numpy.abs(a - b)), i)
                  for i, (a, b) in enumerate(zip(pred_skl, pred_onx['output_probability'].values))]
            dd.sort(reverse=True)
            diff1 = dd[0][0]
            # NOTE(review): dd[3] is the 4th largest difference --
            # presumably at most a few rows are allowed to diverge; confirm.
            diff2 = dd[3][0]
            self.assertGreater(diff1, diff2)
            self.assertLesser(diff2, 1e-5)
        diff = measure_relative_difference(
            pred_skl, pred_onx['output_probability'])
        self.assertLesser(diff, 1e-5)
    def test_rt_svr_simple_test_double(self):
        """Python runtime of SVR in double precision over a range of
        feature counts."""
        fLOG(__file__,
             self._testMethodName,
             OutputPrint=__name__ == "__main__")
        logger = getLogger('skl2onnx')
        logger.disabled = True

        for nf in range(16, 50):
            with self.subTest(nf=nf):
                iris = load_iris()
                X, y = iris.data, iris.target
                # pad or trim the features to nf columns
                X = _modify_dimension(X, nf)
                X_train, X_test, y_train, _ = train_test_split(X, y)
                clr = SVR(kernel='linear')
                clr.fit(X_train, y_train)

                x64 = X_test.astype(numpy.float64)
                onx = to_onnx(clr, x64)
                pyrun = OnnxInference(onx, runtime="python")
                res = pyrun.run({'X': x64})
                self.assertIn('variable', res)
                # default train_test_split leaves 38 test rows
                self.assertEqual(res['variable'].shape, (38, ))
                self.assertEqualArray(res['variable'],
                                      clr.predict(x64),
                                      decimal=2)
Example #19
0
    def test_onnxrt_python_DecisionTreeClassifier_mlabel(self):
        """Multi-label DecisionTreeClassifier with the python runtime."""
        iris = load_iris()
        X, y_ = iris.data, iris.target
        # one-hot encode the three classes into a multi-label target
        y = numpy.zeros((y_.shape[0], 3), dtype=int)
        for cls in (0, 1, 2):
            y[y_ == cls, cls] = 1
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        clr = DecisionTreeClassifier()
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(numpy.float32))
        oinf = OnnxInference(model_def)
        text = "\n".join(str(node.ops_) for node in oinf.sequence_)
        self.assertIn("TreeEnsembleClassifier", text)
        got = oinf.run({'X': X_test.astype(numpy.float32)})
        self.assertEqual(list(sorted(got)),
                         ['output_label', 'output_probability'])
        exp = numpy.array(clr.predict_proba(X_test))
        exp = exp.reshape(max(exp.shape), -1)
        p = got['output_probability']
        frame = pandas.DataFrame(p.values, columns=p.columns)
        self.assertEqualArray(exp, frame, decimal=5)
        self.assertEqualArray(clr.predict(X_test), got['output_label'])
Example #20
0
    def test_random_forest_with_only_one_class(self):
        """A forest trained on a single class must still produce
        probabilities matching scikit-learn in every runtime version."""
        rnd = numpy.random.RandomState(4)  # pylint: disable=E1101
        ntrain, nfeat = 10000, 30
        X_train = numpy.empty((ntrain, nfeat)).astype(numpy.float32)
        X_train[:, :] = rnd.rand(ntrain, nfeat)[:, :]
        eps = rnd.rand(ntrain) - 0.5
        y_train_f = X_train.sum(axis=1) + eps
        y_train = (y_train_f > 12).astype(numpy.int64)
        y_train[y_train_f > 15] = 2
        y_train[y_train_f < 10] = 3
        # collapse every label into one single class
        y_train[:] = 2

        rf = RandomForestClassifier(max_depth=2, n_estimators=80, n_jobs=4)
        rf.fit(X_train, y_train)
        onx = to_onnx(rf, X_train[:1], options={id(rf): {'zipmap': False}})

        for rv in [3, 2, 1]:
            oinf = OnnxInference(onx)
            # force a specific runtime implementation version
            oinf.sequence_[0].ops_._init(  # pylint: disable=W0212
                numpy.float32, rv)

            for n in [1, 20, 100, 10000, 1, 1000, 10]:
                x = numpy.empty((n, X_train.shape[1]), dtype=numpy.float32)
                x[:, :] = rnd.rand(n, X_train.shape[1])[:, :]
                with self.subTest(version=rv, n=n):
                    probs = oinf.run({'X': x})['probabilities']
                    expected = rf.predict_proba(x)
                    self.assertEqualArray(expected.ravel(), probs, decimal=5)
Example #21
0
    def test_onnx_example_cdist_bigger(self):
        """``onnx_cdist`` against ``scipy``'s cdist on the iris data."""
        from skl2onnx.algebra.complex_functions import onnx_cdist
        data = load_iris()
        X, y = data.data, data.target
        self.assertNotEmpty(y)
        # even rows for the reference set, odd rows as queries
        X_train = X[::2]
        X_test = X[1::2]
        dist_node = onnx_cdist(
            OnnxIdentity('X', op_version=TARGET_OPSET),
            X_train.astype(numpy.float32),
            metric="euclidean", dtype=numpy.float32,
            op_version=TARGET_OPSET)
        onx = OnnxIdentity(dist_node,
                           output_names=['Y'],
                           op_version=TARGET_OPSET)
        final = onx.to_onnx(inputs=[('X', FloatTensorType([None, None]))],
                            outputs=[('Y', FloatTensorType())],
                            target_opset=TARGET_OPSET)

        oinf = OnnxInference(final, runtime="python")
        for batch in (X_train, X_test):
            got = oinf.run({'X': batch.astype(numpy.float32)})['Y']
            expected = scipy_cdist(batch, X_train, metric="euclidean")
            self.assertEqualArray(expected, got, decimal=6)
Example #22
0
    def test_onnx_shaker(self):
        """``onnx_shaker`` must bound the discrepancy observed between
        scikit-learn and the ONNX runtime."""
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=1, shuffle=True)
        clr = GradientBoostingClassifier(n_estimators=20)
        clr.fit(X_train, y_train)
        exp = clr.predict_proba(X_test)[:, 2]

        def output_fct(res):
            # keep only the probability of the third class
            return res['output_probability'].values[:, 2]

        model_def = to_onnx(clr, X_train.astype(numpy.float32))
        oinf = OnnxInference(model_def)
        inputs = {'X': X_test}
        res1 = output_fct(oinf.run({'X': X_test.astype(numpy.float32)}))
        shaked = onnx_shaker(oinf, inputs,
                             dtype=numpy.float32,
                             n=100,
                             output_fct=output_fct,
                             force=2)
        # the real error must stay within twice the shaken interval
        delta1 = numpy.max(shaked.max(axis=1) - shaked.min(axis=1))
        deltae = numpy.max(numpy.abs(res1 - exp))
        self.assertLesser(deltae, delta1 * 2)
Example #23
0
 def test_code_add_transpose(self):
     """Exports Add+Transpose to standalone python code and checks the
     generated script reproduces the runtime's output."""
     idi = numpy.identity(2, dtype=numpy.float32)
     onx = OnnxTranspose(OnnxAdd('X',
                                 idi,
                                 op_version=get_opset_number_from_onnx()),
                         output_names=['Y'],
                         op_version=get_opset_number_from_onnx())
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     oinf = OnnxInference(model_def, runtime='python')
     exported = oinf.to_python(inline=False)
     self.assertNotEmpty(exported)
     self.assertIsInstance(exported, dict)
     # one pickle for the constant, one python module
     self.assertEqual(len(exported), 2)
     self.assertIn('onnx_pyrt_Ad_Addcst.pkl', exported)
     self.assertIn('onnx_pyrt_main.py', exported)
     code = exported['onnx_pyrt_main.py']
     self.assertIn('def pyrt_Add(X, Ad_Addcst):', code)
     self.assertIn('def run(self, X):', code)
     # inline
     temp = get_temp_folder(__file__, "temp_code_add_transpose")
     exported = oinf.to_python(inline=True, dest=temp)
     self.assertNotEmpty(exported)
     name = os.path.join(temp, 'onnx_pyrt_main.py')
     self.assertExists(name)
     # test code
     test_code = """
         X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
         oinf = OnnxPythonInference()
         Y = oinf.run(X)
         print(Y)
         """
     X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
     expected = str(oinf.run({'X': X})['Y'])
     self.auto_test_script(name, test_code, expected)
Example #24
0
    def test_onnxt_iris_adaboost_classifier_lr(self):
        """AdaBoost (SAMME) over LogisticRegression: labels and
        probabilities must match scikit-learn."""
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, __ = train_test_split(
            X, y, random_state=11, test_size=0.8)
        clr = AdaBoostClassifier(
            base_estimator=LogisticRegression(
                solver='liblinear', random_state=42),
            n_estimators=3,
            algorithm='SAMME',
            random_state=42)
        clr.fit(X_train, y_train)
        X_test = X_test.astype(numpy.float32)
        # keep only the three first and three last rows
        X_test = numpy.vstack([X_test[:3], X_test[-3:]])
        expected_label = clr.predict(X_test).astype(numpy.float32)
        expected_proba = clr.predict_proba(X_test).astype(numpy.float32)

        model_def = to_onnx(clr, X_train.astype(numpy.float32))

        oinf = OnnxInference(model_def, runtime='python')
        got = oinf.run({'X': X_test})
        probs = DataFrame(got['output_probability']).values
        try:
            self.assertEqualArray(expected_proba, probs)
        except AssertionError as e:
            # dump the model to ease the investigation
            raise RuntimeError("Issue\n{}\n-----\n{}".format(
                e, model_def)) from e
        self.assertEqualArray(expected_label, got['output_label'].ravel())
Example #25
0
    def test_export_sklearn_kernel_exp_sine_squared(self):
        """Translates the ExpSineSquared kernel to ONNX twice (python
        source, then a compiled graph builder) and compares the compiled
        graph's output with scikit-learn."""

        x = numpy.array([[1, 2], [3, 4]], dtype=float)

        kernel = ExpSineSquared(length_scale=1.2, periodicity=1.1)

        def kernel_call_ynone(X, length_scale=1.2, periodicity=1.1, pi=3.141592653589793):
            dists = squareform_pdist(X, metric='euclidean')

            t_pi = py_make_float_array(pi)
            t_periodicity = py_make_float_array(periodicity)
            arg = dists / t_periodicity * t_pi

            sin_of_arg = numpy.sin(arg)

            t_2 = py_make_float_array(2)
            t__2 = py_make_float_array(-2)
            t_length_scale = py_make_float_array(length_scale)

            K = numpy.exp((sin_of_arg / t_length_scale) ** t_2 * t__2)
            return K

        # the numpy implementation must agree with scikit-learn first
        exp = kernel(x, None)
        got = kernel_call_ynone(x)
        self.assertEqualArray(exp, got)
        # names the translator resolves while rewriting the function
        context = {'numpy.sin': numpy.sin, 'numpy.exp': numpy.exp,
                   'numpy_pi': numpy.pi, 'squareform_pdist': 'squareform_pdist',
                   'py_make_float_array': py_make_float_array}

        # first translation: python source code of the ONNX graph
        onnx_code = translate_fct2onnx(kernel_call_ynone, context=context,
                                       output_names=['Z'])
        self.assertIn(
            "X, length_scale=1.2, periodicity=1.1, pi=3.14159", onnx_code)
        self.assertIn("-2", onnx_code)
        self.assertIn('metric="euclidean"', onnx_code)

        from skl2onnx.algebra.onnx_ops import (  # pylint: disable=E0611,E0401
            OnnxAdd, OnnxSin, OnnxMul, OnnxPow, OnnxDiv, OnnxExp
        )
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        # operators needed to compile the translated function
        ctx = {'OnnxAdd': OnnxAdd, 'OnnxPow': OnnxPow,
               'OnnxSin': OnnxSin, 'OnnxDiv': OnnxDiv,
               'OnnxMul': OnnxMul, 'OnnxIdentity': OnnxIdentity,
               'OnnxExp': OnnxExp, 'numpy': numpy,
               'onnx_squareform_pdist': onnx_squareform_pdist,
               'py_make_float_array': py_make_float_array}

        # second translation: compiled callable building the ONNX graph
        fct = translate_fct2onnx(kernel_call_ynone, context=context,
                                 cpl=True, context_cpl=ctx,
                                 output_names=['Z'], dtype=numpy.float32)

        r = fct('X')
        self.assertIsInstance(r, OnnxIdentity)

        inputs = {'X': x.astype(numpy.float32)}
        onnx_g = r.to_onnx(inputs)
        oinf = OnnxInference(onnx_g)
        res = oinf.run(inputs)
        self.assertEqualArray(exp, res['Z'])
Example #26
0
    def test_many_2(self):
        """Enumerate einsum equations built from letter permutations over two
        small arrays and check that both decomposition strategies (default
        and numpy/clean) reproduce :func:`numpy.einsum`, including through
        an ONNX export of the cleaned sequence.

        Fix: the original code rebound ``res`` (the collected list being
        iterated) to the per-equation result inside the loop, which made
        ``total=len(res)`` wrong from the second iteration on.  The
        collected list now has its own name.
        """
        m1 = numpy.arange(2 * 2 * 2).reshape((2, 2, 2)) + 10
        m2 = numpy.arange(4).reshape((2, 2)) + 100

        # Collect every viable (equation, expected result) pair.
        cases = []
        for p1 in itertools.permutations(list("abc")):
            for p2 in itertools.permutations(list("cd")):
                for i in [1, 2]:
                    for j in [0, 1]:
                        sp1 = "".join(p1)
                        sp2 = "".join(p2)
                        # The three output letters must be distinct.
                        if len(set([sp1[0], sp1[i], sp2[j]])) != 3:
                            continue
                        equation = "%s,%s->%s%s%s" % (sp1, sp2, sp1[0],
                                                      sp1[i], sp2[j])
                        try:
                            expected = numpy.einsum(equation, m1, m2)
                        except ValueError:
                            # Not viable equation.
                            continue
                        cases.append((equation, expected))

        for index, (eq, exp) in enumerate(cases):
            with self.subTest(equation=eq, index=index, total=len(cases)):
                # NOTE(review): generated equations never start with a comma,
                # so this verbose trigger looks like leftover debug code.
                verbose = 12 if eq == ',abc,dc->acd' else 0
                if verbose:
                    print(
                        '\n########################################clean=False'
                    )
                    print("#########0", eq)
                seq = decompose_einsum_equation(eq,
                                                m1.shape,
                                                m2.shape,
                                                verbose=verbose)
                got = apply_einsum_sequence(seq, m1, m2, verbose=verbose)
                self.assertEqualArray(exp, got)

                if verbose:
                    print(
                        '\n########################################clean=True')
                    print("#########1", eq)
                seq = decompose_einsum_equation(eq,
                                                m1.shape,
                                                m2.shape,
                                                strategy='numpy',
                                                clean=True,
                                                verbose=verbose)
                got = apply_einsum_sequence(seq, m1, m2, verbose=verbose)
                self.assertEqualArray(exp, got)
                # The cleaned sequence can also be exported to ONNX and
                # executed with the python runtime.
                onx = seq.to_onnx('Y', 'X1', 'X2', dtype=numpy.float32)
                oinf = OnnxInference(onx)
                got_onnx = oinf.run(
                    {
                        'X1': m1.astype(numpy.float32),
                        'X2': m2.astype(numpy.float32)
                    },
                    verbose=verbose,
                    fLOG=print)
                self.assertEqualArray(exp, got_onnx['Y'])
 def test_onnxt_json(self):
     """The JSON serialization of an inference object must expose the
     initializers section."""
     ident = numpy.identity(2)
     graph = OnnxAdd(OnnxAdd('X', ident), numpy.identity(2) * 2,
                     output_names=['Y'])
     model_def = graph.to_onnx({'X': ident.astype(numpy.float32)})
     serialized = OnnxInference(model_def).to_json()
     self.assertIn('"initializers": {', serialized)
 def test_onnxt_add(self):
     """switch_initializers_dtype must report exactly one converted
     initializer for a single-Add graph."""
     ident = numpy.identity(2)
     graph = OnnxAdd('X', ident, output_names=['Y'])
     model_def = graph.to_onnx({'X': ident.astype(numpy.float32)})
     oinf = OnnxInference(model_def, runtime="python")
     report = oinf.switch_initializers_dtype()
     self.assertEqual(len(report), 1)
     self.assertEqual(report[0][:4], ('pass1', '+', 'init', 'Ad_Addcst'))
 def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct):
     """Check one unary ONNX operator class *onnx_cl* against its numpy
     counterpart *np_fct* on a small float matrix."""
     node = onnx_cl('X', output_names=['Y'])
     data = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
     model_def = node.to_onnx({'X': data.astype(numpy.float32)})
     got = OnnxInference(model_def).run({'X': data})
     self.assertEqual(sorted(got), ['Y'])
     self.assertEqualArray(np_fct(data), got['Y'], decimal=6)
def measure_onnx_runtime(model,
                         xt,
                         repeat=REPEAT,
                         number=NUMBER,
                         verbose=True):
    """Benchmark *model* on *xt* with three runtimes and yield one result
    dict per runtime ('INNER' = sklearn predict_proba, 'NPY/C++' =
    mlprodict OnnxInference, 'ORT' = onnxruntime).

    Fix: the metadata-stamping block was copy-pasted three times and
    ``onx.SerializeToString()`` was recomputed for every use; both are now
    factored out.  Yielded dicts are unchanged.

    NOTE(review): relies on module-level globals (``X_test``, ``X_train``,
    ``max_depth``, ``n_estimators``, ``n_features``) — presumably a
    benchmark script; confirm they are defined before calling.
    """
    def _finalize(res, runtime, size=None):
        # Attach the metadata shared by every benchmark row.
        res['model'], res['runtime'] = model.__class__.__name__, runtime
        res['N'] = X_test.shape[0]
        if size is not None:
            res['size'] = size
        res["max_depth"] = max_depth
        res["n_estimators"] = n_estimators
        res["n_features"] = n_features
        if verbose:
            pprint(res)
        return res

    if verbose:
        print(model.__class__.__name__)

    # Baseline: scikit-learn's own predict_proba.
    yield _finalize(
        measure_time(model.predict_proba,
                     xt,
                     repeat=repeat,
                     number=number,
                     div_by_number=True,
                     first_run=True),
        'INNER')

    onx = to_onnx(model, X_train[:1], options={id(model): {'zipmap': False}})
    serialized = onx.SerializeToString()  # serialize once, reuse below

    # mlprodict python/C++ runtime.
    oinf = OnnxInference(onx)
    yield _finalize(
        measure_time(lambda x: oinf.run({'X': x}),
                     xt,
                     repeat=repeat,
                     number=number,
                     div_by_number=True,
                     first_run=True),
        'NPY/C++', size=len(serialized))

    # onnxruntime.
    sess = InferenceSession(serialized)
    yield _finalize(
        measure_time(lambda x: sess.run(None, {'X': x}),
                     xt,
                     repeat=repeat,
                     number=number,
                     div_by_number=True,
                     first_run=True),
        'ORT', size=len(serialized))