def test_kernel_exp_sine_squared(self):
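     # Converts an ExpSineSquared kernel to ONNX with convert_kernel and
     # compares the runtime output with the scikit-learn kernel matrix.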
     from skl2onnx.operator_converters.gaussian_process import convert_kernel
     ker = ExpSineSquared()
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=10)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))])
     sess = OnnxInference(model_onnx)
     Xtest_ = numpy.arange(6).reshape((3, 2))
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2, decimal=5)
 def test_speedup_regressor64_onnx_numpy(self):
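     # OnnxSpeedupRegressor wraps a LinearRegression in double precision with
     # the numpy runtime; the exported ONNX model must reproduce its predictions.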
     data = load_iris()
     X, y = data.data, data.target
     spd = OnnxSpeedupRegressor(LinearRegression(),
                                target_opset=self.opset(),
                                enforce_float32=False,
                                runtime='numpy')
     spd.fit(X, y)
     expected = spd.predict(X)
     onx = to_onnx(spd, X[:1])
     oinf = OnnxInference(onx)
     got = oinf.run({'X': X})['variable']
     self.assertEqualArray(expected, got)
Example #3
    def test_onnxrt_label_encoder_floats(self):
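        # ONNX LabelEncoder mapping float keys to float values,
        # checked against the expected lookup result.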

        corpus = numpy.array([0.1, 0.2, 0.3, 0.2], dtype=numpy.float32)
        op = OnnxLabelEncoder('text',
                              op_version=TARGET_OPSET,
                              keys_floats=[0.1, 0.2, 0.3],
                              values_floats=[0.3, 0.4, 0.5],
                              output_names=['out'])
        onx = op.to_onnx(inputs=[('text', FloatTensorType())])
        oinf = OnnxInference(onx)
        res = oinf.run({'text': corpus})
        self.assertEqualArray(
            res['out'], numpy.array([0.3, 0.4, 0.5, 0.4], dtype=numpy.float32))
    def test_pdist(self):
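        # Builds squareform(pdist(.)) as an ONNX subgraph on top of
        # OnnxAdd('input', 'input') and compares it with scipy on 2 * x;
        # a second run checks another input shape.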
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('input', 'input')
        cdist = onnx_squareform_pdist(cop, dtype=numpy.float32)
        cop2 = OnnxIdentity(cdist, output_names=['cdist'])

        model_def = cop2.to_onnx({'input': FloatTensorType()},
                                 outputs=[('cdist', FloatTensorType())])

        sess = OnnxInference(model_def)
        res = sess.run({'input': x})
        self.assertEqual(list(res.keys()), ['cdist'])

        exp = squareform(pdist(x * 2, metric="sqeuclidean"))
        self.assertEqualArray(exp, res['cdist'])

        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (2, 3))
        res = sess.run({'input': x})
        self.assertEqual(list(res.keys()), ['cdist'])
Example #5
 def test_onnxrt_python_count_vectorizer(self):
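     # Converts a fitted CountVectorizer to ONNX and compares the runtime
     # output with the dense transform of the corpus.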
     corpus = numpy.array([
         'This is the first document.',
         'This document is the second document.',
         'And this is the third one.', 'Is this the first document?'
     ])
     vect = CountVectorizer()
     vect.fit(corpus)
     exp = vect.transform(corpus)
     onx = to_onnx(vect, corpus, target_opset=TARGET_OPSET)
     oinf = OnnxInference(onx)
     got = oinf.run({'X': corpus})
     self.assertEqualArray(exp.todense(), got['variable'])
    def test_onnxrt_python_Binarizer(self):
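        # Converts a Binarizer to ONNX and compares the runtime output
        # with scikit-learn's transform on the test split.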
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        clr = Binarizer()
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(numpy.float32))
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X_test})
        self.assertEqual(list(sorted(got)), ['variable'])
        exp = clr.transform(X_test)
        self.assertEqualArray(exp, got['variable'], decimal=6)
    def test_onnxt_runtime_add(self):
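        # Runs a simple Add graph with the onnxruntime1 and onnxruntime2
        # runtimes, then retrieves intermediate results with intermediate=True.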
        idi = numpy.identity(2, dtype=numpy.float32)
        onx = OnnxAdd('X', idi, output_names=['Y1'],
                      op_version=TARGET_OPSET)
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
        X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)

        model_def.ir_version = get_ir_version(TARGET_OPSET)
        oinf = OnnxInference(model_def, runtime='onnxruntime1')
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y1'])
        self.assertEqualArray(idi + X, got['Y1'], decimal=6)

        oinf = OnnxInference(model_def, runtime='onnxruntime2')
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y1'])
        self.assertEqualArray(idi + X, got['Y1'], decimal=6)

        oinf = OnnxInference(model_def, runtime='onnxruntime1', inplace=False)
        got = oinf.run({'X': X}, intermediate=True)
        self.assertEqual(list(sorted(got)), ['Ad_Addcst', 'X', 'Y1'])
        self.assertEqualArray(idi + X, got['Y1'], decimal=6)
 def test_speedup_transform64_onnx_numba(self):
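     # OnnxSpeedupTransformer wraps a PCA in double precision with the numba
     # runtime; the exported ONNX model must reproduce spd.transform.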
     data = load_iris()
     X, _ = data.data, data.target
     spd = OnnxSpeedupTransformer(PCA(),
                                  target_opset=self.opset(),
                                  enforce_float32=False,
                                  runtime='numba')
     spd.fit(X)
     expected = spd.transform(X)
     onx = to_onnx(spd, X[:1])
     oinf = OnnxInference(onx)
     got = oinf.run({'X': X})['variable']
     self.assertEqualArray(expected, got)
 def test_kernel_rbf1(self):
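     # Converts an RBF kernel to ONNX with convert_kernel and compares the
     # runtime output with the scikit-learn kernel matrix.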
     from skl2onnx.operator_converters.gaussian_process import convert_kernel
     ker = RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=10)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))])
     sess = OnnxInference(model_onnx)
     Xtest_ = numpy.arange(6).reshape((3, 2))
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2)
    def test_kernel_ker2_def_ort1(self):
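        # Converts a sum of two CK * RBF kernels, runs it with onnxruntime,
        # collects verbose output through fLOG and compares several runtimes
        # with side_by_side_by_values (Xtest_ is test data defined outside
        # this excerpt).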
        ker = Sum(
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                       length_scale_bounds=(1e-3, 1e3)),
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                       length_scale_bounds=(1e-3, 1e3))
        )
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32)
        model_onnx = onx.to_onnx(
            inputs=[('X', FloatTensorType([None, None]))],
            outputs=[('Y', FloatTensorType([None, None]))])
        sess = OnnxInference(model_onnx.SerializeToString(),
                             runtime="onnxruntime1")

        rows = []

        def myprint(*args, **kwargs):
            rows.append(" ".join(map(str, args)))

        res = sess.run({'X': Xtest_.astype(numpy.float32)},
                       intermediate=True, verbose=1, fLOG=myprint)
        self.assertGreater(len(rows), 2)
        m1 = res['Y']
        self.assertNotEmpty(m1)
        self.assertGreater(len(res), 2)
        # m2 = ker(Xtest_)
        # self.assertEqualArray(m1, m2, decimal=5)

        cpu = OnnxInference(model_onnx.SerializeToString())
        sbs = side_by_side_by_values(
            [cpu, sess], inputs={'X': Xtest_.astype(numpy.float32)})
        self.assertGreater(len(sbs), 2)
        self.assertIsInstance(sbs, list)
        self.assertIsInstance(sbs[0], dict)
        self.assertIn('step', sbs[0])
        self.assertIn('step', sbs[1])
        self.assertIn('metric', sbs[0])
        self.assertIn('metric', sbs[1])
        self.assertIn('cmp', sbs[0])
        self.assertIn('cmp', sbs[1])

        sess3 = OnnxInference(model_onnx.SerializeToString(),
                              runtime="onnxruntime2")
        sbs = side_by_side_by_values(
            [cpu, sess, sess3], inputs={'X': Xtest_.astype(numpy.float32)})
        self.assertNotEmpty(sbs)

        inputs = {'X': Xtest_.astype(numpy.float32)}
        sbs = side_by_side_by_values(
            [(cpu, inputs), (sess, inputs), (sess3, inputs)])
        self.assertNotEmpty(sbs)
    def test_export_sklearn_kernel_rational_quadratic(self):
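        # Reimplements the RationalQuadratic kernel with translatable
        # primitives and converts the Python function to ONNX with
        # translate_fct2onnx.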
        def kernel_rational_quadratic_none(X, length_scale=1.0, alpha=2.0):
            dists = squareform_pdist(X, metric='sqeuclidean')
            cst = py_pow(length_scale, 2)
            cst = py_mul(cst, alpha, 2)
            t_cst = py_make_float_array(cst)
            tmp = dists / t_cst
            t_one = py_make_float_array(1)
            base = tmp + t_one
            t_alpha = py_make_float_array(py_opp(alpha))
            K = numpy.power(base, t_alpha)
            return K

        x = numpy.array([[1, 2], [3, 4], [5, 6]], dtype=float)
        kernel = RationalQuadratic(length_scale=1.0, alpha=2.0)
        exp = kernel(x, None)
        got = kernel_rational_quadratic_none(x, length_scale=1.0, alpha=2.0)
        self.assertEqualArray(exp, got)

        fct = translate_fct2onnx(kernel_rational_quadratic_none,
                                 cpl=True,
                                 output_names=['Z'],
                                 dtype=numpy.float32)

        r = fct('X', dtype=numpy.float32)
        self.assertIsInstance(r, OnnxIdentity)
        inputs = {'X': x.astype(numpy.float32)}
        onnx_g = r.to_onnx(inputs)
        oinf = OnnxInference(onnx_g)
        res = oinf.run(inputs)
        self.assertEqualArray(exp, res['Z'])

        exp = kernel(x.T, None)
        got = kernel_rational_quadratic_none(x.T)
        self.assertEqualArray(exp, got)
        inputs = {'X': x.T.astype(numpy.float32)}
        res = oinf.run(inputs)
        self.assertEqualArray(exp, res['Z'])
Example #12
    def test_insert_results_into_onnx_init(self):
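        # insert_results_into_onnx injects a precomputed value for 'Z0' as
        # DEBUG nodes; the modified graph must still return the same output.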
        X = helper.make_tensor_value_info('X', TensorProto.FLOAT, None)  # pylint: disable=E1101
        Z = helper.make_tensor_value_info('Z', TensorProto.INT64, None)  # pylint: disable=E1101
        node_def = helper.make_node('Shape', ['X'], ['Z0'], name='Zt')
        node_def1 = helper.make_node('Identity', ['Z0'], ['Z'], name='Zti')
        graph_def = helper.make_graph([node_def, node_def1], 'test-model', [X],
                                      [Z])
        model_def = helper.make_model(
            graph_def,
            producer_name='mlprodict',
            ir_version=7,
            producer_version='0.1',
            opset_imports=[helper.make_operatorsetid('', 13)])

        new_graph = insert_results_into_onnx(
            model_def, {'Z0': numpy.array([[29, 39]], dtype=numpy.int64)},
            as_parameter=False,
            param_name=lambda k: k)
        s_graph = str(new_graph)
        self.assertIn('domain: "DEBUG"', s_graph)
        self.assertIn('op_type: "DEBUG"', s_graph)
        self.assertRaise(
            lambda: insert_results_into_onnx(
                model_def, {'Zt': numpy.array([29, 39], dtype=numpy.int64)}),
            RuntimeError)
        self.assertRaise(
            lambda: insert_results_into_onnx(
                model_def, {'X': numpy.array([29, 39], dtype=numpy.int64)}),
            NotImplementedError)
        # with open('debug.onnx', 'wb') as f:
        #     f.write(new_graph.SerializeToString())

        oinf1 = OnnxInference(model_def)
        oinf2 = OnnxInference(new_graph)
        cst = numpy.array([[5.6, 7.8]])
        self.assertEqualArray(
            oinf1.run({'X': cst})['Z'],
            oinf2.run({'X': cst})['Z'])
    def test_onnx_remove_redundant(self):
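        # Builds a graph with a duplicated Add, applies
        # onnx_remove_node_redundant and checks the original model is left
        # untouched and both models compute the same 'final' output.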
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=get_opset_number_from_onnx())
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=get_opset_number_from_onnx())
        cop4 = OnnxSub(OnnxMul(cop,
                               cop3,
                               op_version=get_opset_number_from_onnx()),
                       cop2,
                       output_names=['final'],
                       op_version=get_opset_number_from_onnx())
        model_def = cop4.to_onnx({'X': x})
        stats = onnx_statistics(model_def, optim=True)
        c1 = model_def.SerializeToString()
        new_model = onnx_remove_node_redundant(model_def, max_hash_size=10)
        c2 = model_def.SerializeToString()
        self.assertEqual(c1, c2)
        stats2 = onnx_statistics(model_def, optim=True)
        stats3 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats['ninits'], 2)
        self.assertEqual(stats2['ninits'], 2)
        self.assertEqual(stats3['ninits'], 2)
        self.assertEqual(stats2['nnodes'], 6)
        self.assertEqual(stats3['nnodes'], 6)
        oinf1 = OnnxInference(model_def)
        y1 = oinf1.run({'X': x})

        oinf2 = OnnxInference(new_model)
        y2 = oinf2.run({'X': x})
        self.assertEqualArray(y1['final'], y2['final'])
Example #14
    def test_cpu_conv_init(self):
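        # Compares the python and onnxruntime1 runtimes on a 1x1 convolution
        # with random inputs; the outputs must agree within 1e-5.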
        x = numpy.random.rand(1, 96, 56, 56).astype(numpy.float32)
        W = numpy.random.rand(24, 96, 1, 1).astype(numpy.float32)

        onx = OnnxConv('X',
                       'W',
                       output_names=['Y'],
                       auto_pad='NOTSET',
                       group=1,
                       dilations=[1, 1],
                       kernel_shape=[1, 1],
                       pads=[0, 0, 0, 0],
                       strides=[1, 1],
                       op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx(
            {
                'X': x.astype(numpy.float32),
                'W': W.astype(numpy.float32)
            },
            target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        oinfrt = OnnxInference(model_def, runtime='onnxruntime1')
        for _ in range(0, 3):
            x = numpy.random.rand(1, 96, 56, 56).astype(numpy.float32)
            W = numpy.random.rand(24, 96, 1, 1).astype(numpy.float32)
            got = oinf.run({'X': x, 'W': W})
            gotrt = oinfrt.run({'X': x, 'W': W})
            diff = list(numpy.abs((gotrt['Y'] - got['Y']).ravel()))
            sdiff = list(sorted(diff))
            if sdiff[-1] > 1e-5:
                raise AssertionError("runtimes disagree {}".format(sdiff[-5:]))
            for ii in range(len(diff)):  # pylint: disable=C0200
                if numpy.isnan(diff[ii]):
                    raise AssertionError(
                        "runtimes disagree about nan {}: {} # {} ? {}".format(
                            ii, diff[ii], gotrt['Y'].ravel()[ii],
                            got['Y'].ravel()[ii]))
            self.assertEqualArray(gotrt['Y'], got['Y'], decimal=5)
Example #15
    def test_onnx_inference_name_confusion(self):
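        # insert_node adds a Cast to INT64 before output 'Z', which makes the
        # run fail with RuntimeTypeError; a second Cast back to FLOAT makes
        # the graph valid again.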
        X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Z = helper.make_tensor_value_info('Z', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        node_def = helper.make_node('Add', ['X', 'Y'], ['Zt'], name='Zt')
        node_def2 = helper.make_node('Add', ['X', 'Zt'], ['Z'], name='Z')
        graph_def = helper.make_graph([node_def, node_def2], 'test-model',
                                      [X, Y], [Z])
        model_def = helper.make_model(graph_def,
                                      producer_name='mlprodict',
                                      ir_version=6,
                                      producer_version='0.1')
        model_def = insert_node(
            model_def,
            node='Z',
            op_type='Cast',
            to=TensorProto.INT64,  # pylint: disable=E1101
            name='castop')
        self.assertIn('castop', str(model_def))

        oinf = OnnxInference(model_def)
        X = (numpy.random.randn(4, 2) * 100000).astype(  # pylint: disable=E1101
            numpy.float32)
        Y = (numpy.random.randn(4, 2) * 100000).astype(  # pylint: disable=E1101
            numpy.float32)
        exp = (X * 2 + Y).astype(numpy.float32)
        self.assertRaise(lambda: oinf.run({'X': X, 'Y': Y}), RuntimeTypeError)

        model_def = insert_node(
            model_def,
            node='Z',
            op_type='Cast',
            to=TensorProto.FLOAT,  # pylint: disable=E1101
            name='castop2')
        oinf = OnnxInference(model_def)
        res = oinf.run({'X': X, 'Y': Y})
        got = res['Z']
        self.assertEqualArray(exp / 100000, got / 100000, decimal=5)
    def test_onnxt_gpr_iris_cdist(self):
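        # Converts a GaussianProcessRegressor with the 'cdist' option,
        # optimises the graph and checks the CDist node is used while the
        # GPmean output stays identical.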
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, _, y_train, __ = train_test_split(X, y, random_state=11)
        clr = GaussianProcessRegressor(ExpSineSquared(), alpha=20.)
        clr.fit(X_train, y_train)

        model_def = to_onnx(
            clr,
            X_train,
            options={GaussianProcessRegressor: {
                'optim': 'cdist'
            }})
        oinf = OnnxInference(model_def)
        res1 = oinf.run({'X': X_train})
        new_model = onnx_optimisations(model_def)
        oinf = OnnxInference(new_model)
        res2 = oinf.run({'X': X_train})
        self.assertEqualArray(res1['GPmean'], res2['GPmean'])
        self.assertIn('op_type: "CDist"', str(new_model))
        dot = oinf.to_dot()
        self.assertIn('''label="CDist\\n(kgpd_CDist)\\nmetric=b'euclidean'"''',
                      dot)
Example #17
    def test_onnxt_iris_adaboost_regressor_lr_ds2_10_int(self):
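        # Converts an AdaBoostRegressor fitted on integer features to opset 10
        # and compares runtime predictions with scikit-learn.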
        clr = AdaBoostRegressor(n_estimators=5)
        model, X_test = fit_regression_model(clr, is_int=True)

        itypes = [('X', Int64TensorType([None, X_test.shape[1]]))]
        model_def = convert_sklearn(
            model, initial_types=itypes, target_opset=10)
        X_test = X_test.astype(numpy.float32)
        oinf = OnnxInference(model_def)
        seq = oinf.display_sequence()
        self.assertNotEmpty(seq)
        res0 = clr.predict(X_test).astype(numpy.float32)
        res1 = oinf.run({'X': X_test})
        self.assertEqualArray(res0, res1['variable'].ravel(), decimal=5)
Example #18
    def onnxrt_python_RandomForestRegressor_dtype(
            self, dtype, n=37, full=False, use_hist=False, ntrees=10,
            runtime='python'):
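        # Shared helper: fits a RandomForest or HistGradientBoosting regressor,
        # converts it with rewrite_ops=True and compares ONNX predictions with
        # scikit-learn for the requested dtype.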
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=11 if not full else 13)
        X_test = X_test.astype(dtype)
        if use_hist:
            if full:
                clr = HistGradientBoostingRegressor()
            else:
                clr = HistGradientBoostingRegressor(
                    max_iter=ntrees, max_depth=4)
        else:
            if full:
                clr = RandomForestRegressor(n_jobs=1)
            else:
                clr = RandomForestRegressor(
                    n_estimators=ntrees, n_jobs=1, max_depth=4)

        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(dtype),
                            rewrite_ops=True)
        oinf = OnnxInference(model_def)

        text = "\n".join(map(lambda x: str(x.ops_), oinf.sequence_))
        self.assertIn("TreeEnsembleRegressor", text)
        if full:
            n = 34
            X_test = X_test[n:n + 5]
        else:
            n = 37
            X_test = X_test[n:n + 5]
        X_test = numpy.vstack([X_test, X_test[:1].copy() * 1.01,
                               X_test[:1].copy() * 0.99])
        y = oinf.run({'X': X_test})
        self.assertEqual(list(sorted(y)), ['variable'])
        lexp = clr.predict(X_test)
        if dtype == numpy.float32:
            self.assertEqualArray(lexp, y['variable'], decimal=5)
        else:
            try:
                self.assertEqualArray(lexp, y['variable'])
            except AssertionError as e:
                raise AssertionError(
                    "---------\n{}\n-----".format(model_def)) from e
        self.assertEqual(oinf.sequence_[0].ops_.rt_.same_mode_, True)
        self.assertNotEmpty(oinf.sequence_[0].ops_.rt_.nodes_modes_)
Example #19
    def test_onnxrt_python_lightgbm_categorical3(self):
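        # LightGBM with categorical features: conversion with several inputs
        # must raise an error; a single float column is then converted and the
        # probability output of the ONNX model is checked.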
        from lightgbm import LGBMClassifier

        X = pandas.DataFrame(
            {"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
             # int
             "B": numpy.random.permutation([1, 2, 3] * 100),
             # float
             "C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
             # bool
             "D": numpy.random.permutation([True, False] * 150),
             "E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                     ordered=True)})  # str and ordered categorical
        y = numpy.random.permutation([0, 1, 2] * 100)
        X_test = pandas.DataFrame(
            {"A": numpy.random.permutation(['a', 'b', 'e'] * 20),  # unseen category
             "B": numpy.random.permutation([1, 3] * 30),
             "C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
             "D": numpy.random.permutation([True, False] * 30),
             "E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
                                     ordered=True)})
        cat_cols_actual = ["A", "B", "C", "D"]
        X[cat_cols_actual] = X[cat_cols_actual].astype('category')
        X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
        gbm0 = LGBMClassifier().fit(X, y)
        exp = gbm0.predict(X_test, raw_score=False)
        self.assertNotEmpty(exp)

        init_types = [('A', StringTensorType()),
                      ('B', Int64TensorType()),
                      ('C', FloatTensorType()),
                      ('D', BooleanTensorType()),
                      ('E', StringTensorType())]
        self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
                         "at most 1 input(s) is(are) supported")

        X = X[['C']].values.astype(numpy.float32)
        X_test = X_test[['C']].values.astype(numpy.float32)
        gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
        exp = gbm0.predict_proba(X_test, raw_score=False)
        model_def = to_onnx(gbm0, X, target_opset=TARGET_OPSET)
        self.assertIn('ZipMap', str(model_def))

        oinf = OnnxInference(model_def)
        y = oinf.run({'X': X_test})
        self.assertEqual(list(sorted(y)),
                         ['output_label', 'output_probability'])
        df = pandas.DataFrame(y['output_probability'])
        self.assertEqual(df.shape, (X_test.shape[0], 3))
        self.assertEqual(exp.shape, (X_test.shape[0], 3))
 def common_test_function_cluster_embedded(self, dtype, est):
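     # Shared helper: a custom clustering wrapper is converted to ONNX for the
     # given dtype; labels and scores must match predict and transform.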
     X = numpy.random.randn(20, 2).astype(dtype)
     y = ((X.sum(axis=1) + numpy.random.randn(
          X.shape[0]).astype(numpy.float32)) >= 0).astype(numpy.int64)
     dec = AnyCustomClusterOnnx(est)
     dec.fit(X, y)
     onx = to_onnx(dec, X.astype(dtype))
     oinf = OnnxInference(onx)
     exp = dec.predict(X)  # pylint: disable=E1101
     prob = dec.transform(X)  # pylint: disable=E1101
     got = oinf.run({'X': X})
     self.assertEqual(dtype, prob.dtype)
     self.assertEqualArray(exp, got['label'].ravel())
     self.assertEqualArray(prob, got['scores'])
    def test_kernel_ker2_def(self):
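        # Converts a sum of two CK * RBF kernels and runs it with the python
        # runtime; a second run with intermediate=True exposes every
        # intermediate result (Xtest_ is test data defined outside this excerpt).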
        ker = Sum(
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                       length_scale_bounds=(1e-3, 1e3)),
            CK(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                       length_scale_bounds=(1e-3, 1e3))
        )
        onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                             op_version=get_opset_number_from_onnx())
        model_onnx = onx.to_onnx(
            inputs=[('X', FloatTensorType([None, None]))],
            outputs=[('Y', FloatTensorType([None, None]))],
            target_opset=get_opset_number_from_onnx())
        sess = OnnxInference(model_onnx.SerializeToString())

        res = sess.run({'X': Xtest_.astype(numpy.float32)})
        m1 = res['Y']
        m2 = ker(Xtest_)
        self.assertEqualArray(m1, m2)

        res = sess.run({'X': Xtest_.astype(numpy.float32)}, intermediate=True)
        self.assertGreater(len(res), 30)
        self.assertIsInstance(res, dict)
Example #22
 def test_speedup_kmeans64_onnx(self):
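     # OnnxSpeedupCluster wraps a KMeans in double precision; the exported
     # ONNX model must reproduce predict (labels) and transform (scores).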
     data = load_iris()
     X, y = data.data, data.target
     spd = OnnxSpeedupCluster(KMeans(n_clusters=3),
                              target_opset=self.opset(),
                              enforce_float32=False)
     spd.fit(X, y)
     expected_label = spd.predict(X)
     expected_score = spd.transform(X)
     onx = to_onnx(spd, X[:1])
     oinf = OnnxInference(onx)
     got = oinf.run({'X': X})
     self.assertEqualArray(expected_score, got['scores'])
     self.assertEqualArray(expected_label, got['label'])
 def test_dict_vectorizer(self):
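     # Converts a DictVectorizer; the runtime receives an array of
     # dictionaries and its sparse output is compared with transform.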
     model = DictVectorizer()
     data = [{"amy": 1.0, "chin": 200.0}, {"nice": 3.0, "amy": 1.0}]
     model.fit_transform(data)
     exp = model.transform(data)
     model_def = convert_sklearn(
         model, "dictionary vectorizer",
         [("input",
           DictionaryType(StringTensorType([1]), FloatTensorType([1])))])
     oinf = OnnxInference(model_def)
     array_data = numpy.array(data)
     got = oinf.run({'input': array_data})
     self.assertEqual(list(sorted(got)), ['variable'])
     self.assertEqualArray(exp.todense(), got['variable'].todense())
Example #24
    def test_onnx_cast(self):
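        # OnnxSubOnnx embeds an existing ONNX graph (a Cast to int64) as a
        # node; the wrapped model must still cast float32 inputs to int64.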
        OnnxCast = loadop("Cast")
        ov = OnnxCast('X', to=numpy.int64, output_names=['Y'])
        onx = ov.to_onnx(numpy.float32, numpy.float32, verbose=0)

        sub = OnnxSubOnnx(onx, 'X', output_names=['Y'])
        onx = sub.to_onnx(numpy.float32, numpy.int64, verbose=0)
        r = repr(sub)
        self.assertStartsWith('OnnxSubOnnx(..., output_name', r)

        oinf = OnnxInference(onx)
        x = numpy.array([-2.4, 2.4], dtype=numpy.float32)
        got = oinf.run({'X': x})
        self.assertEqualArray(x.astype(numpy.int64), got['Y'])
Example #25
    def test_onnx_add(self):
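        # OnnxSubOnnx embeds an existing ONNX graph (an Add of 2) as a node;
        # the wrapped model must still add 2 to the input.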
        OnnxAdd = loadop("Add")
        ov = OnnxAdd('X',
                     numpy.array([2], dtype=numpy.float32),
                     output_names=['Y'])
        onx = ov.to_onnx(numpy.float32, numpy.float32, verbose=0)

        sub = OnnxSubOnnx(onx, 'X', output_names=['Y'])
        onx = sub.to_onnx(numpy.float32, numpy.float32, verbose=0)

        oinf = OnnxInference(onx)
        x = numpy.array([-2, 2], dtype=numpy.float32)
        got = oinf.run({'X': x})
        self.assertEqualArray(x + 2, got['Y'])
 def test_onnxt_knnimputer(self):
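     # Converts a KNNImputer fitted on data with missing values and checks
     # the python runtime imputes x_test like scikit-learn.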
     x_train = numpy.array([[1, 2, numpy.nan, 12], [3, numpy.nan, 3, 13],
                            [1, 4, numpy.nan, 1], [numpy.nan, 4, 3, 12]],
                           dtype=numpy.float32)
     x_test = numpy.array(
         [[1.3, 2.4, numpy.nan, 1], [-1.3, numpy.nan, 3.1, numpy.nan]],
         dtype=numpy.float32)
     kn = KNNImputer(n_neighbors=3, metric='nan_euclidean')
     kn.fit(x_train)
     model_def = to_onnx(kn, x_train)
     oinf = OnnxInference(model_def, runtime='python')
     got = oinf.run({'X': x_test})
     self.assertEqual(list(sorted(got)), ['variable'])
     self.assertEqualArray(kn.transform(x_test), got['variable'], decimal=6)
 def test_function_cluster(self):
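     # A custom clustering estimator converted to ONNX; labels and scores
     # must match predict and transform.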
     X = numpy.random.randn(20, 2).astype(numpy.float32)
     y = ((X.sum(axis=1) +
           numpy.random.randn(X.shape[0]).astype(numpy.float32)) >=
          0).astype(numpy.int64)
     dec = CustomCluster()
     dec.fit(X, y)
     onx = to_onnx(dec, X.astype(numpy.float32))
     oinf = OnnxInference(onx)
     exp = dec.predict(X)
     dist = dec.transform(X)
     got = oinf.run({'X': X})
     self.assertEqualArray(exp, got['label'].ravel())
     self.assertEqualArray(dist, got['scores'])
    def test_onnxt_iris_adaboost_regressor_lr(self):
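        # Converts an AdaBoostRegressor with LinearRegression base estimators
        # and compares runtime predictions with scikit-learn.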
        iris = load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, __ = train_test_split(X, y, random_state=11)
        clr = AdaBoostRegressor(base_estimator=LinearRegression(),
                                n_estimators=3)
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr, X_train.astype(numpy.float32))
        X_test = X_test.astype(numpy.float32)
        oinf = OnnxInference(model_def)
        res0 = clr.predict(X_test).astype(numpy.float32)
        res1 = oinf.run({'X': X_test})
        self.assertEqualArray(res0, res1['variable'].ravel(), decimal=5)
Example #29
 def test_speedup_classifier64_onnx_numpy(self):
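     # OnnxSpeedupClassifier wraps a LogisticRegression in double precision
     # with the numpy runtime; the exported ONNX model must reproduce
     # predict and predict_proba.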
     data = load_iris()
     X, y = data.data, data.target
     spd = OnnxSpeedupClassifier(
         LogisticRegression(), target_opset=self.opset(),
         enforce_float32=False, runtime='numpy')
     spd.fit(X, y)
     expected_label = spd.predict(X)
     expected_proba = spd.predict_proba(X)
     onx = to_onnx(spd, X[:1])
     oinf = OnnxInference(onx)
     got = oinf.run({'X': X})
     self.assertEqualArray(expected_proba, got['probabilities'])
     self.assertEqualArray(expected_label, got['label'])
 def test_kernel_ker12_def(self):
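     # Converts a sum of a constant kernel and CK * RBF to ONNX and compares
     # it with the scikit-learn kernel (Xtest_ is test data defined outside
     # this excerpt).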
     ker = (Sum(CK(0.1, (1e-3, 1e3)), CK(0.1, (1e-3, 1e3)) *
                RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=TARGET_OPSET)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))],
         outputs=[('Y', FloatTensorType([None, None]))],
         target_opset=TARGET_OPSET)
     sess = OnnxInference(model_onnx.SerializeToString())
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2)