def test_isolation_forest(self):
    """A tiny IsolationForest converts to ONNX with explicit opset pair."""
    forest = IsolationForest(n_estimators=3, random_state=0)
    train = np.array(
        [[-1.1, -1.2], [0.3, 0.2], [0.5, 0.4], [100., 99.]],
        dtype=np.float32)
    fitted = forest.fit(train)
    onx = to_onnx(
        fitted, train,
        target_opset={'': TARGET_OPSET, 'ai.onnx.ml': TARGET_OPSET_ML})
    self.assertIsNotNone(onx)
    dump_data_and_model(train, fitted, onx, basename="IsolationForest")
def test_local_outlier_factor_double(self):
    """LocalOutlierFactor on float64 input: ONNX outputs match sklearn."""
    lof = LocalOutlierFactor(n_neighbors=2, novelty=True)
    X = np.array(
        [[-1.1, -1.2], [0.3, 0.2], [0.5, 0.4], [100., 99.]],
        dtype=np.float64)
    fitted = lof.fit(X)
    onx = to_onnx(fitted, X, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    self.assertEqual([o.name for o in sess.get_outputs()],
                     ['label', 'scores'])
    results = sess.run(None, {'X': X})
    self.assertEqual(len(results), 2)
    assert_almost_equal(lof.predict(X), results[0].ravel())
    assert_almost_equal(lof.decision_function(X), results[1].ravel())
def test_issue_712_multio(self):
    """Multi-output RandomForest inside a ColumnTransformer (issue 712)."""
    dfx = pandas.DataFrame({
        'CAT1': ['985332', '985333', '985334', '985335', '985336'],
        'CAT2': ['1985332', '1985333', '1985334', '1985335', '1985336'],
        'TEXT': ["abc abc", "abc def", "def ghj", "abcdef", "abc ii"]})
    dfy = pandas.DataFrame({
        'REAL': [5, 6, 7, 6, 5],
        'CATY': [0, 1, 0, 1, 0]})

    # One-hot the two categorical columns, bag-of-words on TEXT.
    vectorizer = Pipeline(steps=[
        ('count_vect',
         CountVectorizer(max_df=0.8, min_df=0.05, max_features=1000))])
    preprocessor = ColumnTransformer(transformers=[
        ('cat_transform', OneHotEncoder(handle_unknown='ignore'),
         ['CAT1', 'CAT2']),
        ('count_vector', vectorizer, 'TEXT')])
    pipe = Pipeline(steps=[
        ('preprocessor', preprocessor),
        ('classifier', MultiOutputClassifier(
            estimator=RandomForestClassifier(
                random_state=42, max_depth=50)))])
    pipe.fit(dfx, dfy)

    expected_label = pipe.predict(dfx)
    expected_proba = pipe.predict_proba(dfx)

    onx = to_onnx(pipe, dfx, target_opset=TARGET_OPSET,
                  options={MultiOutputClassifier: {'zipmap': False}})
    sess = InferenceSession(onx.SerializeToString())
    feeds = {col: dfx[col].values.reshape((-1, 1))
             for col in ('CAT1', 'CAT2', 'TEXT')}
    got = sess.run(None, feeds)
    assert_almost_equal(expected_label, got[0])
    self.assertEqual(len(expected_proba), len(got[1]))
    for e, g in zip(expected_proba, got[1]):
        assert_almost_equal(e, g, decimal=5)
def test_issue_712_svc_binary0(self):
    """Calibrated SVC / LinearSVC binary classifiers in a pipeline."""
    for base_est in [LinearSVC(), SVC()]:
        for calib in ["sigmoid", "isotonic"]:
            with self.subTest(sub_model=base_est, method=calib):
                dfx = pandas.DataFrame({
                    'CAT1': ['985332', '985333', '985334', '985335',
                             '985336', '985332', '985333', '985334',
                             '985335', '985336', '985336'],
                    'CAT2': ['1985332', '1985333', '1985334', '1985335',
                             '1985336', '1985332', '1985333', '1985334',
                             '1985335', '1985336', '1985336'],
                    'TEXT': ["abc abc", "abc def", "def ghj", "abcdef",
                             "abc ii", "abc abc", "abc def", "def ghj",
                             "abcdef", "abc ii", "abc abc"]})
                dfy = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])

                preprocessor = ColumnTransformer(transformers=[
                    ('cat_transform',
                     OneHotEncoder(handle_unknown='ignore'),
                     ['CAT1', 'CAT2']),
                    ('count_vector',
                     Pipeline(steps=[('count_vect', CountVectorizer(
                         max_df=0.8, min_df=0.05, max_features=1000))]),
                     'TEXT')])
                clf = Pipeline(steps=[
                    ('preprocessor', preprocessor),
                    ('classifier', CalibratedClassifierCV(
                        base_est, cv=2, method=calib))])
                clf.fit(dfx, dfy)
                expected_label = clf.predict(dfx)
                expected_proba = clf.predict_proba(dfx)

                onx = to_onnx(clf, dfx, target_opset=TARGET_OPSET,
                              options={'zipmap': False})
                sess = InferenceSession(onx.SerializeToString())
                feeds = {c: dfx[c].values.reshape((-1, 1))
                         for c in ('CAT1', 'CAT2', 'TEXT')}
                got = sess.run(None, feeds)
                assert_almost_equal(expected_label, got[0])
                assert_almost_equal(expected_proba, got[1], decimal=5)
def test_pipeline_make_column_selector(self):
    """make_column_selector-driven transformer converts and matches."""
    X = pandas.DataFrame({
        'city': ['London', 'London', 'Paris', 'Sallisaw'],
        'rating': [5, 3, 4, 5]})
    X['rating'] = X['rating'].astype(numpy.float32)
    ct = make_column_transformer(
        (StandardScaler(),
         make_column_selector(dtype_include=numpy.number)),
        (OneHotEncoder(),
         make_column_selector(dtype_include=object)))
    expected = ct.fit_transform(X)
    onx = to_onnx(ct, X, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    input_names = [i.name for i in sess.get_inputs()]
    feeds = {n: X[n].values.reshape((-1, 1)) for n in input_names}
    got = sess.run(None, feeds)
    assert_almost_equal(expected, got[0])
def measure_onnx_runtime(model, xt, repeat=REPEAT, number=NUMBER,
                         verbose=True):
    """Benchmark *model* with three runtimes and yield one result dict each.

    Runtimes measured: the model's own ``predict_proba`` ('INNER'),
    mlprodict's :class:`OnnxInference` ('NPY/C++') and onnxruntime ('ORT').

    :param model: fitted scikit-learn estimator
    :param xt: input batch passed to the measured callable
    :param repeat: number of measurement repetitions
    :param number: calls per repetition
    :param verbose: if True, prints the model name and each result dict

    NOTE(review): relies on module-level globals ``X_train``, ``X_test``,
    ``max_depth``, ``n_estimators``, ``n_features`` — confirm they are
    defined in the surrounding script.
    """
    name = model.__class__.__name__
    if verbose:
        print(name)

    def _finalize(res, runtime, size=None):
        # Attach the metadata shared by all three measurements
        # (previously triplicated inline).
        res['model'], res['runtime'] = name, runtime
        res['N'] = X_test.shape[0]
        if size is not None:
            res['size'] = size
        res["max_depth"] = max_depth
        res["n_estimators"] = n_estimators
        res["n_features"] = n_features
        if verbose:
            pprint(res)
        return res

    # 1. scikit-learn itself.
    res = measure_time(model.predict_proba, xt, repeat=repeat,
                       number=number, div_by_number=True, first_run=True)
    yield _finalize(res, 'INNER')

    # Convert once; serialize once for both the size and the ORT session.
    onx = to_onnx(model, X_train[:1], options={id(model): {'zipmap': False}})
    serialized = onx.SerializeToString()
    size = len(serialized)

    # 2. mlprodict python/C++ runtime.
    oinf = OnnxInference(onx)
    res = measure_time(lambda x: oinf.run({'X': x}), xt, repeat=repeat,
                       number=number, div_by_number=True, first_run=True)
    yield _finalize(res, 'NPY/C++', size=size)

    # 3. onnxruntime.
    sess = InferenceSession(serialized)
    res = measure_time(lambda x: sess.run(None, {'X': x}), xt,
                       repeat=repeat, number=number, div_by_number=True,
                       first_run=True)
    yield _finalize(res, 'ORT', size=size)
def test_target_opset_dict_kbins(self):
    """KBinsDiscretizer conversion pins ai.onnx.ml to 1 for any request."""
    X = load_iris().data
    model = KBinsDiscretizer(encode="ordinal")
    model.fit(X)
    for main_op in range(9, TARGET_OPSET + 1):
        for ml_op in (1, 2):
            onx = to_onnx(model, X[:1].astype(numpy.float32),
                          target_opset={'': main_op, 'ai.onnx.ml': ml_op})
            dom = get_domain_opset(onx)
            if dom == {'ai.onnx.ml': 1, '': main_op}:
                self.assertEqual(dom, {'ai.onnx.ml': 1, '': main_op})
            else:
                # The converter may settle on a lower main opset.
                assert dom[''] <= main_op
                assert dom['ai.onnx.ml'] == 1
def test_isolation_forest_rnd(self):
    """IsolationForest on random data with two injected outlier rows."""
    isol = IsolationForest(n_estimators=2, random_state=0)
    rng = numpy.random.RandomState(0)  # pylint: disable=E1101
    data = rng.randn(100, 4).astype(numpy.float32)
    # Make the last two rows clear outliers.
    data[-1, 2:] = 99.
    data[-2, :2] = -99.
    fitted = isol.fit(data)
    onx = to_onnx(fitted, data, target_opset=TARGET_OPSET)
    self.assertIsNotNone(onx)
    for subset in (data[5:10], data):
        dump_data_and_model(
            subset, fitted, onx,
            basename="IsolationForestRnd", backend=('python', ),
            methods=['predict', 'decision_function'], verbose=False)
def test_way2_to_onnx(self):
    """KMeans converts via to_onnx; opset 11 must imply ir_version 6."""
    X = np.arange(20).reshape(10, 2)
    km = KMeans(n_clusters=2)
    km.fit(X)
    onx = to_onnx(km, X.astype(np.float32), target_opset=TARGET_OPSET)
    if TARGET_OPSET == 11:
        sonx = str(onx)
        if "version: 11" not in sonx or "ir_version: 6" not in sonx:
            raise AssertionError("Issue with TARGET_OPSET: {}\n{}".format(
                TARGET_OPSET, sonx))
    dump_data_and_model(X.astype(np.float32), km, onx,
                        basename="MixinWay2ToOnnx")
def common_test_gpc(self, dtype=np.float32, n_classes=2):
    """Fit a GaussianProcessClassifier, convert it to ONNX and compare
    predictions, falling back to a custom-ops library when onnxruntime
    lacks the Solve operator.

    :param dtype: np.float32 or np.float64, selects the input tensor type
    :param n_classes: number of classes for the synthetic problem
    """
    gp = GaussianProcessClassifier()
    gp, X = self.fit_classification_model(gp, n_classes=n_classes)

    # return_cov=False, return_std=False
    if dtype == np.float32:
        cls = FloatTensorType
    else:
        cls = DoubleTensorType
    model_onnx = to_onnx(gp,
                         initial_types=[('X', cls([None, None]))],
                         target_opset=TARGET_OPSET,
                         options={
                             GaussianProcessClassifier: {
                                 'zipmap': False,
                                 'optim': 'cdist'
                             }
                         })
    self.assertTrue(model_onnx is not None)
    try:
        sess = InferenceSession(model_onnx.SerializeToString())
    except OrtFail:
        # onnxruntime cannot load the graph; retry with the custom-ops
        # library at self.path, or skip when no library is available.
        if not hasattr(self, 'path'):
            return
        suffix = 'Double' if dtype == np.float64 else 'Float'
        # Operator Solve is missing
        model_onnx = change_onnx_domain(
            model_onnx, {'Solve': ('Solve%s' % suffix, 'ai.onnx.contrib')})
        so = SessionOptions()
        so.register_custom_ops_library(self.path)
        sess = InferenceSession(model_onnx.SerializeToString(), so)
        res = sess.run(None, {'X': X.astype(dtype)})
        assert_almost_equal(res[0].ravel(), gp.predict(X).ravel())
        assert_almost_equal(res[1], gp.predict_proba(X), decimal=3)
        return
    dt = 32 if dtype == np.float32 else 64
    dump_data_and_model(X.astype(dtype),
                        gp,
                        model_onnx,
                        verbose=False,
                        basename="SklearnGaussianProcessRBFT%d%d" %
                        (n_classes, dt))
def test_gpr_rbf_fitted_false(self):
    """GaussianProcessRegressor without y-normalization, float32 input."""
    gp = GaussianProcessRegressor(alpha=1e-7, n_restarts_optimizer=15,
                                  normalize_y=False)
    gp.fit(Xtrain_, Ytrain_)
    # return_cov=False, return_std=False
    onx = to_onnx(
        gp, initial_types=[('X', FloatTensorType([None, None]))])
    self.assertTrue(onx is not None)
    dump_data_and_model(Xtest_.astype(np.float32), gp, onx,
                        verbose=False,
                        basename="SklearnGaussianProcessRBF-Dec4")
def test_woe_transformer_conv_ext2(self):
    """WOETransformer: every open/closed boundary combination converts."""
    for left_inc, right_inc in [(False, False), (True, True),
                                (False, True), (True, False)]:
        with self.subTest(inca=left_inc, incb=right_inc):
            x = numpy.array([[0.45], [0.5], [0.55]], dtype=numpy.float32)
            woe = WOETransformer(
                intervals=[[(0.4, 0.5, False, left_inc),
                            (0.5, 0.6, right_inc, False)]])
            woe.fit(x)
            expected = woe.transform(x)
            onx = to_onnx(woe, x, target_opset=TARGET_OPSET, verbose=0)
            sess = InferenceSession(onx.SerializeToString())
            got = sess.run(None, {'X': x})[0]
            assert_almost_equal(expected, got)
def test_woe_transformer_conv_ext3(self):
    """WOETransformer on four columns, one interval pair per column."""
    x = numpy.array(
        [[0.4, 1.4, 2.4, 3.4],
         [0.5, 1.5, 2.5, 3.5],
         [0.6, 1.6, 2.6, 3.6]], dtype=numpy.float32)
    woe = WOETransformer(intervals=[
        [(0.4, 0.5, False, False), (0.5, 0.6, False, False)],
        [(1.4, 1.5, False, True), (1.5, 1.6, False, True)],
        [(2.4, 2.5, True, False), (2.5, 2.6, True, False)],
        [(3.4, 3.5, True, True), (3.5, 3.6, True, True)]])
    woe.fit(x)
    expected = woe.transform(x)
    onx = to_onnx(woe, x, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    got = sess.run(None, {'X': x})[0]
    assert_almost_equal(expected, got)
def test_random_trees_embedding(self):
    """RandomTreesEmbedding transform matches after ONNX conversion."""
    X, _ = make_regression(n_features=5, n_samples=100, n_targets=1,
                           random_state=42, n_informative=3)
    X = X.astype(numpy.float32)
    embedder = RandomTreesEmbedding(
        n_estimators=3, max_depth=2, sparse_output=False).fit(X)
    embedder.transform(X)
    onx = to_onnx(embedder, X[:1], target_opset=TARGET_OPSET)
    # NOTE(review): writes "model.onnx" into the working directory —
    # presumably kept for debugging; confirm whether it should be removed.
    with open("model.onnx", "wb") as f:
        f.write(onx.SerializeToString())
    self.check_model(onx, X)
    dump_data_and_model(X.astype(numpy.float32), embedder, onx,
                        basename="SklearnRandomTreesEmbedding")
def test_onehot(self):
    """OneHotEncoder stays at ai.onnx.ml opset 1 whatever is requested."""
    try:
        model = OneHotEncoder(categories='auto')
    except TypeError:
        # parameter categories added in 0.20
        return
    data = numpy.array([[1, 2, 3], [4, 3, 0], [0, 1, 4], [0, 5, 6]],
                       dtype=numpy.int64)
    model.fit(data)
    for main_op in range(9, TARGET_OPSET + 1):
        for ml_op in (1, 2):
            onx = to_onnx(model, data[:1],
                          target_opset={'': main_op, 'ai.onnx.ml': ml_op})
            dom = get_domain_opset(onx)
            self.assertEqual(len(dom), 2)
            self.assertIn(dom[''], (main_op, main_op - 1, main_op - 2))
            self.assertEqual(dom['ai.onnx.ml'], 1)
def test_model_knn_iris_classifier_multi_reg3_weight(self):
    """Distance-weighted KNN classifier with three binary targets."""
    iris = datasets.load_iris()
    X = iris.data.astype(numpy.float32)
    y = iris.target.astype(numpy.int64)
    y = numpy.vstack([y % 2, y % 2, (y + 1) % 2]).T
    knn = KNeighborsClassifier(algorithm='brute', weights='distance',
                               n_neighbors=7)
    knn.fit(X[:13], y[:13])
    onx = to_onnx(
        knn, X[:1],
        options={id(knn): {'optim': 'cdist', 'zipmap': False}},
        target_opset=TARGET_OPSET)
    dump_data_and_model(
        X.astype(numpy.float32)[:11], knn, onx,
        basename="SklearnKNeighborsClassifierMReg3-Out0")
def __init__(self, model, dataset, norm):
    """Benchmark fixture: fit *model* on *dataset*, convert it to ONNX
    and prepare the mlprodict and onnxruntime sessions.

    :param model: model name understood by ``get_model``; the suffixes
        '-cdist' and '-ZM' select converter options below
    :param dataset: key into ``common_datasets``; datas[0]/datas[2] are
        training features/labels, datas[1] the test features
    :param norm: if True, prepend a scaler (MinMax for 'NB' models,
        Standard otherwise)
    """
    BenchPerfTest.__init__(self)
    self.model_name = model
    self.dataset_name = dataset
    self.datas = common_datasets[dataset]
    skl_model = get_model(model)
    if norm:
        if 'NB' in model:
            # Naive Bayes variants get a MinMaxScaler instead of a
            # StandardScaler.
            self.model = make_pipeline(MinMaxScaler(), skl_model)
        else:
            self.model = make_pipeline(StandardScaler(), skl_model)
    else:
        self.model = skl_model
    self.model.fit(self.datas[0], self.datas[2])
    self.data_test = self.datas[1]
    # Converter options are derived from the model-name suffix.
    if '-cdist' in model:
        options = {id(skl_model): {'optim': 'cdist'}}
    elif "-ZM" in model:
        options = {id(skl_model): {'zipmap': False}}
    else:
        options = None
    try:
        self.onx = to_onnx(self.model,
                           self.datas[0].astype(numpy.float32),
                           options=options,
                           target_opset=__max_supported_opsets__)
        self.onx.ir_version = get_ir_version(__max_supported_opset__)
    except (RuntimeError, NameError) as e:
        raise RuntimeError("Unable to convert model {}.".format(
            self.model)) from e
    # Silence the converter's logger during benchmarks.
    logger = getLogger("skl2onnx")
    logger.propagate = False
    logger.disabled = True
    self.oinf = OnnxInference(self.onx, runtime='python')
    self.oinfc = OnnxInference(self.onx, runtime='python_compiled')
    try:
        self.ort = InferenceSession(self.onx.SerializeToString())
    except OrtFail as e:
        raise RuntimeError(
            "Unable to load model {}\n--SUMMARY--\n{}".format(
                self.model, self.oinfc)) from e
    self.output_name = self.oinf.output_names[-1]
    self.input_name = self.ort.get_inputs()[0].name
    self.model_info = analyze_model(self.model)
def test_onnxt_iris_adaboost_regressor_dt(self):
    """AdaBoost regressor on iris: the compiled runtime must agree with
    the python runtime and be faster on repeated single-row calls.
    """
    iris = load_iris()
    X, y = iris.data, iris.target
    X_train, X_test, y_train, __ = train_test_split(X, y, random_state=11)
    y_train = y_train.astype(numpy.float32)
    clr = AdaBoostRegressor(
        base_estimator=DecisionTreeRegressor(max_depth=3),
        n_estimators=3)
    clr.fit(X_train, y_train)
    X_test = X_test.astype(numpy.float32)
    # Keep a small deterministic sample: three rows from each end.
    X_test = numpy.vstack([X_test[:3], X_test[-3:]])
    model_def = to_onnx(clr, X_train.astype(numpy.float32))
    oinf1 = OnnxInference(model_def, runtime='python')
    res1 = oinf1.run({'X': X_test})['variable']
    oinf2 = OnnxInference(model_def, runtime='python_compiled')
    res2 = oinf2.run({'X': X_test})['variable']
    self.assertEqualArray(res1, res2)
    # Time both runtimes on a single row; compiled is expected to win.
    X_test = X_test[:1]
    t1 = timeit.repeat(stmt="oinf1.run({'X': X_test})",
                       setup='pass',
                       repeat=5,
                       number=1000,
                       globals={
                           'X_test': X_test,
                           'oinf1': oinf1
                       })
    me1 = sum(t1) / len(t1)
    t2 = timeit.repeat(stmt="oinf2.run({'X': X_test})",
                       setup='pass',
                       repeat=5,
                       number=1000,
                       globals={
                           'X_test': X_test,
                           'oinf2': oinf2
                       })
    me2 = sum(t2) / len(t2)
    self.assertGreater(me1, me2)
    # print(me1, me2)
    # print(oinf2._run_compiled_code)
    self.assertIn(' def compiled_run(dict_inputs, yield_ops=None):',
                  str(oinf2))
def test_pipe_way2_to_onnx(self):
    """Custom transformer + KMeans pipeline converts through to_onnx."""
    X = np.arange(20).reshape(10, 2)
    pipe = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
    pipe.fit(X)
    onx = to_onnx(pipe, X.astype(np.float32), target_opset=TARGET_OPSET)
    if (TARGET_OPSET == 11
            or os.environ.get('TEST_TARGET_OPSET', '') != ''):
        sonx = str(onx)
        if "version: 11" not in sonx or "ir_version: 6" not in sonx:
            raise AssertionError("Issue with TARGET_OPSET: {}\n{}".format(
                TARGET_OPSET, sonx))
    dump_data_and_model(X.astype(np.float32), pipe, onx,
                        basename="MixinPipeWay2ToOnnx")
def test_isolation_forest_rnd(self):
    """IsolationForest on random data, converted with explicit opsets."""
    isol = IsolationForest(n_estimators=2, random_state=0)
    rng = np.random.RandomState(0)
    data = rng.randn(100, 4).astype(np.float32)
    # Inject two obvious outliers.
    data[-1, 2:] = 99.
    data[-2, :2] = -99.
    fitted = isol.fit(data)
    onx = to_onnx(fitted, data,
                  target_opset={'': TARGET_OPSET,
                                'ai.onnx.ml': TARGET_OPSET_ML})
    self.assertIsNotNone(onx)
    dump_data_and_model(data, fitted, onx, basename="IsolationForestRnd")
def test_local_outlier_factor_rnd(self):
    """LocalOutlierFactor (novelty mode) on random data with outliers."""
    lof = LocalOutlierFactor(n_neighbors=2, novelty=True)
    rng = np.random.RandomState(0)
    data = rng.randn(100, 4).astype(np.float32)
    data[-1, 2:] = 99.
    data[-2, :2] = -99.
    fitted = lof.fit(data)
    onx = to_onnx(fitted, data, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    self.assertEqual([o.name for o in sess.get_outputs()],
                     ['label', 'scores'])
    got = sess.run(None, {'X': data})
    self.assertEqual(len(got), 2)
    assert_almost_equal(lof.predict(data), got[0].ravel())
    assert_almost_equal(lof.decision_function(data), got[1].ravel(),
                        decimal=5)
def test_gpr_rbf_fitted_true(self):
    """GaussianProcessRegressor with normalize_y=True, double precision."""
    gp = GaussianProcessRegressor(alpha=1e-5, n_restarts_optimizer=25,
                                  normalize_y=True)
    gp, X = fit_regression_model(gp)
    # return_cov=False, return_std=False
    onx = to_onnx(gp,
                  initial_types=[('X', DoubleTensorType([None, None]))],
                  target_opset=TARGET_OPSET)
    self.assertTrue(onx is not None)
    dump_data_and_model(X.astype(np.float64), gp, onx, verbose=False,
                        basename="SklearnGaussianProcessRBFTDouble")
def _test_score(self, model, X, tg, decimal=5, black_op=None):
    """Compare the ONNX model's score_samples / predict_proba with
    scikit-learn's on *X*.

    :param model: fitted estimator exposing score_samples and
        predict_proba
    :param X: input features (cast to float32)
    :param tg: target opset passed to to_onnx
    :param decimal: comparison precision
    :param black_op: operators forbidden during conversion
    :raises RuntimeError: when onnxruntime cannot load the converted
        model (chained to the original OrtFail)
    """
    X = X.astype(np.float32)
    exp = model.score_samples(X)
    expp = model.predict_proba(X)
    onx = to_onnx(
        model, X[:1], target_opset=tg,
        options={id(model): {'score_samples': True}},
        black_op=black_op)
    try:
        sess = InferenceSession(onx.SerializeToString())
    except OrtFail as e:
        # Chain the original failure so it is not hidden by the dump
        # of the ONNX graph (was: no `from e`).
        raise RuntimeError('Issue {}\n{}'.format(e, str(onx))) from e
    got = sess.run(None, {'X': X})
    self.assertEqual(len(got), 3)
    np.testing.assert_almost_equal(
        expp.ravel(), got[1].ravel(), decimal=decimal)
    np.testing.assert_almost_equal(
        exp.ravel(), got[2].ravel(), decimal=decimal)
def test_woe_transformer_conv(self):
    """Basic WOETransformer conversion, including unbounded intervals."""
    x = numpy.array(
        [[0.2, 0.7, 0.9],
         [0.51, 0.71, 0.91],
         [0.7, 1.5, 0.92]], dtype=numpy.float32)
    woe = WOETransformer(intervals=[
        [(0.4, 0.6, False, True)],
        [(0.9, numpy.inf), (-numpy.inf, 0.9)]])
    woe.fit(x)
    expected = woe.transform(x)
    onx = to_onnx(woe, x, target_opset=TARGET_OPSET)
    # NOTE(review): debug dump left in the working directory.
    with open("debug.onnx", "wb") as f:
        f.write(onx.SerializeToString())
    sess = InferenceSession(onx.SerializeToString())
    got = sess.run(None, {'X': x})[0]
    assert_almost_equal(expected, got)
def test_sub(self):
    """Register a converter for DecorrelateTransformer and verify it."""
    X = load_iris().data
    dec = DecorrelateTransformer()
    dec.fit(X)
    update_registered_converter(
        DecorrelateTransformer, "SklearnDecorrelateTransformer",
        decorrelate_transformer_shape_calculator,
        decorrelate_transformer_convertor)
    onx = to_onnx(dec, X.astype(np.float32), target_opset=TARGET_OPSET)
    self.assertIn('output: "variable"', str(onx))
    sess = InferenceSession(onx.SerializeToString())
    X32 = X.astype(np.float32)
    exp = dec.transform(X32)
    got = sess.run(None, {'X': X32})[0]
    assert_almost_equal(got, exp, decimal=4)
def test_label_encoder(self):
    """LabelEncoder on float input requires ai.onnx.ml >= 2.

    Conversion must fail when ai.onnx.ml == 1 is requested and succeed
    with the domain pinned to 2 otherwise.
    """
    model = LabelEncoder()
    data = numpy.array([1.2, 3.4, 5.4, 1.2], dtype=numpy.float32)
    model.fit(data)
    for i in range(9, TARGET_OPSET + 1):
        for j in (1, 2):
            tops = {'': i, 'ai.onnx.ml': j}
            try:
                model_onnx = to_onnx(model, data[:1], target_opset=tops)
            except RuntimeError:
                if j == 1:
                    # expected: ml opset 1 has no float LabelEncoder
                    continue
                # Bare raise keeps the original traceback
                # (was `raise e`, which re-anchors it here).
                raise
            if j == 1:
                raise AssertionError("It should fail for opset.ml == 1")
            dom = get_domain_opset(model_onnx)
            self.assertEqual(len(dom), 1)
            self.assertEqual(dom['ai.onnx.ml'], 2)
def test_model_knn_iris_regressor_multi_reg_radius(self):
    """Distance-weighted KNN regressor with three continuous targets."""
    iris = datasets.load_iris()
    X = iris.data.astype(numpy.float32)
    y = iris.target.astype(numpy.float32)
    y = numpy.vstack([y, 1 - y, y + 10]).T
    knn = KNeighborsRegressor(algorithm='brute', weights='distance')
    knn.fit(X[:13], y[:13])
    onx = to_onnx(knn, X[:1], options={id(knn): {'optim': 'cdist'}},
                  target_opset=TARGET_OPSET)
    # Check on both the original and a slightly shifted sample.
    for sample in (X.astype(numpy.float32)[:7],
                   (X + 0.1).astype(numpy.float32)[:7]):
        dump_data_and_model(
            sample, knn, onx,
            basename="SklearnRadiusNeighborsRegressorMReg")
def test_gpr_rbf_fitted_return_cov(self):
    """GaussianProcessRegressor converted with return_cov enabled."""
    gp = GaussianProcessRegressor(alpha=1., n_restarts_optimizer=15,
                                  normalize_y=True)
    gp.fit(Xtrain_, Ytrain_)
    # return_cov=True, return_std=False
    options = {GaussianProcessRegressor: {"return_cov": True}}
    onx = to_onnx(gp,
                  initial_types=[('X', FloatTensorType([None, None]))],
                  options=options)
    self.assertTrue(onx is not None)
    self.check_outputs(
        gp, onx, Xtest_.astype(np.float32),
        predict_attributes=options[GaussianProcessRegressor])
def test_sub_sub_estimator(self):
    """Converter registered with a custom parser for a nested estimator."""
    data = load_iris()
    X, y = data.data, data.target
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    model = MinMaxScalerTwo()
    model.fit(X_train, y_train)
    update_registered_converter(MinMaxScalerTwo, "SubSubDummy",
                                subsub_mmtwo_shape_calculator,
                                subsub_mmtwo_converter,
                                parser=subsub_mmtwo_parser)
    X32 = X_test[:5].astype(np.float32)
    onx = to_onnx(model, X32, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    got = sess.run(None, {'X': X32})
    assert_almost_equal(model.transform(X32), got[0], decimal=5)
def test_gpr_rbf_fitted_return_std_exp_sine_squared_true(self):
    """GaussianProcessRegressor with an ExpSineSquared kernel, converted
    with return_std=True in double precision.
    """
    # Synthetic noisy sinusoidal regression problem.
    state = np.random.RandomState(0)
    X = 15 * state.rand(100, 2)
    y = np.sin(X[:, 0] - X[:, 1]).ravel()
    y += 0.5 * (0.5 - state.rand(X.shape[0]))
    y /= 10
    X_train, X_test, y_train, _ = train_test_split(X, y)
    gp = GaussianProcessRegressor(
        kernel=ExpSineSquared(periodicity_bounds=(1e-10, 1e10)),
        alpha=1e-7,
        n_restarts_optimizer=25,
        normalize_y=True,
        random_state=1)
    try:
        gp.fit(X_train, y_train)
    except (AttributeError, TypeError):
        # unstable bug in scikit-learn, fixed in 0.24
        return
    # return_cov=False, return_std=False
    options = {GaussianProcessRegressor: {"return_std": True}}
    # NOTE(review): predict is called with return_std=True before
    # conversion — presumably required by the converter; confirm.
    gp.predict(X_train, return_std=True)
    model_onnx = to_onnx(gp,
                         initial_types=[('X',
                                         DoubleTensorType([None, None]))],
                         options=options,
                         target_opset=TARGET_OPSET)
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X_test.astype(np.float64),
        gp,
        model_onnx,
        verbose=False,
        basename="SklearnGaussianProcessExpSineSquaredStdT-Out0-Dec2",
        disable_optimisation=True)
    self.check_outputs(
        gp,
        model_onnx,
        X_test.astype(np.float64),
        predict_attributes=options[GaussianProcessRegressor],
        decimal=4,
        disable_optimisation=True)