def test_sub_output_double(self):
    data = load_iris()
    X = data.data
    dec = DecorrelateTransformer2()
    dec.fit(X)
    update_registered_converter(
        DecorrelateTransformer2, "SklearnDecorrelateTransformer2",
        decorrelate_transformer_shape_calculator,
        decorrelate_transformer_convertor2)
    onx = to_onnx(dec, X.astype(np.float64), target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    exp = dec.transform(X.astype(np.float64))
    got = sess.run(None, {'X': X.astype(np.float64)})[0]
    assert_almost_equal(got, exp, decimal=4)

def test_decisiontree_regressor_decision_leaf(self):
    model = DecisionTreeRegressor(max_depth=2)
    X, y = make_classification(10, n_features=4, random_state=42)
    X = X[:, :2]
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]
    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        options={id(model): {'decision_leaf': True}})
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': X.astype(np.float32)})
    pred = model.predict(X)
    assert_almost_equal(pred, res[0].ravel())
    dec = model.decision_path(X)
    exp = path_to_leaf(model.tree_, dec.todense())
    assert exp.tolist() == res[1].ravel().tolist()

def check_outputs(self, model, model_onnx, Xtest, predict_attributes,
                  decimal=5, skip_if_float32=False,
                  disable_optimisation=True):
    if "TransposeScaleMatMul" in str(model_onnx):
        raise RuntimeError("This node must not be added.")
    if predict_attributes is None:
        predict_attributes = {}
    exp = model.predict(Xtest, **predict_attributes)
    if disable_optimisation and GraphOptimizationLevel is not None:
        opts = SessionOptions()
        opts.graph_optimization_level = (
            GraphOptimizationLevel.ORT_DISABLE_ALL)
        sess = InferenceSession(model_onnx.SerializeToString(),
                                sess_options=opts)
    else:
        sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'X': Xtest})
    if isinstance(exp, tuple):
        if len(exp) != len(got):
            raise AssertionError("Mismatched number of outputs.")
        for i, (e, g) in enumerate(zip(exp, got)):
            if skip_if_float32 and g.dtype == np.float32:
                continue
            try:
                assert_almost_equal(self.remove_dim1(e),
                                    self.remove_dim1(g),
                                    decimal=decimal)
            except AssertionError as e:  # noqa
                raise AssertionError(
                    "Mismatch for output {} and attributes {}"
                    ".".format(i, predict_attributes)) from e
    else:
        if skip_if_float32 and Xtest.dtype == np.float32:
            return
        assert_almost_equal(np.squeeze(exp), np.squeeze(got),
                            decimal=decimal)

class OnnxWholeSession:
    """
    Runs the prediction for a single :epkg:`ONNX` model; it lets the
    runtime handle the graph logic as well.
    """

    def __init__(self, onnx_data, runtime):
        """
        @param onnx_data    :epkg:`ONNX` model or data
        @param runtime      runtime to be used, mostly :epkg:`onnxruntime`
        """
        if runtime != 'onnxruntime1':
            raise NotImplementedError(
                "runtime '{}' is not implemented.".format(runtime))
        if hasattr(onnx_data, 'SerializeToString'):
            onnx_data = onnx_data.SerializeToString()
        self.runtime = runtime
        sess_options = SessionOptions()
        self.run_options = RunOptions()
        try:
            sess_options.session_log_severity_level = 3
            # sess_options.sessions_log_verbosity_level = 0
        except AttributeError:
            # onnxruntime not recent enough.
            pass
        try:
            self.run_options.run_log_severity_level = 3
            # self.run_options.run_log_verbosity_level = 0
        except AttributeError:
            # onnxruntime not recent enough.
            pass
        self.sess = InferenceSession(onnx_data, sess_options=sess_options)

    def run(self, inputs):
        """
        Computes the predictions.

        @param inputs   dictionary *{variable, value}*
        @return         list of outputs
        """
        return self.sess.run(None, inputs, self.run_options)

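# A minimal usage sketch for OnnxWholeSession. The helper below is not part
# of the original code: it builds a tiny Identity graph with skl2onnx's
# algebra API purely for illustration and assumes TARGET_OPSET is defined
# as in the surrounding snippets.
def _example_onnx_whole_session():
    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxIdentity
    from skl2onnx.common.data_types import FloatTensorType

    X = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
    node = OnnxIdentity('X', output_names=['Y'], op_version=TARGET_OPSET)
    model_def = node.to_onnx({'X': X},
                             outputs=[('Y', FloatTensorType())],
                             target_opset=TARGET_OPSET)
    # The wrapper serializes the ModelProto itself and forwards the feed
    # dictionary to InferenceSession.run, returning the list of outputs.
    whole = OnnxWholeSession(model_def, 'onnxruntime1')
    return whole.run({'X': X})
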
def test_onnx_init_sparse_coo(self):
    row = np.array([0, 0, 1, 3, 1], dtype=np.float32)
    col = np.array([0, 2, 1, 3, 1], dtype=np.float32)
    data = np.array([1, 1, 1, 1, 1], dtype=np.float32)
    X = coo_matrix((data, (row, col)), shape=(4, 4))
    node = OnnxAdd('X', X, output_names=['Y'],
                   op_version=onnx.defs.onnx_opset_version())
    model_def = node.to_onnx({'X': X},
                             outputs=[('Y', FloatTensorType())])
    try:
        sess = InferenceSession(model_def.SerializeToString())
    except (RuntimeError, OrtInvalidArgument):
        # Sparse tensor is not supported for constant.
        return
    res = sess.run(None, {'X': X})[0]
    assert_almost_equal(X + X, res)

def test_randomforestclassifier_decision_path(self):
    model = RandomForestClassifier(max_depth=2, n_estimators=2)
    X, y = make_classification(3, n_features=4, random_state=42)
    X = X[:, :2]
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]
    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        options={id(model): {'decision_path': True, 'zipmap': False}},
        target_opset=TARGET_OPSET)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': X.astype(numpy.float32)})
    pred = model.predict(X)
    assert_almost_equal(pred, res[0].ravel())
    prob = model.predict_proba(X)
    assert_almost_equal(prob, res[1])
    dec = model.decision_path(X)
    exp = binary_array_to_string(dec[0].todense())
    got = numpy.array([''.join(row) for row in res[2]])
    assert exp == got.ravel().tolist()

def test_multi_output_classifier_fallback(self):
    X, y = make_multilabel_classification(n_classes=3, random_state=0)
    X = X.astype(numpy.float32)
    clf = MultiOutputClassifier(LogisticRegression()).fit(X, y)
    del clf.classes_
    onx = to_onnx(clf, X[:1], target_opset=TARGET_OPSET,
                  options={'zipmap': False, 'output_class_labels': True})
    sess = InferenceSession(onx.SerializeToString())
    res = sess.run(None, {'X': X})
    exp_lab = clf.predict(X)
    exp_prb = clf.predict_proba(X)
    assert_almost_equal(exp_lab, res[0])
    self.assertEqual(len(exp_prb), len(res[1]))
    for e, g in zip(exp_prb, res[1]):
        assert_almost_equal(e, g, decimal=5)

def verify(self, onnx_model_path):
    """
    :param onnx_model_path: ONNX model path.
    """
    print("Checking ONNX model loading from: {}".format(onnx_model_path))
    try:
        onnx_options = SessionOptions()
        sess = InferenceSession(onnx_model_path, onnx_options,
                                providers=["CUDAExecutionProvider"])
        print("Model correctly loaded")
        if self.sample_inputs is not None:
            inputs_onnx = {k: v.numpy()
                           for k, v in self.sample_inputs.items()}
            print(f"Model inputs name: "
                  f"{[tmp_obj.name for tmp_obj in sess.get_inputs()]}")
            print(f"Model inputs shape: "
                  f"{[tmp_obj.shape for tmp_obj in sess.get_inputs()]}")
            print(f"Model outputs name: "
                  f"{[tmp_obj.name for tmp_obj in sess.get_outputs()]}")
            print(f"Model outputs shape: "
                  f"{[tmp_obj.shape for tmp_obj in sess.get_outputs()]}")
            # Run the model (None = get all the outputs).
            outputs_onnx = sess.run(None, inputs_onnx)
            print("Model inference ran correctly")
    except RuntimeException as err:
        print("Error while loading the model: {}".format(err))

def test_pipeline_make_column_selector(self):
    X = pandas.DataFrame({
        'city': ['London', 'London', 'Paris', 'Sallisaw'],
        'rating': [5, 3, 4, 5]})
    X['rating'] = X['rating'].astype(numpy.float32)
    ct = make_column_transformer(
        (StandardScaler(),
         make_column_selector(dtype_include=numpy.number)),
        (OneHotEncoder(),
         make_column_selector(dtype_include=object)))
    expected = ct.fit_transform(X)
    onx = to_onnx(ct, X, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    names = [i.name for i in sess.get_inputs()]
    got = sess.run(None, {
        names[0]: X[names[0]].values.reshape((-1, 1)),
        names[1]: X[names[1]].values.reshape((-1, 1))})
    assert_almost_equal(expected, got[0])

def test_local_outlier_factor_p3(self):
    lof = LocalOutlierFactor(n_neighbors=2, novelty=True, p=3)
    data = np.array([[-1.1, -1.2], [0.3, 0.2],
                     [0.5, 0.4], [100., 99.]], dtype=np.float32)
    model = lof.fit(data)
    model_onnx = to_onnx(model, data, target_opset=TARGET_OPSET)
    self.assertNotIn('CDist', str(model_onnx))
    data = data.copy()
    data[:, 0] += 0.1
    sess = InferenceSession(model_onnx.SerializeToString())
    names = [o.name for o in sess.get_outputs()]
    self.assertEqual(names, ['label', 'scores'])
    got = sess.run(None, {'X': data})
    self.assertEqual(len(got), 2)
    expected_label = lof.predict(data)
    expected_decif = lof.decision_function(data)
    assert_almost_equal(expected_label, got[0].ravel())
    assert_almost_equal(expected_decif, got[1].ravel(), decimal=5)

def test_container_init(self):
    onx = OnnxReshapeApi13(
        OnnxReshapeApi13('X', np.array([1, -1], dtype=np.int64),
                         op_version=TARGET_OPSET),
        np.array([1, -1], dtype=np.int64),
        output_names=['Y'], op_version=TARGET_OPSET)
    X = np.array([[1, 2], [3, 4]], dtype=np.float32)
    model_def = onx.to_onnx({'X': X},
                            outputs=[('Y', FloatTensorType([None, 2]))],
                            target_opset=TARGET_OPSET)
    sess = InferenceSession(model_def.SerializeToString())
    got = sess.run(None, {'X': X})[0]
    assert_almost_equal(X.reshape((1, -1)), got)
    inits = [row for row in str(model_def).split('\n')
             if row.startswith(" initializer {")]
    self.assertEqual(len(inits), 1)

def test_isolation_forest_score_samples(self):
    isol = IsolationForest(n_estimators=3, random_state=0)
    data = np.array([[-1.1, -1.2], [0.3, 0.2],
                     [0.5, 0.4], [100., 99.]], dtype=np.float32)
    model = isol.fit(data)
    model_onnx = to_onnx(model, data, target_opset=TARGET_OPSET,
                         options={'score_samples': True})
    sess = InferenceSession(model_onnx.SerializeToString())
    names = [o.name for o in sess.get_outputs()]
    self.assertEqual(names, ['label', 'scores', 'score_samples'])
    got = sess.run(None, {'X': data})
    self.assertEqual(len(got), 3)
    expected_label = isol.predict(data)
    expected_decif = isol.decision_function(data)
    expected_score = isol.score_samples(data)
    assert_almost_equal(expected_label, got[0].ravel())
    assert_almost_equal(expected_decif, got[1].ravel())
    assert_almost_equal(expected_score, got[2].ravel())

def test_rf_regressor_decision_leaf(self):
    model = RandomForestRegressor(n_estimators=2, max_depth=3)
    X, y = make_regression(10, n_features=4, random_state=42)
    X = X[:, :2]
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]
    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        options={id(model): {'decision_leaf': True}},
        target_opset=TARGET_OPSET)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': X.astype(numpy.float32)})
    pred = model.predict(X)
    assert_almost_equal(pred, res[0].ravel(), decimal=4)
    dec = model.decision_path(X)
    exp = path_to_leaf(model.estimators_, dec[0].todense(), dec[1])
    assert exp.tolist() == res[1].tolist()

def test_onnx_subgraphs1(self):
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(
        numpy.float32).reshape((3, 2))
    cop = OnnxAdd(OnnxIdentity('input', op_version=TARGET_OPSET),
                  'input', op_version=TARGET_OPSET)
    cdist = onnx_squareform_pdist(cop, dtype=numpy.float32,
                                  op_version=TARGET_OPSET)
    cop2 = OnnxIdentity(cdist, output_names=['cdist'],
                        op_version=TARGET_OPSET)
    model_def = cop2.to_onnx(
        {'input': FloatTensorType([None, None])},
        outputs=[('cdist', FloatTensorType([None, None]))],
        target_opset=TARGET_OPSET)
    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'input': x})
    self.assertEqual(len(res), 1)

def test_extratreesregressor_decision_path(self):
    model = ExtraTreesRegressor(max_depth=2, n_estimators=2)
    X, y = make_classification(10, n_features=4, random_state=42)
    X = X[:, :2]
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]
    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        options={id(model): {'decision_path': True}})
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': X.astype(numpy.float32)})
    pred = model.predict(X)
    assert_almost_equal(pred, res[0].ravel())
    dec = model.decision_path(X)
    exp = binary_array_to_string(dec[0].todense())
    got = numpy.array([''.join(row) for row in res[1]])
    assert exp == got.ravel().tolist()

def test_onnx_if_algebra_direct(self):
    opv = TARGET_OPSET
    x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
    x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

    node = OnnxAdd('x1', 'x2', output_names=['absxythen'],
                   op_version=opv)
    then_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxythen', FloatTensorType())])
    node = OnnxSub('x1', 'x2', output_names=['absxyelse'],
                   op_version=opv)
    else_body = node.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('absxyelse', FloatTensorType())])
    del else_body.graph.input[:]
    del then_body.graph.input[:]

    cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                       OnnxReduceSum('x2', op_version=opv),
                       op_version=opv)
    ifnode = OnnxIf(cond, then_branch=then_body.graph,
                    else_branch=else_body.graph,
                    op_version=opv, output_names=['y'])
    model_def = ifnode.to_onnx(
        {'x1': x1, 'x2': x2}, target_opset=opv,
        outputs=[('y', FloatTensorType())])

    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'x1': x1, 'x2': x2})
    assert_almost_equal(x1 + x2, res[0])

def test_kernel_cosine_double(self):
    ker = PairwiseKernel(metric='cosine')
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float64,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', DoubleTensorType([None, None]))],
        target_opset=TARGET_OPSET)
    x = np.random.randn(4, 3)
    x[0, 0] = x[1, 1] = x[2, 2] = 10.
    x[3, 2] = 5.
    try:
        sess = InferenceSession(model_onnx.SerializeToString())
    except NotImplementedError:
        # Failed to find kernel for FusedMatMul(1).
        return
    res = sess.run(None, {'X': x.astype(np.float64)})[0]
    m1 = res
    m2 = ker(x)
    assert_almost_equal(m1, m2, decimal=5)

def test_model_tfidf_vectorizer_nan(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
    vect.fit(corpus.ravel())
    options = copy.deepcopy(self.get_options())
    options[TfidfVectorizer]['nan'] = True
    model_onnx = convert_sklearn(vect, "TfidfVectorizer",
                                 [("input", StringTensorType())],
                                 options=options,
                                 target_opset=TARGET_OPSET)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus.ravel()})[0]
    assert res.shape == (4, 9)
    assert numpy.isnan(res[0, 0])

def test_custom_ordinal_woe(self):
    update_registered_converter(
        OrdinalWOETransformer, "OrdinalWOETransformer",
        ordwoe_encoder_shape_calculator,
        ordwoe_encoder_converter,
        parser=ordwoe_encoder_parser)
    data = load_iris()
    X, y = data.data, data.target
    X = X.astype(np.int64)[:, :2]
    y = (y == 2).astype(np.int64)
    ordwoe = OrdinalWOETransformer()
    ordwoe.fit(X, y)
    expected = ordwoe.transform(X)
    onx = to_onnx(ordwoe, X, target_opset=TARGET_OPSET)
    sess = InferenceSession(onx.SerializeToString())
    got = sess.run(None, {'X': X})[0]
    assert_almost_equal(expected, got)

def common_check_alpha(self, name, fct):
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32)
    x1 = numpy.random.randn(10, 1).astype(numpy.float32)
    x2 = numpy.random.randn(10, 1).astype(numpy.float32)
    alpha = numpy.random.randn(1).astype(numpy.float32)
    fin = fct(x1, x2, alpha)
    oinf = OnnxInference(onx)
    got = oinf.run({'X1': x1, 'X2': x2, 'alpha': alpha})
    self.assertEqualArray(fin, got['Y'], decimal=5)
    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), so,
                            providers=providers)
    got = sess.run(None, {'X1': x1, 'X2': x2, 'alpha': alpha})
    self.assertEqualArray(fin, got[0], decimal=5)

def test_onnx_test_knn_transform(self):
    iris = datasets.load_iris()
    X, _ = iris.data, iris.target
    X_train, X_test = train_test_split(X, random_state=11)
    clr = NearestNeighbors(n_neighbors=3, radius=None)
    clr.fit(X_train)

    for to in (9, 10, 11):
        if to > onnx_opset_version():
            break
        model_def = to_onnx(clr, X_train.astype(numpy.float32),
                            target_opset=to)
        oinf = InferenceSession(model_def.SerializeToString())
        X_test = X_test[:3]
        y = oinf.run(None, {'X': X_test.astype(numpy.float32)})
        dist, ind = clr.kneighbors(X_test)
        assert_almost_equal(dist, DataFrame(y[1]).values, decimal=5)
        assert_almost_equal(ind, y[0])

def test_sub_estimator(self):
    data = load_iris()
    X, y = data.data, data.target
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    model = ValidatorClassifier()
    model.fit(X_train, y_train)

    update_registered_converter(
        ValidatorClassifier, 'CustomValidatorClassifier',
        validator_classifier_shape_calculator,
        validator_classifier_converter,
        parser=validator_classifier_parser)

    X32 = X_test[:5].astype(np.float32)
    model_onnx = to_onnx(model, X32, target_opset=TARGET_OPSET)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': X32})
    assert_almost_equal(model.predict(X32), res[0])
    assert_almost_equal(model.predict_proba(X32), res[1], decimal=4)
    assert_almost_equal(model.validate(X32), res[2])

def test_gradient_boosting_regressor_learning_rate(self):
    X, y = make_classification(n_features=100, n_samples=1000,
                               n_classes=2, n_informative=8)
    X_train, X_test, y_train, _ = train_test_split(
        X, y, test_size=0.5, random_state=42)
    model = GradientBoostingClassifier().fit(X_train, y_train)
    onnx_model = convert_sklearn(
        model, 'lr2', [('input', FloatTensorType(X_test.shape))])
    sess = InferenceSession(onnx_model.SerializeToString())
    res = sess.run(None, input_feed={'input': X_test.astype(np.float32)})
    r1 = np.mean(
        np.isclose(model.predict_proba(X_test),
                   list(map(lambda x: list(map(lambda y: x[y], x)),
                            res[1])),
                   atol=1e-4))
    r2 = np.mean(res[0] == model.predict(X_test))
    assert r1 == r2

def test_model_bayesian_ridge_return_std_normalize_double(self):
    model, X = fit_regression_model(
        linear_model.BayesianRidge(normalize=True),
        n_features=2, n_samples=50)
    model_onnx = convert_sklearn(
        model, "bayesian ridge",
        [("input", DoubleTensorType([None, X.shape[1]]))],
        options={linear_model.BayesianRidge: {'return_std': True}},
        target_opset=TARGET_OPSET)
    self.assertIsNotNone(model_onnx)
    X = X.astype(numpy.float64)
    sess = InferenceSession(model_onnx.SerializeToString())
    outputs = sess.run(None, {'input': X})
    pred, std = model.predict(X, return_std=True)
    assert_almost_equal(pred, outputs[0].ravel())
    assert_almost_equal(std, outputs[1].ravel(), decimal=4)

def common_test_gpc(self, dtype=np.float32, n_classes=2):
    gp = GaussianProcessClassifier()
    gp, X = self.fit_classification_model(gp, n_classes=n_classes)

    # return_cov=False, return_std=False
    if dtype == np.float32:
        cls = FloatTensorType
    else:
        cls = DoubleTensorType
    model_onnx = to_onnx(
        gp, initial_types=[('X', cls([None, None]))],
        target_opset=TARGET_OPSET,
        options={GaussianProcessClassifier: {
            'zipmap': False, 'optim': 'cdist'}})
    self.assertTrue(model_onnx is not None)

    try:
        sess = InferenceSession(model_onnx.SerializeToString())
    except OrtFail:
        if not hasattr(self, 'path'):
            return
        suffix = 'Double' if dtype == np.float64 else 'Float'
        # Operator Solve is missing.
        model_onnx = change_onnx_domain(
            model_onnx, {'Solve': ('Solve%s' % suffix, 'ai.onnx.contrib')})
        so = SessionOptions()
        so.register_custom_ops_library(self.path)
        sess = InferenceSession(model_onnx.SerializeToString(), so)

        res = sess.run(None, {'X': X.astype(dtype)})
        assert_almost_equal(res[0].ravel(), gp.predict(X).ravel())
        assert_almost_equal(res[1], gp.predict_proba(X), decimal=3)
        return

    dt = 32 if dtype == np.float32 else 64
    dump_data_and_model(
        X.astype(dtype), gp, model_onnx, verbose=False,
        basename="SklearnGaussianProcessRBFT%d%d" % (n_classes, dt))

def test_sub_div(self):

    class CustomOpTransformer(BaseEstimator, TransformerMixin):

        def __init__(self):
            pass

        def fit(self, X, y=None):
            self.W = np.mean(X, axis=0)
            self.S = np.std(X, axis=0)
            return self

        def transform(self, X):
            return (X - self.W) / self.S

    mat = np.array([[0., 1.], [0., 1.], [2., 2.]])
    tr = CustomOpTransformer()
    tr.fit(mat)
    z = tr.transform(mat)

    def conv(scope, operator, container):
        W = operator.raw_operator.W
        S = operator.raw_operator.S
        X = operator.inputs[0]
        out = operator.outputs
        op = OnnxDiv(OnnxSub(X, W), S, output_names=out)
        op.add_to(scope, container)

    def shape(operator):
        N = operator.inputs[0].type.shape[0]
        W = operator.raw_operator.W
        operator.outputs[0].type.shape = [N, W.shape[0]]

    model_onnx = convert_sklearn(
        tr, 'a-sub-div', [('input', FloatTensorType([1, 2]))],
        custom_shape_calculators={CustomOpTransformer: shape},
        custom_conversion_functions={CustomOpTransformer: conv})
    sess = InferenceSession(model_onnx.SerializeToString())
    z2 = sess.run(None, {'input': mat.astype(np.float32)})[0]
    assert_almost_equal(z, z2)

def test_pipeline_voting_tfidf_svc(self):
    pipe1 = Pipeline([
        ('tfidf1', TfidfVectorizer()),
        ('svc', SVC(probability=True, kernel='linear'))])
    pipe2 = Pipeline([
        ('tfidf2', TfidfVectorizer(norm='l2', use_idf=False)),
        ('sgd', SGDClassifier(alpha=0.0001, penalty='l2',
                              loss='modified_huber'))])
    pipe3 = Pipeline([
        ('tfidf3', TfidfVectorizer()),
        ('mnb', MultinomialNB())])
    voting = VotingClassifier(
        [('p1', pipe1), ('p2', pipe2), ('p3', pipe3)],
        voting='soft', flatten_transform=False)
    data = numpy.array([
        "first sentance", "second sentence", "many sentances",
        "dummy sentance", "no sentance at all"])
    y = numpy.array([0, 0, 1, 0, 1])
    voting.fit(data, y)
    expected_label = voting.predict(data)
    expected_proba = voting.predict_proba(data)
    df = pandas.DataFrame(data)
    df.columns = ['text']

    model_onnx = convert_sklearn(
        voting, initial_types=[('text', StringTensorType([None, 1]))],
        target_opset=TARGET_OPSET,
        options={id(voting): {'zipmap': False}})
    # with open("debug.onnx", "wb") as f:
    #     f.write(model_onnx.SerializeToString())

    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'text': data.reshape((-1, 1))})
    assert_almost_equal(expected_proba, got[1], decimal=5)
    assert_almost_equal(expected_label, got[0])

class Predict:

    def __init__(self, model_path, bert_path, max_seq_length=128):
        # max_seq_length was referenced but never set in the original code;
        # the default value here is an assumption.
        self.processor = TextProcessor()
        self.sess_options = SessionOptions()
        # self.sess_options.intra_op_num_threads = 1
        self.sess_options.intra_op_num_threads = psutil.cpu_count(
            logical=True)
        # self.sess_options.graph_optimization_level = \
        #     GraphOptimizationLevel.ORT_ENABLE_ALL
        self.session = InferenceSession(model_path, self.sess_options)
        self.use_gpu = torch.cuda.is_available()
        self.device = torch.device("cuda:7" if self.use_gpu else "cpu")
        self.bert_path = bert_path
        self.max_seq_length = max_seq_length
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)

    def to_numpy(self, tensor):
        return (tensor.detach().cpu().numpy()
                if tensor.requires_grad else tensor.cpu().numpy())

    def run(self, record):
        text_a, text_b = record[0], record[1]
        example = self.processor._create_single_example(text_a, text_b)
        feature = convert_single_example(example, self.max_seq_length,
                                         self.tokenizer)
        input_ids = torch.tensor(feature.input_ids,
                                 dtype=torch.long).unsqueeze(0)
        input_mask = torch.tensor(feature.input_mask,
                                  dtype=torch.long).unsqueeze(0)
        segment_ids = torch.tensor(feature.segment_ids,
                                   dtype=torch.long).unsqueeze(0)
        # onnxruntime expects numpy arrays, not torch tensors.
        ort_inputs = {
            'input_ids': self.to_numpy(input_ids),
            'input_mask': self.to_numpy(input_mask),
            'segment_ids': self.to_numpy(segment_ids)
        }
        ort_outputs = self.session.run(None, ort_inputs)
        print(ort_outputs)
        print(type(ort_outputs))

    def infer(self, data_path):
        pass

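# A hedged usage sketch for the Predict class above; the model path, the
# tokenizer name and the sentence pair are placeholders, not values taken
# from the original code.
def _example_predict():
    predictor = Predict(model_path="model.onnx",        # hypothetical path
                        bert_path="bert-base-chinese")  # hypothetical name
    # run() takes a (text_a, text_b) pair and prints the raw ONNX outputs.
    predictor.run(("a first sentence", "a second sentence"))
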
def test_onnx_example_algebra(self):
    initial = np.array([0, 0]).astype(np.float32).reshape((2, ))
    x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))
    opv = _TARGET_OPSET_

    add_node = OnnxAdd('sum_in', 'next', output_names=['sum_out'],
                       op_version=opv)
    id_node = OnnxIdentity(add_node, output_names=['scan_out'],
                           op_version=opv)
    scan_body = id_node.to_onnx(
        {'sum_in': initial, 'next': initial},
        outputs=[('sum_out', FloatTensorType()),
                 ('scan_out', FloatTensorType())])

    node = OnnxScan('initial', 'x', output_names=['y', 'z'],
                    num_scan_inputs=1, body=scan_body.graph,
                    op_version=opv)
    model_def = node.to_onnx(
        {'initial': initial, 'x': x},
        outputs=[('y', FloatTensorType()), ('z', FloatTensorType())])

    sess = InferenceSession(model_def.SerializeToString())
    res = sess.run(None, {'initial': initial, 'x': x})

    y = np.array([9, 12]).astype(np.float32).reshape((2, ))
    z = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))
    assert_almost_equal(y, res[0])
    assert_almost_equal(z, res[1])

def test_model_classifier_multi_class_string_zipmap_columns(self):
    model, X = fit_classification_model(
        linear_model.LogisticRegression(), 3,
        n_features=4, label_string=False)
    model_onnx = convert_sklearn(
        model, "multi-class ridge classifier",
        [("input", FloatTensorType([None, X.shape[1]]))],
        options={linear_model.LogisticRegression: {'zipmap': 'columns'}},
        target_opset=TARGET_OPSET)
    self.assertIsNotNone(model_onnx)
    sess = InferenceSession(model_onnx.SerializeToString())
    names = [_.name for _ in sess.get_outputs()]
    self.assertEqual(['output_label', 'i0', 'i1', 'i2'], names)
    xt = X[:10].astype(np.float32)
    got = sess.run(None, {'input': xt})
    prob = model.predict_proba(xt)
    for i in range(prob.shape[1]):
        assert_almost_equal(prob[:, i], got[i + 1])