def test_onnxt_iris_random_forest_regressor(self):
    """Trains a small random forest on iris, converts it to ONNX and
    checks that the python runtime picked the optimized tree layout
    (``same_mode_`` set, ``consecutive_leaf_data_`` unset when present)."""
    data = load_iris()
    X, y = data.data, data.target
    X_train, X_test, y_train, __ = train_test_split(
        X, y, random_state=11, test_size=0.8)
    model = RandomForestRegressor(n_estimators=10, random_state=42)
    model.fit(X_train, y_train)
    X_test = X_test.astype(numpy.float32)
    # enlarge the batch so the runtime exercises its batched code path
    X_test2 = make_n_rows(X_test, 10000)
    model_def = to_onnx(model, X_train.astype(numpy.float32))
    oinf = OnnxInference(model_def, runtime='python')
    ti = timeit.repeat(
        "oinf.run({'X': X_test2})",
        number=100, repeat=10,
        globals={'oinf': oinf, 'X_test2': X_test2})
    self.assertEqual(len(ti), 10)
    # first node of the ONNX graph holds the tree-ensemble runtime
    op = oinf.sequence_[0]
    self.assertTrue(op.ops_.rt_.same_mode_)
    if hasattr(op.ops_.rt_, 'consecutive_leaf_data_'):
        self.assertFalse(op.ops_.rt_.consecutive_leaf_data_)
def test_split_xy(self):
    """Checks ``make_n_rows`` both without and with the optional *y*:
    the returned arrays always have the requested number of rows and
    keep the original number of columns."""
    X = numpy.arange(15).reshape(3, 5).astype(numpy.float32)
    y = numpy.arange(3).astype(numpy.float32)
    for n in [1, 2, 3, 4, 10]:
        # without y: a single resized array comes back
        xs = make_n_rows(X, n)
        self.assertIsInstance(xs, numpy.ndarray)
        self.assertEqual(xs.shape[0], n)
        self.assertEqual(xs.shape[1], X.shape[1])
        # with y: a (X, y) tuple, both parts resized consistently
        res = make_n_rows(X, n, y)
        self.assertIsInstance(res, tuple)
        xs, ys = res
        self.assertIsInstance(xs, numpy.ndarray)
        self.assertIsInstance(ys, numpy.ndarray)
        self.assertEqual(xs.shape[0], n)
        self.assertEqual(xs.shape[1], X.shape[1])
        self.assertEqual(ys.shape[0], n)
def setup(self, runtime, N, nf, opset, dtype, optim):
    "asv API"
    # silence the converter's logger so it does not pollute the timings
    logger = getLogger('skl2onnx')
    logger.disabled = True
    register_converters()
    register_rewritten_operators()
    # reload the model pickled by the companion setup_cache step
    with open(self._name(nf, opset, dtype), "rb") as f:
        stored = pickle.load(f)
    self.stored = stored
    self.model = stored['model']
    # resize the stored dataset to the benchmarked number of rows N
    self.X, self.y = make_n_rows(stored['X'], N, stored['y'])
    onx, rt_, rt_fct_, rt_fct_track_ = self._create_onnx_and_runtime(
        runtime, self.model, self.X, opset, dtype, optim)
    self.onx = onx
    # expose the runtime objects under per-runtime attribute names
    for prefix, value in (("rt_", rt_),
                          ("rt_fct_", rt_fct_),
                          ("rt_fct_track_", rt_fct_track_)):
        setattr(self, prefix + runtime, value)
    set_config(assume_finite=True)