Example #1
0
def convert_model(model, name, input_types):
    """
    Runs the appropriate conversion method.

    :param model: model to convert (LightGBM, XGBoost, *scikit-learn*,
        or *coremltools* object)
    :param name: name given to the resulting ONNX graph
    :param input_types: initial input types for the ONNX graph
    :return: tuple (*onnx* model, prefix string identifying the converter)
    :raises RuntimeError: if no converter can handle the model
    """
    from sklearn.base import BaseEstimator
    # Remember the incoming type now: `model` is rebound to the conversion
    # result below, so `type(model)` would be wrong in the error messages.
    model_type = type(model)
    if model.__class__.__name__.startswith("LGBM"):
        from onnxmltools.convert import convert_lightgbm
        model, prefix = convert_lightgbm(model, name, input_types), "LightGbm"
    elif model.__class__.__name__.startswith("XGB"):
        from onnxmltools.convert import convert_xgboost
        model, prefix = convert_xgboost(model, name, input_types), "XGB"
    elif model.__class__.__name__ == 'Booster':
        import lightgbm
        if isinstance(model, lightgbm.Booster):
            from onnxmltools.convert import convert_lightgbm
            model, prefix = convert_lightgbm(model, name,
                                             input_types), "LightGbm"
        else:
            raise RuntimeError(
                "Unable to convert model of type '{0}'.".format(model_type))
    elif isinstance(model, BaseEstimator):
        from onnxmltools.convert import convert_sklearn
        model, prefix = convert_sklearn(model, name, input_types), "Sklearn"
    else:
        from onnxmltools.convert import convert_coreml
        model, prefix = convert_coreml(model, name, input_types), "Cml"
    if model is None:
        # Report the ORIGINAL type: `model` is None at this point, so
        # `type(model)` would misleadingly print 'NoneType'.
        raise RuntimeError(
            "Unable to convert model of type '{0}'.".format(model_type))
    return model, prefix
Example #2
0
    def _test_lgbm(self, X, model, extra_config=None):
        """
        Convert *model* to ONNX-ML and then to ONNX, run both on *X*, and
        return ``(onnx_ml_pred, onnx_pred, output_names)`` for comparison.

        ``onnx_ml_pred``/``onnx_pred`` hold ``[probabilities, labels]`` for
        classifiers and a single prediction array for regressors.
        """
        # `extra_config={}` as a default would be a shared mutable object
        # across calls; use a None sentinel instead.
        if extra_config is None:
            extra_config = {}

        # Create ONNX-ML model
        onnx_ml_model = convert_lightgbm(
            model,
            initial_types=[("input", FloatTensorType([X.shape[0],
                                                      X.shape[1]]))],
            target_opset=9)

        # Create ONNX model
        onnx_model = convert(onnx_ml_model, "onnx", extra_config=extra_config)

        # Get the predictions for the ONNX-ML model
        session = ort.InferenceSession(onnx_ml_model.SerializeToString())
        output_names = [
            session.get_outputs()[i].name
            for i in range(len(session.get_outputs()))
        ]
        onnx_ml_pred = [[] for i in range(len(output_names))]
        inputs = {session.get_inputs()[0].name: X}
        pred = session.run(output_names, inputs)
        for i in range(len(output_names)):
            # Slot labels into index 1, probabilities/values into index 0.
            if "label" in output_names[i]:
                onnx_ml_pred[1] = pred[i]
            else:
                onnx_ml_pred[0] = pred[i]

        # Get the predictions for the ONNX model
        onnx_pred = [[] for i in range(len(output_names))]
        if len(output_names) == 1:  # regression
            onnx_pred = onnx_model.predict(X)
        else:  # classification
            onnx_pred[0] = onnx_model.predict_proba(X)
            onnx_pred[1] = onnx_model.predict(X)

        return onnx_ml_pred, onnx_pred, output_names
 def test_lightgbm_classifier_nozipmap2(self):
     """LGBMClassifier converted with ``zipmap=False`` must load in
     onnxruntime and reproduce predict/predict_proba."""
     X = [[0, 1], [1, 1], [2, 0], [1, 2], [1, 5], [6, 2]]
     X = numpy.array(X, dtype=numpy.float32)
     y = [0, 1, 0, 1, 1, 0]
     model = LGBMClassifier(n_estimators=3,
                            min_child_samples=1,
                            max_depth=2)
     model.fit(X, y)
     onx = convert_lightgbm(model,
                            'dummy',
                            initial_types=[
                                ('X', FloatTensorType([None, X.shape[1]]))
                            ],
                            zipmap=False)
     assert "zipmap" not in str(onx).lower()
     onxs = onx.SerializeToString()
     try:
         sess = onnxruntime.InferenceSession(onxs)
     except Exception as e:
         # `onx` is a ModelProto; the previous `onx[0]` was not
         # subscriptable and would raise TypeError, masking the real error.
         raise AssertionError(
             "Model cannot be loaded by onnxruntime due to %r\n%s." %
             (e, onx))
     exp = model.predict(X), model.predict_proba(X)
     got = sess.run(None, {'X': X})
     assert_almost_equal(exp[0], got[0])
     assert_almost_equal(exp[1], got[1])
Example #4
0
    def test_lightgbm_onnx_pytorch(self):
        """ONNX-ML LightGBM regressor converted to torch must match the
        onnxruntime predictions of the ONNX-ML model."""
        warnings.filterwarnings("ignore")
        X = [[0, 1], [1, 1], [2, 0]]
        X = np.array(X, dtype=np.float32)
        y = np.array([100, -10, 50], dtype=np.float32)
        model = lgb.LGBMRegressor(n_estimators=3, min_child_samples=1)
        model.fit(X, y)

        # Create ONNX-ML model
        onnx_ml_model = convert_lightgbm(
            model,
            initial_types=[("input", FloatTensorType([X.shape[0],
                                                      X.shape[1]]))],
            target_opset=9)

        pt_model = convert(onnx_ml_model, "torch", X)
        assert pt_model

        # Get the predictions for the ONNX-ML model.
        # (The previous dead pre-assignment of onnx_ml_pred was removed:
        # session.run returns the full list directly.)
        session = ort.InferenceSession(onnx_ml_model.SerializeToString())
        output_names = [
            session.get_outputs()[i].name
            for i in range(len(session.get_outputs()))
        ]
        inputs = {session.get_inputs()[0].name: X}
        onnx_ml_pred = session.run(output_names, inputs)

        np.testing.assert_allclose(onnx_ml_pred[0].flatten(),
                                   pt_model.predict(X))
Example #5
0
def convert_model(model, name, input_types):
    """
    Runs the appropriate conversion method.

    :param model: model, *scikit-learn*, *keras*, or *coremltools* object
    :param name: name given to the resulting ONNX graph
    :param input_types: initial input types for the ONNX graph
    :return: tuple (*onnx* model, prefix string identifying the converter)
    :raises RuntimeError: if no converter can handle the model
    """
    from sklearn.base import BaseEstimator
    # Capture the incoming type now: `model` is rebound to the conversion
    # result below, so `type(model)` would report 'NoneType' on failure.
    model_type = type(model)
    if model.__class__.__name__.startswith("LGBM"):
        from onnxmltools.convert import convert_lightgbm
        model, prefix = convert_lightgbm(model, name, input_types), "LightGbm"
    elif isinstance(model, BaseEstimator):
        from onnxmltools.convert import convert_sklearn
        model, prefix = convert_sklearn(model, name, input_types), "Sklearn"
    else:
        from keras.models import Model
        if isinstance(model, Model):
            from onnxmltools.convert import convert_keras
            model, prefix = convert_keras(model, name, input_types), "Keras"
        else:
            from onnxmltools.convert import convert_coreml
            model, prefix = convert_coreml(model, name, input_types), "Cml"
    if model is None:
        raise RuntimeError(
            "Unable to convert model of type '{0}'.".format(model_type))
    return model, prefix
    def test_lightgbm_pytorch_extra_config(self):
        """Passing the graph name and initial types through ``extra_config``
        must produce an ONNX model carrying the requested graph name."""
        warnings.filterwarnings("ignore")
        X = np.array([[0, 1], [1, 1], [2, 0]], dtype=np.float32)
        y = np.array([100, -10, 50], dtype=np.float32)
        model = lgb.LGBMRegressor(n_estimators=3, min_child_samples=1)
        model.fit(X, y)

        # Build the ONNX-ML intermediate model.
        input_types = [("input", FloatTensorType([X.shape[0], X.shape[1]]))]
        onnx_ml_model = convert_lightgbm(
            model, initial_types=input_types, target_opset=9)

        # Build the ONNX model, supplying name and input types via
        # extra_config instead of direct arguments.
        model_name = "hummingbird.ml.test.lightgbm"
        extra_config = {
            constants.ONNX_OUTPUT_MODEL_NAME: model_name,
            constants.ONNX_INITIAL_TYPES: [
                ("input", FloatTensorType([X.shape[0], X.shape[1]]))
            ],
        }
        onnx_model = convert(onnx_ml_model, "onnx", extra_config=extra_config)

        assert onnx_model.model.graph.name == model_name
    def test_lightgbm_regressor(self):
        """Compare mlprodict's converter against onnxmltools' on LGBMRegressor
        models of varying sizes, checking both ONNX runtimes agree with
        LightGBM predictions within 1e-3."""
        try:
            from onnxmltools import __version__
        except ImportError:
            # onnxmltools missing entirely: nothing to compare against.
            return
        if compare_module_version(__version__, '1.11') <= 0:
            # Older onnxmltools releases are known-incompatible; skip.
            return
        from lightgbm import LGBMRegressor
        try:
            from onnxmltools.convert import convert_lightgbm
        except ImportError:
            convert_lightgbm = None
        X, y = self.data_X, self.data_y

        for ne in [1, 2, 10, 50, 100, 200]:
            for mx in [1, 10]:
                # Keep CI fast: deep trees only when run as a script.
                if __name__ != "__main__" and mx > 5:
                    break
                model = LGBMRegressor(max_depth=mx,
                                      n_estimators=ne,
                                      min_child_samples=1,
                                      learning_rate=0.0000001)
                model.fit(X, y)
                expected = model.predict(X)

                model_onnx = to_onnx(model, X)
                if convert_lightgbm is not None:
                    try:
                        model_onnx2 = convert_lightgbm(
                            model,
                            initial_types=[('X',
                                            FloatTensorType([None,
                                                             X.shape[1]]))])
                    except RuntimeError as e:
                        if "is higher than the number of the installed" in str(
                                e):
                            # Opset mismatch with installed onnx: skip the
                            # onnxmltools model for this configuration.
                            model_onnx2 = None
                        else:
                            # Bare raise preserves the original traceback
                            # (``raise e`` would reset it).
                            raise
                else:
                    model_onnx2 = None

                for i, mo in enumerate([model_onnx, model_onnx2]):
                    if mo is None:
                        continue
                    for rt in ['python', 'onnxruntime1']:
                        with self.subTest(i=i, rt=rt, max_depth=mx, n_est=ne):
                            oinf = OnnxInference(mo, runtime=rt)
                            got = oinf.run({'X': X})['variable']
                            diff = numpy.abs(got.ravel() -
                                             expected.ravel()).max()
                            if __name__ == "__main__":
                                print("lgb1 mx=%d ne=%d" % (mx, ne),
                                      "mlprod" if i == 0 else "mltool", rt[:6],
                                      diff)
                            self.assertLess(diff, 1e-3)
Example #8
0
    def test_lightgbm_pytorch(self):
        """Converting an ONNX-ML LightGBM model to torch without providing
        test input must raise RuntimeError."""
        warnings.filterwarnings("ignore")
        X = np.array([[0, 1], [1, 1], [2, 0]], dtype=np.float32)
        y = np.array([100, -10, 50], dtype=np.float32)
        model = lgb.LGBMRegressor(n_estimators=3, min_child_samples=1)
        model.fit(X, y)

        # Create ONNX-ML model
        initial_types = [("input", FloatTensorType([X.shape[0], X.shape[1]]))]
        onnx_ml_model = convert_lightgbm(
            model, initial_types=initial_types, target_opset=9)

        # No sample input is passed, so the torch conversion must fail.
        self.assertRaises(RuntimeError, convert, onnx_ml_model, "torch")
Example #9
0
# Output paths for the exported ONNX artifacts.
# NOTE: the env-var KEY strings keep the historical 'MODEl' spelling so
# existing deployment configuration still resolves; only the Python
# variable names are normalized to ONNX_MODEL_PATH_*.
ONNX_TRANS_PATH = env.str('TRANS_PATH', './outputs/trans.onnx')
ONNX_MODEL_PATH_XGB = env.str('ONNX_MODEl_PATH_XGB', './outputs/xgb.onnx')
ONNX_MODEL_PATH_LGB = env.str('ONNX_MODEl_PATH_LGB', './outputs/lgb.onnx')
ONNX_MODEL_PATH_DCN = env.str('ONNX_MODEl_PATH_DCN', './outputs/dcn.onnx')

# 13 numeric + 26 categorical raw features in, 39 transformed features out.
trans_initial_type = [('num_feat', FloatTensorType([None, 13])),
                      ('cat_feat', StringTensorType([None, 26]))]
model_initial_type = [('num_feat', FloatTensorType([None, 39]))]

print('convert sklearn transformer')
trans = joblib.load(TRANS_PATH)
onx = convert_sklearn(trans, initial_types=trans_initial_type)
onnx.save(onx, ONNX_TRANS_PATH)

print('convert XGBoost model')
model = xgb.XGBClassifier()
model.load_model(MODEL_PATH_XGB)
onx = convert_xgboost(model, initial_types=model_initial_type)
onnx.save(onx, ONNX_MODEL_PATH_XGB)

print('convert LightGBM model')
model = lgb.Booster(model_file=MODEL_PATH_LGB)
onx = convert_lightgbm(model, initial_types=model_initial_type)
onnx.save(onx, ONNX_MODEL_PATH_LGB)

print('convert DCN model')
graph_def, inputs, outputs = from_saved_model(MODEL_PATH_DCN, None, None)
tf.compat.v1.disable_eager_execution()
onx = convert_tensorflow(graph_def, input_names=inputs, output_names=outputs)
onnx.save(onx, ONNX_MODEL_PATH_DCN)
from onnxconverter_common.data_types import FloatTensorType
from onnxmltools.convert import convert_lightgbm

# Train a LightGBM classifier on the iris dataset.
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = LGBMClassifier()
clr.fit(X_train, y_train)
print(clr)

###########################
# Convert a model into ONNX
# +++++++++++++++++++++++++

# None as the first dimension keeps the batch size dynamic; 4 = iris features.
initial_type = [('float_input', FloatTensorType([None, 4]))]
onx = convert_lightgbm(clr, initial_types=initial_type)

###################################
# Compute the predictions with onnxruntime
# ++++++++++++++++++++++++++++++++++++++++

# NOTE(review): `rt` is presumably onnxruntime imported earlier — confirm.
sess = rt.InferenceSession(onx.SerializeToString())
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
# Request only the label output; cast to float32 to match the declared
# FloatTensorType input.
pred_onx = sess.run([label_name],
                    {input_name: X_test.astype(numpy.float32)})[0]
print(pred_onx)

###############################################
# With Dataset
# ++++++++++++
Example #11
0
# In[2]:


# Create and train a model (LightGBM in this case).
model = lgb.LGBMClassifier()
model.fit(X, y)


# In[3]:


# Use ONNXMLTOOLS to convert the model to ONNXML.
# NOTE(review): the first dimension is pinned to X.shape[0], so the ONNX
# graph only accepts batches of exactly that size — confirm intended.
initial_types = [("input", FloatTensorType([X.shape[0], X.shape[1]]))] # Define the inputs for the ONNX
onnx_ml_model = convert_lightgbm(
    model, initial_types=initial_types, target_opset=9
)


# In[4]:


# Use Hummingbird to convert the ONNXML model to ONNX.
onnx_model = convert(onnx_ml_model, "onnx", X)


# In[5]:


# Alternatively we can set the inital types using the extra_config parameters as in the ONNXMLTOOL converter.
extra_config = {}