Example 1
    def test_add_loss_output_cls(self):
        from onnxcustom.utils.orttraining_helper import add_loss_output
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        reg = LogisticRegression()
        reg.fit(X_train, y_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearClassifier'},
                      options={'zipmap': False})
        onx_loss = add_loss_output(onx,
                                   'log',
                                   output_index='probabilities',
                                   eps=1e-6)
        try:
            text = onnx_simple_text_plot(onx_loss)
        except RuntimeError:
            text = ""
        if text:
            self.assertIn("Clip(probabilities", text)

        oinf = OnnxInference(onx_loss)
        output = oinf.run({'X': X_test, 'label': y_test.reshape((-1, 1))})
        loss = output['loss']
        skl_loss = log_loss(y_test, reg.predict_proba(X_test), eps=1e-6)
        self.assertLess(numpy.abs(skl_loss - loss[0, 0]), 1e-5)
Example 2
import onnx
from mlprodict.plotting.text_plot import onnx_simple_text_plot


def print_graph(d):
    for k, v in sorted(d.items()):
        if isinstance(v, dict):
            print_graph(v)
        else:
            print("\n++++++", v.replace("\\", "/"), "\n")
            with open(v, "rb") as f:
                print(onnx_simple_text_plot(onnx.load(f)))
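
A hypothetical call, assuming a nested dictionary that maps names to paths of ONNX files on disk:

# The paths below are placeholders for real ONNX files.
print_graph({'model': {'forward': 'model_forward.onnx',
                       'backward': 'model_backward.onnx'}})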
Example 3
def plot_onnx(filename, format="onnx", verbose=0, output=None, fLOG=print):
    """
    Plots an ONNX graph on the standard output.

    :param filename: onnx file
    :param format: format to export to (`simple`, `tree`, `dot`,
        `io`, `mat`, `raw`)
    :param output: output file to produce or None to print it on stdout
    :param verbose: verbosity level
    :param fLOG: logging function

    .. cmdref::
        :title: Plots an ONNX graph as text
        :cmd: -m mlprodict plot_onnx --help
        :lid: l-cmd-plot_onnx

        The command shows the ONNX graphs as a text on the standard output.

        Example::

            python -m mlprodict plot_onnx --filename="something.onnx" --format=simple
    """
    if isinstance(filename, str):
        from onnx import load
        content = load(filename)
    else:
        content = filename
    if format == 'dot':
        from ..onnxrt import OnnxInference
        code = OnnxInference(content).to_dot()
    elif format == 'simple':
        from mlprodict.plotting.text_plot import onnx_simple_text_plot
        code = onnx_simple_text_plot(content)
    elif format == 'io':
        from mlprodict.plotting.text_plot import onnx_text_plot_io
        code = onnx_text_plot_io(content)
    elif format == 'mat':
        from mlprodict.plotting.text_plot import onnx_text_plot
        code = onnx_text_plot(content)
    elif format == 'raw':
        code = str(content)
    elif format == 'tree':
        from mlprodict.plotting.plotting import onnx_text_plot_tree
        rows = []
        for node in content.graph.node:
            if node.op_type.startswith("TreeEnsemble"):
                rows.append('Node type=%r name=%r' % (node.op_type, node.name))
                rows.append(onnx_text_plot_tree(node))
        code = "\n".join(rows)
    else:
        raise ValueError(  # pragma: no cover
            "Unknown format %r." % format)

    if output not in ('', None):
        with open(output, "w", encoding="utf-8") as f:
            f.write(code)
    else:
        fLOG(code)
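
The function can also be called directly from Python. A minimal sketch, where 'something.onnx' stands for any existing ONNX file:

# 'something.onnx' is a placeholder for an existing model file.
plot_onnx("something.onnx", format="simple")
# Write the rendering to a file instead of printing it on stdout.
plot_onnx("something.onnx", format="io", output="graph_io.txt")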
Example 4
    def wtest_ort_gradient_optimizers_fw_sgd_binary(self, use_weight):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X = numpy.arange(60).astype(numpy.float32).reshape((-1, 3))
        y = numpy.arange(X.shape[0]).astype(numpy.float32).reshape(
            (-1, 1)) > 10
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        y[0, 0] = 0
        y[-1, 0] = 1
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDClassifier(loss='log')
        if use_weight:
            reg.fit(X_train,
                    y_train.ravel(),
                    sample_weight=w_train.astype(numpy.float64))
        else:
            reg.fit(X_train, y_train.ravel())
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={
                          'zipmap': False,
                          'raw_scores': True
                      })
        onx = select_model_inputs_outputs(onx, outputs=['score'])
        self.assertIn("output: name='score'", onnx_simple_text_plot(onx))
        inits = ['coef', 'intercept']

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e10),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10,
            enable_logging=False)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        y_train = y_train.reshape((-1, 1))
        if use_weight:
            train_session.fit(X_train, y_train, w_train.reshape((-1, 1)))
        else:
            train_session.fit(X_train, y_train)
        losses = train_session.train_losses_
        self.assertGreater(len(losses), 1)
        if any(map(numpy.isnan, losses)):
            raise AssertionError(losses)
Example 5
    def wtest_ort_gradient_optimizers_fw_nesterov_binary_mlp(
            self, use_weight=True):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (
            LearningRateSGDNesterov)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = MLPClassifier(solver='sgd')
        reg.fit(X_train, y_train)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={'zipmap': False})
        onx = select_model_inputs_outputs(onx,
                                          outputs=['out_activations_result'])
        self.assertIn("output: name='out_activations_result'",
                      onnx_simple_text_plot(onx))
        set_model_props(onx, {'info': 'unit test'})
        onx = onnx_rename_weights(onx)
        inits = [
            'I0_coefficient', 'I1_intercepts', 'I2_coefficient1',
            'I3_intercepts1'
        ]

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGDNesterov(1e-4,
                                                  nesterov=False,
                                                  momentum=0.9),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        if use_weight:
            train_session.fit(X_train, y_train, w_train)
        else:
            train_session.fit(X_train, y_train)
        temp = get_temp_folder(
            __file__, "temp_ort_gradient_optimizers_fw_nesterov_binary_mlp%d" %
            use_weight)
        train_session.save_onnx_graph(temp)
Example 6
    def test_grad_helper_loss(self):
        temp = get_temp_folder(__file__, "temp_grad_helper_loss")
        grad_file = os.path.join(temp, "grad.onnx")
        X, y = make_regression(  # pylint: disable=W0632
            100,
            n_features=10,
            bias=2,
            random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        reg = LinearRegression()
        reg.fit(X, y)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg, X, target_opset=opset,
                      black_op={'LinearRegressor'})
        onx_loss = add_loss_output(onx)
        text1 = onnx_simple_text_plot(onx_loss)
        new_onx = onnx_derivative(onx,
                                  options=DerivativeOptions.Loss,
                                  label='variable',
                                  loss='loss',
                                  path_name=grad_file)
        text2 = onnx_simple_text_plot(new_onx)
        self.assertNotEqual(text1, text2)
Example 7
    def check_infer_shapes(self, onx, out, rt):
        onnx_shapes = infer_shapes(onx)
        inferred = onnx_shapes.graph.value_info
        for data in inferred:
            if data.name not in out:
                raise AssertionError("Name %r not found." % data.name)
            shape, dtype, sparse = OnnxShapeInference._get_shape(data)  # pylint: disable=W0212
            for i in range(len(shape)):
                if not isinstance(shape[i], str):
                    continue
                if shape[i].startswith('unk_'):
                    shape[i] = shape[i][4:]
            res = ShapeResult(data.name, shape, dtype, sparse)
            if res != out[data.name]:
                raise AssertionError(
                    "Unexpected differences for name %r:\nexp: %r\ngot: %r"
                    "\n-----\n%s" % (data.name, out[data.name], res,
                                     onnx_simple_text_plot(onx)))
Example 8
    def test_gradient_mlpclassifier(self):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X = numpy.arange(30).reshape((-1, 3)).astype(numpy.float32) / 100
        y = numpy.arange(X.shape[0]).astype(numpy.float32)
        y = (y.reshape((-1, 1)) >= 15).astype(numpy.int64)
        reg = MLPClassifier(hidden_layer_sizes=(5,), max_iter=2,
                            activation='logistic',
                            momentum=0, nesterovs_momentum=False,
                            alpha=0)
        reg.fit(X, y.ravel())
        onx = to_onnx(reg, X, target_opset=opset,
                      options={'zipmap': False})
        onx = select_model_inputs_outputs(onx, outputs=['add_result1'],
                                          infer_shapes=True)
        text = onnx_simple_text_plot(onx)
        self.assertIn("output: name='add_result1'", text)

        onx = onnx_rename_weights(onx)
        inits = ["I0_coefficient", 'I1_intercepts', 'I2_coefficient1',
                 'I3_intercepts1']

        xp = numpy.arange(2 * X.shape[1]).reshape((2, -1)).astype(
            numpy.float32) / 100
        xp[0, 0] -= 4
        xp[1, :] += 4
        yp = numpy.array([0, 1], dtype=numpy.int64).reshape((-1, 1))

        train_session = OrtGradientForwardBackwardOptimizer(
            onx, inits, learning_rate=1e-5,
            warm_start=True, max_iter=2, batch_size=10,
            learning_loss=NegLogLearningLoss())
        train_session.fit(X, y)
        state = train_session.get_state()
        state_np = [st.numpy() for st in state]

        # gradient scikit-learn

        coef_grads = state_np[::2]
        intercept_grads = state_np[1::2]
        layer_units = [3, 5, 1]
        activations = [xp] + [None] * (len(layer_units) - 1)
        deltas = [None] * (len(activations) - 1)

        skl_pred = reg.predict_proba(xp)

        batch_loss, coef_grads, intercept_grads = reg._backprop(  # pylint: disable=W0212
            xp, yp, activations, deltas,
            coef_grads, intercept_grads)
        deltas = activations[-1] - yp

        # gradient onnxcustom

        ort_xp = C_OrtValue.ortvalue_from_numpy(xp, train_session.device)
        ort_yp = C_OrtValue.ortvalue_from_numpy(yp, train_session.device)
        ort_state = [ort_xp] + state
        prediction = train_session.train_function_.forward(
            ort_state, training=True)

        ort_pred = prediction[0].numpy()
        self.assertEqualArray(skl_pred[:, 1:2], expit(ort_pred), decimal=2)

        loss, loss_gradient = train_session.learning_loss.loss_gradient(
            train_session.device, ort_yp, prediction[0])

        gradient = train_session.train_function_.backward([loss_gradient])

        # comparison

        self.assertEqualArray(
            batch_loss * 2, loss.numpy(), decimal=3)
        self.assertEqualArray(deltas, loss_gradient.numpy(), decimal=3)

        # do not use iterator for gradient, it may crash
        ort_grad = [gradient[i].numpy() / xp.shape[0]
                    for i in range(len(gradient))][1:]
        self.assertEqualArray(
            intercept_grads[1], ort_grad[3].ravel(), decimal=2)
        self.assertEqualArray(coef_grads[1], ort_grad[2], decimal=2)
        self.assertEqualArray(
            intercept_grads[0], ort_grad[1].ravel(), decimal=2)
        self.assertEqualArray(coef_grads[0], ort_grad[0], decimal=2)
Example 9
import numpy
import onnx
from tqdm import tqdm
from mlprodict.npy.xop import loadop
from mlprodict.plotting.text_plot import onnx_simple_text_plot


def build_model(n_nodes, size, opv=15):
    OnnxAdd, OnnxIdentity = loadop('Add', 'Identity')
    x = 'X'
    for n in range(n_nodes):
        y = OnnxAdd(x,
                    numpy.random.randn(size).astype(numpy.float32),
                    op_version=opv)
        x = y
    final = OnnxIdentity(x, op_version=opv, output_names=['Y'])
    x = numpy.zeros((10, 10), dtype=numpy.float32)
    return final.to_onnx({'X': x}, {'Y': x}, target_opset=opv)


model = build_model(2, 5)
print(onnx_simple_text_plot(model))

##########################################
# Measure the time of serialization functions
# +++++++++++++++++++++++++++++++++++++++++++


def parse(buffer):
    proto = onnx.ModelProto()
    proto.ParseFromString(buffer)
    return proto


data = []
nodes = [5, 10, 20]
for size in tqdm([10, 100, 1000, 10000, 100000, 200000, 300000]):
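    # The body of this loop was truncated in the original listing.
    # A minimal, hypothetical completion: for every graph size, time
    # SerializeToString and the parse function defined above.
    from time import perf_counter
    for n_nodes in nodes:
        model = build_model(n_nodes, size)
        begin = perf_counter()
        buffer = model.SerializeToString()
        t_serialize = perf_counter() - begin
        begin = perf_counter()
        parse(buffer)
        t_parse = perf_counter() - begin
        data.append(dict(size=size, n_nodes=n_nodes,
                         serialize=t_serialize, parse=t_parse))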
Example 10
"""
import numpy
from scipy.special import expit
import pandas
from tqdm import tqdm
from cpyquickhelper.numbers.speed_measure import measure_time
import matplotlib.pyplot as plt
from onnxruntime import InferenceSession
from onnxruntime.capi._pybind_state import (  # pylint: disable=E0611
    SessionIOBinding, OrtDevice as C_OrtDevice, OrtValue as C_OrtValue)
from mlprodict.plotting.text_plot import onnx_simple_text_plot
from onnxcustom.utils.onnx_function import function_onnx_graph

fct_onx = function_onnx_graph("axpy")
print(onnx_simple_text_plot(fct_onx))

###########################################
# The numpy implementation is the following.

fct_numpy = lambda X1, X2, alpha: X1 * alpha + X2
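
###########################################
# A quick consistency check between the ONNX graph and the numpy
# implementation (a minimal sketch; it assumes the graph inputs are
# named 'X1', 'X2' and 'alpha', as the numpy signature suggests).

sess = InferenceSession(fct_onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
X1 = numpy.random.randn(10, 10).astype(numpy.float32)
X2 = numpy.random.randn(10, 10).astype(numpy.float32)
alpha = numpy.array([0.5], dtype=numpy.float32)
got = sess.run(None, {'X1': X1, 'X2': X2, 'alpha': alpha})[0]
assert numpy.allclose(got, fct_numpy(X1, X2, alpha), atol=1e-5)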

###########################################
# The benchmark


def reshape(a, dim):
    if len(a.shape) == 2:
        return a[:dim].copy()
    return a
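

###########################################
# The benchmark itself was truncated in this listing. A minimal,
# hypothetical sketch of the comparison it sets up, reusing `sess`,
# `fct_numpy` and `reshape` defined above; the input names 'X1',
# 'X2' and 'alpha' are assumptions.

X1 = numpy.random.randn(1000, 10).astype(numpy.float32)
X2 = numpy.random.randn(1000, 10).astype(numpy.float32)
alpha = numpy.array([0.5], dtype=numpy.float32)
data = []
for dim in tqdm([10, 100, 1000]):
    x1, x2 = reshape(X1, dim), reshape(X2, dim)
    ctx = dict(fct_numpy=fct_numpy, sess=sess, x1=x1, x2=x2, alpha=alpha)
    obs = measure_time("fct_numpy(x1, x2, alpha)",
                       context=ctx, repeat=10, number=100)
    obs.update(dict(dim=dim, name='numpy'))
    data.append(obs)
    obs = measure_time(
        "sess.run(None, {'X1': x1, 'X2': x2, 'alpha': alpha})",
        context=ctx, repeat=10, number=100)
    obs.update(dict(dim=dim, name='ort'))
    data.append(obs)

df = pandas.DataFrame(data)
print(df)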
Example 11
    def wtest_ort_gradient_optimizers_fw_nesterov_binary(self, use_weight):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (
            LearningRateSGDNesterov)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDClassifier(loss='log')
        if use_weight:
            reg.fit(X_train,
                    y_train,
                    sample_weight=w_train.astype(numpy.float64))
        else:
            reg.fit(X_train, y_train)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={
                          'zipmap': False,
                          'raw_scores': True
                      })
        onx2 = onx
        onx = select_model_inputs_outputs(onx, outputs=['score'])
        self.assertIn("output: name='score'", onnx_simple_text_plot(onx))
        set_model_props(onx, {'info': 'unit test'})
        inits = ['coef', 'intercept']

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGDNesterov(1e-4,
                                                  nesterov=False,
                                                  momentum=0.9),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        y_train = y_train.reshape((-1, 1))
        if use_weight:
            train_session.fit(X_train, y_train, w_train.reshape((-1, 1)))
        else:
            train_session.fit(X_train, y_train)
        temp = get_temp_folder(
            __file__, "temp_ort_gradient_optimizers_fw_nesterov_binary")
        train_session.save_onnx_graph(temp)

        # get_trained_weight
        trained_onnx = train_session.get_trained_onnx(model=onx2)
        sess = InferenceSession(onx2.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got1 = sess.run(None, {'X': X_train})
        sess = InferenceSession(trained_onnx.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got2 = sess.run(None, {'X': X_train})
        self.assertEqual(len(got1), len(got2))
        self.assertEqual(got1[0].shape, got2[0].shape)

        # state
        state = train_session.get_state()
        self.assertIsInstance(state, list)
        train_session.set_state(state)
        for k in range(len(state)):  # pylint: disable=C0200
            state[k] = state[k].numpy()
        train_session.set_state(state)