Example #1
    def test_onnx_simple_text_plot_toy(self):
        x = numpy.random.randn(10, 3).astype(numpy.float32)
        node1 = OnnxAdd('X', x, op_version=15)
        node2 = OnnxSub('X', x, op_version=15)
        node3 = OnnxAbs(node1, op_version=15)
        node4 = OnnxAbs(node2, op_version=15)
        node5 = OnnxDiv(node3, node4, op_version=15)
        node6 = OnnxAbs(node5, output_names=['Y'], op_version=15)
        onx = node6.to_onnx({'X': x.astype(numpy.float32)},
                            outputs={'Y': x},
                            target_opset=15)
        text = onnx_simple_text_plot(onx, verbose=False)
        expected = textwrap.dedent("""
        Add(X, Ad_Addcst) -> Ad_C0
          Abs(Ad_C0) -> Ab_Y0
        Identity(Ad_Addcst) -> Su_Subcst
          Sub(X, Su_Subcst) -> Su_C0
            Abs(Su_C0) -> Ab_Y02
            Div(Ab_Y0, Ab_Y02) -> Di_C0
              Abs(Di_C0) -> Y
        """).strip(" \n")
        self.assertIn(expected, text)
        text2, out, err = self.capture(
            lambda: onnx_simple_text_plot(onx, verbose=True))
        self.assertEqual(text, text2)
        self.assertIn('BEST:', out)
        self.assertEmpty(err)
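A quick numerical cross-check, not part of the original test: the toy graph computes Y = |X + x| / |X - x| elementwise, the constant x being baked into the graph as the initializer Ad_Addcst shown in the text plot. A minimal sketch, assuming onnxruntime is installed and the same skl2onnx operator classes as in the test; the variable names below are illustrative:

import numpy
from numpy.testing import assert_allclose
import onnxruntime as ort
from skl2onnx.algebra.onnx_ops import OnnxAbs, OnnxAdd, OnnxDiv, OnnxSub

x = numpy.random.randn(10, 3).astype(numpy.float32)
node = OnnxAbs(
    OnnxDiv(OnnxAbs(OnnxAdd('X', x, op_version=15), op_version=15),
            OnnxAbs(OnnxSub('X', x, op_version=15), op_version=15),
            op_version=15),
    output_names=['Y'], op_version=15)
onx = node.to_onnx({'X': x}, outputs={'Y': x}, target_opset=15)

sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
X = numpy.random.randn(10, 3).astype(numpy.float32)
got = sess.run(None, {'X': X})[0]
# the outer Abs is a numerical no-op: a ratio of absolute values is >= 0
assert_allclose(numpy.abs(X + x) / numpy.abs(X - x), got, rtol=1e-5)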
Example #2
    def test_algebra_abs(self):
        # needed by the final check at the end of this test
        from numpy.testing import assert_almost_equal

        op = OnnxAbs('I0', op_version=TARGET_OPSET)
        onx = op.to_onnx({'I0': numpy.empty((1, 2), dtype=numpy.float32)})
        assert onx is not None

        import onnxruntime as ort
        try:
            # explicit providers are required by some onnxruntime builds (>= 1.9)
            sess = ort.InferenceSession(onx.SerializeToString(),
                                        providers=['CPUExecutionProvider'])
        except RuntimeError as e:
            raise RuntimeError("Unable to read\n{}".format(onx)) from e
        X = numpy.array([[0, 1], [-1, -2]])
        try:
            Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
        except RuntimeError as e:
            raise RuntimeError("Unable to run\n{}".format(onx)) from e
        assert_almost_equal(Y, numpy.abs(X))
Example #3
def _onnx_grad_loss_absolute_error(target_opset=None,
                                   dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert` or
    :math:`Y = f(X1, X2) = \\lVert (X1 - X2)w \\rVert` if
    *weight_name* is not None and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset, output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
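A sanity check, not from the original source: for the unweighted graph, Y is the summed absolute error reshaped to a 1-D tensor, and Y_grad is sign(X1 - X2), the derivative with respect to X1. A sketch, assuming the function above is importable in its module context and onnxruntime is installed:

import numpy
from numpy.testing import assert_almost_equal
import onnxruntime as ort

onx = _onnx_grad_loss_absolute_error(target_opset=15)
sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
X1 = numpy.random.randn(4, 3).astype(numpy.float32)
X2 = numpy.random.randn(4, 3).astype(numpy.float32)
# outputs come back in declaration order: Y first, then Y_grad
y, y_grad = sess.run(None, {'X1': X1, 'X2': X2})
assert_almost_equal(numpy.array([numpy.abs(X1 - X2).sum()]), y, decimal=4)
assert_almost_equal(numpy.sign(X1 - X2), y_grad)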
Example #4
def _onnx_n_penalty_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01,
                                  n_tensors=1,
                                  loss_shape=(1, 1)):
    """
    Returns the ONNX graph for the function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert_1 +
    \\alpha \\lVert W \\rVert_2^2` where
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.
    The penalty is computed for each of the *n_tensors* input
    tensors and all results are added to an input loss.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'n_penalty_elastic_error', n_tensors=2)
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxAbs, OnnxReshape)

    if n_tensors <= 0:
        raise ValueError(  # pragma: no cover
            "This function is useless if the number of tensors is null.")

    var_type = dtype_to_var_type(dtype)
    varsx = [('loss', var_type(loss_shape))]
    names = ['loss']
    for n in range(n_tensors):
        name = 'W%d' % n
        abs_diff = OnnxAbs(name, op_version=target_opset)
        res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
        res_l2 = OnnxReduceSumSquare(name, op_version=target_opset)
        res = OnnxAdd(OnnxMul(res_l1,
                              numpy.array([l1_weight], dtype=dtype),
                              op_version=target_opset),
                      OnnxMul(res_l2,
                              numpy.array([l2_weight], dtype=dtype),
                              op_version=target_opset),
                      op_version=target_opset)
        names.append(res)
        varsx.append(('W%d' % n, var_type()))

    if len(names) == 2:
        res = OnnxAdd(*names, op_version=target_opset)
    else:
        res = OnnxAdd(names[1], names[2], op_version=target_opset)
        for i in range(3, len(names)):
            res = OnnxAdd(res, names[i], op_version=target_opset)
        res = OnnxAdd(names[0], res, op_version=target_opset)

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None]))],
                      target_opset=target_opset)
    return onx
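An illustrative check, not in the source: with n_tensors=2 the single output is loss plus the sum over tensors of beta * sum(|Wn|) + alpha * sum(Wn^2), reshaped to shape (1,). A sketch under the same assumptions as above:

import numpy
from numpy.testing import assert_almost_equal
import onnxruntime as ort

onx = _onnx_n_penalty_elastic_error(target_opset=15, n_tensors=2,
                                    l1_weight=0.01, l2_weight=0.01)
sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
loss = numpy.array([[0.5]], dtype=numpy.float32)
w0 = numpy.random.randn(3).astype(numpy.float32)
w1 = numpy.random.randn(4).astype(numpy.float32)
got = sess.run(None, {'loss': loss, 'W0': w0, 'W1': w1})[0]
pen = sum(0.01 * numpy.abs(w).sum() + 0.01 * (w ** 2).sum()
          for w in (w0, w1))
assert_almost_equal(numpy.array([0.5 + pen]), got, decimal=4)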
Example #5
def _onnx_grad_penalty_elastic_error(target_opset=None,
                                     dtype=numpy.float32,
                                     l1_weight=0.01,
                                     l2_weight=0.01):
    """
    Returns the ONNX graph for the function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert_1 +
    \\alpha \\lVert W \\rVert_2^2` where
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxSign, OnnxAbs, OnnxReshape)
    diff = 'X'
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
    res2_l1 = OnnxSign(diff, op_version=target_opset)
    res_l2 = OnnxReduceSumSquare(diff, op_version=target_opset)
    res2_l2 = diff

    res = OnnxAdd(OnnxMul(res_l1,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(res_l2,
                          numpy.array([l2_weight], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset)
    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    # gradient of the penalty: l1_weight * sign(X) + 2 * l2_weight * X
    res2 = OnnxAdd(OnnxMul(res2_l1,
                           numpy.array([l1_weight], dtype=dtype),
                           op_version=target_opset),
                   OnnxMul(res2_l2,
                           numpy.array([l2_weight * 2], dtype=dtype),
                           op_version=target_opset),
                   op_version=target_opset,
                   output_names=['Y_grad'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None])),
                               ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    return onx
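Again as an illustrative check, not in the source: Y = beta * sum(|X|) + alpha * sum(X^2) with shape (1,), and Y_grad = beta * sign(X) + 2 * alpha * X, matching the factor 2 introduced above. A sketch:

import numpy
from numpy.testing import assert_almost_equal
import onnxruntime as ort

onx = _onnx_grad_penalty_elastic_error(target_opset=15,
                                       l1_weight=0.01, l2_weight=0.01)
sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
X = numpy.random.randn(4, 3).astype(numpy.float32)
y, y_grad = sess.run(None, {'X': X})
assert_almost_equal(
    numpy.array([0.01 * numpy.abs(X).sum() + 0.01 * (X ** 2).sum()]),
    y, decimal=4)
assert_almost_equal(0.01 * numpy.sign(X) + 0.02 * X, y_grad, decimal=6)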
Example #6
def _onnx_grad_loss_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01):
    """
    Returns the ONNX graph for the function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert_1 +
    \\alpha \\lVert X1 - X2 \\rVert_2^2` or
    :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert_1 +
    \\alpha \\lVert (\\sqrt{w})(X1 - X2) \\rVert_2^2` if
    *weight_name* is not None, and for its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxAdd,
                                           OnnxIdentity, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(abs_diff,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset)
    diff_l2 = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      numpy.array([l2_weight], dtype=dtype),
                      op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient
    grad_l1 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                      numpy.array([l1_weight], dtype=dtype),
                      op_version=target_opset)
    grad_l2 = OnnxMul(diff,
                      numpy.array([l2_weight * -2], dtype=dtype),
                      op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad,
                            op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(OnnxMul(score, resh, op_version=target_opset),
                            op_version=target_opset)
        res2 = OnnxMul(grad,
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
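One more illustrative check, not in the source. With d = X1 - X2, the unweighted graph returns Y = sum(beta * |d| + alpha * d^2) with shape (1,), and Y_grad = beta * sign(d) - 2 * alpha * d; the -2 factor simply mirrors the grad_l2 term above. A sketch under the same assumptions:

import numpy
from numpy.testing import assert_almost_equal
import onnxruntime as ort

onx = _onnx_grad_loss_elastic_error(target_opset=15,
                                    l1_weight=0.01, l2_weight=0.01)
sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
X1 = numpy.random.randn(4, 3).astype(numpy.float32)
X2 = numpy.random.randn(4, 3).astype(numpy.float32)
d = X1 - X2
y, y_grad = sess.run(None, {'X1': X1, 'X2': X2})
assert_almost_equal(
    numpy.array([(0.01 * numpy.abs(d) + 0.01 * d ** 2).sum()]),
    y, decimal=4)
assert_almost_equal(0.01 * numpy.sign(d) - 0.02 * d, y_grad, decimal=6)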
Example #7
def onnx_abs_shape(x: NDArray[(Any, Any), numpy.float32],
                   op_version=None) -> NDArray[(Any, Any), numpy.float32]:
    return OnnxAbs(x, op_version=op_version)
Example #8
def onnx_abs(x: NDArray[Any, numpy.float32],
             op_version=None) -> NDArray[Any, numpy.float32]:
    return OnnxAbs(x, op_version=op_version)
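Examples #7 and #8 differ only in whether the NDArray annotation carries an explicit 2-D shape. For context, a sketch of the decorator pattern such typed signatures are usually paired with in mlprodict's numpy-to-ONNX API; this is an assumption about the surrounding test code, not shown in the snippets above:

import numpy
from typing import Any
from mlprodict.npy import onnxnumpy_default, NDArray
import mlprodict.npy.numpy_onnx_impl as nxnp

@onnxnumpy_default
def onnx_abs(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    "Computes |x| through a compiled ONNX graph."
    return nxnp.abs(x)

# the decorated function behaves like a numpy function backed by ONNX
print(onnx_abs(numpy.array([[-2.0, 3.0]], dtype=numpy.float32)))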