Example #1
def _onnx_grad_loss_absolute_error(target_opset=None,
                                   dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for the function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert`
    (or :math:`Y = f(X1, X2) = \\lVert (X1 - X2)w \\rVert`
    if *weight_name* is not None) and for its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
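        # unweighted case: Y = sum(|X1 - X2|), Y_grad = sign(X1 - X2)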
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset, output_names=['Y_grad'])
    else:
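        # weighted case: reshape w into a column vector so it broadcasts
        # over the feature axis of X1 - X2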
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
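
A minimal usage sketch (not part of the source above): it assumes onnxcustom
and onnxruntime are installed and goes through the public entry point
function_onnx_graph('grad_loss_absolute_error'), which wraps the private
helper above (as its gdot snippet suggests), then checks both outputs
against their plain numpy definitions.

import numpy
from onnxruntime import InferenceSession
from onnxcustom.utils.onnx_function import function_onnx_graph

# build the loss + gradient graph for the absolute error
model_onnx = function_onnx_graph('grad_loss_absolute_error')
sess = InferenceSession(model_onnx.SerializeToString(),
                        providers=['CPUExecutionProvider'])

x1 = numpy.array([[0., 1.], [2., 3.]], dtype=numpy.float32)
x2 = numpy.array([[0.5, 0.5], [2.5, 2.5]], dtype=numpy.float32)
loss, grad = sess.run(None, {'X1': x1, 'X2': x2})

# Y = sum(|X1 - X2|) and Y_grad = sign(X1 - X2)
assert numpy.allclose(loss, numpy.abs(x1 - x2).sum())
assert numpy.allclose(grad, numpy.sign(x1 - x2))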
Example #2

import numpy
from skl2onnx.algebra.onnx_ops import OnnxReduceSum
from mlprodict.onnxrt import OnnxInference
# assertEqualArray comes from pyquickhelper's ExtTestCase; the wrapper
# class (name illustrative) is only here so the snippet runs standalone.
from pyquickhelper.pycode import ExtTestCase


class TestOnnxrtRuntimeReduceSum(ExtTestCase):

    def test_onnxt_runtime_reduce_sum(self):
        X = numpy.array([[2, 1], [0, 1]], dtype=float)

        # sum over all elements, dimensions dropped (keepdims=0)
        onx = OnnxReduceSum('X', output_names=['Y'], keepdims=0)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(numpy.sum(X), got['Y'], decimal=6)

        # sum along axis 1, reduced axis dropped
        onx = OnnxReduceSum('X', output_names=['Y'], axes=1)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(numpy.sum(X, axis=1).ravel(), got['Y'].ravel())

        # sum along axis 1, reduced axis kept with size 1
        onx = OnnxReduceSum('X', output_names=['Y'], axes=1, keepdims=1)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(
            numpy.sum(X, axis=1, keepdims=1).ravel(), got['Y'].ravel())
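
As a quick cross-check of what the three assertions expect, here are the
same reductions written directly in numpy (one detail worth keeping in
mind: starting with opset 13, ONNX ReduceSum takes axes as an input
rather than an attribute):

import numpy

X = numpy.array([[2, 1], [0, 1]], dtype=float)

print(numpy.sum(X))                         # keepdims=0 -> 4.0
print(numpy.sum(X, axis=1))                 # axes=1 -> [3. 1.]
print(numpy.sum(X, axis=1, keepdims=True))  # axes=1, keepdims=1 -> [[3.] [1.]]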
Example #3

import numpy as np
from skl2onnx.algebra.onnx_ops import (
    OnnxArrayFeatureExtractor, OnnxMul, OnnxReduceSum)
from skl2onnx.common.data_types import FloatTensorType


def nmf_to_onnx(W, H):
    """
    The function converts an NMF described by matrices
    *W*, *H* (*WH* approximates the training data *M*)
    into a function which takes two indices *(i, j)*
    and returns the prediction for them. It assumes
    these indices apply to the training data.
    """
    col = OnnxArrayFeatureExtractor(H, 'col')
    row = OnnxArrayFeatureExtractor(W.T, 'row')
    dot = OnnxMul(col, row)
    res = OnnxReduceSum(dot, output_names="rec")
    indices_type = np.array([0], dtype=np.int64)
    onx = res.to_onnx(inputs={'col': indices_type,
                              'row': indices_type},
                      outputs=[('rec', FloatTensorType((None, 1)))])
    return onx
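
A sketch of how the converter could be used end to end, assuming
scikit-learn and onnxruntime are available; the entry (i, j)
reconstructed by the ONNX graph should match the dot product
W[i, :] @ H[:, j]:

import numpy as np
from sklearn.decomposition import NMF
from onnxruntime import InferenceSession

mat = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]], dtype=np.float64)
mod = NMF(n_components=2, max_iter=500)
W = mod.fit_transform(mat).astype(np.float32)
H = mod.components_.astype(np.float32)

onx = nmf_to_onnx(W, H)
sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])

i, j = 0, 1  # reconstruct entry (0, 1) of the training matrix
rec = sess.run(None, {'row': np.array([i], dtype=np.int64),
                      'col': np.array([j], dtype=np.int64)})[0]
assert np.allclose(rec, W[i, :] @ H[:, j], atol=1e-4)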
Example #4
def _onnx_grad_loss_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01):
    """
    Returns the ONNX graph for the function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert +
    \\alpha \\lVert X1 - X2 \\rVert^2`
    (or :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert +
    \\alpha \\lVert (\\sqrt{w})(X1 - X2) \\rVert^2`
    if *weight_name* is not None) and for its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxAdd,
                                           OnnxIdentity, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(abs_diff,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset)
    diff_l2 = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      numpy.array([l2_weight], dtype=dtype),
                      op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient with respect to X1, matching the absolute-error example:
    # d/dX1 [l1 * |X1 - X2| + l2 * (X1 - X2)^2]
    #     = l1 * sign(X1 - X2) + 2 * l2 * (X1 - X2)
    grad_l1 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                      numpy.array([l1_weight], dtype=dtype),
                      op_version=target_opset)
    grad_l2 = OnnxMul(diff,
                      numpy.array([l2_weight * 2], dtype=dtype),
                      op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad,
                            op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(OnnxMul(score, resh, op_version=target_opset),
                            op_version=target_opset)
        res2 = OnnxMul(grad,
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
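
The same kind of sanity check for the elastic loss, again a sketch: it
assumes onnxcustom and onnxruntime are installed and that
function_onnx_graph forwards l1_weight and l2_weight to this helper. The
gradient check follows the with-respect-to-X1 convention used in the code
above; if the installed library differentiates with respect to X2
instead, the sign of Y_grad flips.

import numpy
from onnxruntime import InferenceSession
from onnxcustom.utils.onnx_function import function_onnx_graph

l1, l2 = 0.01, 0.01
model_onnx = function_onnx_graph('grad_loss_elastic_error',
                                 l1_weight=l1, l2_weight=l2)
sess = InferenceSession(model_onnx.SerializeToString(),
                        providers=['CPUExecutionProvider'])

x1 = numpy.array([[0., 1.], [2., 3.]], dtype=numpy.float32)
x2 = numpy.array([[0.5, 0.5], [2.5, 2.5]], dtype=numpy.float32)
loss, grad = sess.run(None, {'X1': x1, 'X2': x2})

d = x1 - x2
assert numpy.allclose(loss, (l1 * numpy.abs(d) + l2 * d ** 2).sum())
assert numpy.allclose(grad, l1 * numpy.sign(d) + 2 * l2 * d)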