Example #1
def _onnx_zero(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = X * 0`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('zero')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxMul
    res = OnnxMul('X',
                  numpy.array([0], dtype=dtype),
                  op_version=target_opset,
                  output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx
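A minimal usage sketch, assuming onnxruntime is installed; it runs the graph and checks that *Y* is identically zero:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_zero()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x = numpy.random.randn(3, 2).astype(numpy.float32)
y = sess.run(None, {'X': x})[0]
assert numpy.allclose(y, 0)  # Y = X * 0 whatever X contains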
Example #2
def _onnx_axpyw2(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`(Y, Z) = f(X1, X2, G, \\alpha, \\beta)`
    where :math:`Z = \\beta G + \\alpha X1` and
    :math:`Y = \\beta Z + \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpyw2')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    s1 = OnnxMul('X1', 'alpha', op_version=target_opset)
    s2 = OnnxMul('G', 'beta', op_version=target_opset)
    Z = OnnxAdd(s1, s2, op_version=target_opset, output_names=['Z'])
    s2_2 = OnnxMul(Z, 'beta', op_version=target_opset)
    s2_3 = OnnxAdd(s1, s2_2, op_version=target_opset)
    Y = OnnxAdd(s2_3, 'X2', op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()), ('G', var_type()),
             ('alpha', var_type([1])), ('beta', var_type([1]))]
    onx = Y.to_onnx(varsx,
                    outputs=[('Y', var_type()), ('Z', var_type())],
                    target_opset=target_opset,
                    other_outputs=[Z])
    return onx
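A quick numerical check of both formulas, assuming onnxruntime is installed; the outputs come back in the declared order *(Y, Z)*:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_axpyw2()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x1 = numpy.random.randn(4).astype(numpy.float32)
x2 = numpy.random.randn(4).astype(numpy.float32)
g = numpy.random.randn(4).astype(numpy.float32)
alpha = numpy.array([0.5], dtype=numpy.float32)
beta = numpy.array([0.9], dtype=numpy.float32)
y, z = sess.run(None, {'X1': x1, 'X2': x2, 'G': g,
                       'alpha': alpha, 'beta': beta})
assert numpy.allclose(z, beta * g + alpha * x1)
assert numpy.allclose(y, beta * z + alpha * x1 + x2)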
Example #3
    def test_grad_helper_mul(self):
        opv = opset
        xi = OnnxIdentity('X', op_version=opv)
        node = OnnxMul(xi, xi, op_version=opv, output_names=['Y'])
        onx = node.to_onnx({'X': FloatTensorType([None, 10])},
                           {'Y': FloatTensorType([None, 10])},
                           target_opset=opv)
        new_onx = onnx_derivative(onx)
        self.check_runtime(new_onx, 'test_grad_helper_mul')
Example #4
def pyod_iforest_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # In most cases, computation happens in floats,
    # but it might be in doubles. ONNX is very strict
    # about types: every constant must have the same
    # type as the input.
    dtype = guess_numpy_type(X.type)

    detector = op.detector_  # Should be IForest from scikit-learn.
    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
    scores = OnnxIdentity(lab_pred[1], op_version=opv)

    # labels
    threshold = op.threshold_
    above = OnnxLess(scores,
                     np.array([threshold], dtype=dtype),
                     op_version=opv)
    labels = OnnxCast(above,
                      op_version=opv,
                      to=onnx_proto.TensorProto.INT64,
                      output_names=out[:1])

    # probabilities
    train_scores = op.decision_scores_
    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype), op_version=opv)

    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
    scaled_centered = OnnxAdd(scaled,
                              scaler.min_.astype(dtype),
                              op_version=opv)
    clipped = OnnxClip(scaled_centered,
                       np.array([0], dtype=dtype),
                       np.array([1], dtype=dtype),
                       op_version=opv)
    clipped_ = OnnxAdd(OnnxMul(clipped,
                               np.array([-1], dtype=dtype),
                               op_version=opv),
                       np.array([1], dtype=dtype),
                       op_version=opv)

    scores_2d = OnnxConcat(clipped_,
                           clipped,
                           axis=1,
                           op_version=opv,
                           output_names=out[1:])

    labels.add_to(scope, container)
    scores_2d.add_to(scope, container)
Example #5
def _onnx_grad_loss_absolute_error(target_opset=None,
                                   dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert` or
    :math:`Y = f(X1, X2) = \\lVert (X1 - X2)w \\rVert` if
    *weight_name* is not None, and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset, output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
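A sketch checking both outputs against NumPy, assuming onnxruntime is installed; the loss is the sum of absolute errors and the gradient is the sign of the difference:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_grad_loss_absolute_error()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x1 = numpy.random.randn(4, 3).astype(numpy.float32)
x2 = numpy.random.randn(4, 3).astype(numpy.float32)
loss, grad = sess.run(None, {'X1': x1, 'X2': x2})
assert numpy.allclose(loss, numpy.abs(x1 - x2).sum())
assert numpy.allclose(grad, numpy.sign(x1 - x2))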
Example #6
def build_leaky_relu_decomposed_greater(alpha=0.5, target_opset=15):
    signo = OnnxGreater('X',
                        numpy.array([0], dtype=numpy.float32),
                        op_version=target_opset)
    sign = OnnxCast(signo, to=TensorProto.FLOAT, op_version=target_opset)
    fact = OnnxAdd(OnnxMul(sign,
                           numpy.array([1 - alpha], dtype=numpy.float32),
                           op_version=target_opset),
                   numpy.array([alpha], dtype=numpy.float32),
                   op_version=target_opset)
    x = OnnxMul('X', fact, op_version=target_opset, output_names=['Y'])
    return x.to_onnx({'X': FloatTensorType()},
                     outputs={'Y': FloatTensorType()},
                     target_opset=target_opset)
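A quick numerical check against a NumPy LeakyRelu, assuming the skl2onnx classes used above (`OnnxGreater`, `OnnxCast`, `OnnxAdd`, `OnnxMul`) are imported and onnxruntime is installed:

import numpy
from onnxruntime import InferenceSession

onx = build_leaky_relu_decomposed_greater(alpha=0.5)
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x = numpy.random.randn(5, 3).astype(numpy.float32)
# fact is 1 where X > 0 and alpha elsewhere, so X * fact is LeakyRelu.
expected = numpy.where(x > 0, x, 0.5 * x)
assert numpy.allclose(expected, sess.run(None, {'X': x})[0])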
Example #7
def build_ort_where_add(op_version=12):
    node = OnnxSub(
        OnnxMul('x', 'cond', op_version=op_version),
        OnnxMul('y',
                OnnxSub('cond', numpy.array([1], dtype=numpy.float32),
                        op_version=op_version),
                op_version=op_version),
        op_version=op_version, output_names=['z'])
    onx = node.to_onnx(inputs=[('cond', FloatTensorType()),
                               ('x', FloatTensorType()),
                               ('y', FloatTensorType())],
                       target_opset=op_version)
    sess = InferenceSession(onx.SerializeToString())
    return lambda cond, x, y: sess.run(None, {'cond': cond, 'x': x, 'y': y})
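A usage sketch: with a `{0, 1}` float mask, `x * cond - y * (cond - 1)` reproduces `numpy.where(cond > 0, x, y)`, assuming the imports used above are available:

import numpy

fct = build_ort_where_add()
cond = numpy.array([[1, 0], [0, 1]], dtype=numpy.float32)
x = numpy.ones((2, 2), dtype=numpy.float32)
y = numpy.full((2, 2), 5, dtype=numpy.float32)
res = fct(cond, x, y)[0]
assert numpy.allclose(res, numpy.where(cond > 0, x, y))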
Example #8
    def test_onnxruntime_bug(self):
        rnd = numpy.random.randn(3, 20, 20).astype(numpy.float32)
        bni = (numpy.random.random((20, 20)).astype(  # pylint: disable=E1101
            numpy.float32) >= 0.7).astype(numpy.float32)
        mul = rnd * bni
        isn = any(numpy.isnan(mul.ravel()))
        self.assertFalse(isn)

        node = OnnxMul('X', bni, output_names=['Y4'],
                       op_version=TARGET_OPSET)
        onx = node.to_onnx({'X': rnd})
        for rt in ['python', 'onnxruntime1']:
            with self.subTest(runtime=rt):
                oinf = OnnxInference(onx, runtime=rt)
                y = oinf.run({'X': rnd})['Y4']
                self.assertEqualArray(mul, y)
Example #9
def _onnx_axpy(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2, \\alpha) = \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpy')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    res = OnnxAdd(OnnxMul('X1', 'alpha', op_version=target_opset),
                  'X2',
                  op_version=target_opset,
                  output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()), ('alpha', var_type([1]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx
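The same kind of numerical check as above, assuming onnxruntime is installed:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_axpy()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x1 = numpy.random.randn(4).astype(numpy.float32)
x2 = numpy.random.randn(4).astype(numpy.float32)
alpha = numpy.array([0.5], dtype=numpy.float32)
y = sess.run(None, {'X1': x1, 'X2': x2, 'alpha': alpha})[0]
assert numpy.allclose(y, alpha * x1 + x2)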
Example #10
    def test_onnx_if_algebra_indirect_unnamed_clear_input(self):

        opv = TARGET_OPSET
        x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
        x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

        node_xy = OnnxMul('x1', 'x2', op_version=opv)
        node_then = OnnxAdd(
            'x1', 'xy', output_names=['absxythen'], op_version=opv)
        then_body = node_then.to_onnx(
            {'x1': x1, 'xy': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node_else = OnnxSub(
            'x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node_else.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])

        cond = OnnxGreater(
            OnnxReduceSum('x1', op_version=opv),
            OnnxReduceSum('x2', op_version=opv),
            op_version=opv)
        ifnode = OnnxIf(cond, then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv, output_names=['y'],
                        global_context={'xy': node_xy},
                        clear_subgraph_inputs=True)
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x1': x1, 'x2': x2})
        assert_almost_equal(x1 + x1 * x2, res[0])
Example #11
    def test_onnx_rename_names_type(self):
        rows = []

        def flog(*s):
            rows.append(" ".join(map(str, s)))

        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def = cop4.to_onnx({'X': x})
        oinf1 = OnnxInference(model_def)
        new_model = onnx_rename_names(model_def,
                                      verbose=1,
                                      fLOG=flog,
                                      strategy='type')
        total = "\n".join(rows)
        self.assertIn("'Ad_Addcst' -> 'i_05'", total)
        oinf2 = OnnxInference(new_model)
        y1 = oinf1.run({'X': x})
        y2 = oinf2.run({'X': x})
        self.assertEqualArray(y1['final'], y2['final'])
Example #12
def build_leaky_relu_decomposed(alpha=0.5, target_opset=15):
    signo = OnnxSign('X', op_version=target_opset)
    sign = OnnxDiv(OnnxAdd(signo,
                           numpy.array([1], dtype=numpy.float32),
                           op_version=target_opset),
                   numpy.array([2], dtype=numpy.float32),
                   op_version=target_opset)
    fact = OnnxAdd(OnnxMul(sign,
                           numpy.array([1 - alpha], dtype=numpy.float32),
                           op_version=target_opset),
                   numpy.array([alpha], dtype=numpy.float32),
                   op_version=target_opset)
    x = OnnxMul('X', fact, op_version=target_opset, output_names=['Y'])
    return x.to_onnx({'X': FloatTensorType()},
                     outputs={'Y': FloatTensorType()},
                     target_opset=target_opset)
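The same numerical check as in Example #6 applies here. The only difference shows at `X = 0`: `Sign` returns 0, so `fact` becomes `(1 + alpha) / 2` instead of `alpha`, but `Y = X * fact` is 0 either way.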
Example #13
def _onnx_update_penalty_elastic_error(target_opset=None,
                                       dtype=numpy.float32,
                                       l1=1e-4,
                                       l2=1e-4):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = W - 2 \\beta W - \\alpha sign(W)`
    where *l1* is :math:`\\alpha` and
    *l2* is :math:`\\beta`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'update_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxSign)

    res = OnnxSub(OnnxMul('X',
                          numpy.array([1 - 2 * l2], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(OnnxSign('X', op_version=target_opset),
                          numpy.array([l1], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset,
                  output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx
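A sketch verifying the update rule elementwise, assuming onnxruntime is installed:

import numpy
from onnxruntime import InferenceSession

l1, l2 = 1e-4, 1e-4
onx = _onnx_update_penalty_elastic_error(l1=l1, l2=l2)
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
w = numpy.random.randn(5).astype(numpy.float32)
got = sess.run(None, {'X': w})[0]
expected = w * (1 - 2 * l2) - l1 * numpy.sign(w)
assert numpy.allclose(expected, got, atol=1e-6)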
Example #14
def build_ort_op(op_version=14, save=None, slices=None):  # opset=13, 14, ...
    if slices is None:
        starts = numpy.array([1, 1], dtype=numpy.int64)
        ends = numpy.array([-1, -1], dtype=numpy.int64)
        axes = None
    else:
        starts, ends = slices
        if starts[0] is None:
            indexes = [i for i in range(len(starts)) if starts[i] is not None]
            starts = numpy.array([n for n in starts if n is not None],
                                 dtype=numpy.int64)
            ends = numpy.array([n for n in ends if n is not None],
                               dtype=numpy.int64)
            axes = numpy.array(indexes, dtype=numpy.int64)
        else:
            starts = numpy.array(starts, dtype=numpy.int64)
            ends = numpy.array(ends, dtype=numpy.int64)
            axes = None

    if axes is None:
        node1 = OnnxSlice('X', starts, ends, op_version=op_version)
    else:
        node1 = OnnxSlice('X', starts, ends, axes, op_version=op_version)
    node2 = OnnxAdd(node1,
                    numpy.array([1], dtype=numpy.float32),
                    op_version=op_version)
    if axes is None:
        node3 = OnnxSlice(node2, starts, ends, op_version=op_version)
    else:
        node3 = OnnxSlice(node2, starts, ends, axes, op_version=op_version)
    node4 = OnnxMul(node3,
                    numpy.array([2], dtype=numpy.float32),
                    op_version=op_version,
                    output_names=['Y'])
    onx = node4.to_onnx(inputs=[('X', FloatTensorType([None, None]))],
                        target_opset=op_version)
    return onx
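A usage sketch with the default slices; each `Slice` crops one element on every border of both axes:

import numpy
from onnxruntime import InferenceSession

onx = build_ort_op()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x = numpy.random.randn(5, 5).astype(numpy.float32)
got = sess.run(None, {'X': x})[0]
expected = (x[1:-1, 1:-1] + 1)[1:-1, 1:-1] * 2
assert numpy.allclose(expected, got)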
Example #15
    def test_onnx_remove_unused_outputs_new(self):
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def0 = cop4.to_onnx({'X': x})
        model_def = select_model_inputs_outputs(model_def0,
                                                "inter",
                                                infer_shapes=True,
                                                remove_unused=False)
        stats = onnx_statistics(model_def, optim=True)
        c1 = model_def.SerializeToString()
        new_model = select_model_inputs_outputs(model_def0,
                                                "inter",
                                                infer_shapes=True)
        c2 = model_def.SerializeToString()
        self.assertEqual(c1, c2)
        stats2 = onnx_statistics(model_def, optim=True)
        stats3 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats['ninits'], 2)
        self.assertEqual(stats2['ninits'], 2)
        self.assertEqual(stats3['ninits'], 1)
        self.assertEqual(stats2['nnodes'], 1)
        self.assertEqual(stats3['nnodes'], 1)
        oinf1 = OnnxInference(model_def)
        y1 = oinf1.run({'X': x})

        oinf2 = OnnxInference(new_model)
        y2 = oinf2.run({'X': x})
        self.assertNotIn('final', y1)
        self.assertNotIn('final', y2)
        self.assertIn('inter', y1)
        self.assertIn('inter', y2)
        self.assertEqualArray(y1['inter'], y2['inter'])
Example #16
    def nmf_to_onnx(W, H):
        """
        The function converts an NMF described by matrices
        *W* and *H* (*WH* approximates the training data *M*)
        into a function which takes two indices *(i, j)*
        and returns the prediction for them. It assumes
        these indices apply to the training data.
        """
        col = OnnxArrayFeatureExtractor(H, 'col')
        row = OnnxArrayFeatureExtractor(W.T, 'row')
        dot = OnnxMul(col, row)
        res = OnnxReduceSum(dot, output_names="rec")
        indices_type = np.array([0], dtype=np.int64)
        onx = res.to_onnx(inputs={'col': indices_type,
                                  'row': indices_type},
                          outputs=[('rec', FloatTensorType((None, 1)))])
        return onx
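A usage sketch with a random factorization, assuming the skl2onnx classes used above are imported; the graph predicts entry *(i, j)* of *WH*:

import numpy as np
from onnxruntime import InferenceSession

W = np.random.rand(4, 2).astype(np.float32)
H = np.random.rand(2, 5).astype(np.float32)
sess = InferenceSession(nmf_to_onnx(W, H).SerializeToString(),
                        providers=["CPUExecutionProvider"])
pred = sess.run(None, {'row': np.array([1], dtype=np.int64),
                       'col': np.array([3], dtype=np.int64)})[0]
assert np.allclose(pred, (W @ H)[1, 3], atol=1e-5)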
Example #17
    def test_onnx_remove_two_outputs(self):
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       output_names=['keep'],
                       op_version=get_opset_number_from_onnx())
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=get_opset_number_from_onnx())
        cop4 = OnnxSub(OnnxMul(cop,
                               cop3,
                               op_version=get_opset_number_from_onnx()),
                       cop2,
                       output_names=['final'],
                       op_version=get_opset_number_from_onnx())
        model_def = cop4.to_onnx({'X': x},
                                 outputs=[('keep', FloatTensorType([None, 2])),
                                          ('final', FloatTensorType([None,
                                                                     2]))])
        c1 = model_def.SerializeToString()
        self.assertEqual(len(model_def.graph.output), 2)
        c2 = model_def.SerializeToString()
        self.assertEqual(c1, c2)
        stats = onnx_statistics(model_def, optim=True)
        new_model = onnx_remove_node_redundant(model_def, max_hash_size=10)
        stats2 = onnx_statistics(model_def, optim=True)
        stats3 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats['ninits'], 2)
        self.assertEqual(stats2['ninits'], 2)
        self.assertEqual(stats3['ninits'], 2)
        self.assertEqual(stats2['nnodes'], 6)
        self.assertEqual(stats3['nnodes'], 6)
        oinf1 = OnnxInference(model_def)
        y1 = oinf1.run({'X': x})

        oinf2 = OnnxInference(new_model)
        y2 = oinf2.run({'X': x})
        self.assertEqualArray(y1['final'], y2['final'])
        self.assertEqualArray(y1['keep'], y2['keep'])
Example #18
        def conv(scope, operator, container):
            X = operator.inputs[0]
            out = operator.outputs
            op = operator.raw_operator
            dtype = guess_numpy_type(X.type)

            C = op.cluster_centers_
            C2 = row_norms(C, squared=True).astype(dtype)
            C = C.astype(dtype)

            rs = OnnxReduceSumSquare(X,
                                     axes=[1],
                                     keepdims=1,
                                     op_version=container.target_opset)

            N = X.type.shape[0]
            if isinstance(N, int):
                zeros = np.zeros((N, ))
            else:
                zeros = OnnxMul(rs,
                                np.array([0], dtype=np.float32),
                                op_version=container.target_opset)

            z = OnnxAdd(rs,
                        OnnxGemm(X,
                                 C,
                                 zeros,
                                 alpha=-2.,
                                 transB=1,
                                 op_version=container.target_opset),
                        op_version=container.target_opset)
            y2 = OnnxAdd(C2, z, op_version=container.target_opset)
            lo = OnnxArgMin(y2,
                            axis=1,
                            keepdims=0,
                            output_names=out[:1],
                            op_version=container.target_opset)
            y2s = OnnxSqrt(y2,
                           output_names=out[1:],
                           op_version=container.target_opset)

            lo.add_to(scope, container)
            y2s.add_to(scope, container)
Example #19
    def test_onnx_rename_names_exc(self):
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def = cop4.to_onnx({'X': x})
        self.assertRaise(lambda: onnx_rename_names(model_def, strategy="none"),
                         ValueError)
Example #20
def _onnx_grad_square_error(target_opset=None,
                            dtype=numpy.float32,
                            weight_name=None):
    """
    Returns the ONNX graph for the gradient of function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2 w` if
    *weight_name* is not None.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxMul, OnnxReshape
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxMul(diff,
                      numpy.array([-2], dtype=dtype),
                      op_version=target_opset,
                      output_names=['Y_grad'])
    else:
        res = OnnxMul(OnnxMul(diff,
                              numpy.array([-2], dtype=dtype),
                              op_version=target_opset),
                      OnnxReshape(weight_name,
                                  numpy.array([-1, 1], dtype=numpy.int64),
                                  op_version=target_opset),
                      op_version=target_opset,
                      output_names=['Y_grad'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y_grad', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
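A sketch checking the output numerically; `-2 (X1 - X2)` matches the gradient of the squared error with respect to *X2*:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_grad_square_error()
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
x1 = numpy.random.randn(4, 3).astype(numpy.float32)
x2 = numpy.random.randn(4, 3).astype(numpy.float32)
grad = sess.run(None, {'X1': x1, 'X2': x2})[0]
assert numpy.allclose(grad, -2 * (x1 - x2))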
Example #21
    def test_prepare_c_profiling(self):
        opset = TestOrt.opset
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X', numpy.array([1], dtype=dtype), op_version=opset)
        cop2 = OnnxAdd('X', numpy.array([1], dtype=dtype), op_version=opset)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=opset,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=opset),
                       cop2,
                       output_names=['final'],
                       op_version=opset)
        model_def = cop4.to_onnx({'X': x}, target_opset=opset)

        temp = get_temp_folder(__file__, "temp_prepare_c_profiling")
        cmd = prepare_c_profiling(model_def, [x], dest=temp)
        self.assertStartsWith("onnx", cmd)
        self.assertExists(os.path.join(temp, "model.onnx"))
        self.assertExists(os.path.join(temp, "test_data_set_0", "input_0.pb"))
        self.assertExists(os.path.join(temp, "test_data_set_0", "output_0.pb"))
Example #22
    def test_onnx_remove_redundant(self):
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def = cop4.to_onnx({'X': x})
        stats = onnx_statistics(model_def, optim=True)
        c1 = model_def.SerializeToString()
        new_model = onnx_remove_node_redundant(model_def, max_hash_size=10)
        c2 = model_def.SerializeToString()
        self.assertEqual(c1, c2)
        stats2 = onnx_statistics(model_def, optim=True)
        stats3 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats['ninits'], 2)
        self.assertEqual(stats2['ninits'], 2)
        self.assertEqual(stats3['ninits'], 2)
        self.assertEqual(stats2['nnodes'], 6)
        self.assertEqual(stats3['nnodes'], 6)
        oinf1 = OnnxInference(model_def)
        y1 = oinf1.run({'X': x})

        oinf2 = OnnxInference(new_model)
        y2 = oinf2.run({'X': x})
        self.assertEqualArray(y1['final'], y2['final'])
Example #23
    def test_enumerate_model_node_outputs(self):
        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def = cop4.to_onnx({'X': x})
        nodes1 = list(enumerate_model_node_outputs(model_def))
        nodes2 = list(enumerate_model_node_outputs(model_def, order=True))
        self.assertEqual(list(sorted(nodes1)), list(sorted(nodes2)))
        expected = ['Ad_Addcst2', 'Ad_C0', 'inter', 'Ad_C02', 'Mu_C0', 'final']
        self.assertEqual(nodes2, expected)
Example #24
    def get_onnx_mul(self):
        mul = OnnxMul('X', 'X', output_names=['Y'])
        onx = mul.to_onnx(inputs=[('X', FloatTensorType())])
        return onx.SerializeToString()
Example #25
def build_ort_op(op_version=14, save=None, **kwargs):  # opset=13, 14, ...
    slices = kwargs['slices']
    slice1, slice2 = slices
    slice1 = slice(0, None) if slice1 is None else slice(*slice1)
    slice2 = slice(0, None) if slice2 is None else slice(*slice2)

    axes = []
    starts = []
    ends = []
    for i in [0, 1]:
        if slices[i] is None:
            continue
        axes.append(i)
        starts.append(slices[i][0])
        ends.append(slices[i][1])
    starts = numpy.array(starts, dtype=numpy.int64)
    ends = numpy.array(ends, dtype=numpy.int64)
    axes = numpy.array(axes, dtype=numpy.int64)
    node1 = OnnxSlice('X', starts, ends, axes, op_version=op_version)
    node2 = OnnxAdd(node1,
                    numpy.array([1], dtype=numpy.float32),
                    op_version=op_version)
    node3 = OnnxSlice(node2, starts, ends, axes, op_version=op_version)
    node4 = OnnxMul(node3,
                    numpy.array([2], dtype=numpy.float32),
                    op_version=op_version,
                    output_names=['Y'])
    onx = node4.to_onnx(inputs=[('X', FloatTensorType([None, None]))],
                        target_opset=op_version)
    sess = InferenceSession(onx.SerializeToString(),
                            providers=["CPUExecutionProvider"])
    if save is not None:
        with open(save, "wb") as f:
            f.write(onx.SerializeToString())

    def npy_fct(x):
        return ((x[slice1, slice2] + 1)[slice1, slice2] * 2).copy()

    rnd = numpy.random.randn(10, 10).astype(numpy.float32)
    expected = npy_fct(rnd)
    got = sess.run(None, {'X': rnd})[0]
    try:
        assert_almost_equal(expected, got)
    except AssertionError as e:
        raise AssertionError("kwargs=%r slice1=%r slice2=%r shapes=%r ? %r "
                             "(x[slice1, slice2].shape)=%r" %
                             (kwargs, slice1, slice2, expected.shape,
                              got.shape, rnd[slice1, slice2].shape)) from e

    if get_device().upper() == 'GPU':
        sessg = InferenceSession(onx.SerializeToString(),
                                 providers=["CUDAExecutionProvider"])
        io_binding = sessg.io_binding()._iobinding
        device = get_ort_device('cuda:0')

        def run_gpu(x):
            io_binding.bind_input('X', device, numpy.float32, x.shape(),
                                  x.data_ptr())
            io_binding.bind_output('Y', device)
            return sessg._sess.run_with_iobinding(io_binding, None)

        return onx, lambda x: sess.run(None, {'X': x}), npy_fct, run_gpu
    else:
        return onx, lambda x: sess.run(None, {'X': x}), npy_fct, None
Example #26
def live_decorrelate_transformer_converter(scope, operator, container):
    # shortcuts
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # We guess its type. If the operator ingests float (or double),
    # it outputs float (or double).
    proto_dtype = guess_proto_type(X.type)
    dtype = guess_numpy_type(X.type)

    # Lines in comment specify the numpy computation
    # the ONNX code implements.
    # mean_ = numpy.mean(X, axis=0, keepdims=True)
    mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)

    # This is a trick I often use. The converter automatically
    # chooses a name for every output. In a big graph,
    # it is difficult to know which operator produces which output.
    # This line tells every node to prefix its outputs with this string.
    # It also applies to all input nodes unless this method
    # was called for one of these nodes.
    mean.set_onnx_name_prefix('mean')

    # X2 = X - mean_
    X2 = OnnxSub(X, mean, op_version=opv)

    # V = X2.T @ X2 / X2.shape[0]
    N = OnnxGatherElements(OnnxShape(X, op_version=opv),
                           numpy.array([0], dtype=numpy.int64),
                           op_version=opv)
    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)

    # Every output involved in N and Nf is prefixed by 'N'.
    Nf.set_onnx_name_prefix('N')

    V = OnnxDiv(OnnxMatMul(OnnxTranspose(X2, op_version=opv),
                           X2,
                           op_version=opv),
                Nf,
                op_version=opv)
    V.set_onnx_name_prefix('V1')

    # V += numpy.identity(V.shape[0]) * self.alpha
    V = OnnxAdd(V,
                op.alpha * numpy.identity(op.nf_, dtype=dtype),
                op_version=opv)
    V.set_onnx_name_prefix('V2')

    # L, P = numpy.linalg.eig(V)
    LP = OnnxEig(V, eigv=True, op_version=opv)
    LP.set_onnx_name_prefix('LP')

    # Linv = L ** (-0.5)
    # The notation LP[0] means OnnxPow takes the first output
    # of operator OnnxEig; LP[1] would mean the second one.
    # LP alone is not allowed as it is ambiguous.
    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype), op_version=opv)
    Linv.set_onnx_name_prefix('Linv')

    # diag = numpy.diag(Linv)
    diag = OnnxMul(OnnxEyeLike(numpy.zeros((op.nf_, op.nf_),
                                           dtype=numpy.int64),
                               k=0,
                               op_version=opv),
                   Linv,
                   op_version=opv)
    diag.set_onnx_name_prefix('diag')

    # root = P @ diag @ P.transpose()
    trv = OnnxTranspose(LP[1], op_version=opv)
    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
    coef_left.set_onnx_name_prefix('coef_left')
    coef = OnnxMatMul(coef_left, trv, op_version=opv)
    coef.set_onnx_name_prefix('coef')

    # Same part as before.
    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
    Y.set_onnx_name_prefix('Y')

    # The last line specifies the final output.
    # Every node involved in the computation is added to the ONNX
    # graph at this stage.
    Y.add_to(scope, container)
Example #27
def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5,
                                          weight_name=None):
    """
    The function the raw scores from a classifier, uses the
    sigmoid function to compute probabilities, then the log function
    to compute the loss. It creates the ONNX graph for this function
    and the associated gradient of the loss against the raw scores.

    Probabilites (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes): :math:`L(y, s) = (1 - y)\\log(1 - p(s)) +
    y \\log(p(s))`.
    Gradient :math:`\\frac{dL(y, s)}{ds} = y - p(s)`.
    To avoid nan values, probabilies are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param eps: to clip probabilities and avoid computing `log(0)`

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxSigmoid,
                                           OnnxLog, OnnxNeg, OnnxReduceSum,
                                           OnnxReshape, OnnxAdd, OnnxCast,
                                           OnnxClip)

    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c,
                  numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1, op_version=target_opset)
    y1 = OnnxCast('X1',
                  to=NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)],
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1, op_version=target_opset)
    loss_obs = OnnxAdd(OnnxMul(y0,
                               OnnxLog(p0, op_version=target_opset),
                               op_version=target_opset),
                       OnnxMul(y1,
                               OnnxLog(p1, op_version=target_opset),
                               op_version=target_opset),
                       op_version=target_opset)

    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)
    if weight_name is None:
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1,
                       y1,
                       op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        loss = OnnxReduceSum(OnnxMul(loss_neg,
                                     OnnxReshape(weight_name,
                                                 numpy.array(
                                                     [-1, 1],
                                                     dtype=numpy.int64),
                                                 op_version=target_opset),
                                     op_version=target_opset),
                             op_version=target_opset)
        grad = OnnxMul(OnnxSub(p1, y1, op_version=target_opset),
                       OnnxReshape(weight_name,
                                   numpy.array([-1, 1], dtype=numpy.int64),
                                   op_version=target_opset),
                       output_names=['Y_grad'],
                       op_version=target_opset)

    res = OnnxReshape(loss,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[grad])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
Example #28
def _onnx_n_penalty_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01,
                                  n_tensors=1,
                                  loss_shape=(1, 1)):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.
    It computes this for *n_tensors* tensors and adds all
    the results to an input loss.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'n_penalty_elastic_error', n_tensors=2)
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxAbs, OnnxReshape)

    if n_tensors <= 0:
        raise ValueError(  # pragma: no cover
            "This function is useless if the number of tensors is null.")

    var_type = dtype_to_var_type(dtype)
    varsx = [('loss', var_type(loss_shape))]
    names = ['loss']
    for n in range(n_tensors):
        name = 'W%d' % n
        abs_diff = OnnxAbs(name, op_version=target_opset)
        res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
        res_l2 = OnnxReduceSumSquare(name, op_version=target_opset)
        res = OnnxAdd(OnnxMul(res_l1,
                              numpy.array([l1_weight], dtype=dtype),
                              op_version=target_opset),
                      OnnxMul(res_l2,
                              numpy.array([l2_weight], dtype=dtype),
                              op_version=target_opset),
                      op_version=target_opset)
        names.append(res)
        varsx.append(('W%d' % n, var_type()))

    if len(names) == 2:
        res = OnnxAdd(*names, op_version=target_opset)
    else:
        res = OnnxAdd(names[1], names[2], op_version=target_opset)
        for i in range(3, len(names)):
            res = OnnxAdd(res, names[i], op_version=target_opset)
        res = OnnxAdd(names[0], res, op_version=target_opset)

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None]))],
                      target_opset=target_opset)
    return onx
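A sketch checking the accumulation for two penalized tensors, assuming onnxruntime is installed:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_n_penalty_elastic_error(n_tensors=2)
sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
loss = numpy.array([[0.5]], dtype=numpy.float32)
w0 = numpy.random.randn(3).astype(numpy.float32)
w1 = numpy.random.randn(3).astype(numpy.float32)
got = sess.run(None, {'loss': loss, 'W0': w0, 'W1': w1})[0]

def penalty(w, l1=0.01, l2=0.01):
    return l1 * numpy.abs(w).sum() + l2 * (w ** 2).sum()

expected = loss.ravel() + penalty(w0) + penalty(w1)
assert numpy.allclose(expected, got, atol=1e-6)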
Example #29
def _onnx_grad_penalty_elastic_error(target_opset=None,
                                     dtype=numpy.float32,
                                     l1_weight=0.01,
                                     l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxSign, OnnxAbs, OnnxReshape)
    diff = 'X'
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
    res2_l1 = OnnxSign(diff, op_version=target_opset)
    res_l2 = OnnxReduceSumSquare(diff, op_version=target_opset)
    res2_l2 = diff

    res = OnnxAdd(OnnxMul(res_l1,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(res_l2,
                          numpy.array([l2_weight], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset)
    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    res2 = OnnxAdd(OnnxMul(res2_l1,
                           numpy.array([l1_weight], dtype=dtype),
                           op_version=target_opset),
                   OnnxMul(res2_l2,
                           numpy.array([l2_weight * 2], dtype=dtype),
                           op_version=target_opset),
                   op_version=target_opset,
                   output_names=['Y_grad'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None])),
                               ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    return onx
Example #30
def _onnx_grad_loss_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert +
    \\alpha \\lVert X1 - X2 \\rVert^2` or
    :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert +
    \\alpha \\lVert (\\sqrt{w})(X1 - X2) \\rVert^2` if
    *weight_name* is not None, and its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxAdd,
                                           OnnxIdentity, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(abs_diff,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset)
    diff_l2 = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      numpy.array([l2_weight], dtype=dtype),
                      op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient
    grad_l1 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                      numpy.array([l1_weight], dtype=dtype),
                      op_version=target_opset)
    grad_l2 = OnnxMul(diff,
                      numpy.array([l2_weight * -2], dtype=dtype),
                      op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad,
                            op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(OnnxMul(score, resh, op_version=target_opset),
                            op_version=target_opset)
        res2 = OnnxMul(grad,
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx