import numpy as np
from skl2onnx.algebra.onnx_ops import (
    OnnxArrayFeatureExtractor, OnnxMul, OnnxReduceSum)
from skl2onnx.common.data_types import FloatTensorType


def nmf_to_onnx(W, H):
    """
    Converts a NMF described by matrices *W*, *H*
    (*WH* approximates the training data *M*)
    into an ONNX function which takes two indices *(i, j)*
    and returns the prediction for them. It assumes
    these indices apply to the training data.
    """
    col = OnnxArrayFeatureExtractor(H, 'col')
    row = OnnxArrayFeatureExtractor(W.T, 'row')
    dot = OnnxMul(col, row)
    res = OnnxReduceSum(dot, output_names=['rec'])
    indices_type = np.array([0], dtype=np.int64)
    onx = res.to_onnx(inputs={'col': indices_type,
                              'row': indices_type},
                      outputs=[('rec', FloatTensorType((None, 1)))])
    return onx
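A minimal usage sketch (assuming scikit-learn and onnxruntime are installed; the small matrix *M* and the choice of two components are made up for illustration). The returned model predicts one cell of *WH* from a pair of int64 indices:

import numpy as np
from sklearn.decomposition import NMF
from onnxruntime import InferenceSession

# Toy training matrix, invented for the example.
M = np.array([[1, 0, 0, 4], [2, 0, 2, 0], [0, 1, 3, 0]], dtype=np.float32)
nmf = NMF(n_components=2, init='nndsvd', max_iter=500)
W = nmf.fit_transform(M).astype(np.float32)
H = nmf.components_.astype(np.float32)

onx = nmf_to_onnx(W, H)
sess = InferenceSession(onx.SerializeToString())
# 'row' selects a row of W, 'col' a column of H; both are int64 arrays.
pred = sess.run(None, {'row': np.array([0], dtype=np.int64),
                       'col': np.array([3], dtype=np.int64)})
print(pred[0])  # should approximate M[0, 3]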
Example #2
    def test_onnx_if_algebra_indirect_unnamed_clear_input_recursive(self):

        opv = TARGET_OPSET
        x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
        x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

        node_xy = OnnxMul('x1', 'x2', op_version=opv)
        node_then = OnnxAdd(
            'x1', 'xy', output_names=['absxythen'], op_version=opv)
        then_body = node_then.to_onnx(
            {'x1': x1, 'xy': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node_else = OnnxSub(
            'x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node_else.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])

        cond = OnnxGreater(
            OnnxReduceSum('x1', op_version=opv),
            OnnxReduceSum('x2', op_version=opv),
            op_version=opv)
        ifnode = OnnxIf(cond, then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv, output_names=['yt'],
                        clear_subgraph_inputs=True)
        subgraph = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('yt', FloatTensorType())])

        cond2 = OnnxGreater(
            OnnxReduceMin('x1', op_version=opv),
            OnnxReduceMin('x2', op_version=opv),
            op_version=opv)
        ifnode2 = OnnxIf(cond2, then_branch=then_body.graph,
                         else_branch=subgraph.graph,
                         op_version=opv, output_names=['y'],
                         global_context={'xy': node_xy},
                         clear_subgraph_inputs=True)
        model_def = ifnode2.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x1': x1, 'x2': x2})
        assert_almost_equal(x1 + x1 * x2, res[0])
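In this test the outer condition compares minima: min(x1) = min(x2) = 0, so cond2 is false and the else branch, the inner If subgraph, runs. There, sum(x1) = 10 > sum(x2) = 3, so the inner then branch computes x1 + xy, with xy = x1 * x2 supplied through *global_context*, hence the expected result x1 + x1 * x2.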
Example #3
    def test_onnx_simple_text_plot_if(self):

        opv = TARGET_OPSET
        x1 = numpy.array([[0, 3], [7, 0]], dtype=numpy.float32)
        x2 = numpy.array([[1, 0], [2, 0]], dtype=numpy.float32)

        node = OnnxAdd('x1', 'x2', output_names=['absxythen'], op_version=opv)
        then_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node = OnnxSub('x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])
        del else_body.graph.input[:]
        del then_body.graph.input[:]

        cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                           OnnxReduceSum('x2', op_version=opv),
                           op_version=opv)
        ifnode = OnnxIf(cond,
                        then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv,
                        output_names=['y'])
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])
        text = onnx_simple_text_plot(model_def)
        expected = textwrap.dedent("""
        input:
        """).strip(" \n")
        self.assertIn(expected, text)
        self.assertIn("If(Gr_C0) -> y", text)
        oinf = OnnxInference(model_def)
        text2 = oinf.to_text(kind="seq")
        self.assertEqual(text, text2)
Example #4
    def test_onnx_if_algebra_direct(self):

        opv = TARGET_OPSET
        x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
        x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

        node = OnnxAdd('x1', 'x2', output_names=['absxythen'], op_version=opv)
        then_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node = OnnxSub('x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])
        del else_body.graph.input[:]
        del then_body.graph.input[:]

        cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                           OnnxReduceSum('x2', op_version=opv),
                           op_version=opv)
        ifnode = OnnxIf(cond,
                        then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv,
                        output_names=['y'])
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x1': x1, 'x2': x2})
        assert_almost_equal(x1 + x2, res[0])
Example #5
    def test_onnx_if_to_dot2(self):
        opv = TARGET_OPSET
        x1 = numpy.array([[0, 3], [7, 0]], dtype=numpy.float32)
        x2 = numpy.array([[1, 0], [2, 0]], dtype=numpy.float32)

        node = OnnxAdd('x1', 'x2', output_names=['absxythen'], op_version=opv)
        then_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node = OnnxSub('x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])
        del else_body.graph.input[:]
        del then_body.graph.input[:]

        cond = OnnxGreater(OnnxReduceSum('x1', op_version=opv),
                           OnnxReduceSum('x2', op_version=opv),
                           op_version=opv)
        ifnode = OnnxIf(cond,
                        then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv,
                        output_names=['y'])
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])
        dot = OnnxInference(model_def).to_dot(recursive=True)
        self.assertIn('[lhead=cluster_If', dot)
Example #6
def _onnx_square_error(target_opset=None,
                       dtype=numpy.float32,
                       weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2 w` if
    *weight_name* is not None

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxReduceSumSquare,
                                           OnnxReshape, OnnxReduceSum, OnnxMul)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSumSquare(diff, op_version=target_opset)
    else:
        mul = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      OnnxReshape(weight_name,
                                  numpy.array([-1, 1], dtype=numpy.int64),
                                  op_version=target_opset),
                      op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
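A quick way to validate the generated graph (a sketch, assuming onnxruntime is installed) is to compare its output against the same computation in numpy:

import numpy
from onnxruntime import InferenceSession

onx = _onnx_square_error()
sess = InferenceSession(onx.SerializeToString())
x1 = numpy.random.randn(4, 3).astype(numpy.float32)
x2 = numpy.random.randn(4, 3).astype(numpy.float32)
got = sess.run(None, {'X1': x1, 'X2': x2})[0]
expected = ((x1 - x2) ** 2).sum()  # ||X1 - X2||^2
assert abs(got[0] - expected) < 1e-4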
Example #7
    def test_onnxt_runtime_reduce_sum(self):
        X = numpy.array([[2, 1], [0, 1]], dtype=float)

        onx = OnnxReduceSum('X', output_names=['Y'], keepdims=0)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(numpy.sum(X), got['Y'], decimal=6)

        onx = OnnxReduceSum('X', output_names=['Y'], axes=1)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(numpy.sum(X, axis=1).ravel(), got['Y'].ravel())

        onx = OnnxReduceSum('X', output_names=['Y'], axes=1, keepdims=1)
        model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': X})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(
            numpy.sum(X, axis=1, keepdims=1).ravel(), got['Y'].ravel())
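Note: starting with opset 13, the ONNX ReduceSum operator takes *axes* as an optional input rather than an attribute, so whether the keyword form used above remains valid depends on the opset targeted by the generated OnnxReduceSum class.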
Example #8
def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5,
                                          weight_name=None):
    """
    The function takes the raw scores from a classifier, applies the
    sigmoid function to compute probabilities, then the log function
    to compute the loss. It creates the ONNX graph for this function
    and the associated gradient of the loss against the raw scores.

    Probabilities (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes): :math:`L(y, s) = -(1 - y)\\log(1 - p(s)) -
    y \\log(p(s))`.
    Gradient :math:`\\frac{dL(y, s)}{ds} = p(s) - y`.
    To avoid nan values, probabilities are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param eps: to clip probabilities and avoid computing `log(0)`

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxSigmoid,
                                           OnnxLog, OnnxNeg, OnnxReduceSum,
                                           OnnxReshape, OnnxAdd, OnnxCast,
                                           OnnxClip)

    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c,
                  numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1, op_version=target_opset)
    y1 = OnnxCast('X1',
                  to=NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)],
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1, op_version=target_opset)
    loss_obs = OnnxAdd(OnnxMul(y0,
                               OnnxLog(p0, op_version=target_opset),
                               op_version=target_opset),
                       OnnxMul(y1,
                               OnnxLog(p1, op_version=target_opset),
                               op_version=target_opset),
                       op_version=target_opset)

    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)
    if weight_name is None:
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1,
                       y1,
                       op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        loss = OnnxReduceSum(OnnxMul(loss_neg,
                                     OnnxReshape(weight_name,
                                                 numpy.array(
                                                     [-1, 1],
                                                     dtype=numpy.int64),
                                                 op_version=target_opset),
                                     op_version=target_opset),
                             op_version=target_opset)
        grad = OnnxMul(OnnxSub(p1, y1, op_version=target_opset),
                       OnnxReshape(weight_name,
                                   numpy.array([-1, 1], dtype=numpy.int64),
                                   op_version=target_opset),
                       output_names=['Y_grad'],
                       op_version=target_opset)

    res = OnnxReshape(loss,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[grad])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
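The two outputs can be checked against a direct numpy transcription of the formulas above (a sketch, assuming onnxruntime; the labels and scores below are made up):

import numpy
from onnxruntime import InferenceSession

onx = _onnx_grad_sigmoid_neg_log_loss_error()
sess = InferenceSession(onx.SerializeToString())
y = numpy.array([[0], [1], [1]], dtype=numpy.int64)
s = numpy.array([[-0.5], [0.3], [2.0]], dtype=numpy.float32)
loss, grad = sess.run(None, {'X1': y, 'X2': s})

p = numpy.clip(1 / (1 + numpy.exp(-s)), 1e-5, 1 - 1e-5)
expected_loss = -((1 - y) * numpy.log(1 - p) + y * numpy.log(p)).sum()
expected_grad = p - y  # gradient of the loss against the raw scores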
Example #9
def _onnx_n_penalty_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01,
                                  n_tensors=1,
                                  loss_shape=(1, 1)):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.
    It computes this penalty for *n_tensors* tensors and adds all
    the results to an input *loss*.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'n_penalty_elastic_error', n_tensors=2)
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxAbs, OnnxReshape)

    if n_tensors <= 0:
        raise ValueError(  # pragma: no cover
            "This function is useless if the number of tensors is null.")

    var_type = dtype_to_var_type(dtype)
    varsx = [('loss', var_type(loss_shape))]
    names = ['loss']
    for n in range(n_tensors):
        name = 'W%d' % n
        abs_diff = OnnxAbs(name, op_version=target_opset)
        res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
        # res2_l1 = OnnxSign(diff, op_version=target_opset)
        res_l2 = OnnxReduceSumSquare(name, op_version=target_opset)
        # res2_l2 = diff
        res = OnnxAdd(OnnxMul(res_l1,
                              numpy.array([l1_weight], dtype=dtype),
                              op_version=target_opset),
                      OnnxMul(res_l2,
                              numpy.array([l2_weight], dtype=dtype),
                              op_version=target_opset),
                      op_version=target_opset)
        names.append(res)
        varsx.append(('W%d' % n, var_type()))

    if len(names) == 2:
        res = OnnxAdd(*names, op_version=target_opset)
    else:
        res = OnnxAdd(names[1], names[2], op_version=target_opset)
        for i in range(3, len(names)):
            res = OnnxAdd(res, names[i], op_version=target_opset)
        res = OnnxAdd(names[0], res, op_version=target_opset)

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None]))],
                      target_opset=target_opset)
    return onx
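A sketch of how this graph behaves with two weight tensors (assuming onnxruntime; the input values are invented):

import numpy
from onnxruntime import InferenceSession

def penalty(w, l1=0.01, l2=0.01):
    # numpy version of the per-tensor term built in the loop above
    return l1 * numpy.abs(w).sum() + l2 * (w ** 2).sum()

onx = _onnx_n_penalty_elastic_error(n_tensors=2)
sess = InferenceSession(onx.SerializeToString())
loss = numpy.array([[0.5]], dtype=numpy.float32)
w0 = numpy.array([[1., -2.]], dtype=numpy.float32)
w1 = numpy.array([[3., 0.5]], dtype=numpy.float32)
got = sess.run(None, {'loss': loss, 'W0': w0, 'W1': w1})[0]
# got ~ loss + penalty(w0) + penalty(w1), returned as a 1D tensor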
Example #10
def _onnx_grad_penalty_elastic_error(target_opset=None,
                                     dtype=numpy.float32,
                                     l1_weight=0.01,
                                     l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMul, OnnxAdd,
                                           OnnxReduceSumSquare, OnnxReduceSum,
                                           OnnxSign, OnnxAbs, OnnxReshape)
    diff = 'X'
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
    res2_l1 = OnnxSign(diff, op_version=target_opset)
    res_l2 = OnnxReduceSumSquare(diff, op_version=target_opset)
    res2_l2 = diff

    res = OnnxAdd(OnnxMul(res_l1,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(res_l2,
                          numpy.array([l2_weight], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset)
    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    res2 = OnnxAdd(OnnxMul(res2_l1,
                           numpy.array([l1_weight], dtype=dtype),
                           op_version=target_opset),
                   OnnxMul(res2_l2,
                           numpy.array([l2_weight * (2)], dtype=dtype),
                           op_version=target_opset),
                   op_version=target_opset,
                   output_names=['Y_grad'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type([None])),
                               ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    return onx
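Both outputs can again be verified in numpy (a sketch, assuming onnxruntime): *Y* is the penalty, *Y_grad* its derivative with respect to *X*.

import numpy
from onnxruntime import InferenceSession

onx = _onnx_grad_penalty_elastic_error()
sess = InferenceSession(onx.SerializeToString())
x = numpy.array([[1., -2.], [0.5, 0.]], dtype=numpy.float32)
y, y_grad = sess.run(None, {'X': x})

expected_y = 0.01 * numpy.abs(x).sum() + 0.01 * (x ** 2).sum()
expected_grad = 0.01 * numpy.sign(x) + 2 * 0.01 * x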
Example #11
def _onnx_grad_loss_elastic_error(target_opset=None,
                                  dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01,
                                  l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert +
    \\alpha \\lVert X1 - X2 \\rVert^2` or
    :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert +
    \\alpha \\lVert (\\sqrt{w})(X1 - X2) \\rVert^2` if
    *weight_name* is not None and its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxAdd,
                                           OnnxIdentity, OnnxReduceSum,
                                           OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(abs_diff,
                          numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset)
    diff_l2 = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      numpy.array([l2_weight], dtype=dtype),
                      op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient
    grad_l1 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                      numpy.array([l1_weight], dtype=dtype),
                      op_version=target_opset)
    grad_l2 = OnnxMul(diff,
                      numpy.array([l2_weight * -2], dtype=dtype),
                      op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad,
                            op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(OnnxMul(score, resh, op_version=target_opset),
                            op_version=target_opset)
        res2 = OnnxMul(grad,
                       resh,
                       op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
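A numpy check mirroring the graph exactly as built above (a sketch, assuming onnxruntime; note the gradient term keeps the same `l2_weight * -2` factor as the code):

import numpy
from onnxruntime import InferenceSession

onx = _onnx_grad_loss_elastic_error()
sess = InferenceSession(onx.SerializeToString())
x1 = numpy.array([[1., 0.], [2., -1.]], dtype=numpy.float32)
x2 = numpy.array([[0.5, 0.], [2.5, 1.]], dtype=numpy.float32)
y, y_grad = sess.run(None, {'X1': x1, 'X2': x2})

d = x1 - x2
expected_y = (0.01 * numpy.abs(d) + 0.01 * d ** 2).sum()
expected_grad = 0.01 * numpy.sign(d) - 2 * 0.01 * d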
Example #12
def convert_score_cdist_sum(scope, operator, container):
    """
    Converts function @see fn score_cdist_sum into :epkg:`ONNX`.
    """
    op = operator.raw_operator
    if op._fct != score_cdist_sum:  # pylint: disable=W0143
        raise RuntimeError(  # pragma: no cover
            "The wrong converter was called {} != {}.".format(
                op._fct, score_cdist_sum))

    from skl2onnx.algebra.complex_functions import onnx_cdist
    from skl2onnx.algebra.onnx_ops import OnnxReduceSum  # pylint: disable=E0611
    from skl2onnx.common.data_types import guess_numpy_type

    X = operator.inputs[0]
    Y = operator.inputs[1]
    out = operator.outputs
    opv = container.target_opset
    dtype = guess_numpy_type(operator.inputs[0].type)
    if dtype != numpy.float64:
        dtype = numpy.float32
    out = operator.outputs

    options = container.get_options(score_cdist_sum, dict(cdist=None))

    kwargs = op.kwargs

    if options.get('cdist', None) == 'single-node':
        attrs = kwargs
        cdist_name = scope.get_unique_variable_name('cdist')
        container.add_node('CDist', [X.full_name, Y.full_name],
                           cdist_name,
                           op_domain='mlprodict',
                           name=scope.get_unique_operator_name('CDist'),
                           **attrs)
        container.add_node('ReduceSum', [cdist_name],
                           out[0].full_name,
                           axes=[1],
                           keepdims=0,
                           name=scope.get_unique_operator_name('ReduceSum'))
    else:
        metric = kwargs['metric']
        if metric == 'minkowski':
            dists = onnx_cdist(X,
                               Y,
                               dtype=dtype,
                               op_version=opv,
                               metric=metric,
                               p=kwargs.get('p', 2))
        else:
            dists = onnx_cdist(X,
                               Y,
                               dtype=dtype,
                               op_version=opv,
                               metric=kwargs['metric'])

        res = OnnxReduceSum(dists,
                            axes=[1],
                            keepdims=0,
                            output_names=[out[0].full_name],
                            op_version=opv)
        res.add_to(scope, container)
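The converter thus has two paths: when the *cdist* option is set to 'single-node', it emits mlprodict's custom CDist operator (domain 'mlprodict'), which the runtime must support, followed by a ReduceSum over axis 1; otherwise it expands the pairwise distances into standard ONNX operators with onnx_cdist before summing them per row.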