# Test method from a unittest.TestCase; required imports (assuming
# mlprodict and skl2onnx are installed):
#   import numpy
#   from skl2onnx.algebra.onnx_ops import OnnxReshape
#   from mlprodict.onnxrt import OnnxInference
def common_test_onnxt_runtime_reshape(self):
    sh = numpy.array([1, 4], dtype=numpy.int64)
    onx = OnnxReshape('X', sh, output_names=['Y'])
    X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
    # the float32 sample only fixes the declared input type of the graph
    model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
    oinf = OnnxInference(model_def)
    got = oinf.run({'X': X})
    self.assertEqual(list(sorted(got)), ['Y'])
    exp = X.reshape(sh.tolist())
    # assertEqualArray comes from mlprodict's extended TestCase
    self.assertEqualArray(exp, got['Y'])
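A standalone variant of the same check, run with onnxruntime instead of mlprodict (a sketch assuming only numpy, skl2onnx and onnxruntime are installed):

import numpy
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxReshape

sh = numpy.array([1, 4], dtype=numpy.int64)
onx = OnnxReshape('X', sh, output_names=['Y'])
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float32)
model_def = onx.to_onnx({'X': X})
sess = InferenceSession(model_def.SerializeToString(),
                        providers=["CPUExecutionProvider"])
got = sess.run(None, {'X': X})[0]
assert numpy.allclose(got, X.reshape((1, 4)))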
# Required imports (assuming skl2onnx and onnxruntime are installed;
# TARGET_OPSET comes from the test utilities of the original suite):
#   import numpy as np
#   from numpy.testing import assert_almost_equal
#   from onnxruntime import InferenceSession
#   from skl2onnx.algebra.onnx_ops import OnnxReshape
#   from skl2onnx.common.data_types import FloatTensorType
def test_container_init(self):
    onx = OnnxReshape(
        OnnxReshape('X', np.array([1, -1], dtype=np.int64),
                    op_version=TARGET_OPSET),
        np.array([1, -1], dtype=np.int64),
        output_names=['Y'], op_version=TARGET_OPSET)
    X = np.array([[1, 2], [3, 4]], dtype=np.float32)
    model_def = onx.to_onnx({'X': X},
                            outputs=[('Y', FloatTensorType([None, 2]))],
                            target_opset=TARGET_OPSET)
    # explicit providers keep recent onnxruntime versions happy
    sess = InferenceSession(model_def.SerializeToString(),
                            providers=["CPUExecutionProvider"])
    got = sess.run(None, {'X': X})[0]
    assert_almost_equal(X.reshape((1, -1)), got)
    # both Reshape nodes reuse the same [1, -1] constant, so the
    # container stores a single deduplicated initializer
    inits = [row for row in str(model_def).split('\n')
             if row.startswith("  initializer {")]
    self.assertEqual(len(inits), 1)
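The string matching above is fragile; continuing from the test, the same count is available directly from the ONNX protobuf:

# count initializers through the protobuf API instead of parsing text
assert len(model_def.graph.initializer) == 1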
Example #3
def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5,
                                          weight_name=None):
    """
    The function takes the raw scores from a classifier, applies the
    sigmoid function to compute probabilities, then the log function
    to compute the loss. It creates the ONNX graph for this function
    and the associated gradient of the loss against the raw scores.

    Probabilities (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes):
    :math:`L(y, s) = -((1 - y)\\log(1 - p(s)) + y \\log(p(s)))`.
    Gradient: :math:`\\frac{dL(y, s)}{ds} = p(s) - y`.
    To avoid nan values, probabilities are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param target_opset: opset version used for every ONNX operator
    :param dtype: numpy type of the float inputs and constants
    :param eps: clips probabilities into `[eps, 1 - eps]` to avoid
        computing `log(0)`
    :param weight_name: if not None, name of an optional input holding
        per-sample weights

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
    from skl2onnx.algebra.onnx_ops import (OnnxSub, OnnxMul, OnnxSigmoid,
                                           OnnxLog, OnnxNeg, OnnxReduceSum,
                                           OnnxReshape, OnnxAdd, OnnxCast,
                                           OnnxClip)

    # p(s) = sigmoid(s), clipped into [eps, 1 - eps] to keep the logs finite
    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c,
                  numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1, op_version=target_opset)
    # cast the integer labels X1 to the float dtype; y0 = 1 - y
    y1 = OnnxCast('X1',
                  to=NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)],
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1, op_version=target_opset)
    # log-likelihood of each observation: (1 - y) log(1 - p) + y log(p)
    loss_obs = OnnxAdd(OnnxMul(y0,
                               OnnxLog(p0, op_version=target_opset),
                               op_version=target_opset),
                       OnnxMul(y1,
                               OnnxLog(p1, op_version=target_opset),
                               op_version=target_opset),
                       op_version=target_opset)

    # negate to obtain the negative log-likelihood
    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)
    if weight_name is None:
        # unweighted case: total loss and the gradient p(s) - y
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1,
                       y1,
                       op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        # weighted case: scale the loss and the gradient by the per-sample
        # weights, reshaped into a column so they broadcast over the scores
        loss = OnnxReduceSum(OnnxMul(loss_neg,
                                     OnnxReshape(weight_name,
                                                 numpy.array(
                                                     [-1, 1],
                                                     dtype=numpy.int64),
                                                 op_version=target_opset),
                                     op_version=target_opset),
                             op_version=target_opset)
        grad = OnnxMul(OnnxSub(p1, y1, op_version=target_opset),
                       OnnxReshape(weight_name,
                                   numpy.array([-1, 1], dtype=numpy.int64),
                                   op_version=target_opset),
                       output_names=['Y_grad'],
                       op_version=target_opset)

    # reshape the reduced loss into the 1-D output 'Y'
    res = OnnxReshape(loss,
                      numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    # dtype_to_var_type and add_initializer are helpers defined in
    # onnxcustom.utils alongside this function
    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    # build a model with two outputs: the loss 'Y' and the gradient 'Y_grad'
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset,
                      other_outputs=[grad])
    if weight_name is not None:
        # the initializer gives the weight input a default value of 1,
        # making it optional at inference time
        onx = add_initializer(onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
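A NumPy-only sanity check of the formulas in the docstring, verifying the gradient p(s) - y against central finite differences (a sketch, independent of the ONNX graph; all names are illustrative):

import numpy

def neg_log_loss(y, s, eps=1e-5):
    # p(s) = 1 / (1 + exp(-s)), clipped into [eps, 1 - eps]
    p = numpy.clip(1.0 / (1.0 + numpy.exp(-s)), eps, 1 - eps)
    loss = -((1 - y) * numpy.log(1 - p) + y * numpy.log(p))
    return loss.sum(), p - y  # summed loss and its gradient w.r.t. s

y = numpy.array([0., 1., 1.])
s = numpy.array([-0.5, 0.2, 2.0])
loss, grad = neg_log_loss(y, s)

# central finite differences on each raw score
h = 1e-6
num = numpy.array([
    (neg_log_loss(y, s + h * e)[0] - neg_log_loss(y, s - h * e)[0]) / (2 * h)
    for e in numpy.eye(len(s))])
assert numpy.allclose(grad, num, atol=1e-4)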