def common_test_onnxt_runtime_reshape(self):
    sh = numpy.array([1, 4], dtype=numpy.int64)
    onx = OnnxReshape('X', sh, output_names=['Y'])
    X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
    model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
    oinf = OnnxInference(model_def)
    got = oinf.run({'X': X})
    self.assertEqual(list(sorted(got)), ['Y'])
    exp = X.reshape(sh.tolist())
    self.assertEqualArray(exp, got['Y'])
def _onnx_grad_loss_absolute_error(target_opset=None,
                                   dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert` or
    :math:`Y = f(X1, X2) = \\lVert (X1 - X2)w \\rVert` if
    *weight_name* is not None and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxReduceSum, OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset,
                        output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                       resh, op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y', var_type()),
                                      ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
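
# Hedged usage sketch, not part of the original code: it mirrors the gdot
# script in the docstring above and assumes mlprodict and onnxcustom are
# installed. The input names 'X1', 'X2' and output names 'Y', 'Y_grad' are
# the ones created by _onnx_grad_loss_absolute_error.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('grad_loss_absolute_error')
oinf = OnnxInference(model_onnx)
x1 = numpy.array([[1., 2.], [3., 4.]], dtype=numpy.float32)
x2 = numpy.array([[1.5, 2.], [2., 5.]], dtype=numpy.float32)
got = oinf.run({'X1': x1, 'X2': x2})
# got['Y'] holds sum(|x1 - x2|) as a one-element tensor,
# got['Y_grad'] holds sign(x1 - x2) element-wise.
print(got['Y'], got['Y_grad'])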
def test_container_init(self):
    onx = OnnxReshape(
        OnnxReshape('X', np.array([1, -1], dtype=np.int64),
                    op_version=TARGET_OPSET),
        np.array([1, -1], dtype=np.int64),
        output_names=['Y'], op_version=TARGET_OPSET)
    X = np.array([[1, 2], [3, 4]], dtype=np.float32)
    model_def = onx.to_onnx(
        {'X': X}, outputs=[('Y', FloatTensorType([None, 2]))],
        target_opset=TARGET_OPSET)
    sess = InferenceSession(model_def.SerializeToString())
    got = sess.run(None, {'X': X})[0]
    assert_almost_equal(X.reshape((1, -1)), got)
    inits = [row for row in str(model_def).split('\n')
             if row.startswith("  initializer {")]
    self.assertEqual(len(inits), 1)
def _onnx_grad_square_error(target_opset=None, dtype=numpy.float32,
                            weight_name=None):
    """
    Returns the ONNX graph for the gradient of function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2 w` if
    *weight_name* is not None.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxMul, OnnxReshape
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxMul(diff, numpy.array([-2], dtype=dtype),
                      op_version=target_opset, output_names=['Y_grad'])
    else:
        res = OnnxMul(
            OnnxMul(diff, numpy.array([-2], dtype=dtype),
                    op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            op_version=target_opset, output_names=['Y_grad'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y_grad', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
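
# Hedged usage sketch (assumes mlprodict and onnxcustom are available): the
# graph only exposes the gradient 'Y_grad', which is -2 * (X1 - X2) and
# appears to correspond to the derivative of ||X1 - X2||^2 taken with
# respect to the second input.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('grad_square_error')
oinf = OnnxInference(model_onnx)
x1 = numpy.array([[0., 1.], [2., 3.]], dtype=numpy.float32)
x2 = numpy.array([[0.5, 1.], [1., 4.]], dtype=numpy.float32)
got = oinf.run({'X1': x1, 'X2': x2})
# got['Y_grad'] should match -2 * (x1 - x2).
print(got['Y_grad'])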
def custom_classifier_converter(scope, operator, container):
    op = operator.raw_operator
    X = operator.inputs[0]
    outputs = operator.outputs
    opv = container.target_opset
    y_list = [
        OnnxReshape(
            OnnxSubEstimator(est, X, op_version=opv)[1],
            np.array([-1, 1], dtype=np.int64),
            op_version=opv)
        for est in op.estimators_]
    y_matrix = OnnxConcat(*y_list, axis=1, op_version=opv)
    probs = OnnxSoftmax(y_matrix, axis=1, op_version=opv,
                        output_names=[outputs[1]])
    probs.add_to(scope, container)
    labels = OnnxArgMax(probs, axis=1, keepdims=0,
                        op_version=opv,
                        output_names=[outputs[0]])
    labels.add_to(scope, container)
def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5,
                                          weight_name=None):
    """
    The function takes the raw scores from a classifier, uses the sigmoid
    function to compute probabilities, then the log function to compute
    the loss. It creates the ONNX graph for this function and the
    associated gradient of the loss against the raw scores.

    Probabilities (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes):
    :math:`L(y, s) = -(1 - y)\\log(1 - p(s)) - y \\log(p(s))`.
    Gradient :math:`\\frac{dL(y, s)}{ds} = p(s) - y`.
    To avoid nan values, probabilities are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param eps: to clip probabilities and avoid computing `log(0)`

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxSigmoid, OnnxLog, OnnxNeg, OnnxReduceSum,
        OnnxReshape, OnnxAdd, OnnxCast, OnnxClip)

    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c, numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1,
                 op_version=target_opset)
    y1 = OnnxCast('X1', to=NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)],
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1,
                 op_version=target_opset)
    loss_obs = OnnxAdd(
        OnnxMul(y0, OnnxLog(p0, op_version=target_opset),
                op_version=target_opset),
        OnnxMul(y1, OnnxLog(p1, op_version=target_opset),
                op_version=target_opset),
        op_version=target_opset)
    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)
    if weight_name is None:
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1, y1, op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        loss = OnnxReduceSum(
            OnnxMul(loss_neg,
                    OnnxReshape(weight_name,
                                numpy.array([-1, 1], dtype=numpy.int64),
                                op_version=target_opset),
                    op_version=target_opset),
            op_version=target_opset)
        grad = OnnxMul(
            OnnxSub(p1, y1, op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            output_names=['Y_grad'], op_version=target_opset)

    res = OnnxReshape(loss, numpy.array([-1], numpy.int64),
                      op_version=target_opset, output_names=['Y'])

    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y', var_type()),
                                      ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[grad])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
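
# Hedged usage sketch (assumes mlprodict and onnxcustom are available):
# 'X1' holds the integer labels in {0, 1}, 'X2' the raw scores, both as
# 2D tensors. 'Y' is the summed negative log-likelihood and 'Y_grad' its
# gradient with respect to the scores, i.e. p(s) - y with p(s) the clipped
# sigmoid of the scores.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
oinf = OnnxInference(model_onnx)
labels = numpy.array([[0], [1], [1]], dtype=numpy.int64)
scores = numpy.array([[-1.5], [0.2], [2.1]], dtype=numpy.float32)
got = oinf.run({'X1': labels, 'X2': scores})
print(got['Y'], got['Y_grad'])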
def _onnx_n_penalty_elastic_error(target_opset=None, dtype=numpy.float32,
                                  weight_name=None, l1_weight=0.01,
                                  l2_weight=0.01, n_tensors=1,
                                  loss_shape=(1, 1)):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.
    It does that for *n_tensors* and adds all of the results
    to an input loss.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'n_penalty_elastic_error', n_tensors=2)
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxMul, OnnxAdd, OnnxReduceSumSquare, OnnxReduceSum,
        OnnxAbs, OnnxReshape)

    if n_tensors <= 0:
        raise ValueError(  # pragma: no cover
            "This function is useless if the number of tensors is null.")

    var_type = dtype_to_var_type(dtype)
    varsx = [('loss', var_type(loss_shape))]
    names = ['loss']
    for n in range(n_tensors):
        name = 'W%d' % n
        abs_diff = OnnxAbs(name, op_version=target_opset)
        res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
        # res2_l1 = OnnxSign(diff, op_version=target_opset)
        res_l2 = OnnxReduceSumSquare(name, op_version=target_opset)
        # res2_l2 = diff
        res = OnnxAdd(
            OnnxMul(res_l1, numpy.array([l1_weight], dtype=dtype),
                    op_version=target_opset),
            OnnxMul(res_l2, numpy.array([l2_weight], dtype=dtype),
                    op_version=target_opset),
            op_version=target_opset)
        names.append(res)
        varsx.append(('W%d' % n, var_type()))

    if len(names) == 2:
        res = OnnxAdd(*names, op_version=target_opset)
    else:
        res = OnnxAdd(names[1], names[2], op_version=target_opset)
        for i in range(3, len(names)):
            res = OnnxAdd(res, names[i], op_version=target_opset)
        res = OnnxAdd(names[0], res, op_version=target_opset)

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset, output_names=['Y'])
    onx = res.to_onnx(varsx, outputs=[('Y', var_type([None]))],
                      target_opset=target_opset)
    return onx
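
# Hedged usage sketch (assumes mlprodict and onnxcustom are available): with
# n_tensors=2, as in the docstring above, the graph expects an existing loss
# of shape (1, 1) plus two tensors 'W0' and 'W1', and returns the loss
# increased by the elastic penalty of each tensor.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('n_penalty_elastic_error', n_tensors=2)
oinf = OnnxInference(model_onnx)
got = oinf.run({
    'loss': numpy.array([[0.5]], dtype=numpy.float32),
    'W0': numpy.array([1., -2., 3.], dtype=numpy.float32),
    'W1': numpy.array([-1., 1.], dtype=numpy.float32)})
# With the default weights (0.01), got['Y'] is
# 0.5 + 0.01 * (6 + 2) + 0.01 * (14 + 2) = 0.74.
print(got['Y'])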
def _onnx_grad_penalty_elastic_error(target_opset=None,
                                     dtype=numpy.float32,
                                     l1_weight=0.01,
                                     l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert +
    \\alpha \\lVert W \\rVert^2`
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxMul, OnnxAdd, OnnxReduceSumSquare, OnnxReduceSum,
        OnnxSign, OnnxAbs, OnnxReshape)
    diff = 'X'
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
    res2_l1 = OnnxSign(diff, op_version=target_opset)
    res_l2 = OnnxReduceSumSquare(diff, op_version=target_opset)
    res2_l2 = diff

    res = OnnxAdd(OnnxMul(res_l1, numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset),
                  OnnxMul(res_l2, numpy.array([l2_weight], dtype=dtype),
                          op_version=target_opset),
                  op_version=target_opset)
    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset, output_names=['Y'])

    res2 = OnnxAdd(OnnxMul(res2_l1, numpy.array([l1_weight], dtype=dtype),
                           op_version=target_opset),
                   OnnxMul(res2_l2,
                           numpy.array([l2_weight * 2], dtype=dtype),
                           op_version=target_opset),
                   op_version=target_opset, output_names=['Y_grad'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None]))]
    onx = res.to_onnx(varsx, outputs=[('Y', var_type([None])),
                                      ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[res2])
    return onx
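
# Hedged usage sketch (assumes mlprodict and onnxcustom are available): the
# graph takes a single 2D tensor 'X' and returns the elastic penalty 'Y'
# together with its gradient
# 'Y_grad' = l1_weight * sign(X) + 2 * l2_weight * X.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('grad_penalty_elastic_error')
oinf = OnnxInference(model_onnx)
x = numpy.array([[-1., 2.], [0.5, -3.]], dtype=numpy.float32)
got = oinf.run({'X': x})
# With the default weights (0.01), got['Y'] is
# 0.01 * sum(|x|) + 0.01 * sum(x ** 2).
print(got['Y'], got['Y_grad'])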
def _onnx_grad_loss_elastic_error(target_opset=None, dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01, l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert +
    \\alpha \\lVert X1 - X2 \\rVert^2` or
    :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert +
    \\alpha \\lVert (\\sqrt{w})(X1 - X2) \\rVert^2` if
    *weight_name* is not None and its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxAdd, OnnxIdentity, OnnxReduceSum,
        OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(abs_diff, numpy.array([l1_weight], dtype=dtype),
                          op_version=target_opset)
    diff_l2 = OnnxMul(OnnxMul(diff, diff, op_version=target_opset),
                      numpy.array([l2_weight], dtype=dtype),
                      op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient
    grad_l1 = OnnxMul(OnnxSign(diff, op_version=target_opset),
                      numpy.array([l1_weight], dtype=dtype),
                      op_version=target_opset)
    grad_l2 = OnnxMul(diff, numpy.array([l2_weight * -2], dtype=dtype),
                      op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad, op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(OnnxMul(score, resh, op_version=target_opset),
                            op_version=target_opset)
        res2 = OnnxMul(grad, resh, op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y', var_type()),
                                      ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
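
# Hedged usage sketch (assumes mlprodict and onnxcustom are available, and
# that function_onnx_graph forwards keyword arguments such as l1_weight and
# l2_weight the same way it forwards n_tensors above): elastic loss 'Y' and
# the gradient output 'Y_grad' built by the function.
import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('grad_loss_elastic_error',
                                 l1_weight=0.1, l2_weight=0.9)
oinf = OnnxInference(model_onnx)
x1 = numpy.array([[1., 2.], [3., 4.]], dtype=numpy.float32)
x2 = numpy.array([[1.1, 2.], [2.5, 5.]], dtype=numpy.float32)
got = oinf.run({'X1': x1, 'X2': x2})
# got['Y'] is sum(0.1 * |x1 - x2| + 0.9 * (x1 - x2) ** 2),
# got['Y_grad'] is 0.1 * sign(x1 - x2) - 1.8 * (x1 - x2).
print(got['Y'], got['Y_grad'])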