def test_onnx_clip_10(self):
    with self.subTest(name="OnnxClip_6[1e-5, 1e5]"):
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None: OnnxClip_6(
                x, min=1e-5, max=1e5, output_names=output_names),
            lambda x: np.clip(x, 1e-5, 1e5),
            op_version=10)
    with self.subTest(name="OnnxClip-10[1e-5, 1e5]"):
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None: OnnxClip(
                x, min=1e-5, max=1e5,
                output_names=output_names, op_version=10),
            lambda x: np.clip(x, 1e-5, 1e5),
            op_version=10)
    with self.subTest(name="OnnxClip-10[-1e5, 1e-5]"):
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None: OnnxClip(
                x, max=1e-5, output_names=output_names, op_version=10),
            lambda x: np.clip(x, -1e5, 1e-5),
            op_version=10)
    with self.subTest(name="OnnxClip-10[0.1, 2.1]"):
        self.common_test_onnxt_runtime_unary(
            lambda x, output_names=None: OnnxClip(
                x, min=0.1, max=2.1,
                output_names=output_names, op_version=10),
            lambda x: np.clip(x, 0.1, 2.1),
            op_version=10)
def test_onnx_clip_10(self):
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip_6(
            x, min=1e-5, max=1e5,
            output_names=output_names, op_version=10),
        lambda x: np.clip(x, 1e-5, 1e5),
        op_version=10)
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, min=1e-5, max=1e5,
            output_names=output_names, op_version=10),
        lambda x: np.clip(x, 1e-5, 1e5),
        op_version=10)
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, max=1e-5, output_names=output_names, op_version=10),
        lambda x: np.clip(x, -1e5, 1e-5),
        op_version=10)
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, min=0.1, max=2.1,
            output_names=output_names, op_version=10),
        lambda x: np.clip(x, 0.1, 2.1),
        op_version=10)
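# Both versions of test_onnx_clip_10 above rely on the opset change in
# Clip: up to opset 10 (Clip-6/Clip-10) the bounds are node attributes,
# from opset 11 on they are optional tensor inputs. A minimal sketch of
# the two signatures using the standard onnx helper API (the node and
# tensor names here are illustrative only):
from onnx import helper

# attribute form, valid up to opset 10
node_clip_10 = helper.make_node("Clip", ["X"], ["Y"], min=1e-5, max=1e5)

# input form, required from opset 11 on: bounds become optional inputs
node_clip_11 = helper.make_node("Clip", ["X", "min", "max"], ["Y"])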
def test_onnxt_runtime_clip(self):
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, min=0, output_names=output_names),
        lambda x: numpy.clip(x, 0, 1e5))
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, max=0, output_names=output_names),
        lambda x: numpy.clip(x, -1e5, 0))
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, min=0.1, max=2.1, output_names=output_names),
        lambda x: numpy.clip(x, 0.1, 2.1))
def pyod_iforest_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # In most cases, the computation happens in floats,
    # but it might be done in doubles. ONNX is very strict
    # about types: every constant must have the same
    # type as the input.
    dtype = guess_numpy_type(X.type)

    detector = op.detector_  # Should be IsolationForest from scikit-learn.
    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
    scores = OnnxIdentity(lab_pred[1], op_version=opv)

    # labels
    threshold = op.threshold_
    above = OnnxLess(scores, np.array([threshold], dtype=dtype),
                     op_version=opv)
    labels = OnnxCast(above, op_version=opv,
                      to=onnx_proto.TensorProto.INT64,
                      output_names=out[:1])

    # probabilities: rescale the raw scores into [0, 1]
    # with the min/max statistics of the training scores
    train_scores = op.decision_scores_
    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),
                      op_version=opv)
    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
    scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),
                              op_version=opv)
    clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),
                       np.array([1], dtype=dtype), op_version=opv)
    clipped_ = OnnxAdd(OnnxMul(clipped, np.array([-1], dtype=dtype),
                               op_version=opv),
                       np.array([1], dtype=dtype), op_version=opv)
    scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,
                           output_names=out[1:])

    labels.add_to(scope, container)
    scores_2d.add_to(scope, container)
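# A hedged usage sketch (not part of the converter itself): the function
# above only takes effect once registered with skl2onnx. The shape
# calculator `pyod_iforest_shape_calculator` is assumed to exist and to
# declare one INT64 label output and one float score output.
from pyod.models.iforest import IForest
from skl2onnx import update_registered_converter

update_registered_converter(
    IForest, "PyodIForest",
    pyod_iforest_shape_calculator,  # assumed companion shape calculator
    pyod_iforest_converter)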
def test_onnx_clip(self):
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, np.array([0], dtype=np.float32),
            output_names=output_names),
        lambda x: np.clip(x, 0, 1e5))
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, np.array([-1000], dtype=np.float32),
            np.array([0], dtype=np.float32),
            output_names=output_names),
        lambda x: np.clip(x, -1e5, 0))
    self.common_test_onnxt_runtime_unary(
        lambda x, output_names=None: OnnxClip(
            x, np.array([0.1], dtype=np.float32),
            np.array([2.1], dtype=np.float32),
            output_names=output_names),
        lambda x: np.clip(x, 0.1, 2.1))
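# The three cases in test_onnx_clip pass the bounds as tensors because,
# from opset 11 on, Clip takes min/max as inputs instead of attributes.
# A standalone sanity check of one case against onnxruntime -- a sketch
# assuming skl2onnx and onnxruntime are installed:
import numpy as np
import onnxruntime as ort
from skl2onnx.algebra.onnx_ops import OnnxClip
from skl2onnx.common.data_types import FloatTensorType

node = OnnxClip('X', np.array([0.1], dtype=np.float32),
                np.array([2.1], dtype=np.float32), output_names=['Y'])
onx = node.to_onnx([('X', FloatTensorType([None]))])
sess = ort.InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
x = np.array([-1., 0.5, 3.], dtype=np.float32)
assert np.allclose(sess.run(None, {'X': x})[0], np.clip(x, 0.1, 2.1))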
def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5, weight_name=None):
    """
    The function takes the raw scores from a classifier, uses the
    sigmoid function to compute probabilities, then the log function
    to compute the loss. It creates the ONNX graph for this function
    and the associated gradient of the loss against the raw scores.

    Probabilities (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes):
    :math:`L(y, s) = (1 - y)\\log(1 - p(s)) + y \\log(p(s))`.
    Gradient :math:`\\frac{dL(y, s)}{ds} = y - p(s)`.
    To avoid nan values, probabilities are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param eps: to clip probabilities and avoid computing `log(0)`

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)
        print("DOT-SECTION", oinf.to_dot())
    """
    from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxSigmoid, OnnxLog, OnnxNeg, OnnxReduceSum,
        OnnxReshape, OnnxAdd, OnnxCast, OnnxClip)

    # probabilities, clipped to [eps, 1 - eps] to avoid log(0)
    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c, numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1,
                 op_version=target_opset)
    y1 = OnnxCast('X1', to=NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)],
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1,
                 op_version=target_opset)

    # log-likelihood per observation, then negated
    loss_obs = OnnxAdd(
        OnnxMul(y0, OnnxLog(p0, op_version=target_opset),
                op_version=target_opset),
        OnnxMul(y1, OnnxLog(p1, op_version=target_opset),
                op_version=target_opset),
        op_version=target_opset)
    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)

    if weight_name is None:
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1, y1, op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        loss = OnnxReduceSum(
            OnnxMul(loss_neg,
                    OnnxReshape(weight_name,
                                numpy.array([-1, 1], dtype=numpy.int64),
                                op_version=target_opset),
                    op_version=target_opset),
            op_version=target_opset)
        grad = OnnxMul(
            OnnxSub(p1, y1, op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            output_names=['Y_grad'], op_version=target_opset)

    res = OnnxReshape(loss, numpy.array([-1], numpy.int64),
                      op_version=target_opset, output_names=['Y'])

    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type()), ('Y_grad', var_type())],
                      target_opset=target_opset, other_outputs=[grad])
    if weight_name is not None:
        onx = add_initializer(onx, weight_name,
                              numpy.array([1], dtype=dtype))
    return onx
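# A numpy sanity check of the formulas in the docstring above (a sketch,
# not part of the module): probabilities are clipped before the log, the
# graph returns the negated log-likelihood as 'Y', and its gradient with
# respect to the raw score s is p(s) - y, matching 'Y_grad'.
import numpy

eps = 1e-5
s = numpy.array([-2., 0., 3.])                 # raw scores
y = numpy.array([0, 1, 1], dtype=numpy.int64)  # binary labels
p = numpy.clip(1. / (1. + numpy.exp(-s)), eps, 1 - eps)
loss = -((1 - y) * numpy.log(1 - p) + y * numpy.log(p))
grad = p - y   # derivative of the negated loss w.r.t. s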