Example #1
0
def decorrelate_transformer_convertor2(scope, operator, container):
    """Convert the decorrelate transformer by delegating the whole
    computation to the converter of its fitted ``pca_`` sub-estimator."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    # The PCA's single output is bound to this operator's first output.
    pca_node = OnnxSubEstimator(raw.pca_, operator.inputs[0],
                                op_version=target_opset,
                                output_names=operator.outputs[:1])
    pca_node.add_to(scope, container)
def custom_transformer_converter2(scope, operator, container):
    """Converter that reuses the registered converter of the fitted
    ``norm_`` model, mapping its outputs onto this operator's outputs."""
    raw = operator.raw_operator
    sub = OnnxSubEstimator(raw.norm_, operator.inputs[0],
                           op_version=container.target_opset,
                           output_names=operator.outputs)
    sub.add_to(scope, container)
Example #3
0
def ordinal_encoder_converter(scope, operator, container):
    """Convert the ordinal encoder: rebuild an equivalent scikit-learn
    encoder from ``mapping`` and delegate to its registered converter."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    feature_input = operator.inputs[0]

    sklearn_encoder = ordenc_to_sklearn(raw.mapping)
    # The sub-estimator's output becomes this operator's first output.
    encoded = OnnxSubEstimator(sklearn_encoder,
                               feature_input,
                               op_version=target_opset,
                               output_names=operator.outputs[:1])
    encoded.add_to(scope, container)
Example #4
0
def decorrelate_transformer_converter(scope, operator, container):
    """Converter for the decorrelate transformer.

    The whole computation is delegated to the fitted ``pca_``
    sub-estimator, whose converter is reused through OnnxSubEstimator.
    """
    raw = operator.raw_operator
    target_opset = container.target_opset

    # The unique ONNX input of this operator.
    feature_input = operator.inputs[0]

    # op_version selects the requested opset; the sub-estimator's single
    # output is bound to this operator's first declared output.
    pca_output = OnnxSubEstimator(raw.pca_, feature_input,
                                  op_version=target_opset,
                                  output_names=operator.outputs[:1])
    pca_output.add_to(scope, container)
Example #5
0
def woe_encoder_converter(scope, operator, container):
    """Convert a WOE encoder: ordinal-encode the input, cast to float32,
    then apply the WOE mapping rebuilt as a scikit-learn model."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    feature_input = operator.inputs[0]

    # Stage 1: the fitted ordinal encoder.
    ordinal = OnnxSubEstimator(raw.ordinal_encoder, feature_input,
                               op_version=target_opset)
    # ONNX is strict about types: make the intermediate result float32.
    as_float = OnnxCast(ordinal, op_version=target_opset, to=np.float32)
    # Stage 2: the WOE mapping converted to an equivalent sklearn model.
    woe_model = woeenc_to_sklearn(raw.mapping)
    encoded = OnnxSubEstimator(woe_model,
                               as_float,
                               op_version=target_opset,
                               output_names=operator.outputs[:1],
                               input_types=[FloatTensorType()])
    encoded.add_to(scope, container)
Example #6
0
def ordwoe_encoder_converter(scope, operator, container):
    """Convert the combined ordinal+WOE encoder by chaining both fitted
    sub-estimators with a float32 cast in between."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    feature_input = operator.inputs[0]

    ordinal = OnnxSubEstimator(raw.encoder_, feature_input,
                               op_version=target_opset)
    as_float = OnnxCast(ordinal, op_version=target_opset, to=np.float32)
    woe = OnnxSubEstimator(raw.woe_,
                           as_float,
                           op_version=target_opset,
                           input_types=[Int64TensorType()])
    # Identity binds the result to this operator's first output name.
    bound = OnnxIdentity(woe,
                         output_names=operator.outputs[:1],
                         op_version=target_opset)
    bound.add_to(scope, container)
def validator_classifier_converter(scope, operator, container):
    """Converter for the validator classifier.

    Reuses the wrapped estimator's converter, then derives a validity
    flag: a sample is valid when its maximum predicted probability
    exceeds the configured threshold.
    """
    graph_input = operator.inputs[0]   # input in the ONNX graph
    graph_outputs = operator.outputs   # outputs in the ONNX graph
    raw = operator.raw_operator        # scikit-learn model (must be fitted)
    target_opset = container.target_opset

    # Reuse the existing converter of the wrapped estimator and declare
    # it as a local operator.
    sub = OnnxSubEstimator(raw.estimator_, graph_input,
                           op_version=target_opset)

    max_proba = OnnxReduceMax(sub[1], axes=[1], keepdims=0,
                              op_version=target_opset)
    above = OnnxGreater(max_proba,
                        np.array([raw.threshold], dtype=np.float32),
                        op_version=target_opset)
    validity = OnnxCast(above, to=onnx_proto.TensorProto.INT64,
                        op_version=target_opset)

    # Bind labels, probabilities and the validity flag to the outputs.
    label_out = OnnxIdentity(sub[0],
                             output_names=[graph_outputs[0].full_name],
                             op_version=target_opset)
    proba_out = OnnxIdentity(sub[1],
                             output_names=[graph_outputs[1].full_name],
                             op_version=target_opset)
    valid_out = OnnxIdentity(validity,
                             output_names=[graph_outputs[2].full_name],
                             op_version=target_opset)

    label_out.add_to(scope, container)
    proba_out.add_to(scope, container)
    valid_out.add_to(scope, container)
def validator_classifier_converter(scope, operator, container):
    """Converter for the validator classifier.

    The wrapped model is converted through ``OnnxSubEstimator`` (with
    zipmap disabled so probabilities come back as a plain tensor), then
    a validity flag is derived from the maximum predicted probability.
    """
    graph_input = operator.inputs[0]   # first input in the ONNX graph
    graph_outputs = operator.outputs   # outputs in the ONNX graph
    raw = operator.raw_operator        # scikit-learn model (must be fitted)
    target_opset = container.target_opset

    # The model calls another one: OnnxSubEstimator invokes the
    # registered converter for that inner estimator.
    sub = OnnxSubEstimator(raw.estimator_,
                           graph_input,
                           op_version=target_opset,
                           options={'zipmap': False})

    max_proba = OnnxReduceMax(sub[1], axes=[1], keepdims=0,
                              op_version=target_opset)
    above = OnnxGreater(max_proba,
                        np.array([raw.threshold], dtype=np.float32),
                        op_version=target_opset)
    validity = OnnxCast(above, to=onnx_proto.TensorProto.INT64,
                        op_version=target_opset)

    # Bind labels, probabilities and the validity flag to the outputs.
    label_out = OnnxIdentity(sub[0],
                             output_names=[graph_outputs[0].full_name],
                             op_version=target_opset)
    proba_out = OnnxIdentity(sub[1],
                             output_names=[graph_outputs[1].full_name],
                             op_version=target_opset)
    valid_out = OnnxIdentity(validity,
                             output_names=[graph_outputs[2].full_name],
                             op_version=target_opset)

    label_out.add_to(scope, container)
    proba_out.add_to(scope, container)
    valid_out.add_to(scope, container)
 def to_onnx_operator(self, inputs=None, outputs=('Y', )):
     """Build the ONNX operator: run the fitted ``norm_`` model on the
     first input and rename its result to ``outputs``.

     Raises RuntimeError when ``inputs`` is not provided.
     """
     if inputs is None:
         raise RuntimeError("inputs should contain one name")
     opset = self.op_version
     first_input = self.get_inputs(inputs, 0)
     normalized = OnnxSubEstimator(self.norm_, first_input, op_version=opset)
     # Identity maps the sub-estimator's output onto the requested names.
     return OnnxIdentity(normalized, op_version=self.op_version,
                         output_names=outputs)
def pyod_iforest_converter(scope, operator, container):
    """Converter for a pyod IForest wrapper.

    Builds three pieces of the graph from the wrapped scikit-learn
    IForest: the raw anomaly scores, integer labels obtained by
    thresholding, and a two-column probability-like output produced by
    min-max scaling the (negated) scores and clipping to [0, 1].

    Fix: removed two leftover debug ``print`` calls that dumped the
    fitted scaler's parameters to stdout on every conversion.
    """
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # In most cases, computation happens in floats, but it might be with
    # double. ONNX is very strict about types: every constant must have
    # the same type as the input.
    dtype = guess_numpy_type(X.type)

    detector = op.detector_  # Should be IForest from scikit-learn.
    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
    scores = OnnxIdentity(lab_pred[1], op_version=opv)

    # labels: 1 when the score is below the fitted threshold, else 0
    threshold = op.threshold_
    above = OnnxLess(scores,
                     np.array([threshold], dtype=dtype),
                     op_version=opv)
    labels = OnnxCast(above,
                      op_version=opv,
                      to=onnx_proto.TensorProto.INT64,
                      output_names=out[:1])

    # probabilities: min-max scale the negated scores using the
    # training-time decision scores, then clip into [0, 1].
    train_scores = op.decision_scores_
    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype), op_version=opv)

    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
    scaled_centered = OnnxAdd(scaled,
                              scaler.min_.astype(dtype),
                              op_version=opv)
    clipped = OnnxClip(scaled_centered,
                       np.array([0], dtype=dtype),
                       np.array([1], dtype=dtype),
                       op_version=opv)
    # Complement so the two columns sum to 1 (inlier vs outlier).
    clipped_ = OnnxAdd(OnnxMul(clipped,
                               np.array([-1], dtype=dtype),
                               op_version=opv),
                       np.array([1], dtype=dtype),
                       op_version=opv)

    scores_2d = OnnxConcat(clipped_,
                           clipped,
                           axis=1,
                           op_version=opv,
                           output_names=out[1:])

    labels.add_to(scope, container)
    scores_2d.add_to(scope, container)
def custom_transformer_converter4(scope, operator, container):
    """Converter reusing ``norm_``'s converter and exposing that
    sub-estimator's second output as this operator's result."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    sub = OnnxSubEstimator(raw.norm_, operator.inputs[0],
                           op_version=target_opset)
    # Only the second output of the sub-estimator is exposed.
    result = OnnxIdentity(sub[1],
                          output_names=operator.outputs,
                          op_version=target_opset)
    result.add_to(scope, container)
 def to_onnx_operator(self, inputs=None, outputs=('Y', ),
                      target_opset=None, **kwargs):
     """Build the ONNX operator for the fitted ``norm_`` model.

     ``target_opset`` overrides ``self.op_version`` when given.
     Raises RuntimeError when ``inputs`` is not provided.
     """
     if inputs is None:
         raise RuntimeError("inputs should contain one name")
     opset = target_opset or self.op_version
     first_input = self.get_inputs(inputs, 0)
     return OnnxSubEstimator(self.norm_, first_input, op_version=opset,
                             output_names=outputs)
def subsub_mmtwo_converter(scope, operator, container):
    """Chain the two fitted sub-estimators (``est1_`` then ``est2_``)
    with an Exp node in between, binding the result to the first output."""
    raw = operator.raw_operator
    target_opset = container.target_opset
    feature_input = operator.inputs[0]

    stage1 = OnnxSubEstimator(raw.est1_, feature_input,
                              op_version=target_opset)
    stage1.set_onnx_name_prefix('AAA')
    exp_stage1 = OnnxExp(stage1, op_version=target_opset)
    stage2 = OnnxSubEstimator(raw.est2_, exp_stage1,
                              op_version=target_opset)
    stage2.set_onnx_name_prefix('BBB')
    final = OnnxIdentity(stage2, op_version=target_opset,
                         output_names=operator.outputs[:1])
    final.add_to(scope, container)
Example #14
0
 def generate_onnx_graph(opv):
     """Build and return an ONNX graph wrapping the closure's ``model``:
     add a constant, run the fitted model, expose one output as 'Y'."""
     numpy_dtype = np.float32 if cls_type == FloatTensorType else np.float64
     shifted = OnnxAdd(first_input,
                       np.array([0.1], dtype=numpy_dtype),
                       op_version=opv)
     estimator = model()
     estimator.fit(np.ones([10, 5]), np.arange(0, 10) % 3)
     # NOTE(review): the sub-estimator is pinned to op_version=1 while
     # the rest of the graph uses ``opv`` — preserved as-is; confirm
     # whether this was intentional.
     sub = OnnxSubEstimator(estimator, shifted, op_version=1,
                            options=options)
     if model == LogisticRegression:
         # Classifiers: expose the probabilities (second output).
         last = OnnxIdentity(sub[1], output_names=['Y'], op_version=opv)
     else:
         last = OnnxIdentity(sub, output_names=['Y'], op_version=opv)
     return last.to_onnx([('X1', cls_type((None, 5)))],
                         outputs=[('Y', cls_type())],
                         target_opset=opv)
def custom_classifier_converter(scope, operator, container):
    """Convert the custom classifier: stack one probability column per
    inner estimator, softmax across columns, argmax for the label."""
    raw = operator.raw_operator
    feature_input = operator.inputs[0]
    graph_outputs = operator.outputs
    target_opset = container.target_opset

    # One (-1, 1) column per inner estimator, taken from its second
    # (probability) output.
    columns = []
    for estimator in raw.estimators_:
        proba = OnnxSubEstimator(estimator, feature_input,
                                 op_version=target_opset)[1]
        columns.append(OnnxReshape(proba,
                                   np.array([-1, 1], dtype=np.int64),
                                   op_version=target_opset))

    stacked = OnnxConcat(*columns, axis=1, op_version=target_opset)
    probabilities = OnnxSoftmax(stacked, axis=1, op_version=target_opset,
                                output_names=[graph_outputs[1]])
    probabilities.add_to(scope, container)
    labels = OnnxArgMax(probabilities, axis=1, keepdims=0,
                        op_version=target_opset,
                        output_names=[graph_outputs[0]])
    labels.add_to(scope, container)