Example #1
def shape_calculator_transfer_transformer(operator):
    """
    Shape calculator for :epkg:`TransferTransformer`.
    """
    op = operator.raw_operator
    alias = get_model_alias(type(op.estimator_))
    calc = get_shape_calculator(alias)

    scope = operator.scope_inst.temp()
    this_operator = scope.declare_local_operator(alias)
    this_operator.raw_operator = op.estimator_
    this_operator.inputs = operator.inputs
    res = _model_outputs(scope, op.estimator_, operator.inputs)
    this_operator.outputs = res
    calc(this_operator)

    if op.method == 'predict_proba':
        operator.outputs[0].type = this_operator.outputs[1].type
    elif op.method == 'transform':
        operator.outputs[0].type = this_operator.outputs[0].type
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(op.method, op.__class__.__name__))
Example #2
def convert_transfer_transformer(scope, operator, container):
    """
    Converter for :epkg:`TransferTransformer`.
    """
    op = operator.raw_operator
    op_type = get_model_alias(type(op.estimator_))

    this_operator = scope.declare_local_operator(op_type)
    this_operator.raw_operator = op.estimator_
    this_operator.inputs = operator.inputs

    if isinstance(op.estimator_, ClassifierMixin):
        container.add_options(id(op.estimator_), {'zipmap': False})

    res = _model_outputs(scope.temp(), op.estimator_, operator.inputs)
    this_operator.outputs.extend([
        scope.declare_local_variable("%sTTC" % r.onnx_name, r.type)
        for r in res
    ])

    if op.method == 'predict_proba':
        index = 1
    elif op.method == 'transform':
        index = 0
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(op.method, op.__class__.__name__))

    apply_identity(scope,
                   this_operator.outputs[index].onnx_name,
                   operator.outputs[0].full_name,
                   container,
                   operator_name=scope.get_unique_operator_name("IdentityTT"))
Example #3
def parser_transfer_transformer(scope, model, inputs, custom_parsers=None):
    """
    Parser for :epkg:`TransferTransformer`.
    """
    if custom_parsers is not None and model in custom_parsers:
        return custom_parsers[model](scope,
                                     model,
                                     inputs,
                                     custom_parsers=custom_parsers)

    if model.method == 'predict_proba':
        name = 'probabilities'
    elif model.method == 'transform':
        name = 'variable'
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(model.method, model.__class__.__name__))

    prob = scope.declare_local_variable(name, FloatTensorType())
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)
    this_operator.inputs = inputs
    this_operator.outputs.append(prob)
    return this_operator.outputs
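
The shape calculator, converter and parser above only take effect once they are registered with skl2onnx. The sketch below shows one possible registration; the import path of TransferTransformer and the alias string are assumptions and may differ in the actual package.

# Hypothetical registration sketch; the import path and alias are assumptions.
from skl2onnx import update_registered_converter
from mlinsights.mlmodel import TransferTransformer  # assumed location

update_registered_converter(
    TransferTransformer,                    # model class to register (assumed)
    "MlInsightsTransferTransformer",        # alias under which it is registered (assumed)
    shape_calculator_transfer_transformer,  # Example #1
    convert_transfer_transformer,           # Example #2
    parser=parser_transfer_transformer,     # Example #3
    overwrite=True)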
Example #4
def subsub_mmtwo_parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)
    this_operator.inputs.append(inputs[0])
    cls_type = inputs[0].type.__class__
    val = scope.declare_local_variable('variable', cls_type())
    this_operator.outputs.append(val)
    return this_operator.outputs
Example #5
def parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    op = scope.declare_local_operator(alias, model)
    op.inputs = inputs
    n_features = sum(x.type.shape[1] for x in op.inputs)
    variable = scope.declare_local_variable(
        "c_outputs", FloatTensorType([None, n_features]))
    op.outputs.append(variable)
    return op.outputs
Example #6
def parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    operator = scope.declare_local_operator(alias, model)
    operator.inputs = inputs
    for op_input in inputs:
        op_output = scope.declare_local_variable(op_input.raw_name,
                                                 copy.deepcopy(op_input.type))
        operator.outputs.append(op_output)
    return operator.outputs
Example #7
def woe_encoder_parser(scope, model, inputs, custom_parsers=None):
    if len(inputs) != 1:
        raise RuntimeError(f"Unexpected number of inputs: {len(inputs)} != 1.")
    if inputs[0].type is None:
        raise RuntimeError(f"Unexpected type: {inputs[0]!r}.")
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)
    this_operator.inputs.append(inputs[0])
    this_operator.outputs.append(
        scope.declare_local_variable('catwoe', FloatTensorType()))
    return this_operator.outputs
Example #8
    def parser(scope, model, inputs):
        """Custom parser for parsing ONNX model outputs

        """
        alias = skl2onnx.get_model_alias(type(model))
        this_op = scope.declare_local_operator(alias, model)
        this_op.inputs = inputs
        this_op.outputs.append(
            scope.declare_local_variable('states',
                                         data_types.Int32TensorType([None])))
        this_op.outputs.append(
            scope.declare_local_variable('probs',
                                         data_types.FloatTensorType([None])))
        return this_op.outputs
Example #9
def pyod_iforest_parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)

    # inputs
    this_operator.inputs.append(inputs[0])

    # outputs
    cls_type = inputs[0].type.__class__
    val_y1 = scope.declare_local_variable('label', Int64TensorType())
    val_y2 = scope.declare_local_variable('probability', cls_type())
    this_operator.outputs.append(val_y1)
    this_operator.outputs.append(val_y2)

    # end
    return this_operator.outputs
Example #10
def decorrelate_transformer_parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)

    # inputs
    this_operator.inputs.append(inputs[0])

    # outputs
    cls_type = inputs[0].type.__class__
    val_y1 = scope.declare_local_variable('nogemm', cls_type())
    val_y2 = scope.declare_local_variable('gemm', cls_type())
    this_operator.outputs.append(val_y1)
    this_operator.outputs.append(val_y2)

    # ends
    return this_operator.outputs
Example #11
def validator_classifier_converter(scope, operator, container):
    outputs = operator.outputs  # outputs in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)

    # We reuse existing converter and declare it
    # as a local operator.
    model = op.estimator_
    alias = get_model_alias(type(model))
    val_op = scope.declare_local_operator(alias, model)
    val_op.inputs = operator.inputs

    # We add intermediate outputs.
    val_label = scope.declare_local_variable('val_label', Int64TensorType())
    val_prob = scope.declare_local_variable('val_prob', FloatTensorType())
    val_op.outputs.append(val_label)
    val_op.outputs.append(val_prob)

    # We adjust the output of the submodel.
    shape_calc = get_shape_calculator(alias)
    shape_calc(val_op)

    # We now handle the validation.
    val_max = scope.get_unique_variable_name('val_max')
    container.add_node('ReduceMax',
                       val_prob.full_name,
                       val_max,
                       name=scope.get_unique_operator_name('ReduceMax'),
                       axes=[1],
                       keepdims=0)

    th_name = scope.get_unique_variable_name('threshold')
    container.add_initializer(th_name, onnx_proto.TensorProto.FLOAT, [1],
                              [op.threshold])
    val_bin = scope.get_unique_variable_name('val_bin')
    apply_greater(scope, [val_max, th_name], val_bin, container)

    val_val = scope.get_unique_variable_name('validate')
    apply_cast(scope,
               val_bin,
               val_val,
               container,
               to=onnx_proto.TensorProto.INT64)

    # We finally link the intermediate output to the shared converter.
    apply_identity(scope, val_label.full_name, outputs[0].full_name, container)
    apply_identity(scope, val_prob.full_name, outputs[1].full_name, container)
    apply_identity(scope, val_val, outputs[2].full_name, container)
Example #12
def validator_classifier_parser(scope, model, inputs, custom_parsers=None):
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)

    # inputs
    this_operator.inputs.append(inputs[0])

    # outputs
    val_label = scope.declare_local_variable('val_label', Int64TensorType())
    val_prob = scope.declare_local_variable('val_prob', FloatTensorType())
    val_val = scope.declare_local_variable('val_val', Int64TensorType())
    this_operator.outputs.append(val_label)
    this_operator.outputs.append(val_prob)
    this_operator.outputs.append(val_val)

    # ends
    return this_operator.outputs
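
The converter in Example #11 and the parser in Example #12 leave the final output types to a shape calculator. Below is a minimal sketch of one; the n_classes_ attribute on the wrapped estimator is an assumption.

def validator_classifier_shape_calculator(operator):
    # Minimal sketch; 'estimator_.n_classes_' is an assumed attribute.
    input0 = operator.inputs[0]        # first input in the ONNX graph
    outputs = operator.outputs         # the three outputs declared by the parser
    op = operator.raw_operator         # fitted scikit-learn model
    N = input0.type.shape[0]           # number of observations (often None)
    C = op.estimator_.n_classes_       # number of classes (assumed attribute)
    outputs[0].type = Int64TensorType([N])     # val_label
    outputs[1].type = FloatTensorType([N, C])  # val_prob
    outputs[2].type = Int64TensorType([N])     # val_val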
Example #13
def predictable_tsne_converter(scope, operator, container):
    """
    :param scope: name space, where to keep node names, get unused new names
    :param operator: operator to converter, same object as sent to
        *predictable_tsne_shape_calculator*
    :param container: contains the ONNX graph
    """
    # input = operator.inputs[0]      # input in ONNX graph
    output = operator.outputs[0]  # output in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)

    # First step is the k nearest-neighbours,
    # we reuse existing converter and declare it as local
    # operator
    model = op.estimator_
    alias = get_model_alias(type(model))
    knn_op = scope.declare_local_operator(alias, model)
    knn_op.inputs = operator.inputs

    # We add an intermediate output.
    knn_output = scope.declare_local_variable('knn_output', FloatTensorType())
    knn_op.outputs.append(knn_output)

    # We adjust the output of the submodel.
    shape_calc = get_shape_calculator(alias)
    shape_calc(knn_op)

    # We add the normalizer which needs a unique node name.
    name = scope.get_unique_operator_name('Scaler')

    # The parameter follows the specifications of ONNX
    # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#ai.onnx.ml.Scaler
    attrs = dict(name=name,
                 scale=op.inv_std_.ravel().astype(float),
                 offset=op.mean_.ravel().astype(float))

    # Let's finally add the scaler which connects the output
    # of the k-nearest neighbours model to output of the whole model
    # declared in ONNX graph
    container.add_node('Scaler', [knn_output.onnx_name], [output.full_name],
                       op_domain='ai.onnx.ml',
                       **attrs)
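
The docstring above refers to predictable_tsne_shape_calculator. A minimal sketch is given below; the attribute used to obtain the embedding dimension (estimator_._y) is an assumption about how the wrapper stores its fitted targets.

def predictable_tsne_shape_calculator(operator):
    # Minimal sketch; 'estimator_._y' is an assumed attribute.
    inp = operator.inputs[0]            # input in the ONNX graph
    op = operator.raw_operator          # fitted scikit-learn model
    N = inp.type.shape[0]               # number of observations (often None)
    C = op.estimator_._y.shape[1]       # embedding dimension (assumed attribute)
    operator.outputs[0].type = FloatTensorType([N, C])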
Example #14
def shape_calculator_transfer_transformer(operator):
    """
    Shape calculator for :epkg:`TransferTransformer`.
    """
    if len(operator.inputs) != 1:
        raise RuntimeError(  # pragma: no cover
            "Only one input (not %d) is allowed for model %r."
            "" % (len(operator.inputs), operator))
    op = operator.raw_operator
    alias = get_model_alias(type(op.estimator_))
    calc = get_shape_calculator(alias)

    options = (None if not hasattr(operator.scope, 'options') else
               operator.scope.options)
    if is_classifier(op.estimator_):
        options = dict(options) if options else {}
        options[id(op.estimator_)] = {'zipmap': False}
    registered_models = dict(conv=_converter_pool,
                             shape=_shape_calculator_pool,
                             aliases=sklearn_operator_name_map)
    scope = Scope('temp', options=options, registered_models=registered_models)
    inputs = [
        Variable(v.onnx_name, v.onnx_name, type=v.type, scope=scope)
        for v in operator.inputs
    ]
    res = _parse_sklearn(scope, op.estimator_, inputs)
    this_operator = res[0]._parent
    calc(this_operator)

    if op.method == 'predict_proba':
        operator.outputs[0].type = this_operator.outputs[1].type
    elif op.method == 'transform':
        operator.outputs[0].type = this_operator.outputs[0].type
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(op.method, op.__class__.__name__))
Example #15
def discretizer_transformer_converter(scope, operator, container):
    op = operator.raw_operator

    # We convert the discretizer into a tree.
    model = op.astree()

    # We add a placeholder to call the converter for
    # this model.
    alias = get_model_alias(type(model))
    tree_op = scope.declare_local_operator(alias)
    tree_op.inputs = operator.inputs
    tree_op.raw_operator = model
    tree_out = scope.declare_local_variable(
        'treeout', operator.inputs[0].type.__class__())
    tree_op.outputs.append(tree_out)

    out_name = operator.outputs[0].full_name
    apply_cast(scope,
               tree_out.full_name,
               out_name,
               container,
               to=onnx_proto.TensorProto.INT64)
Example #16
    def converter(scope, operator, container):
        opv = container.target_opset
        clf = operator.raw_operator
        output_states = operator.outputs[0]
        output_prob = operator.outputs[1]

        scaler_model = operator.raw_operator.scaler_model_
        clf_model = operator.raw_operator.clf_model_
        hmm_model = operator.raw_operator.hmm_model_
        n_features = scaler_model.n_features_in_
        n_classes = clf_model.n_classes_

        # First step is the scaler
        alias = skl2onnx.get_model_alias(type(scaler_model))
        scaler_op = scope.declare_local_operator(alias, scaler_model)
        scaler_op.inputs = operator.inputs
        scaler_output = scope.declare_local_variable(
            'scaler_output', data_types.FloatTensorType([None, n_features]))
        scaler_op.outputs.append(scaler_output)
        scaler_op.infer_types()

        # Second step is random forest classifier
        alias = skl2onnx.get_model_alias(type(clf_model))
        classifier_op = scope.declare_local_operator(alias, clf_model)
        classifier_op.inputs.append(scaler_output)
        classifier_output_labels = scope.declare_local_variable(
            'classifier_output_labels', data_types.Int32TensorType([None]))
        classifier_output_probabilities = scope.declare_local_variable(
            'classifier_output_probabilities',
            data_types.FloatTensorType([None, n_classes]))
        classifier_op.outputs.append(classifier_output_labels)
        classifier_op.outputs.append(classifier_output_probabilities)
        classifier_op.infer_types()

        # Take the second column from probabilities produced by classifier
        gather_indices_name = scope.get_unique_variable_name('gather_indices')
        container.add_initializer(gather_indices_name, TensorProto.INT32, [1],
                                  [1])
        gather_positive = onnx_ops.OnnxGather(classifier_output_probabilities,
                                              gather_indices_name,
                                              axis=1,
                                              output_names=[output_prob],
                                              op_version=opv)
        gather_positive.set_onnx_name_prefix("gather_positive")
        gather_positive.add_to(scope, container)

        # Translate probabilities of class-1 to discrete integers
        bins_discretizer = clf.bins_discretizer_
        alias = skl2onnx.get_model_alias(type(bins_discretizer))
        discretizer_op = scope.declare_local_operator(alias, bins_discretizer)
        discretizer_op.inputs.append(output_prob)
        states_2d = scope.declare_local_variable(
            'states_2d', data_types.FloatTensorType([None, 1]))
        discretizer_op.outputs.append(states_2d)
        discretizer_op.infer_types()

        # Output of discretizer is an N * 1 tensor, we need to flatten it as an
        # N dimensional array
        final_shape_name = scope.get_unique_variable_name("final_shape")
        container.add_initializer(final_shape_name, TensorProto.INT64, [1],
                                  [-1])

        hmm_input_name = scope.declare_local_variable(
            'hmm_input', data_types.Int32TensorType([None]))
        cast = onnx_ops.OnnxCast(onnx_ops.OnnxReshape(states_2d,
                                                      final_shape_name,
                                                      op_version=opv),
                                 to=TensorProto.INT32,
                                 output_names=[hmm_input_name],
                                 op_version=opv)
        cast.set_onnx_name_prefix("cast")
        cast.add_to(scope, container)

        # feed reshaped array to HMM
        hmm_output_name = scope.declare_local_variable(
            'hmm_output', data_types.Int32TensorType([None]))
        alias = skl2onnx.get_model_alias(type(hmm_model))
        hmm_op = scope.declare_local_operator(alias, hmm_model)
        hmm_op.inputs.append(hmm_input_name)
        hmm_op.outputs.append(hmm_output_name)
        hmm_op.infer_types()

        # an extra identity
        final_identity = onnx_ops.OnnxIdentity(hmm_output_name,
                                               output_names=[output_states],
                                               op_version=opv)
        final_identity.set_onnx_name_prefix("final_identity")
        final_identity.add_to(scope, container)
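
The parser in Example #8 and this converter only run once they are registered. Below is one possible wiring; the wrapper class name, the alias and the no-op shape calculator are assumptions made for illustration, since the parser and converter already declare or infer the output types.

# Hypothetical registration; class name, alias and shape calculator are assumptions.
import skl2onnx

def shape_calculator(operator):
    # The parser (Example #8) already declares fully typed outputs,
    # so there is nothing left to adjust here.
    pass

skl2onnx.update_registered_converter(
    ScalerClfHmmModel,       # assumed wrapper class holding scaler, clf and hmm
    "ScalerClfHmmModel",     # assumed alias
    shape_calculator,
    converter,               # the converter defined above
    parser=parser)           # the parser from Example #8

A fitted instance could then be converted with skl2onnx.to_onnx in the usual way.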