Code example #1
def shape_calculator_transfer_transformer(operator):
    """
    Shape calculator for :epkg:`TransferTransformer`.
    """
    op = operator.raw_operator
    alias = get_model_alias(type(op.estimator_))
    calc = get_shape_calculator(alias)

    scope = operator.scope_inst.temp()
    this_operator = scope.declare_local_operator(alias)
    this_operator.raw_operator = op.estimator_
    this_operator.inputs = operator.inputs
    res = _model_outputs(scope, op.estimator_, operator.inputs)
    this_operator.outputs.extend([
        scope.declare_local_variable("%sTTS" % r.onnx_name, r.type)
        for r in res
    ])
    calc(this_operator)

    if op.method == 'predict_proba':
        operator.outputs[0].type = this_operator.outputs[1].type
    elif op.method == 'transform':
        operator.outputs[0].type = this_operator.outputs[0].type
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(op.method, op.__class__.__name__))
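
The shape calculator above only computes output types; to be usable it has to be registered next to a converter for the same model. Below is a hedged registration sketch using skl2onnx's update_registered_converter; the import path of TransferTransformer, the alias string and the converter name converter_transfer_transformer are assumptions, not part of the source above.

from skl2onnx import update_registered_converter
from mlinsights.mlmodel import TransferTransformer   # assumed import path

# Register the shape calculator together with a (hypothetical) converter
# so that TransferTransformer is recognised inside scikit-learn pipelines.
update_registered_converter(
    TransferTransformer, "MlProdictTransferTransformer",
    shape_calculator_transfer_transformer,   # shape calculator shown above
    converter_transfer_transformer,          # hypothetical matching converter
    overwrite=True)
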
Code example #2
def validator_classifier_converter(scope, operator, container):
    outputs = operator.outputs  # outputs in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)

    # We reuse the existing converter for the sub-estimator
    # and declare it as a local operator.
    model = op.estimator_
    alias = get_model_alias(type(model))
    val_op = scope.declare_local_operator(alias, model)
    val_op.inputs = operator.inputs

    # We add two intermediate outputs.
    val_label = scope.declare_local_variable('val_label', Int64TensorType())
    val_prob = scope.declare_local_variable('val_prob', FloatTensorType())
    val_op.outputs.append(val_label)
    val_op.outputs.append(val_prob)

    # We adjust the output of the submodel.
    shape_calc = get_shape_calculator(alias)
    shape_calc(val_op)

    # We now handle the validation.
    val_max = scope.get_unique_variable_name('val_max')
    container.add_node('ReduceMax',
                       val_prob.full_name,
                       val_max,
                       name=scope.get_unique_operator_name('ReduceMax'),
                       axes=[1],
                       keepdims=0)

    th_name = scope.get_unique_variable_name('threshold')
    container.add_initializer(th_name, onnx_proto.TensorProto.FLOAT, [1],
                              [op.threshold])
    val_bin = scope.get_unique_variable_name('val_bin')
    apply_greater(scope, [val_max, th_name], val_bin, container)

    val_val = scope.get_unique_variable_name('validate')
    apply_cast(scope,
               val_bin,
               val_val,
               container,
               to=onnx_proto.TensorProto.INT64)

    # We finally link the intermediate outputs to the outputs
    # declared in the ONNX graph.
    apply_identity(scope, val_label.full_name, outputs[0].full_name, container)
    apply_identity(scope, val_prob.full_name, outputs[1].full_name, container)
    apply_identity(scope, val_val, outputs[2].full_name, container)
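
To make this converter active, it would have to be registered together with a shape calculator and, since the wrapper exposes three outputs (label, probabilities, validation flag), a custom parser. A hedged registration sketch follows; the class name ValidatorClassifier, the companion functions validator_classifier_shape_calculator and validator_classifier_parser, and the variables validator and X_train are assumptions, not shown above.

import numpy
from skl2onnx import to_onnx, update_registered_converter

# Hypothetical wrapper class and companion functions, assumed to be
# defined next to the converter above.
update_registered_converter(
    ValidatorClassifier, "CustomValidatorClassifier",
    validator_classifier_shape_calculator,   # declares the three output types
    validator_classifier_converter,          # converter shown above
    parser=validator_classifier_parser)      # exposes label, proba, validation

# 'validator' is a fitted ValidatorClassifier, 'X_train' its training data.
model_onnx = to_onnx(validator, X_train[:1].astype(numpy.float32))
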
Code example #3
def predictable_tsne_converter(scope, operator, container):
    output = operator.outputs[0]
    op = operator.raw_operator
    model = op.estimator_
    alias = _get_sklearn_operator_name(type(model))
    knn_op = scope.declare_local_operator(alias, model)
    knn_op.inputs = operator.inputs
    knn_output = scope.declare_local_variable('knn_output', FloatTensorType())
    knn_op.outputs.append(knn_output)
    shape_calc = get_shape_calculator(alias)
    shape_calc(knn_op)
    name = scope.get_unique_operator_name('Scaler')
    attrs = dict(name=name,
                 scale=op.inv_std_.ravel().astype(float),
                 offset=op.mean_.ravel().astype(float))

    container.add_node('Scaler', [knn_output.onnx_name], [output.full_name],
                       op_domain='ai.onnx.ml',
                       **attrs)
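
The converter above assumes a shape calculator is registered under the same alias for the wrapper. A minimal sketch of what it could look like; it is not part of the source above and simply derives the output width from op.mean_, an attribute the converter already relies on.

from skl2onnx.common.data_types import FloatTensorType

def predictable_tsne_shape_calculator(operator):
    inp = operator.inputs[0]       # input declared in the ONNX graph
    op = operator.raw_operator     # fitted wrapper (PredictableTSNE)
    N = inp.type.shape[0]          # batch dimension, usually None
    # The converter rescales the k-NN output with op.mean_ / op.inv_std_,
    # so the embedding width can be read from those fitted attributes.
    C = op.mean_.ravel().shape[0]
    operator.outputs[0].type = FloatTensorType([N, C])
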
Code example #4
def predictable_tsne_converter(scope, operator, container):
    """
    :param scope: name space, where to keep node names, get unused new names
    :param operator: operator to convert, same object as sent to
        *predictable_tsne_shape_calculator*
    :param container: contains the ONNX graph
    """
    # input = operator.inputs[0]      # input in ONNX graph
    output = operator.outputs[0]  # output in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)

    # The first step is the k-nearest neighbours;
    # we reuse the existing converter and declare it
    # as a local operator.
    model = op.estimator_
    alias = _get_sklearn_operator_name(type(model))
    knn_op = scope.declare_local_operator(alias, model)
    knn_op.inputs = operator.inputs

    # We add an intermediate output.
    knn_output = scope.declare_local_variable('knn_output', FloatTensorType())
    knn_op.outputs.append(knn_output)

    # We adjust the output of the submodel.
    shape_calc = get_shape_calculator(alias)
    shape_calc(knn_op)

    # We add the normalizer which needs a unique node name.
    name = scope.get_unique_operator_name('Scaler')

    # The parameters follow the ONNX specification:
    # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#ai.onnx.ml.Scaler
    attrs = dict(name=name,
                 scale=op.inv_std_.ravel().astype(float),
                 offset=op.mean_.ravel().astype(float))

    # Let's finally add the scaler which connects the output
    # of the k-nearest neighbours model to the output of the
    # whole model declared in the ONNX graph.
    container.add_node('Scaler', [knn_output.onnx_name], [output.full_name],
                       op_domain='ai.onnx.ml',
                       **attrs)
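
Registering and using this converter could look like the sketch below; the class name PredictableTSNE, the shape calculator, and the variables model and X_train are assumptions taken from the docstrings, not from the code above.

from skl2onnx import convert_sklearn, update_registered_converter
from skl2onnx.common.data_types import FloatTensorType

# Hypothetical class name from the docstring; 'model' is a fitted instance.
update_registered_converter(
    PredictableTSNE, "CustomPredictableTSNE",
    predictable_tsne_shape_calculator,   # shape calculator (see sketch above)
    predictable_tsne_converter)          # converter shown above

onx = convert_sklearn(
    model, "predictable_tsne",
    [("input", FloatTensorType([None, X_train.shape[1]]))])
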
Code example #5
def shape_calculator_transfer_transformer(operator):
    """
    Shape calculator for :epkg:`TransferTransformer`.
    """
    if len(operator.inputs) != 1:
        raise RuntimeError(  # pragma: no cover
            "Only one input (not %d) is allowed for model %r."
            "" % (len(operator.inputs), operator))
    op = operator.raw_operator
    alias = get_model_alias(type(op.estimator_))
    calc = get_shape_calculator(alias)

    options = (None if not hasattr(operator.scope, 'options') else
               operator.scope.options)
    if is_classifier(op.estimator_):
        options = {} if options is None else dict(options)
        # Disable zipmap on the sub-classifier so that probabilities
        # come out as a plain tensor.
        options[id(op.estimator_)] = {'zipmap': False}
    registered_models = dict(conv=_converter_pool,
                             shape=_shape_calculator_pool,
                             aliases=sklearn_operator_name_map)
    scope = Scope('temp', options=options, registered_models=registered_models)
    inputs = [
        Variable(v.onnx_name, v.onnx_name, type=v.type, scope=scope)
        for v in operator.inputs
    ]
    res = _parse_sklearn(scope, op.estimator_, inputs)
    this_operator = res[0]._parent
    calc(this_operator)

    if op.method == 'predict_proba':
        operator.outputs[0].type = this_operator.outputs[1].type
    elif op.method == 'transform':
        operator.outputs[0].type = this_operator.outputs[0].type
    else:
        raise NotImplementedError(  # pragma: no cover
            "Unable to defined the output for method='{}' and model='{}'.".
            format(op.method, op.__class__.__name__))