Example #1
0
    def add_to(self, scope, container, operator=None):
        """
        Adds outputs to the container if not already added,
        registers the outputs if the node is not final.

        :param scope: scope
        :param container: container
        :param operator: must be None; the operator to convert
            is taken from member ``op``
        :raises RuntimeError: if *operator* is not None, or if no
            converter is registered for ``type(self.op)``
        :raises TypeError: if an entry of ``self.output_names`` is
            neither a ``Variable`` nor a ``str``
        """
        if operator is not None:
            raise RuntimeError(
                "operator must be None, the operator to convert "
                "is specified in member 'op'.")
        try:
            op_type = sklearn_operator_name_map[type(self.op)]
        except KeyError:
            raise RuntimeError(
                "Unable to find a converter for model of type '{}'."
                "".format(self.op.__class__.__name__))

        this_operator = scope.declare_local_operator(op_type)
        this_operator.raw_operator = self.op
        this_operator.inputs = self.inputs
        if self.output_names is None:
            # No explicit names requested: declare one fresh output.
            output = scope.declare_local_variable('sub_%s' % op_type)
            this_operator.outputs.append(output)
            self.outputs = [output]
        else:
            self.outputs = []
            for v in self.output_names:
                if isinstance(v, Variable):
                    # Declare a new variable and alias it to the
                    # requested one through an Identity node.
                    output = scope.declare_local_variable(
                        '%s_%s' % (v.onnx_name, op_type))
                    apply_identity(scope, output.onnx_name, v.onnx_name,
                                   container)
                elif isinstance(v, str):
                    output = scope.declare_local_variable(v)
                else:
                    raise TypeError(
                        "Unexpected output name type {}.".format(type(v)))
                # BUG FIX: append inside the loop so that every requested
                # output is collected, not only the last one.
                self.outputs.append(output)
            this_operator.outputs.extend(self.outputs)
def validator_classifier_converter(scope, operator, container):
    """Converter for the validator classifier.

    Reuses the registered converter of the wrapped estimator as a
    sub-operator, then appends ONNX nodes that flag whether the
    highest predicted probability exceeds the model's threshold.
    """
    graph_outputs = operator.outputs  # outputs in the ONNX graph
    skl_model = operator.raw_operator  # scikit-learn model (must be fitted)

    # Declare the wrapped estimator as a local operator so its
    # existing converter handles it.
    sub_model = skl_model.estimator_
    sub_alias = get_model_alias(type(sub_model))
    sub_op = scope.declare_local_operator(sub_alias, sub_model)
    sub_op.inputs = operator.inputs

    # Intermediate outputs produced by the sub-model.
    label_var = scope.declare_local_variable('val_label', Int64TensorType())
    prob_var = scope.declare_local_variable('val_prob', FloatTensorType())
    sub_op.outputs.append(label_var)
    sub_op.outputs.append(prob_var)

    # Let the sub-model's shape calculator adjust its outputs.
    get_shape_calculator(sub_alias)(sub_op)

    # Validation step 1: highest probability per row.
    max_name = scope.get_unique_variable_name('val_max')
    container.add_node(
        'ReduceMax', prob_var.full_name, max_name,
        name=scope.get_unique_operator_name('ReduceMax'),
        axes=[1], keepdims=0)

    # Validation step 2: compare against the stored threshold.
    threshold_name = scope.get_unique_variable_name('threshold')
    container.add_initializer(
        threshold_name, onnx_proto.TensorProto.FLOAT, [1],
        [skl_model.threshold])
    greater_name = scope.get_unique_variable_name('val_bin')
    apply_greater(scope, [max_name, threshold_name], greater_name, container)

    # Validation step 3: cast the boolean result to int64.
    validate_name = scope.get_unique_variable_name('validate')
    apply_cast(scope, greater_name, validate_name, container,
               to=onnx_proto.TensorProto.INT64)

    # Link intermediate results to the declared graph outputs.
    apply_identity(scope, label_var.full_name,
                   graph_outputs[0].full_name, container)
    apply_identity(scope, prob_var.full_name,
                   graph_outputs[1].full_name, container)
    apply_identity(scope, validate_name,
                   graph_outputs[2].full_name, container)