def dummy_converter(scope, operator, container):
    X = operator.inputs[0]
    out = operator.outputs

    id1 = OnnxIdentity(X, op_version=TARGET_OPSET)
    id2 = OnnxIdentity(id1, output_names=out[1:], op_version=TARGET_OPSET)
    id2.add_to(scope, container)
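
A converter such as dummy_converter only takes effect once it is registered with skl2onnx. The call below is a minimal sketch of that wiring; DummyModel and dummy_shape_calculator are hypothetical names standing in for the actual estimator class and its shape calculator.

from skl2onnx import update_registered_converter

# Hypothetical registration: DummyModel and dummy_shape_calculator are
# placeholders, dummy_converter is the function defined above.
update_registered_converter(
    DummyModel, "DummyModelAlias",
    dummy_shape_calculator, dummy_converter)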
    def test_onnx_example_cdist_in_minkowski(self):
        x = numpy.array([1, 2, 1, 3, 2, 2, 2,
                         3]).astype(numpy.float32).reshape((4, 2))
        x2 = numpy.array([[1, 2], [2, 2], [2.1, 2.1],
                          [2, 2]]).astype(numpy.float32).reshape((4, 2))
        for pp in [1, 2]:
            with self.subTest(pp=pp):
                cop = OnnxIdentity('input',
                                   op_version=get_opset_number_from_onnx())
                cop2 = OnnxIdentity(onnx_cdist(
                    cop,
                    x2,
                    dtype=numpy.float32,
                    metric="minkowski",
                    p=pp,
                    op_version=get_opset_number_from_onnx()),
                                    output_names=['cdist'],
                                    op_version=get_opset_number_from_onnx())

                model_def = cop2.to_onnx(inputs=[('input',
                                                  FloatTensorType([None,
                                                                   None]))],
                                         outputs=[('cdist', FloatTensorType())
                                                  ])

                try:
                    sess = OnnxInference(model_def)
                except RuntimeError as e:
                    raise AssertionError("Issue\n{}".format(model_def)) from e
                res = sess.run({'input': x})['cdist']
                exp = scipy_cdist(x, x2, metric="minkowski", p=pp)
                self.assertEqualArray(exp, res, decimal=5)

        with self.subTest(pp=3):
            x = numpy.array([[6.1, 2.8, 4.7, 1.2], [5.7, 3.8, 1.7, 0.3],
                             [7.7, 2.6, 6.9, 2.3], [6.0, 2.9, 4.5, 1.5],
                             [6.8, 2.8, 4.8, 1.4], [5.4, 3.4, 1.5, 0.4],
                             [5.6, 2.9, 3.6, 1.3], [6.9, 3.1, 5.1, 2.3]],
                            dtype=numpy.float32)
            cop = OnnxAdd('input',
                          'input',
                          op_version=get_opset_number_from_onnx())
            cop2 = OnnxIdentity(onnx_cdist(
                cop,
                x,
                dtype=numpy.float32,
                metric="minkowski",
                p=3,
                op_version=get_opset_number_from_onnx()),
                                output_names=['cdist'],
                                op_version=get_opset_number_from_onnx())

            model_def = cop2.to_onnx(inputs=[('input',
                                              FloatTensorType([None, None]))],
                                     outputs=[('cdist', FloatTensorType())])

            sess = OnnxInference(model_def)
            res = sess.run({'input': x})['cdist']
            exp = scipy_cdist(x * 2, x, metric="minkowski", p=3)
            self.assertEqualArray(exp, res, decimal=4)
    def test_onnx_example_cdist_in(self):
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        x2 = np.array([1.1, 2.1, 4.01, 5.01, 5.001, 4.001, 0,
                       0]).astype(np.float32).reshape((4, 2))
        cop = OnnxAdd('input', 'input')
        cop2 = OnnxIdentity(onnx_cdist(cop, x2, dtype=np.float32),
                            output_names=['cdist'])

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x2, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=5)

        x = np.array(
            [[6.1, 2.8, 4.7, 1.2], [5.7, 3.8, 1.7, 0.3], [7.7, 2.6, 6.9, 2.3],
             [6.0, 2.9, 4.5, 1.5], [6.8, 2.8, 4.8, 1.4], [5.4, 3.4, 1.5, 0.4],
             [5.6, 2.9, 3.6, 1.3], [6.9, 3.1, 5.1, 2.3]],
            dtype=np.float32)
        cop = OnnxAdd('input', 'input')
        cop2 = OnnxIdentity(onnx_cdist(cop, x, dtype=np.float32),
                            output_names=['cdist'])

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=4)
Example #4
def dummy_converter(scope, operator, container):
    X = operator.inputs[0]
    out = operator.outputs

    id1 = OnnxIdentity(X)
    id2 = OnnxIdentity(id1, output_names=out[1:])
    id2.add_to(scope, container)
def validator_classifier_converter(scope, operator, container):
    input0 = operator.inputs[0]  # input in ONNX graph
    outputs = operator.outputs  # outputs in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)
    opv = container.target_opset

    # We reuse an existing converter and declare it
    # as a local operator.
    model = op.estimator_
    onnx_op = OnnxSubEstimator(model, input0, op_version=opv)

    rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv)
    great = OnnxGreater(rmax,
                        np.array([op.threshold], dtype=np.float32),
                        op_version=opv)
    valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64, op_version=opv)

    r1 = OnnxIdentity(onnx_op[0],
                      output_names=[outputs[0].full_name],
                      op_version=opv)
    r2 = OnnxIdentity(onnx_op[1],
                      output_names=[outputs[1].full_name],
                      op_version=opv)
    r3 = OnnxIdentity(valid,
                      output_names=[outputs[2].full_name],
                      op_version=opv)

    r1.add_to(scope, container)
    r2.add_to(scope, container)
    r3.add_to(scope, container)
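
This converter declares three outputs (label, probabilities, validity flag), so the registered shape calculator has to declare them as well. The following is a minimal sketch under the assumption that the wrapped estimator is reachable through op.estimator_ and exposes classes_:

def validator_classifier_shape_calculator(operator):
    from skl2onnx.common.data_types import FloatTensorType, Int64TensorType

    input0 = operator.inputs[0]            # same input as in the converter
    outputs = operator.outputs             # label, probabilities, validity
    op = operator.raw_operator             # the custom validator model
    N = input0.type.shape[0]               # number of observations
    C = op.estimator_.classes_.shape[0]    # number of classes (assumption)

    outputs[0].type = Int64TensorType([N])       # predicted label
    outputs[1].type = FloatTensorType([N, C])    # probabilities
    outputs[2].type = Int64TensorType([N])       # validity flag per row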
    def test_onnx_example_cdist_bigger(self):

        from skl2onnx.algebra.complex_functions import onnx_cdist
        data = load_iris()
        X, y = data.data, data.target
        self.assertNotEmpty(y)
        X_train = X[::2]
        # y_train = y[::2]
        X_test = X[1::2]
        # y_test = y[1::2]
        onx = OnnxIdentity(onnx_cdist(OnnxIdentity('X',
                                                   op_version=TARGET_OPSET),
                                      X_train.astype(numpy.float32),
                                      metric="euclidean",
                                      dtype=numpy.float32,
                                      op_version=TARGET_OPSET),
                           output_names=['Y'],
                           op_version=TARGET_OPSET)
        final = onx.to_onnx(inputs=[('X', FloatTensorType([None, None]))],
                            outputs=[('Y', FloatTensorType())],
                            target_opset=TARGET_OPSET)

        oinf = OnnxInference(final, runtime="python")
        res = oinf.run({'X': X_train.astype(numpy.float32)})['Y']
        exp = scipy_cdist(X_train, X_train, metric="euclidean")
        self.assertEqualArray(exp, res, decimal=6)
        res = oinf.run({'X': X_test.astype(numpy.float32)})['Y']
        exp = scipy_cdist(X_test, X_train, metric="euclidean")
        self.assertEqualArray(exp, res, decimal=6)
    def test_onnx_remove_identities2(self):
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxIdentity('input', op_version=get_opset_number_from_onnx())
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxIdentity(cdist,
                            output_names=['cdist'],
                            op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx({'input': FloatTensorType()},
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=get_opset_number_from_onnx())
        stats = onnx_statistics(model_def, optim=False)
        self.assertIn('subgraphs', stats)
        self.assertGreater(stats['subgraphs'], 1)
        self.assertGreater(stats['op_Identity'], 2)

        new_model = onnx_remove_node_identity(model_def)
        stats2 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats['subgraphs'], stats2['subgraphs'])
        self.assertLesser(stats2['op_Identity'], 2)

        oinf1 = OnnxInference(model_def)
        oinf2 = OnnxInference(new_model)
        y1 = oinf1.run({'input': x})['cdist']
        y2 = oinf2.run({'input': x})['cdist']
        self.assertEqualArray(y1, y2)
        self.assertLesser(stats2['op_Identity'], 1)
def validator_classifier_converter(scope, operator, container):
    input0 = operator.inputs[0]  # first input in ONNX graph
    outputs = operator.outputs  # outputs in ONNX graph
    op = operator.raw_operator  # scikit-learn model (must be fitted)
    opv = container.target_opset

    # The model wraps another estimator. The class `OnnxSubEstimator`
    # calls the converter registered for that estimator.
    model = op.estimator_
    onnx_op = OnnxSubEstimator(model,
                               input0,
                               op_version=opv,
                               options={'zipmap': False})

    rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv)
    great = OnnxGreater(rmax,
                        np.array([op.threshold], dtype=np.float32),
                        op_version=opv)
    valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64, op_version=opv)

    r1 = OnnxIdentity(onnx_op[0],
                      output_names=[outputs[0].full_name],
                      op_version=opv)
    r2 = OnnxIdentity(onnx_op[1],
                      output_names=[outputs[1].full_name],
                      op_version=opv)
    r3 = OnnxIdentity(valid,
                      output_names=[outputs[2].full_name],
                      op_version=opv)

    r1.add_to(scope, container)
    r2.add_to(scope, container)
    r3.add_to(scope, container)
def custom_transformer_converter1ww(scope, operator, container):
    i0 = operator.inputs[0]
    outputs = operator.outputs
    op = operator.raw_operator
    opv = container.target_opset
    idin = OnnxIdentity(i0, op_version=opv)
    out = OnnxSubEstimator(op.norm_, idin, op_version=opv)
    final = OnnxIdentity(out, op_version=opv,
                         output_names=outputs)
    final.add_to(scope, container)
def custom_cluster_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    dtype = guess_numpy_type(X.type)
    dist = OnnxMatMul(X, op.clusters_.astype(dtype), op_version=opv)
    label = OnnxArgMax(dist, axis=1, op_version=opv)
    Yl = OnnxIdentity(label, op_version=opv, output_names=out[:1])
    Yp = OnnxIdentity(dist, op_version=opv, output_names=out[1:])
    Yl.add_to(scope, container)
    Yp.add_to(scope, container)
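
To register custom_cluster_converter, a shape calculator describing its two outputs is also needed. A short sketch, assuming the model stores its centroid matrix in clusters_ (features x clusters) as used above:

def custom_cluster_shape_calculator(operator):
    from skl2onnx.common.data_types import Int64TensorType

    op = operator.raw_operator
    input_type = operator.inputs[0].type.__class__   # keep the input element type
    N = operator.inputs[0].type.shape[0]             # number of observations
    K = op.clusters_.shape[1]                        # number of clusters
    # ArgMax keeps the reduced axis by default (keepdims=1), hence [N, 1].
    operator.outputs[0].type = Int64TensorType([N, 1])
    operator.outputs[1].type = input_type([N, K])    # MatMul output (distances)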
    def test_onnx_example_cdist_in(self):
        from skl2onnx.algebra.complex_functions import onnx_cdist
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        x2 = numpy.array([1.1, 2.1, 4.01, 5.01, 5.001, 4.001, 0,
                          0]).astype(numpy.float32).reshape((4, 2))
        cop = OnnxAdd('input',
                      'input',
                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxIdentity(onnx_cdist(
            cop,
            x2,
            dtype=numpy.float32,
            op_version=get_opset_number_from_onnx()),
                            output_names=['cdist'],
                            op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist',
                                           FloatTensorType(None, None))],
                                 target_opset=get_opset_number_from_onnx())

        sess = OnnxInference(model_def)
        res = sess.run({'input': x})
        exp = scipy_cdist(x * 2, x2, metric="sqeuclidean")
        self.assertEqualArray(exp, res['cdist'], decimal=5)

        x = numpy.array(
            [[6.1, 2.8, 4.7, 1.2], [5.7, 3.8, 1.7, 0.3], [7.7, 2.6, 6.9, 2.3],
             [6., 2.9, 4.5, 1.5], [6.8, 2.8, 4.8, 1.4], [5.4, 3.4, 1.5, 0.4],
             [5.6, 2.9, 3.6, 1.3], [6.9, 3.1, 5.1, 2.3]],
            dtype=numpy.float32)
        cop = OnnxAdd('input',
                      'input',
                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxIdentity(onnx_cdist(
            cop,
            x,
            dtype=numpy.float32,
            op_version=get_opset_number_from_onnx()),
                            output_names=['cdist'],
                            op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=get_opset_number_from_onnx())

        sess = OnnxInference(model_def)
        res = sess.run({'input': x})
        exp = scipy_cdist(x * 2, x, metric="sqeuclidean")
        self.assertEqualArray(exp, res['cdist'], decimal=4)
    def test_onnx_example_cdist_in_custom_ops(self):
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        x2 = np.array([1.1, 2.1, 4.01, 5.01, 5.001, 4.001, 0, 0]).astype(
            np.float32).reshape((4, 2))
        opv = _TARGET_OPSET_
        cop = OnnxAdd(
            'input', 'input', op_version=opv)
        cop2 = OnnxIdentity(
            OnnxCDist(cop, x2, op_version=opv),
            output_names=['cdist'],
            op_version=opv)

        model_def = cop2.to_onnx(
            inputs=[('input', FloatTensorType([None, None]))],
            outputs=[('cdist', FloatTensorType())])

        try:
            sess = InferenceSession(model_def.SerializeToString())
        except RuntimeError as e:
            if "CDist is not a registered" in str(e):
                return
            raise
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x2, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=5)

        x = np.array([[6.1, 2.8, 4.7, 1.2],
                      [5.7, 3.8, 1.7, 0.3],
                      [7.7, 2.6, 6.9, 2.3],
                      [6.0, 2.9, 4.5, 1.5],
                      [6.8, 2.8, 4.8, 1.4],
                      [5.4, 3.4, 1.5, 0.4],
                      [5.6, 2.9, 3.6, 1.3],
                      [6.9, 3.1, 5.1, 2.3]], dtype=np.float32)
        cop = OnnxAdd(
            'input', 'input', op_version=opv)
        cop2 = OnnxIdentity(
            OnnxCDist(cop, x,
                      op_version=opv),
            output_names=['cdist'],
            op_version=opv)

        model_def = cop2.to_onnx(
            inputs=[('input', FloatTensorType([None, None]))],
            outputs=[('cdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=4)
Example #13
def custom_linear_classifier_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    dtype = guess_numpy_type(X.type)
    raw = OnnxAdd(OnnxMatMul(X, op.coef_.astype(dtype), op_version=opv),
                  op.intercept_.astype(dtype),
                  op_version=opv)
    prob = OnnxSigmoid(raw, op_version=opv)
    label = OnnxArgMax(prob, axis=1, op_version=opv)
    Yl = OnnxIdentity(label, op_version=opv, output_names=out[:1])
    Yp = OnnxIdentity(prob, op_version=opv, output_names=out[1:])
    Yl.add_to(scope, container)
    Yp.add_to(scope, container)
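
For reference, the graph built by custom_linear_classifier_converter amounts to a sigmoid over an affine transform followed by an argmax. A plain NumPy equivalent, with small stand-in arrays replacing the fitted coef_ and intercept_:

import numpy as np

# Stand-ins (assumption) for the fitted attributes used by the converter.
coef = np.array([[1.0, -1.0], [0.5, 0.5]], dtype=np.float32)   # op.coef_
intercept = np.array([0.1, -0.1], dtype=np.float32)            # op.intercept_
X = np.array([[1.0, 2.0], [3.0, -4.0]], dtype=np.float32)

raw = X @ coef + intercept                   # OnnxMatMul + OnnxAdd
prob = 1.0 / (1.0 + np.exp(-raw))            # OnnxSigmoid
label = prob.argmax(axis=1).reshape(-1, 1)   # OnnxArgMax(axis=1), keepdims=1 by default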
    def test_pdist(self):
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('input', 'input', op_version=TARGET_OPSET)
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=TARGET_OPSET)
        cop2 = OnnxIdentity(cdist,
                            output_names=['cdist'],
                            op_version=TARGET_OPSET)

        model_def = cop2.to_onnx({'input': FloatTensorType()},
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=TARGET_OPSET)

        sess = OnnxInference(model_def)
        res = sess.run({'input': x})
        self.assertEqual(list(res.keys()), ['cdist'])

        exp = squareform(pdist(x * 2, metric="sqeuclidean"))
        self.assertEqualArray(exp, res['cdist'])

        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (2, 3))
        res = sess.run({'input': x})
        self.assertEqual(list(res.keys()), ['cdist'])
Example #15
        def squareform_pdist(X, **kwargs):
            opv = TARGET_OPSET
            diff = OnnxSub('next_in',
                           'next',
                           output_names=['diff'],
                           op_version=opv)
            id_next = OnnxIdentity('next_in',
                                   output_names=['next_out'],
                                   op_version=opv)
            norm = OnnxReduceSumSquare(diff,
                                       output_names=['norm'],
                                       axes=[1],
                                       op_version=opv)
            flat = OnnxSqueezeApi11(norm,
                                    output_names=['scan_out'],
                                    axes=[1],
                                    op_version=opv)
            scan_body = id_next.to_onnx(
                OrderedDict([('next_in', FloatTensorType()),
                             ('next', FloatTensorType())]),
                outputs=[('next_out', FloatTensorType([None, None])),
                         ('scan_out', FloatTensorType([None]))],
                other_outputs=[flat])

            node = OnnxScan(X,
                            X,
                            output_names=['scan0_{idself}', 'scan1_{idself}'],
                            num_scan_inputs=1,
                            body=scan_body.graph,
                            op_version=opv,
                            **kwargs)
            return node[1]
Example #16
    def test_onnx_subgraphs2(self):
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd(OnnxIdentity('input', op_version=TARGET_OPSET),
                      'input',
                      op_version=TARGET_OPSET)
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=TARGET_OPSET)
        id1 = [id(a) for a in cdist.onx_op.graph_algebra['body']]
        cdist2 = onnx_squareform_pdist(cop,
                                       dtype=numpy.float32,
                                       op_version=TARGET_OPSET)
        id2 = [id(a) for a in cdist2.onx_op.graph_algebra['body']]
        self.assertNotEqual(id1, id2)
        cop2 = OnnxAdd(cdist,
                       cdist2,
                       output_names=['cdist'],
                       op_version=TARGET_OPSET)

        model_def = cop2.to_onnx({'input': FloatTensorType([None, None])},
                                 outputs=[('cdist',
                                           FloatTensorType([None, None]))],
                                 target_opset=TARGET_OPSET)
        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        self.assertEqual(len(res), 1)
    def test_onnx_example_cdist_in_euclidean(self):
        x2 = numpy.array([1.1, 2.1, 4.01, 5.01, 5.001, 4.001, 0,
                          0]).astype(numpy.float32).reshape((4, 2))
        cop = OnnxAdd('input',
                      'input',
                      op_version=get_opset_number_from_onnx())
        cop2 = OnnxIdentity(onnx_cdist(
            cop,
            x2,
            dtype=numpy.float32,
            metric='euclidean',
            op_version=get_opset_number_from_onnx()),
                            output_names=['cdist'],
                            op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=get_opset_number_from_onnx())

        new_model = onnx_remove_node_identity(model_def)
        stats = onnx_statistics(model_def, optim=False)
        stats2 = onnx_statistics(new_model, optim=False)
        self.assertEqual(stats.get('op_Identity', 0), 3)
        self.assertEqual(stats2.get('op_Identity', 0), 1)
Example #18
 def generate_onnx_graph(opv):
     dtype = np.float32 if cls_type == FloatTensorType else np.float64
     node = OnnxAdd(first_input,
                    np.array([0.1], dtype=dtype),
                    op_version=opv)
     lr = model()
     lr.fit(np.ones([10, 5]), np.arange(0, 10) % 3)
     out = OnnxSubEstimator(lr, node, op_version=1, options=options)
     if model == LogisticRegression:
         last = OnnxIdentity(out[1], output_names=['Y'], op_version=opv)
     else:
         last = OnnxIdentity(out, output_names=['Y'], op_version=opv)
     onx = last.to_onnx([('X1', cls_type((None, 5)))],
                        outputs=[('Y', cls_type())],
                        target_opset=opv)
     return onx
    def to_algebra(self, op_version=None):
        """
        Converts the variable into an operator.
        """
        if self.alg_ is not None:
            return self.alg_

        if self.cst is not None:
            self.alg_ = OnnxIdentity(self.cst, op_version=op_version)
            self.alg_inputs_ = None
            return self.alg_

        new_inputs = [
            self._graph_guess_dtype(i, inp)
            for i, inp in enumerate(self.inputs)
        ]
        self.alg_inputs_ = new_inputs
        vars = [v[1] for v in new_inputs]
        var = self.fct(*vars)
        if not isinstance(var, OnnxVar):
            raise RuntimeError(  # pragma: no cover
                "var is not from type OnnxVar but %r." % type(var))

        self.alg_ = var.to_algebra(op_version=op_version)
        return self.alg_
Example #20
 def output_names(self, value):
     """
     Updates 'output_names' of attribute 'unique'
     or every output name of attribute 'values'.
     """
     if self.values is None:
         if (hasattr(self.unique, 'to_onnx')
                 or hasattr(self.unique, 'add_to')):
             if len(value) > 1:
                 self.values = tuple(
                     OnnxIdentity(self.unique[i],
                                  output_names=value[i:i + 1],
                                  op_version=self.unique.op_version)
                     for i in range(0, len(value)))
                 self.unique = None
                 return
             self.unique.output_names = value
             return
         raise NotImplementedError(  # pragma: no cover
             "Not implemented yet, value=%r, unique=%r values=%r." %
             (value, self.unique, self.values))
     if self.values is not None and len(self.values) == len(value):
         for name, v in zip(value, self.values):
             v.output_names = [name]
         return
     raise NotImplementedError(  # pragma: no cover
         "Not implemented yet, value=%r, unique=%r values=%r." %
         (value, self.unique, self.values))
Example #21
 def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct):
     onx = onnx_cl('X', output_names=['Y'])
     X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
     model_def = onx.to_onnx({'X': X.astype(numpy.float32)})
     # no inplace
     oinf = OnnxInference(model_def, inplace=False)
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
     # inplace
     oinf = OnnxInference(model_def, input_inplace=False, inplace=True)
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
     # inplace2
     onx2 = OnnxIdentity(onnx_cl('X'), output_names=['Y'])
     model_def2 = onx2.to_onnx({'X': X.astype(numpy.float32)})
     oinf = OnnxInference(model_def2, input_inplace=False, inplace=True)
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(np_fct(X), got['Y'], decimal=6)
     # input inplace
     expe = np_fct(X)
     oinf = OnnxInference(model_def, input_inplace=True, inplace=True)
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(expe, got['Y'], decimal=6)
Example #22
    def test_onnx_example_pdist_in(self):
        opv = _TARGET_OPSET_
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        cop = OnnxAdd('input', 'input', op_version=opv)
        cop2 = OnnxIdentity(onnx_squareform_pdist(cop,
                                                  dtype=np.float32,
                                                  op_version=opv),
                            output_names=['pdist'],
                            op_version=opv)

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('pdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = squareform(pdist(x * 2, metric="sqeuclidean"))
        assert_almost_equal(exp, res[0])

        x = np.array([1, 2, 4, 5]).astype(np.float32).reshape((2, 2))
        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = squareform(pdist(x * 2, metric="sqeuclidean"))
        assert_almost_equal(exp, res[0])

        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((2, 3))
        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = squareform(pdist(x * 2, metric="sqeuclidean"))
        assert_almost_equal(exp, res[0])
    def test_onnx_remove_redundant_subgraphs_full(self):
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        cop = OnnxAdd(OnnxIdentity('input',
                                   op_version=get_opset_number_from_onnx()),
                      'input',
                      op_version=get_opset_number_from_onnx())
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=get_opset_number_from_onnx())
        cdist2 = onnx_squareform_pdist(cop,
                                       dtype=numpy.float32,
                                       op_version=get_opset_number_from_onnx())
        cop2 = OnnxAdd(cdist,
                       cdist2,
                       output_names=['cdist'],
                       op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx({'input': FloatTensorType()},
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=get_opset_number_from_onnx())
        stats = onnx_statistics(model_def, optim=False)
        new_model = onnx_optimisations(model_def)
        stats2 = onnx_statistics(new_model, optim=False)
        self.assertLess(stats2['size'], stats['size'])
        self.assertLess(stats2['nnodes'], stats['nnodes'])
        self.assertLess(stats2['op_Identity'], stats['op_Identity'])
    def test_onnx_example_algebra(self):
        initial = np.array([0, 0]).astype(np.float32).reshape((2,))
        x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))

        opv = _TARGET_OPSET_
        add_node = OnnxAdd(
            'sum_in', 'next', output_names=['sum_out'],
            op_version=opv)
        id_node = OnnxIdentity(
            add_node, output_names=['scan_out'],
            op_version=opv)
        scan_body = id_node.to_onnx(
            {'sum_in': initial, 'next': initial},
            outputs=[('sum_out', FloatTensorType()),
                     ('scan_out', FloatTensorType())])

        node = OnnxScan('initial', 'x', output_names=['y', 'z'],
                        num_scan_inputs=1, body=scan_body.graph,
                        op_version=opv)
        model_def = node.to_onnx(
            {'initial': initial, 'x': x},
            outputs=[('y', FloatTensorType()),
                     ('z', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'initial': initial, 'x': x})

        y = np.array([9, 12]).astype(np.float32).reshape((2,))
        z = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))
        assert_almost_equal(y, res[0])
        assert_almost_equal(z, res[1])
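
The Scan graph in this test accumulates the rows of x, which is why y equals the total sum and z the running sums. A plain NumPy check of those expected values:

import numpy as np

x = np.array([1, 2, 3, 4, 5, 6], dtype=np.float32).reshape((3, 2))
z = np.cumsum(x, axis=0)   # scan_out per step: [[1, 2], [4, 6], [9, 12]]
y = z[-1]                  # final sum_out: [9, 12]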
Example #25
    def test_onnx_rename_node_scan(self):
        def squareform_pdist(X, **kwargs):
            opv = TARGET_OPSET
            diff = OnnxSub('next_in',
                           'next',
                           output_names=['diff'],
                           op_version=opv)
            id_next = OnnxIdentity('next_in',
                                   output_names=['next_out'],
                                   op_version=opv)
            norm = OnnxReduceSumSquare(diff,
                                       output_names=['norm'],
                                       axes=[1],
                                       op_version=opv)
            flat = OnnxSqueezeApi11(norm,
                                    output_names=['scan_out'],
                                    axes=[1],
                                    op_version=opv)
            scan_body = id_next.to_onnx(
                OrderedDict([('next_in', FloatTensorType()),
                             ('next', FloatTensorType())]),
                outputs=[('next_out', FloatTensorType([None, None])),
                         ('scan_out', FloatTensorType([None]))],
                other_outputs=[flat])

            node = OnnxScan(X,
                            X,
                            output_names=['scan0_{idself}', 'scan1_{idself}'],
                            num_scan_inputs=1,
                            body=scan_body.graph,
                            op_version=opv,
                            **kwargs)
            return node[1]

        rows = []

        def flog(*s):
            rows.append(" ".join(map(str, s)))

        opv = TARGET_OPSET
        onnx_fct = OnnxIdentity(squareform_pdist('x'),
                                output_names='Y',
                                op_version=opv)
        model_def = onnx_fct.to_onnx(inputs=[('x', FloatTensorType())])

        oinf1 = OnnxInference(model_def)
        new_model = onnx_rename_names(model_def,
                                      verbose=1,
                                      fLOG=flog,
                                      strategy='type')
        total = "\n".join(rows)
        self.assertNotIn('name: "Re_ReduceSumSquare"', str(new_model))
        self.assertIn("'Re_ReduceSumSquare' -> 'n_24'", total)
        oinf2 = OnnxInference(new_model)
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        y1 = oinf1.run({'x': x})
        y2 = oinf2.run({'x': x})
        self.assertEqualArray(y1['Y'], y2['Y'])
Example #26
def decorrelate_transformer_convertor(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs
    X = operator.inputs[0]
    subop = OnnxSubEstimator(op.pca_, X, op_version=opv)
    Y = OnnxIdentity(subop, op_version=opv, output_names=out[:1])
    Y.add_to(scope, container)
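
The matching shape calculator for this converter only has to propagate the input type and the PCA dimension. A brief sketch, assuming the transformer keeps its fitted PCA in pca_ as above:

def decorrelate_transformer_shape_calculator(operator):
    op = operator.raw_operator
    input_type = operator.inputs[0].type.__class__    # keep the input element type
    input_dim = operator.inputs[0].type.shape[0]      # number of observations
    output_dim = op.pca_.components_.shape[0]         # number of PCA components
    operator.outputs[0].type = input_type([input_dim, output_dim])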
 def to_onnx_operator(self, inputs=None, outputs=('Y', )):
     if inputs is None:
         raise RuntimeError("inputs should contain one name")
     opv = self.op_version
     i0 = self.get_inputs(inputs, 0)
     out = OnnxSubEstimator(self.norm_, i0, op_version=opv)
     return OnnxIdentity(out, op_version=self.op_version,
                         output_names=outputs)
Example #28
    def test_onnx_example_pdist(self):
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))

        opv = _TARGET_OPSET_
        diff = OnnxSub('next_in',
                       'next',
                       output_names=['diff'],
                       op_version=opv)
        id_next = OnnxIdentity('next_in',
                               output_names=['next_out'],
                               op_version=opv)
        norm = OnnxReduceSumSquare(diff,
                                   output_names=['norm'],
                                   axes=[1],
                                   op_version=opv)
        flat = OnnxSqueezeApi11(norm,
                                output_names=['scan_out'],
                                axes=[1],
                                op_version=opv)
        scan_body = id_next.to_onnx(OrderedDict([('next_in', x),
                                                 ('next', FloatTensorType())]),
                                    outputs=[
                                        ('next_out', FloatTensorType([3, 2])),
                                        ('scan_out', FloatTensorType([3]))
                                    ],
                                    other_outputs=[flat],
                                    target_opset=opv)

        sess = InferenceSession(scan_body.SerializeToString())
        res = sess.run(None, {'next_in': x, 'next': x[:1]})
        assert_almost_equal(x, res[0])
        exp = np.array([0., 18., 20.], dtype=np.float32)
        assert_almost_equal(exp, res[1])

        node = OnnxScan('x',
                        'x',
                        output_names=['y', 'z'],
                        num_scan_inputs=1,
                        body=scan_body.graph,
                        op_version=opv)
        model_def = node.to_onnx({'x': x},
                                 outputs=[('y', FloatTensorType([3, 2])),
                                          ('z', FloatTensorType([3, 3]))])
        try:
            onnx.checker.check_model(model_def)
        except ValidationError as e:
            if StrictVersion(onnx__version__) <= StrictVersion("1.5.0"):
                warnings.warn(e)
            else:
                raise e

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x': x})

        exp = squareform(pdist(x, metric="sqeuclidean"))
        assert_almost_equal(x, res[0])
        assert_almost_equal(exp, res[1])
Example #29
 def test_grad_helper_mul(self):
     opv = opset
     xi = OnnxIdentity('X', op_version=opv)
     node = OnnxMul(xi, xi, op_version=opv, output_names=['Y'])
     onx = node.to_onnx({'X': FloatTensorType([None, 10])},
                        {'Y': FloatTensorType([None, 10])},
                        target_opset=opv)
     new_onx = onnx_derivative(onx)
     self.check_runtime(new_onx, 'test_grad_helper_mul')
def pyod_iforest_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the only input.
    X = operator.inputs[0]

    # In most cases, computation happens in floats,
    # but it might use doubles. ONNX is very strict
    # about types: every constant should have the same
    # type as the input.
    dtype = guess_numpy_type(X.type)

    detector = op.detector_  # Should be the fitted IForest from pyod.
    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
    scores = OnnxIdentity(lab_pred[1], op_version=opv)

    # labels
    threshold = op.threshold_
    above = OnnxLess(scores,
                     np.array([threshold], dtype=dtype),
                     op_version=opv)
    labels = OnnxCast(above,
                      op_version=opv,
                      to=onnx_proto.TensorProto.INT64,
                      output_names=out[:1])

    # probabilities
    train_scores = op.decision_scores_
    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype), op_version=opv)

    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
    scaled_centered = OnnxAdd(scaled,
                              scaler.min_.astype(dtype),
                              op_version=opv)
    clipped = OnnxClip(scaled_centered,
                       np.array([0], dtype=dtype),
                       np.array([1], dtype=dtype),
                       op_version=opv)
    clipped_ = OnnxAdd(OnnxMul(clipped,
                               np.array([-1], dtype=dtype),
                               op_version=opv),
                       np.array([1], dtype=dtype),
                       op_version=opv)

    scores_2d = OnnxConcat(clipped_,
                           clipped,
                           axis=1,
                           op_version=opv,
                           output_names=out[1:])

    labels.add_to(scope, container)
    scores_2d.add_to(scope, container)
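
The probability branch above rescales the raw decision scores with the fitted MinMaxScaler, clips them to [0, 1] and stacks outlier and inlier probabilities. A NumPy equivalent of that post-processing, with synthetic stand-ins for the fitted objects:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Synthetic stand-ins (assumption) for op.decision_scores_ and the
# scores returned by the sub-estimator.
train_scores = np.array([-0.3, -0.1, 0.2, 0.4], dtype=np.float32)
scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
scores = np.array([[-0.2], [0.3]], dtype=np.float32)

scaled = (-scores) * scaler.scale_ + scaler.min_          # OnnxMul, OnnxMul, OnnxAdd
clipped = np.clip(scaled, 0.0, 1.0)                       # OnnxClip
proba = np.concatenate([1.0 - clipped, clipped], axis=1)  # OnnxConcat(axis=1) -> out[1:]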