Example #1
    def test_onnx_rename_names_type(self):
        rows = []

        def flog(*s):
            rows.append(" ".join(map(str, s)))

        dtype = numpy.float32
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd('X',
                      numpy.array([1], dtype=dtype),
                      op_version=TARGET_OPSET)
        cop2 = OnnxAdd('X',
                       numpy.array([1], dtype=dtype),
                       op_version=TARGET_OPSET)
        cop3 = OnnxAdd('X',
                       numpy.array([2], dtype=dtype),
                       op_version=TARGET_OPSET,
                       output_names=['inter'])
        cop4 = OnnxSub(OnnxMul(cop, cop3, op_version=TARGET_OPSET),
                       cop2,
                       output_names=['final'],
                       op_version=TARGET_OPSET)
        model_def = cop4.to_onnx({'X': x})
        oinf1 = OnnxInference(model_def)
        new_model = onnx_rename_names(model_def,
                                      verbose=1,
                                      fLOG=flog,
                                      strategy='type')
        total = "\n".join(rows)
        self.assertIn("'Ad_Addcst' -> 'i_05'", total)
        oinf2 = OnnxInference(new_model)
        y1 = oinf1.run({'X': x})
        y2 = oinf2.run({'X': x})
        self.assertEqualArray(y1['final'], y2['final'])
 def test_onnxt_runtime_empty(self):
     idi = numpy.identity(2, dtype=numpy.float32)
     onx = OnnxAdd('X', idi, output_names=['Y'], op_version=TARGET_OPSET)
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     model_def.ir_version = get_ir_version(TARGET_OPSET)
     oinf = OnnxInference(model_def, runtime='empty')
     self.assertNotEmpty(oinf)
Example #3
    def test_onnxview(self):

        idi = numpy.identity(2)
        onx = OnnxAdd('X',
                      idi,
                      output_names=['Y'],
                      op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})

        mg = OnnxNotebook()
        mg.add_context({"model": model_def})
        cmd = "--help"
        res, out, _ = self.capture(lambda: mg.onnxview(cmd))
        self.assertEmpty(res)
        self.assertIn("notebook", out)

        mg = OnnxNotebook()
        mg.add_context({"model": model_def})
        cmd = "model"
        res = mg.onnxview(cmd)
        self.assertNotEmpty(res)
        self.assertIn('RenderJsDot', str(res))

        mg = OnnxNotebook()
        mg.add_context({"model": model_def})
        cmd = "-r 1 model"
        res = mg.onnxview(cmd)
        self.assertNotEmpty(res)
        self.assertIn('RenderJsDot', str(res))
    def test_onnxt_idi(self):
        idi = numpy.identity(2)
        onx = OnnxAdd('X', idi, output_names=['Y'])
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})

        oinf = OnnxInference(model_def)
        res = str(oinf)
        self.assertIn('op_type: "Add"', res)

        sb = model_def.SerializeToString()
        oinf = OnnxInference(sb)
        res = str(oinf)
        self.assertIn('op_type: "Add"', res)

        sb = BytesIO(model_def.SerializeToString())
        oinf = OnnxInference(sb)
        res = str(oinf)
        self.assertIn('op_type: "Add"', res)

        temp = get_temp_folder(__file__, "temp_onnxrt_idi")
        name = os.path.join(temp, "m.onnx")
        with open(name, "wb") as f:
            f.write(model_def.SerializeToString())

        oinf = OnnxInference(name)
        res = str(oinf)
        self.assertIn('op_type: "Add"', res)
    def test_complex(self):
        for dt, var, pr in ((np.complex64, Complex64TensorType, 14),
                            (np.complex128, Complex128TensorType, 15)):
            X = np.array([[1 - 2j, -12j], [-1 - 2j, 1 + 2j]]).astype(dt)

            for opv in (10, 11, 12, 13, TARGET_OPSET):
                if opv > TARGET_OPSET:
                    continue
                out = OnnxAdd('X',
                              np.array([1 + 2j]),
                              output_names=['Y'],
                              op_version=opv)
                onx = out.to_onnx([('X', var((None, 2)))],
                                  outputs=[('Y', var())],
                                  target_opset=opv)
                self.assertIn('elem_type: %d' % pr, str(onx))

                try:
                    ort = InferenceSession(onx.SerializeToString())
                except InvalidGraph as e:
                    if "Type Error: Type 'tensor(complex" in str(e):
                        continue
                    raise e
                assert ort is not None
                got = ort.run(None, {'X': X})[0]
                assert_almost_equal(X + np.array([1 + 2j]), got)
Example #6
    def test_onnx_init_sparse_coo(self):
        row = np.array([0, 0, 1, 3, 1], dtype=np.float32)
        col = np.array([0, 2, 1, 3, 1], dtype=np.float32)
        data = np.array([1, 1, 1, 1, 1], dtype=np.float32)
        X = coo_matrix((data, (row, col)), shape=(4, 4))

        node = OnnxAdd(
            'X', X, output_names=['Y'],
            op_version=TARGET_OPSET)

        model_def = node.to_onnx(
            {'X': X}, outputs=[('Y', FloatTensorType())])

        try:
            sess = InferenceSession(model_def.SerializeToString())
        except (RuntimeError, OrtInvalidArgument):
            # Sparse tensors are not supported as constants.
            return
        try:
            res = sess.run(None, {'X': X})[0]
        except RuntimeError as e:
            # Sparse tensors are not supported as constants.
            warnings.warn(
                "Unable to run with %r\n---\n%s\n%s" % (
                    {'X': X}, model_def, e))
            return
        assert_almost_equal(X + X, res)
Example #7
def _onnx_linear_regression(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X, A, B) = X A + B`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('linear_regression')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (OnnxMatMul, OnnxAdd)
    res = OnnxAdd(OnnxMatMul('X', 'A', op_version=target_opset),
                  'B',
                  op_version=target_opset,
                  output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None])), ('A', var_type([None, None])),
             ('B', var_type([None, None]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset,
                      other_outputs=[res])
    return onx
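
The usage sketch below is an editorial addition, not part of the original snippet: it assumes onnxcustom exposes this graph under the name 'linear_regression' (as the docstring above suggests) and checks that the model computes X A + B on random inputs.

import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

# Build the graph shown above and evaluate it on random data.
model_onnx = function_onnx_graph('linear_regression')
oinf = OnnxInference(model_onnx)
X = numpy.random.randn(4, 3).astype(numpy.float32)
A = numpy.random.randn(3, 1).astype(numpy.float32)
B = numpy.random.randn(1, 1).astype(numpy.float32)
res = oinf.run({'X': X, 'A': A, 'B': B})
# res['Y'] is expected to be close to X @ A + B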
    def test_onnx_example_cdist_in(self):
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        x2 = np.array([1.1, 2.1, 4.01, 5.01, 5.001, 4.001, 0,
                       0]).astype(np.float32).reshape((4, 2))
        cop = OnnxAdd('input', 'input')
        cop2 = OnnxIdentity(onnx_cdist(cop, x2, dtype=np.float32),
                            output_names=['cdist'])

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x2, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=5)

        x = np.array(
            [[6.1, 2.8, 4.7, 1.2], [5.7, 3.8, 1.7, 0.3], [7.7, 2.6, 6.9, 2.3],
             [6.0, 2.9, 4.5, 1.5], [6.8, 2.8, 4.8, 1.4], [5.4, 3.4, 1.5, 0.4],
             [5.6, 2.9, 3.6, 1.3], [6.9, 3.1, 5.1, 2.3]],
            dtype=np.float32)
        cop = OnnxAdd('input', 'input')
        cop2 = OnnxIdentity(onnx_cdist(cop, x, dtype=np.float32),
                            output_names=['cdist'])

        model_def = cop2.to_onnx(inputs=[('input',
                                          FloatTensorType([None, None]))],
                                 outputs=[('cdist', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        exp = scipy_cdist(x * 2, x, metric="sqeuclidean")
        assert_almost_equal(exp, res[0], decimal=4)
Example #9
    def test_onnx_if_algebra_indirect_unnamed_clear_input(self):

        opv = TARGET_OPSET
        x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
        x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

        node_xy = OnnxMul('x1', 'x2', op_version=opv)
        node_then = OnnxAdd(
            'x1', 'xy', output_names=['absxythen'], op_version=opv)
        then_body = node_then.to_onnx(
            {'x1': x1, 'xy': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node_else = OnnxSub(
            'x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node_else.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])

        cond = OnnxGreater(
            OnnxReduceSum('x1', op_version=opv),
            OnnxReduceSum('x2', op_version=opv),
            op_version=opv)
        ifnode = OnnxIf(cond, then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv, output_names=['y'],
                        global_context={'xy': node_xy},
                        clear_subgraph_inputs=True)
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x1': x1, 'x2': x2})
        assert_almost_equal(x1 + x1 * x2, res[0])
Example #10
    def test_onnx_subgraphs2(self):
        x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
            (3, 2))
        cop = OnnxAdd(OnnxIdentity('input', op_version=TARGET_OPSET),
                      'input',
                      op_version=TARGET_OPSET)
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=TARGET_OPSET)
        id1 = [id(a) for a in cdist.onx_op.graph_algebra['body']]
        cdist2 = onnx_squareform_pdist(cop,
                                       dtype=numpy.float32,
                                       op_version=TARGET_OPSET)
        id2 = [id(a) for a in cdist2.onx_op.graph_algebra['body']]
        self.assertNotEqual(id1, id2)
        cop2 = OnnxAdd(cdist,
                       cdist2,
                       output_names=['cdist'],
                       op_version=TARGET_OPSET)

        model_def = cop2.to_onnx({'input': FloatTensorType([None, None])},
                                 outputs=[('cdist',
                                           FloatTensorType([None, None]))],
                                 target_opset=TARGET_OPSET)
        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'input': x})
        self.assertEqual(len(res), 1)
Example #11
def _onnx_axpy(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2, \\alpha) = \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpy')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    res = OnnxAdd(OnnxMul('X1', 'alpha', op_version=target_opset),
                  'X2',
                  op_version=target_opset,
                  output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()), ('alpha', var_type([1]))]
    onx = res.to_onnx(varsx,
                      outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx
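
A similar usage sketch (again an addition, using the 'axpy' name from the docstring above): it verifies that the graph computes Y = alpha * X1 + X2.

import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('axpy')
oinf = OnnxInference(model_onnx)
x1 = numpy.array([1, 2, 3], dtype=numpy.float32)
x2 = numpy.array([10, 20, 30], dtype=numpy.float32)
alpha = numpy.array([0.5], dtype=numpy.float32)
res = oinf.run({'X1': x1, 'X2': x2, 'alpha': alpha})
# res['Y'] is expected to equal alpha * x1 + x2, i.e. [10.5, 21., 31.5]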
Example #12
def _onnx_axpyw2(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y, Z = f(X1, X2, G, \\alpha, \\beta)`
    where :math:`Z = \\beta G + \\alpha X1` and
    :math:`Y = \\beta Z + \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpyw2')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    s1 = OnnxMul('X1', 'alpha', op_version=target_opset)
    s2 = OnnxMul('G', 'beta', op_version=target_opset)
    Z = OnnxAdd(s1, s2, op_version=target_opset, output_names=['Z'])
    s2_2 = OnnxMul(Z, 'beta', op_version=target_opset)
    s2_3 = OnnxAdd(s1, s2_2, op_version=target_opset)
    Y = OnnxAdd(s2_3, 'X2', op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()), ('G', var_type()),
             ('alpha', var_type([1])), ('beta', var_type([1]))]
    onx = Y.to_onnx(varsx,
                    outputs=[('Y', var_type()), ('Z', var_type())],
                    target_opset=target_opset,
                    other_outputs=[Z])
    return onx
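
A usage sketch for this two-output graph (an addition; the name 'axpyw2' is assumed to follow the same registration pattern as 'axpy' above):

import numpy
from mlprodict.onnxrt import OnnxInference
from onnxcustom.utils.onnx_function import function_onnx_graph

model_onnx = function_onnx_graph('axpyw2')
oinf = OnnxInference(model_onnx)
x1 = numpy.array([1, 2], dtype=numpy.float32)
x2 = numpy.array([3, 4], dtype=numpy.float32)
g = numpy.array([5, 6], dtype=numpy.float32)
alpha = numpy.array([0.1], dtype=numpy.float32)
beta = numpy.array([0.9], dtype=numpy.float32)
res = oinf.run({'X1': x1, 'X2': x2, 'G': g, 'alpha': alpha, 'beta': beta})
# res['Z'] = beta * g + alpha * x1
# res['Y'] = beta * res['Z'] + alpha * x1 + x2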
Example #13
    def test_onnx_if_algebra_direct(self):

        opv = TARGET_OPSET
        x1 = np.array([[0, 3], [7, 0]], dtype=np.float32)
        x2 = np.array([[1, 0], [2, 0]], dtype=np.float32)

        node = OnnxAdd(
            'x1', 'x2', output_names=['absxythen'], op_version=opv)
        then_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node = OnnxSub(
            'x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])
        del else_body.graph.input[:]
        del then_body.graph.input[:]

        cond = OnnxGreater(
            OnnxReduceSum('x1', op_version=opv),
            OnnxReduceSum('x2', op_version=opv),
            op_version=opv)
        ifnode = OnnxIf(cond, then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv, output_names=['y'])
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'x1': x1, 'x2': x2})
        assert_almost_equal(x1 + x2, res[0])
Example #14
    def test_pipeline_add(self):
        iris = load_iris()
        X, y = iris.data, iris.target
        pca = PCA(n_components=2)
        pca.fit(X)

        add = OnnxAdd('X',
                      numpy.full((1, X.shape[1]), 1, dtype=numpy.float32),
                      output_names=['Yadd'])
        onx = add.to_onnx(inputs=[('X', FloatTensorType((None, X.shape[1])))],
                          outputs=[('Yadd', FloatTensorType(
                              (None, X.shape[1])))])

        tr = OnnxTransformer(onx)
        tr.fit()

        pipe = make_pipeline(tr, LogisticRegression())
        pipe.fit(X, y)
        pred = pipe.predict(X)
        self.assertEqual(pred.shape, (150, ))
        model_onnx = to_onnx(pipe, X.astype(numpy.float32))

        oinf = OnnxInference(model_onnx)
        y1 = pipe.predict(X)
        y2 = oinf.run({'X': X.astype(numpy.float32)})
        self.assertEqual(list(y2), ['output_label', 'output_probability'])
        self.assertEqualArray(y1, y2['output_label'])
        y1 = pipe.predict_proba(X)
        probas = DataFrame(list(y2['output_probability'])).values
        self.assertEqualArray(y1, probas, decimal=5)
 def test_onnxt_runtime_add_raise(self):
     idi = numpy.identity(2).astype(numpy.float32)
     onx = OnnxAdd('X', idi, output_names=['Y2'],
                   op_version=TARGET_OPSET)
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     self.assertRaise(lambda: OnnxInference(model_def, runtime='onnxruntime-1'),
                      ValueError)
    def test_onnx_remove_redundant_subgraphs_full(self):
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist
        cop = OnnxAdd(OnnxIdentity('input',
                                   op_version=get_opset_number_from_onnx()),
                      'input',
                      op_version=get_opset_number_from_onnx())
        cdist = onnx_squareform_pdist(cop,
                                      dtype=numpy.float32,
                                      op_version=get_opset_number_from_onnx())
        cdist2 = onnx_squareform_pdist(cop,
                                       dtype=numpy.float32,
                                       op_version=get_opset_number_from_onnx())
        cop2 = OnnxAdd(cdist,
                       cdist2,
                       output_names=['cdist'],
                       op_version=get_opset_number_from_onnx())

        model_def = cop2.to_onnx({'input': FloatTensorType()},
                                 outputs=[('cdist', FloatTensorType())],
                                 target_opset=get_opset_number_from_onnx())
        stats = onnx_statistics(model_def, optim=False)
        new_model = onnx_optimisations(model_def)
        stats2 = onnx_statistics(new_model, optim=False)
        self.assertLess(stats2['size'], stats['size'])
        self.assertLess(stats2['nnodes'], stats['nnodes'])
        self.assertLess(stats2['op_Identity'], stats['op_Identity'])
    def test_onnxt_reduce_size(self):
        idi = numpy.identity(2)
        onx = OnnxAdd('X', idi, output_names=['Y'],
                      op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})

        oinf = OnnxInference(model_def, runtime="python_compiled")
        res = oinf.run({'X': idi.astype(numpy.float32)})
        self.assertEqual(idi * 2, res['Y'])

        oinf.reduce_size(False)
        res = oinf.run({'X': idi.astype(numpy.float32)})
        self.assertEqual(idi * 2, res['Y'])
        st = BytesIO()
        try:
            pickle.dump(oinf, st)
        except AttributeError:
            # missing obj
            pass

        oinf = OnnxInference(model_def, runtime="python_compiled")
        res = oinf.run({'X': idi.astype(numpy.float32)})
        self.assertEqual(idi * 2, res['Y'])

        oinf.reduce_size(True)
        res = oinf.run({'X': idi.astype(numpy.float32)})
        self.assertEqual(idi * 2, res['Y'])
        st = BytesIO()
        pickle.dump(oinf, st)
        val = st.getvalue()
        oinf2 = pickle.load(BytesIO(val))
        self.assertNotEmpty(oinf2)
Example #18
    def test_if2(self):

        opv = TARGET_OPSET
        x1 = numpy.array([[0, 3], [7, 0]], dtype=numpy.float32)
        x2 = numpy.array([[1, 0], [2, 0]], dtype=numpy.float32)

        node = OnnxAdd(
            'x1', 'x2', output_names=['absxythen'], op_version=opv)
        then_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxythen', FloatTensorType())])
        node = OnnxSub(
            'x1', 'x2', output_names=['absxyelse'], op_version=opv)
        else_body = node.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('absxyelse', FloatTensorType())])
        del else_body.graph.input[:]
        del then_body.graph.input[:]

        cond = OnnxGreater(
            OnnxReduceSum('x1', op_version=opv),
            OnnxReduceSum('x2', op_version=opv),
            op_version=opv)
        ifnode = OnnxIf(cond, then_branch=then_body.graph,
                        else_branch=else_body.graph,
                        op_version=opv, output_names=['y'])
        model_def = ifnode.to_onnx(
            {'x1': x1, 'x2': x2}, target_opset=opv,
            outputs=[('y', FloatTensorType())])
        oinf = OnnxInference(model_def)
        dot = oinf.to_dot()
        self.assertIn("Gr_Greater -> Gr_C0;", dot)
 def test_onnxt_add(self):
     idi = numpy.identity(2)
     onx = OnnxAdd('X', idi, output_names=['Y'])
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     oinf = OnnxInference(model_def, runtime="python")
     res = oinf.switch_initializers_dtype()
     self.assertEqual(len(res), 1)
     self.assertEqual(res[0][:4], ('pass1', '+', 'init', 'Ad_Addcst'))
 def test_onnxt_json(self):
     idi = numpy.identity(2)
     idi2 = numpy.identity(2) * 2
     onx = OnnxAdd(OnnxAdd('X', idi), idi2, output_names=['Y'])
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     oinf = OnnxInference(model_def)
     js = oinf.to_json()
     self.assertIn('"initializers": {', js)
Example #21
 def test_onnxt_runtime_add(self):
     idi = numpy.identity(2)
     onx = OnnxAdd('X', idi, output_names=['Y'])
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
     oinf = OnnxInference(model_def, runtime='onnxruntime2')
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(idi + X, got['Y'], decimal=6)
def pyod_iforest_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # In most cases, the computation happens in floats,
    # but it might be done in doubles. ONNX is very strict
    # about types: every constant should have the same
    # type as the input.
    dtype = guess_numpy_type(X.type)

    detector = op.detector_  # The underlying IsolationForest from scikit-learn.
    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
    scores = OnnxIdentity(lab_pred[1], op_version=opv)

    # labels
    threshold = op.threshold_
    above = OnnxLess(scores,
                     np.array([threshold], dtype=dtype),
                     op_version=opv)
    labels = OnnxCast(above,
                      op_version=opv,
                      to=onnx_proto.TensorProto.INT64,
                      output_names=out[:1])

    # probabilities
    train_scores = op.decision_scores_
    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype), op_version=opv)
    print(scaler.min_)
    print(scaler.scale_)

    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
    scaled_centered = OnnxAdd(scaled,
                              scaler.min_.astype(dtype),
                              op_version=opv)
    clipped = OnnxClip(scaled_centered,
                       np.array([0], dtype=dtype),
                       np.array([1], dtype=dtype),
                       op_version=opv)
    clipped_ = OnnxAdd(OnnxMul(clipped,
                               np.array([-1], dtype=dtype),
                               op_version=opv),
                       np.array([1], dtype=dtype),
                       op_version=opv)

    scores_2d = OnnxConcat(clipped_,
                           clipped,
                           axis=1,
                           op_version=opv,
                           output_names=out[1:])

    labels.add_to(scope, container)
    scores_2d.add_to(scope, container)
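
To actually use the converter above, it has to be registered with skl2onnx together with a shape calculator. The sketch below is an addition: pyod_iforest_shape_calculator is a hypothetical helper (not shown in the original snippet) and the alias "PyodIForest" is arbitrary.

from skl2onnx import update_registered_converter
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from pyod.models.iforest import IForest


def pyod_iforest_shape_calculator(operator):
    # Hypothetical shape calculator: one label column, two probability columns.
    N = operator.inputs[0].type.shape[0]
    operator.outputs[0].type = Int64TensorType([N, 1])
    operator.outputs[1].type = FloatTensorType([N, 2])


update_registered_converter(
    IForest, "PyodIForest",
    pyod_iforest_shape_calculator,
    pyod_iforest_converter)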
Example #23
def build_ort_add(op_version=12):
    node1 = OnnxAdd('x', 'y', op_version=op_version)
    node2 = OnnxAdd(node1, 'y', op_version=op_version)
    node = OnnxAdd(node2, 'y', op_version=op_version, output_names=['z'])
    onx = node.to_onnx(inputs=[('x', FloatTensorType()),
                               ('y', FloatTensorType())],
                       target_opset=op_version)
    sess = InferenceSession(onx.SerializeToString())
    return lambda x, y: sess.run(None, {'x': x, 'y': y})
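
A short usage note (an addition): the returned callable chains three Add nodes, so it computes x + 3 * y.

import numpy as np

add3 = build_ort_add(op_version=12)
x = np.random.randn(4, 5).astype(np.float32)
y = np.random.randn(4, 5).astype(np.float32)
z = add3(x, y)[0]
# z is expected to be close to x + 3 * y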
 def test_onnxt_run(self):
     idi = numpy.identity(2)
     idi2 = numpy.identity(2) * 2
     onx = OnnxAdd(OnnxAdd('X', idi), idi2, output_names=['Y'])
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     oinf = OnnxInference(model_def)
     X = numpy.array([[1, 1], [3, 3]])
     y = oinf.run({'X': X.astype(numpy.float32)})
     exp = numpy.array([[4, 1], [3, 6]], dtype=numpy.float32)
     self.assertEqual(list(y), ['Y'])
     self.assertEqualArray(y['Y'], exp)
 def test_code_add_except(self):
     idi = numpy.identity(2, dtype=numpy.float32)
     onx = OnnxAdd('X', idi, output_names=['Y'], op_version=TARGET_OPSET)
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
                             target_opset=TARGET_OPSET)
     model_def.ir_version = get_ir_version(TARGET_OPSET)
     oinf = OnnxInference(model_def, runtime='onnxruntime1')
     try:
         oinf.to_python()
     except ValueError:
         pass
Example #26
 def test_grad_helper_exc(self):
     opv = opset
     node = OnnxAdd('X',
                    numpy.array([1], dtype=numpy.float32),
                    op_version=opv,
                    output_names=['Y'])
     onx = node.to_onnx({'X': FloatTensorType([None, 10])},
                        {'Y': FloatTensorType([None, 10])},
                        target_opset=opv)
     self.assertRaise(lambda: onnx_derivative(onx, weights=[], options=1),
                      TypeError)
Example #27
 def test_grad_helper_noweight(self):
     opv = opset
     node = OnnxAdd('X',
                    numpy.array([1], dtype=numpy.float32),
                    op_version=opv,
                    output_names=['Y'])
     onx = node.to_onnx({'X': FloatTensorType([None, 10])},
                        {'Y': FloatTensorType([None, 10])},
                        target_opset=opv)
     new_onx = onnx_derivative(onx, weights=[])
     self.check_runtime(new_onx, 'test_grad_helper_noweight')
Example #28
 def generate_onnx_graph(dim, nbnode, input_name='X1'):
     i1 = input_name
     for i in range(nbnode - 1):
         i2 = (np.ones((1, dim)) * nbnode * 10).astype(np.float32)
         node = OnnxAdd(i1, i2)
         i1 = node
     i2 = (np.ones((1, dim)) * nbnode * 10).astype(np.float32)
     node = OnnxAdd(i1, i2, output_names=['Y'])
     onx = node.to_onnx([(input_name, FloatTensorType((None, dim)))],
                        outputs=[('Y', FloatTensorType())])
     return onx
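
A brief usage sketch (an addition, with arbitrary sizes): with the default input name 'X1', each of the nbnode Add nodes adds a constant filled with nbnode * 10, so the output equals the input plus nbnode * nbnode * 10.

import numpy as np
from onnxruntime import InferenceSession

onx = generate_onnx_graph(dim=3, nbnode=4)
sess = InferenceSession(onx.SerializeToString())
x = np.zeros((1, 3), dtype=np.float32)
got = sess.run(None, {'X1': x})[0]
# got is expected to be filled with 4 * 4 * 10 = 160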
    def test_onnx_init_dense(self):
        X = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))

        node = OnnxAdd('X', X, output_names=['Y'], op_version=TARGET_OPSET)

        model_def = node.to_onnx({'X': X}, outputs=[('Y', FloatTensorType())])

        sess = InferenceSession(model_def.SerializeToString())
        res = sess.run(None, {'X': X})[0]

        assert_almost_equal(X + X, res)
Example #30
 def test_add(self):
     idi = numpy.identity(2)
     onx = OnnxAdd('X', idi, output_names=['Y'])
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
                             target_opset=12)
     X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
     sess = InferenceSession(model_def.SerializeToString())
     got = sess.run(None, {'X': X})
     exp = idi + X
     self.assertEqual(exp.shape, got[0].shape)
     self.assertEqual(list(exp.ravel()), list(got[0].ravel()))
     self.assertIn("version: 7", str(model_def))