Example #1
def _rewrite_op_no_grad(onx):
    """
    Rewrites operators with no gradient.
    """
    set_types = set(n.op_type for n in onx.graph.node)
    if "Reciprocal" in set_types:
        from skl2onnx.algebra.onnx_ops import OnnxDiv  # pylint: disable=E0611
        from skl2onnx.common.data_types import FloatTensorType
        from .onnx_rewriter import onnx_rewrite_operator

        opset = None
        for op in onx.opset_import:
            if op.domain in ('', 'ai.onnx'):
                opset = op.version
        if opset is None:  # pragma: no cover
            from .. import get_max_opset
            opset = get_max_opset()

        node = OnnxDiv(numpy.array([1], dtype=numpy.float32),
                       'X',
                       output_names=['Y'],
                       op_version=opset)
        rewrite_onx = node.to_onnx(inputs={'X': FloatTensorType()},
                                   outputs={'Y': FloatTensorType()},
                                   target_opset=opset)
        onx = onnx_rewrite_operator(onx, 'Reciprocal', rewrite_onx)

    return onx
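
A minimal usage sketch for _rewrite_op_no_grad (hedged: the helper's import path is not shown above, so it is assumed importable from the module that defines it): build a model containing a Reciprocal node and verify the rewrite removes it.

import numpy
from skl2onnx.algebra.onnx_ops import OnnxReciprocal  # pylint: disable=E0611
from skl2onnx.common.data_types import FloatTensorType
# _rewrite_op_no_grad is assumed importable from the module defined above.

node = OnnxReciprocal('X', output_names=['Y'], op_version=15)
onx = node.to_onnx(inputs={'X': FloatTensorType()},
                   outputs={'Y': FloatTensorType()},
                   target_opset=15)
onx = _rewrite_op_no_grad(onx)
# The Reciprocal node is now expressed as Div(1, X).
assert 'Reciprocal' not in str(onx)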
Example #2
def conv(scope, operator, container):
    # skl2onnx converter for an estimator exposing two fitted
    # attributes W and S: it computes (X - W) / S.
    W = operator.raw_operator.W
    S = operator.raw_operator.S
    X = operator.inputs[0]
    out = operator.outputs
    op = OnnxDiv(OnnxSub(X, W), S, output_names=out)
    op.add_to(scope, container)
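
For context, a converter such as conv only takes effect once it is registered for its estimator. The sketch below is hypothetical: CustomScaler and the shape calculator are invented for illustration; update_registered_converter is the actual skl2onnx entry point.

from skl2onnx import update_registered_converter

def custom_scaler_shape_calculator(operator):
    # The output keeps the type and shape of the input.
    operator.outputs[0].type = operator.inputs[0].type

# CustomScaler is a hypothetical sklearn-like estimator exposing W and S.
update_registered_converter(
    CustomScaler, 'CustomScaler',
    custom_scaler_shape_calculator, conv)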
Example #3
    def test_onnx_rewrite_operator(self):
        opset = get_max_opset()
        node1 = OnnxReciprocal('X', output_names=['Y'],
                               op_version=opset)
        onx1 = node1.to_onnx(
            inputs={'X': FloatTensorType()},
            outputs={'Y': FloatTensorType()},
            target_opset=opset)
        onx1.graph.name = "jjj"
        oinf1 = OnnxInference(onx1)

        node2 = OnnxDiv(numpy.array([1], dtype=numpy.float32),
                        'X', output_names=['Y'],
                        op_version=opset)
        onx2 = node2.to_onnx(
            inputs={'X': FloatTensorType()},
            outputs={'Y': FloatTensorType()},
            target_opset=opset)
        oinf2 = OnnxInference(onx2)
        X = numpy.array([[5, 6]], dtype=numpy.float32)
        y1 = oinf1.run({'X': X})['Y']
        y2 = oinf2.run({'X': X})['Y']
        self.assertEqualArray(y1, y2)

        onx3 = onnx_rewrite_operator(onx1, 'Reciprocal', onx2)
        self.assertNotIn('Reciprocal', str(onx3))
        oinf3 = OnnxInference(onx3)
        y3 = oinf3.run({'X': X})['Y']
        self.assertEqualArray(y1, y3)
Example #4
def conv(scope, operator, container):
    # Same converter as Example #2, except every operator receives
    # op_version=container.target_opset so the generated nodes match
    # the requested opset.
    W = operator.raw_operator.W
    S = operator.raw_operator.S
    X = operator.inputs[0]
    out = operator.outputs
    op = OnnxDiv(
        OnnxSub(X, W, op_version=container.target_opset),
        S, output_names=out,
        op_version=container.target_opset)
    op.add_to(scope, container)
Example #5
def test_algebra_to_onnx(self):
    X = numpy.random.randn(5, 4)
    beta = numpy.array([1, 2, 3, 4]) / 10
    beta32 = beta.astype(numpy.float32)
    onnxExpM = OnnxExp(OnnxMatMul('X', beta32))
    cst = numpy.ones((1, 3), dtype=numpy.float32)
    onnxExpM1 = OnnxAdd(onnxExpM, cst)
    onnxPred = OnnxDiv(onnxExpM, onnxExpM1)
    inputs = {'X': X[:1].astype(numpy.float32)}
    model_onnx = onnxPred.to_onnx(inputs)
    s1 = str(model_onnx)
    model_onnx = onnxPred.to_onnx(inputs)
    s2 = str(model_onnx)
    assert s1 == s2
Example #6
def test_onnx_simple_text_plot_toy(self):
    x = numpy.random.randn(10, 3).astype(numpy.float32)
    node1 = OnnxAdd('X', x, op_version=15)
    node2 = OnnxSub('X', x, op_version=15)
    node3 = OnnxAbs(node1, op_version=15)
    node4 = OnnxAbs(node2, op_version=15)
    node5 = OnnxDiv(node3, node4, op_version=15)
    node6 = OnnxAbs(node5, output_names=['Y'], op_version=15)
    onx = node6.to_onnx({'X': x.astype(numpy.float32)},
                        outputs={'Y': x},
                        target_opset=15)
    text = onnx_simple_text_plot(onx, verbose=False)
    expected = textwrap.dedent("""
    Add(X, Ad_Addcst) -> Ad_C0
      Abs(Ad_C0) -> Ab_Y0
    Identity(Ad_Addcst) -> Su_Subcst
      Sub(X, Su_Subcst) -> Su_C0
        Abs(Su_C0) -> Ab_Y02
        Div(Ab_Y0, Ab_Y02) -> Di_C0
          Abs(Di_C0) -> Y
    """).strip(" \n")
    self.assertIn(expected, text)
    text2, out, err = self.capture(
        lambda: onnx_simple_text_plot(onx, verbose=True))
    self.assertEqual(text, text2)
    self.assertIn('BEST:', out)
    self.assertEmpty(err)
Example #7
def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_
    S = self.S_
    return OnnxDiv(OnnxSub(i0, W), S, output_names=outputs)
Example #8
def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    # case where there are multiple output nodes
    return OnnxDiv(OnnxSub(i0, W, op_version=self.op_version), S,
                   output_names=outputs, op_version=self.op_version)
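
These to_onnx_operator methods appear to belong to estimators built on skl2onnx's OnnxOperatorMixin, which turns the returned algebra node into a converter. Below is a minimal sketch of such an estimator under that assumption; the class name LiveScaler and its fitted attributes are invented for illustration, only the mixin and the method signature come from the examples above.

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin
from skl2onnx.algebra.onnx_ops import OnnxDiv, OnnxSub  # pylint: disable=E0611

class LiveScaler(BaseEstimator, TransformerMixin, OnnxOperatorMixin):
    # Hypothetical scaler implementing transform(X) = (X - W_) / S_.

    def fit(self, X, y=None):
        self.W_ = X.mean(axis=0)
        self.S_ = X.std(axis=0)
        return self

    def transform(self, X):
        return (X - self.W_) / self.S_

    def to_onnx_operator(self, inputs=None, outputs=('Y', )):
        if inputs is None:
            raise RuntimeError("inputs should contain one name")
        i0 = self.get_inputs(inputs, 0)
        W = self.W_.astype(np.float32)
        S = self.S_.astype(np.float32)
        return OnnxDiv(OnnxSub(i0, W), S, output_names=outputs)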
Example #9
def to_onnx_operator(self, inputs=None, outputs=('Y', )):
    if inputs is None:
        raise RuntimeError("Parameter inputs should contain at least "
                           "one name.")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    return OnnxDiv(OnnxSub(i0, W, op_version=12), S,
                   output_names=outputs,
                   op_version=12)
Example #10
def test_algebra_to_onnx(self):
    X = numpy.random.randn(5, 4)
    beta = numpy.array([1, 2, 3, 4]) / 10
    beta32 = beta.astype(numpy.float32)
    onnxExpM = OnnxExp(OnnxMatMul('X', beta32))
    cst = numpy.ones((1, 3), dtype=numpy.float32)
    onnxExpM1 = OnnxAdd(onnxExpM, cst)
    onnxPred = OnnxDiv(onnxExpM, onnxExpM1)
    inputs = {'X': X[:1].astype(numpy.float32)}
    model_onnx = onnxPred.to_onnx(inputs)
    s1 = str(model_onnx)
    model_onnx = onnxPred.to_onnx(inputs)
    s2 = str(model_onnx)
    assert s1 == s2
    nin = list(onnxExpM1.enumerate_initial_types())
    nno = list(onnxExpM1.enumerate_nodes())
    nva = list(onnxExpM1.enumerate_variables())
    self.assertEqual(len(nin), 0)
    self.assertEqual(len(nno), 3)
    self.assertEqual(len(nva), 0)
Example #11
def to_onnx_operator(self,
                     inputs=None,
                     outputs=('Y', ),
                     target_opset=None,
                     **kwargs):
    if inputs is None:
        raise RuntimeError("inputs should contain one name")
    i0 = self.get_inputs(inputs, 0)
    W = self.W_.astype(np.float32)
    S = self.S_.astype(np.float32)
    return OnnxDiv(OnnxSub(i0, W, op_version=self.op_version),
                   S,
                   output_names=outputs,
                   op_version=self.op_version)
Example #12
def build_leaky_relu_decomposed(alpha=0.5, target_opset=15):
    # Decomposes LeakyRelu(x) = x * f(x), where f(x) = 1 for x >= 0
    # and alpha otherwise. Since (sign(x) + 1) / 2 is 1 for positive x
    # and 0 for negative x, the factor is rebuilt as
    # f(x) = (sign(x) + 1) / 2 * (1 - alpha) + alpha.
    signo = OnnxSign('X', op_version=target_opset)
    sign = OnnxDiv(OnnxAdd(signo,
                           numpy.array([1], dtype=numpy.float32),
                           op_version=target_opset),
                   numpy.array([2], dtype=numpy.float32),
                   op_version=target_opset)
    fact = OnnxAdd(OnnxMul(sign,
                           numpy.array([1 - alpha], dtype=numpy.float32),
                           op_version=target_opset),
                   numpy.array([alpha], dtype=numpy.float32),
                   op_version=target_opset)
    x = OnnxMul('X', fact, op_version=target_opset, output_names=['Y'])
    return x.to_onnx({'X': FloatTensorType()},
                     outputs={'Y': FloatTensorType()},
                     target_opset=target_opset)
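
A quick sanity check of the decomposition, assuming mlprodict's OnnxInference (the runtime used in Example #3) is available: run the decomposed graph and compare it with a direct numpy implementation of LeakyRelu.

import numpy
from mlprodict.onnxrt import OnnxInference  # same runtime as in Example #3

onx = build_leaky_relu_decomposed(alpha=0.5, target_opset=15)
oinf = OnnxInference(onx)
x = numpy.array([[-2, -1, 0, 1, 2]], dtype=numpy.float32)
got = oinf.run({'X': x})['Y']
# Reference: LeakyRelu(x) = x if x >= 0 else alpha * x.
expected = numpy.where(x >= 0, x, numpy.float32(0.5) * x)
assert numpy.allclose(got, expected)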
Example #13
def live_decorrelate_transformer_converter(scope, operator, container):
    # shortcuts
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]

    # We guess its type. If the operator ingests float (or double),
    # it outputs float (or double).
    proto_dtype = guess_proto_type(X.type)
    dtype = guess_numpy_type(X.type)

    # The lines in comments give the numpy computation
    # that the ONNX code below implements.
    # mean_ = numpy.mean(X, axis=0, keepdims=True)
    mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)

    # This is a trick I often use. The converter automatically
    # chooses a name for every output. In a big graph, it is
    # difficult to know which operator produced which output.
    # This line tells every node to prefix its outputs with this
    # string. It also applies to all input nodes unless this method
    # was already called for one of them.
    mean.set_onnx_name_prefix('mean')

    # X2 = X - mean_
    X2 = OnnxSub(X, mean, op_version=opv)

    # V = X2.T @ X2 / X2.shape[0]
    N = OnnxGatherElements(OnnxShape(X, op_version=opv),
                           numpy.array([0], dtype=numpy.int64),
                           op_version=opv)
    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)

    # Every output involved in N and Nf is prefixed by 'N'.
    Nf.set_onnx_name_prefix('N')

    V = OnnxDiv(OnnxMatMul(OnnxTranspose(X2, op_version=opv),
                           X2,
                           op_version=opv),
                Nf,
                op_version=opv)
    V.set_onnx_name_prefix('V1')

    # V += numpy.identity(V.shape[0]) * self.alpha
    V = OnnxAdd(V,
                op.alpha * numpy.identity(op.nf_, dtype=dtype),
                op_version=opv)
    V.set_onnx_name_prefix('V2')

    # L, P = numpy.linalg.eig(V)
    LP = OnnxEig(V, eigv=True, op_version=opv)
    LP.set_onnx_name_prefix('LP')

    # Linv = L ** (-0.5)
    # The notation LP[0] means OnnxPow takes the first output
    # of operator OnnxEig; LP[1] would be the second one.
    # Using LP alone is not allowed as it would be ambiguous.
    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype), op_version=opv)
    Linv.set_onnx_name_prefix('Linv')

    # diag = numpy.diag(Linv)
    diag = OnnxMul(OnnxEyeLike(numpy.zeros((op.nf_, op.nf_),
                                           dtype=numpy.int64),
                               k=0,
                               op_version=opv),
                   Linv,
                   op_version=opv)
    diag.set_onnx_name_prefix('diag')

    # root = P @ diag @ P.transpose()
    trv = OnnxTranspose(LP[1], op_version=opv)
    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
    coef_left.set_onnx_name_prefix('coef_left')
    coef = OnnxMatMul(coef_left, trv, op_version=opv)
    coef.set_onnx_name_prefix('coef')

    # Same part as before.
    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
    Y.set_onnx_name_prefix('Y')

    # The last line specifies the final output.
    # Every node involved in the computation is added to the ONNX
    # graph at this stage.
    Y.add_to(scope, container)
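
Such a converter is attached to its estimator through skl2onnx registration. The sketch below is a hedged illustration: the estimator name LiveDecorrelateTransformer and the shape calculator are assumptions inferred from the converter's name; update_registered_converter is the real skl2onnx API. Note also that OnnxEig is not a standard ONNX operator, so the resulting model needs a runtime that implements it.

from skl2onnx import update_registered_converter

def live_decorrelate_transformer_shape_calculator(operator):
    op = operator.raw_operator
    input_type = operator.inputs[0].type.__class__
    input_dim = operator.inputs[0].type.shape[0]
    # The output keeps the number of rows and has op.nf_ columns.
    operator.outputs[0].type = input_type([input_dim, op.nf_])

update_registered_converter(
    LiveDecorrelateTransformer, 'LiveDecorrelateTransformer',
    live_decorrelate_transformer_shape_calculator,
    live_decorrelate_transformer_converter)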
Example #14
def live_decorrelate_transformer_converter(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input.
    X = operator.inputs[0]
    proto_dtype = guess_proto_type(X.type)

    dtype = guess_numpy_type(X.type)

    # new part

    # mean_ = numpy.mean(X, axis=0, keepdims=True)
    mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)
    mean.set_onnx_name_prefix('mean')

    # X2 = X - mean_
    X2 = OnnxSub(X, mean, op_version=opv)

    # V = X2.T @ X2 / X2.shape[0]
    N = OnnxGatherElements(OnnxShape(X, op_version=opv),
                           numpy.array([0], dtype=numpy.int64),
                           op_version=opv)
    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)
    Nf.set_onnx_name_prefix('N')

    V = OnnxDiv(OnnxMatMul(OnnxTranspose(X2, op_version=opv),
                           X2,
                           op_version=opv),
                Nf,
                op_version=opv)
    V.set_onnx_name_prefix('V1')

    # V += numpy.identity(V.shape[0]) * self.alpha
    V = OnnxAdd(V,
                op.alpha * numpy.identity(op.nf_, dtype=dtype),
                op_version=opv)
    V.set_onnx_name_prefix('V2')

    # L, P = numpy.linalg.eig(V)
    LP = OnnxEig(V, eigv=True, op_version=opv)
    LP.set_onnx_name_prefix('LP')

    # Linv = L ** (-0.5)
    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype), op_version=opv)
    Linv.set_onnx_name_prefix('Linv')

    # diag = numpy.diag(Linv)
    # EyeLike expects a 2-D input tensor whose shape it mimics; a 1-D
    # array [nf_, nf_] would be rejected, so a (nf_, nf_) matrix is
    # passed, as in the previous example.
    diag = OnnxMul(OnnxEyeLike(numpy.zeros((op.nf_, op.nf_),
                                           dtype=numpy.int64),
                               k=0,
                               op_version=opv),
                   Linv,
                   op_version=opv)
    diag.set_onnx_name_prefix('diag')

    # root = P @ diag @ P.transpose()
    trv = OnnxTranspose(LP[1], op_version=opv)
    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
    coef_left.set_onnx_name_prefix('coef_left')
    coef = OnnxMatMul(coef_left, trv, op_version=opv)
    coef.set_onnx_name_prefix('coef')

    # Same part as before.
    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
    Y.set_onnx_name_prefix('Y')
    Y.add_to(scope, container)