Example #1
def register_custom_nms_op():
    # Experimenting with custom op registration.
    from torch.onnx.symbolic_helper import parse_args
    from torch.onnx.symbolic_opset9 import view, select, index_select, scatter
    @parse_args('v', 'v', 'f', 'f', 'i')
    def symbolic_nmsfilt(g, boxes, scores, iou_threshold, score_threshold, max_output_boxes):
        # A non-positive max_output_boxes means return all boxes
        if max_output_boxes <= 0:
            max_output_boxes = 10000
        shape = g.op("Shape", scores)  # original shape
        boxes = view(g, boxes, (1, -1, 4))
        max_output_per_class = g.op('Constant', value_t=torch.tensor([max_output_boxes], dtype=torch.long))
        iou_threshold = g.op('Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
        score_threshold = g.op('Constant', value_t=torch.tensor([score_threshold], dtype=torch.float))
        # center_point_box == 1 is for our center_x, center_y, width, height format
        nms_out = g.op('NonMaxSuppression',
                       boxes, view(g, scores, (1, 1, -1)), max_output_per_class, iou_threshold, score_threshold,
                       center_point_box_i=1)
        idx = view(g, select(g, nms_out, 1, g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))), (-1,))
        scores = view(g, scores, (-1,))
        flat_shape = g.op("Shape", scores)
        src = index_select(g, scores, 0, idx)
        src = view(g, src, (-1,))
        filt = g.op("ConstantOfShape", flat_shape)
        filt = scatter(g, filt, 0, idx, src)
        return view(g, filt, shape)

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic('mtorch_ops::nmsfilt', symbolic_nmsfilt, 10)
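The snippet above only registers the symbolic; the native mtorch_ops::nmsfilt op itself must exist separately. A hedged usage sketch follows, assuming that op has already been built and loaded (e.g. via torch.ops.load_library) and that its Python-visible signature matches the symbolic's arguments; the module, thresholds, and shapes are illustrative only.

# Hypothetical usage sketch: mtorch_ops::nmsfilt is assumed to be a loaded
# custom op, so tracing produces mtorch_ops::nmsfilt nodes for the symbolic above.
import io
import torch

class FiltModel(torch.nn.Module):
    def forward(self, boxes, scores):
        # Arguments mirror the symbolic: boxes, scores, iou_thr, score_thr, max_boxes
        return torch.ops.mtorch_ops.nmsfilt(boxes, scores, 0.5, 0.05, 100)

register_custom_nms_op()

boxes = torch.rand(8, 4)    # center_x, center_y, width, height
scores = torch.rand(8)
f = io.BytesIO()
torch.onnx.export(FiltModel(), (boxes, scores), f, opset_version=10)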
Example #2
def enable_custom_autograd_support():
    # Initialize static objects needed to run custom autograd.Function's.

    from onnxruntime.capi._pybind_state import (
        register_forward_runner,
        register_backward_runner,
        unregister_python_functions,
    )
    from torch.onnx import register_custom_op_symbolic
    from ._custom_autograd_function_exporter import _export
    from ._custom_autograd_function_runner import call_python_forward_function, call_python_backward_function
    from onnxruntime.training.ortmodule.torch_cpp_extensions import torch_interop_utils
    import atexit

    register_forward_runner(call_python_forward_function)
    register_backward_runner(call_python_backward_function)

    # Unregister all python functions automatically upon normal interpreter termination.
    atexit.register(unregister_python_functions)
    # Clear all gradient functions, to avoid a deadlock issue.
    # Check the called function for more detailed comments.
    atexit.register(torch_interop_utils.clear_all_grad_fns)

    try:
        # This is for the latest PyTorch nightly after this commit:
        # https://github.com/pytorch/pytorch/commit/11bc435622e6b7207bbf37ed1aafe999e1f296ec
        register_custom_op_symbolic("prim::PythonOp", _export, 1)
    except Exception:
        # This applies to PyTorch 1.9 and 1.9.1.
        register_custom_op_symbolic("::prim_PythonOp", _export, 1)

    custom_autograd_function_enabler.state = True
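For orientation, a hedged sketch of the kind of model this path serves: a torch.autograd.Function in the user's module shows up as a prim::PythonOp node during export, which the _export symbolic registered above lowers and the registered runners execute. Whether support is switched on automatically or must be requested explicitly depends on the onnxruntime-training release, so treat this as illustrative rather than authoritative.

# Illustrative only: a custom autograd.Function trained through ORTModule.
import torch
from onnxruntime.training.ortmodule import ORTModule

class ScaledReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.clamp(min=0) * 2.0

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return grad_out * 2.0 * (x > 0).type_as(grad_out)

class Net(torch.nn.Module):
    def forward(self, x):
        return ScaledReLU.apply(x)

model = ORTModule(Net())                      # exported through prim::PythonOp
out = model(torch.randn(4, 4, requires_grad=True))
out.sum().backward()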
Example #3
    def test_aten_embedding_1(self):
        _onnx_opset_version = 12

        @parse_args('v', 'v', 'i', 'b', 'b')
        def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
            custom_attributes_json = (
                '{'
                f'"padding_idx":{str(padding_idx)},'
                f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
                f'"sparse":{str(sparse).lower()}'
                '}'
            )
            output = g.op("com.microsoft::ATenOp", weight, indices, name_s='aten::embedding',
                          custom_attributes_json_s=custom_attributes_json)
            return output

        register_custom_op_symbolic('::embedding', embedding, _onnx_opset_version)

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 8)

            def forward(self, x, y):
                res = self.emb(x)
                res = res + y
                return torch.ones(res.shape[0])

        model = Model()
        x = torch.ones(32, dtype=torch.long)
        y = torch.randn(1, 8)
        self.assertONNX(model, (x, y), opset_version=_onnx_opset_version)

        unregister_custom_op_symbolic('::embedding', _onnx_opset_version)
Example #4
def register_custom_nms_op():
    # Experimenting with custom op registration.
    from torch.onnx.symbolic_helper import parse_args
    from torch.onnx.symbolic_opset9 import view, select

    @parse_args('v', 'v', 'f', 'i')
    def symbolic_nms(g, boxes, scores, iou_threshold, max_output_boxes):
        # A non-positive max_output_boxes means return all boxes
        if max_output_boxes <= 0:
            max_output_boxes = 10000
        boxes = view(g, boxes, (1, -1, 4))
        max_output_per_class = g.op('Constant',
                                    value_t=torch.tensor([max_output_boxes],
                                                         dtype=torch.long))
        iou_threshold = g.op('Constant',
                             value_t=torch.tensor([iou_threshold],
                                                  dtype=torch.float))
        # center_point_box == 1 is for our center_x, center_y, width, height format
        nms_out = g.op('NonMaxSuppression',
                       boxes,
                       view(g, scores, (1, 1, -1)),
                       max_output_per_class,
                       iou_threshold,
                       center_point_box_i=1)
        idx = select(
            g, nms_out, 1,
            g.op('Constant', value_t=torch.tensor([2], dtype=torch.long)))
        return view(g, idx, (-1, ))

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic('mtorch_ops::nms', symbolic_nms, 10)
Example #5
def register_custom_op():
    def my_group_norm(g, input, num_groups, scale, bias, eps):
        return g.op("mydomain::testgroupnorm", input, num_groups, scale, bias, epsilon_f=0.)

    from torch.onnx import register_custom_op_symbolic

    register_custom_op_symbolic("mynamespace::custom_group_norm", my_group_norm, 9)
Example #6
    def test_contrib_op_with_loop(self):
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.gelu = torch.nn.GELU()

            def forward(self, x):
                res = []
                res2 = []
                for i in range(x.size(0)):
                    if len(res) > 0:
                        res2.append(res[0])
                    else:
                        res2.append(self.gelu(x[0]))
                    res.append(x[0])
                return torch.stack(res), torch.stack(res2)

        def symbolic_custom_gelu(g, input):
            return g.op("com.microsoft::Gelu", input).setType(input.type())

        from torch.onnx import register_custom_op_symbolic
        register_custom_op_symbolic("::gelu", symbolic_custom_gelu, 1)

        x = torch.randn(3, 3, 4, requires_grad=True)
        model = torch.jit.script(M())
        run_model_test(self, model, input=(x, ))
Example #7
    def test_custom_pythonop_pytorch(self):

        # register_custom_op_symbolic(
        #   '<namespace>::inverse', my_inverse, <opset_version>)
        register_custom_op_symbolic('::inverse', my_inverse, 1)

        x = torch.randn(3, 3)

        # Export model to ONNX
        f = io.BytesIO()
        torch.onnx.export(CustomInverse(), (x, ), f)
        onnx_model = load(io.BytesIO(f.getvalue()))
        self.assertIn('domain: "ai.onnx.contrib"', str(onnx_model))

        model = CustomInverse()
        pt_outputs = model(x)

        so = _ort.SessionOptions()
        so.register_custom_ops_library(_get_library_path())

        # Run the exported model with ONNX Runtime
        ort_sess = _ort.InferenceSession(f.getvalue(), so)
        ort_inputs = dict((ort_sess.get_inputs()[i].name, input.cpu().numpy())
                          for i, input in enumerate((x, )))
        ort_outputs = ort_sess.run(None, ort_inputs)

        # Validate PyTorch and ONNX Runtime results
        numpy.testing.assert_allclose(pt_outputs.cpu().numpy(),
                                      ort_outputs[0],
                                      rtol=1e-03,
                                      atol=1e-05)
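CustomInverse and my_inverse are defined elsewhere in the test module. A plausible minimal sketch of those helpers, inferred from the ai.onnx.contrib domain assertion above (an assumption, not the file's verbatim code):

# Assumed shape of the helpers referenced by the test above.
import torch

def my_inverse(g, self):
    # Map aten::inverse to a contrib op implemented by the custom-ops library.
    return g.op("ai.onnx.contrib::Inverse", self)

class CustomInverse(torch.nn.Module):
    def forward(self, x):
        return torch.inverse(x) + x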
Example #8
    def test_aten_embedding_2(self):
        _onnx_opset_version = 12

        @parse_args('v', 'v', 'i', 'b', 'b')
        def embedding(g, weight, indices, padding_idx, scale_grad_by_freq,
                      sparse):
            custom_attributes_json = (
                '{'
                f'"padding_idx":{str(padding_idx)},'
                f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
                f'"sparse":{str(sparse).lower()}'
                '}')
            output = g.at("embedding",
                          weight,
                          indices,
                          custom_attributes_json_s=custom_attributes_json)

            # do shape inference and set it via setType
            indices_shape = _get_tensor_sizes(indices)
            if indices_shape is not None and hasattr(weight.type(),
                                                     'with_sizes'):
                output_type = weight.type().with_sizes(
                    indices_shape + [_get_tensor_dim_size(weight, 1)])
                output.setType(output_type)
            return output

        register_custom_op_symbolic('::embedding', embedding,
                                    _onnx_opset_version)

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 8)

            def forward(self, x, y):
                res = self.emb(x)
                res = res + y
                return torch.ones(res.shape[0])

        model = Model()
        x = torch.ones(32, dtype=torch.long)
        y = torch.randn(1, 8)
        self.assertONNX(model, (x, y),
                        opset_version=_onnx_opset_version,
                        input_names=['input_1', 'input_2'],
                        dynamic_axes={
                            "input_1": {
                                0: "dim_0"
                            },
                            'input_2': {
                                0: "dim_1",
                                1: "dim_2"
                            }
                        },
                        keep_initializers_as_inputs=False,
                        operator_export_type=torch.onnx.OperatorExportTypes.
                        ONNX_ATEN_FALLBACK)

        unregister_custom_op_symbolic('::embedding', _onnx_opset_version)
Example #9
    def test_aten_embedding_2(self):
        _onnx_opset_version = 12

        @parse_args("v", "v", "i", "b", "b")
        def embedding(g, weight, indices, padding_idx, scale_grad_by_freq,
                      sparse):
            custom_attributes_json = (
                "{"
                f'"padding_idx":{str(padding_idx)},'
                f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
                f'"sparse":{str(sparse).lower()}'
                "}")
            output = g.op("com.microsoft::ATenOp",
                          weight,
                          indices,
                          name_s="aten::embedding",
                          custom_attributes_json_s=custom_attributes_json)

            # do shape inference and set it via setType
            indices_shape = _get_tensor_sizes(indices)
            if indices_shape is not None and hasattr(weight.type(),
                                                     "with_sizes"):
                output_type = weight.type().with_sizes(
                    indices_shape + [_get_tensor_dim_size(weight, 1)])
                output.setType(output_type)
            return output

        register_custom_op_symbolic("::embedding", embedding,
                                    _onnx_opset_version)

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 8)

            def forward(self, x, y):
                res = self.emb(x)
                res = res + y
                return torch.ones(res.shape[0])

        model = Model()
        x = torch.ones(32, dtype=torch.long)
        y = torch.randn(1, 8)
        self.assertONNX(model, (x, y),
                        opset_version=_onnx_opset_version,
                        input_names=["input_1", "input_2"],
                        dynamic_axes={
                            "input_1": {
                                0: "dim_0"
                            },
                            "input_2": {
                                0: "dim_1",
                                1: "dim_2"
                            }
                        })

        unregister_custom_op_symbolic("::embedding", _onnx_opset_version)
Example #10
def register_custom_op():
    # Experimenting with custom op registration.
    from torch.onnx.symbolic_helper import parse_args
    from torch.onnx.symbolic_opset9 import select, unsqueeze, squeeze, _cast_Long

    @parse_args('v', 'v', 'f')
    def symbolic_multi_label_nms(
        g, boxes, scores, iou_threshold
    ):  #, max_output_per_class, iou_threshold, score_threshold):
        boxes = unsqueeze(g, boxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op('Constant',
                                    value_t=torch.tensor([2000],
                                                         dtype=torch.long))
        iou_threshold = g.op('Constant',
                             value_t=torch.tensor([iou_threshold],
                                                  dtype=torch.float))
        nms_out = g.op('NonMaxSuppression', boxes, scores,
                       max_output_per_class, iou_threshold)
        return squeeze(
            g,
            select(
                g, nms_out, 1,
                g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))),
            1)

    @parse_args('v', 'v', 'f', 'i', 'i', 'i')
    def symbolic_roi_align(g, input, rois, spatial_scale, pooled_height,
                           pooled_width, sampling_ratio):
        batch_indices = _cast_Long(
            g,
            squeeze(
                g,
                select(
                    g, rois, 1,
                    g.op('Constant',
                         value_t=torch.tensor([0], dtype=torch.long))), 1),
            False)
        rois = select(
            g, rois, 1,
            g.op('Constant',
                 value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
        return g.op('RoiAlign',
                    input,
                    rois,
                    batch_indices,
                    spatial_scale_f=spatial_scale,
                    output_height_i=pooled_height,
                    output_width_i=pooled_width,
                    sampling_ratio_i=sampling_ratio)

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic('DetectionHub::nms', symbolic_multi_label_nms,
                                10)
    register_custom_op_symbolic('DetectionHub::roi_align_forward',
                                symbolic_roi_align, 10)
Example #11
def enable_custom_autograd_support():
    from onnxruntime.capi._pybind_state import register_forward_runner, register_backward_runner
    from torch.onnx import register_custom_op_symbolic
    from ._custom_autograd_function_exporter import _export
    from ._custom_autograd_function_runner import call_python_forward_function, call_python_backward_function

    register_forward_runner(call_python_forward_function)
    register_backward_runner(call_python_backward_function)

    register_custom_op_symbolic('::prim_PythonOp', _export, 1)
Example #12
            def register_custom_op():
                # Experimenting with custom op registration.
                from torch.onnx.symbolic import parse_args, _cast_Int, _cast_Long

                def symbolic_multi_label_nms(g, boxes, scores,
                                             max_output_per_class,
                                             iou_threshold, score_threshold):
                    return g.op('NonMaxSuppression', boxes, scores,
                                max_output_per_class, iou_threshold,
                                score_threshold)

                from torch.onnx import register_custom_op_symbolic
                register_custom_op_symbolic('roi_ops::multi_label_nms',
                                            symbolic_multi_label_nms)
Example #13
def enable_custom_autograd_support():
    from onnxruntime.capi._pybind_state import register_forward_runner, register_backward_runner, unregister_python_functions
    from torch.onnx import register_custom_op_symbolic
    from ._custom_autograd_function_exporter import _export
    from ._custom_autograd_function_runner import call_python_forward_function, call_python_backward_function
    import atexit

    register_forward_runner(call_python_forward_function)
    register_backward_runner(call_python_backward_function)

    # Unregister all python functions automatically upon normal interpreter termination.
    atexit.register(unregister_python_functions)

    register_custom_op_symbolic('::prim_PythonOp', _export, 1)
Example #14
def register_custom_op():
    """
    This function registers symbolic functions for
    custom ops that are implemented as part of ONNX Runtime
    """

    # Symbolic definition
    def inverse(g, self):
        return g.op("com.microsoft::Inverse", self).setType(self.type())

    def gelu(g, self):
        return g.op("com.microsoft::Gelu", self).setType(self.type())

    def triu(g, self, diagonal):
        return g.op("com.microsoft::Trilu", self, diagonal,
                    upper_i=1).setType(self.type())

    def tril(g, self, diagonal):
        return g.op("com.microsoft::Trilu", self, diagonal,
                    upper_i=0).setType(self.type())

    # Op Registration
    register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version)
    register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version)
    register_custom_op_symbolic('::triu', triu, _onnx_opset_version)
    register_custom_op_symbolic('::tril', tril, _onnx_opset_version)
Example #15
    def test_register_aten_custom_op_symbolic(self):
        self.addCleanup(unregister_custom_op_symbolic, "aten::gelu", 1)

        def gelu(g, self):
            return g.op("com.microsoft::Gelu", self).setType(self.type())

        register_custom_op_symbolic("aten::gelu", gelu, 1)
        model = torch.nn.GELU()
        x = torch.randn(3, 3)
        f = io.BytesIO()
        torch.onnx.export(model, (x, ), f, opset_version=self.opset_version)
        graph = onnx.load(io.BytesIO(f.getvalue()))

        self.assertEqual(graph.graph.node[0].op_type, "Gelu")
        self.assertEqual(graph.opset_import[1].domain, "com.microsoft")
Example #16
    def test_custom_opsets_gelu(self):
        def gelu(g, self):
            return g.op("com.microsoft::Gelu", self).setType(self.type())

        register_custom_op_symbolic("::gelu", gelu, 1)
        model = torch.nn.GELU()
        x = torch.randn(3, 3)
        f = io.BytesIO()
        torch.onnx.export(model, (x, ), f,
                          opset_version=self.opset_version, custom_opsets={"com.microsoft": 1})

        graph = onnx.load(io.BytesIO(f.getvalue()))
        assert graph.graph.node[0].op_type == "Gelu"
        assert graph.opset_import[0].version == self.opset_version
        assert graph.opset_import[1].domain == 'com.microsoft'
        assert graph.opset_import[1].version == 1
Example #17
def register_custom_op():
    def my_group_norm(g, input, num_groups, scale, bias, eps):
        return g.op("mydomain::testgroupnorm",
                    input,
                    num_groups,
                    scale,
                    bias,
                    epsilon_f=0.)

    from torch.onnx import register_custom_op_symbolic, set_custom_domain_version

    # Optional: register custom domain version. If not registered, default version is 1
    # set_custom_domain_version("mydomain", 2)

    register_custom_op_symbolic("mynamespace::custom_group_norm",
                                my_group_norm, 9)
Example #18
def register_custom_op():
    """
    This function registers symbolic functions for
    custom ops that are implemented as part of ONNX Runtime
    """

    # Symbolic definition
    def inverse(g, self):
        return g.op("com.microsoft::Inverse", self)

    def gelu(g, self):
        return g.op("com.microsoft::Gelu", self)

    # Op Registration
    register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version)
    register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version)
Example #19
    def test_register_custom_op(self):
        class MyClip(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input, scalar):
                ctx.save_for_backward(input)
                return input.clamp(min=scalar)

        class MyRelu(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(input)
                return input.clamp(min=0)

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.clip = MyClip.apply
                self.relu = MyRelu.apply

            def forward(self, x):
                h = self.clip(x, 2)
                h = self.relu(h)
                return h

        def symbolic_pythonop(ctx: torch.onnx.SymbolicContext, g, *args,
                              **kwargs):
            n = ctx.cur_node
            name = kwargs["name"]
            if name == "MyClip":
                return g.op("Clip",
                            args[0],
                            min_f=args[1],
                            outputs=n.outputsSize())
            elif name == "MyRelu":
                return g.op("Relu", args[0], outputs=n.outputsSize())
            else:
                return symbolic_helper._unimplemented(
                    "prim::PythonOp", "unknown node kind: " + name)

        from torch.onnx import register_custom_op_symbolic

        register_custom_op_symbolic("prim::PythonOp", symbolic_pythonop, 1)

        x = torch.randn(2, 3, 4, requires_grad=True)
        model = MyModule()
        onnx_test_common.run_model_test(self, model, input_args=(x, ))
Example #20
def register_custom_op(is_ortmodule=False):
    """
    This function registers symbolic functions for
    custom ops that are implemented as part of ONNX Runtime
    """

    # Symbolic definition
    def inverse(g, self):
        return g.op("com.microsoft::Inverse", self)

    def gelu(g, self):
        return g.op("com.microsoft::Gelu", self)

    def triu(g, self, diagonal):
        return g.op("com.microsoft::Trilu", self, diagonal, upper_i=1)

    def tril(g, self, diagonal):
        return g.op("com.microsoft::Trilu", self, diagonal, upper_i=0)

    # Op Registration
    register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version)
    register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version)
    register_custom_op_symbolic('::triu', triu, _onnx_opset_version)
    register_custom_op_symbolic('::tril', tril, _onnx_opset_version)

    if is_ortmodule:

        @parse_args('v', 'v', 'i', 'b', 'b')
        def embedding(g, weight, indices, padding_idx, scale_grad_by_freq,
                      sparse):
            custom_attributes_json = (
                '{'
                f'"padding_idx":{str(padding_idx)},'
                f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
                f'"sparse":{str(sparse).lower()}'
                '}')
            return g.op("com.microsoft::ATenOp",
                        weight,
                        indices,
                        name_s='aten::embedding',
                        custom_attributes_json_s=custom_attributes_json)

        register_custom_op_symbolic('::embedding', embedding,
                                    _onnx_opset_version)
Example #21
    def test_custom_opsets_inverse(self):
        class CustomInverse(torch.nn.Module):
            def forward(self, x):
                return torch.inverse(x) + x

        def inverse(g, self):
            return g.op("com.microsoft::Inverse", self).setType(self.type())

        register_custom_op_symbolic("::inverse", inverse, 1)
        model = CustomInverse()
        x = torch.randn(2, 3, 3)
        f = io.BytesIO()
        torch.onnx.export(model, (x, ), f,
                          opset_version=self.opset_version, custom_opsets={"com.microsoft": 1})

        graph = onnx.load(io.BytesIO(f.getvalue()))
        self.assertEqual(graph.graph.node[0].op_type, "Inverse")
        self.assertEqual(graph.opset_import[0].version, self.opset_version)
        self.assertEqual(graph.opset_import[1].domain, "com.microsoft")
        self.assertEqual(graph.opset_import[1].version, 1)
Example #22
    def test_custom_add(self):
        op_source = """
        #include <torch/script.h>

        torch::Tensor custom_add(torch::Tensor self, torch::Tensor other) {
          return self + other;
        }

        static auto registry =
          torch::RegisterOperators("custom_namespace::custom_add", &custom_add);
        """

        torch.utils.cpp_extension.load_inline(
            name="custom_add",
            cpp_sources=op_source,
            is_python_module=False,
            verbose=True,
        )

        class CustomAddModel(torch.nn.Module):
            def forward(self, a, b):
                return torch.ops.custom_namespace.custom_add(a, b)

        def symbolic_custom_add(g, self, other):
            return g.op("Add", self, other)

        from torch.onnx import register_custom_op_symbolic

        register_custom_op_symbolic(
            "custom_namespace::custom_add", symbolic_custom_add, 9
        )

        x = torch.randn(2, 3, 4, requires_grad=False)
        y = torch.randn(2, 3, 4, requires_grad=False)

        model = CustomAddModel()
        onnxir, _ = do_export(model, (x, y), opset_version=11)
        onnx_model = onnx.ModelProto.FromString(onnxir)
        prepared = c2.prepare(onnx_model)
        caffe2_out = prepared.run(inputs=[x.cpu().numpy(), y.cpu().numpy()])
        np.testing.assert_array_equal(caffe2_out[0], model(x, y).cpu().numpy())
Example #23
    def export_onnx_model(self, output_dir, verbose=False):
        from torch.onnx import register_custom_op_symbolic
        from torch.onnx.symbolic_helper import parse_args
        import sys
        torch_version = torch.__version__.split('.')
        if int(torch_version[0]) == 1 and int(torch_version[1]) < 7:
            NndctScreenLogger().error(
                'Only support exporting ONNX models with PyTorch 1.7 and later versions'
            )
            return

        @parse_args("v", "i", "i", "f", "i", "i", "i", "i")
        def symbolic_fix_neuron(g, input, valmin, valmax, valamp, zero_point,
                                method, device_id, inplace):
            # print(f'{valmax} {valamp} {method} {device_id}')
            if valamp < sys.float_info.min:
                # Avoid the exporter generating a double-typed constant
                scale = torch.tensor(sys.float_info.max).float()
            else:
                # Avoid the exporter generating a double-typed constant
                scale = torch.tensor(1.0 / valamp).float()
            # ONNX requires zero_point to be a tensor
            zero_point = torch.tensor(0, dtype=torch.int8)
            return g.op("DequantizeLinear",
                        g.op("QuantizeLinear", input, scale, zero_point),
                        scale, zero_point)

        register_custom_op_symbolic("vai::fix_neuron", symbolic_fix_neuron, 9)
        output_file = os.path.join(
            output_dir, f"{self.quantizer.quant_model._get_name()}_int.onnx")
        opset_version = torch.onnx.symbolic_helper._onnx_stable_opsets[-1]
        device = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE)
        self.quantizer.reset_status_for_exporting()
        model, input_args = to_device(self.quantizer.quant_model,
                                      self._example_inputs, device)
        torch.onnx.export(self.quantizer.quant_model,
                          input_args,
                          output_file,
                          verbose=verbose,
                          opset_version=opset_version)
Example #24
 def __init__(self,
              filename: str,
              image_size: int,
              input_dtype: str = 'uint8',
              n_channels=3,
              n_batch=1,
              opset_version=11,
              **kwargs):
     super(OnnxExporter, self).__init__(
         image_size=image_size,
         input_dtype=input_dtype,
         filename=filename,
         n_channels=n_channels,
         n_batch=n_batch,
     )
     self.export_args = kwargs
     self.opset_version = opset_version
     self.export_args.update({'opset_version': opset_version})
     """
     adapted from
     https://github.com/pytorch/vision/blob/74679cc566f98398db13df0312cc11188733f1f3/torchvision/ops/_register_onnx_ops.py#L7
     extend opset version
     """
     opset_version = [9, 10]
     for _onnx_opset_version in opset_version:
         register_custom_op_symbolic('torchvision::nms',
                                     type(self).symbolic_multi_label_nms,
                                     _onnx_opset_version)
         register_custom_op_symbolic('torchvision::roi_align',
                                     type(self).roi_align,
                                     _onnx_opset_version)
         register_custom_op_symbolic('torchvision::roi_pool',
                                     type(self).roi_pool,
                                     _onnx_opset_version)
Example #25
    def test_register_custom_op(self):
        class MyClip(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input, scalar):
                ctx.save_for_backward(input)
                return input.clamp(min=scalar)

        class MyRelu(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(input)
                return input.clamp(min=0)

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.clip = MyClip.apply
                self.relu = MyRelu.apply

            def forward(self, x):
                h = self.clip(x, 2)
                h = self.relu(h)
                return h

        def symbolic_pythonop(g, n, *args, **kwargs):
            name = kwargs['name']
            if name == "MyClip":
                return g.op("Clip", args[0], min_f=args[1])
            elif name == "MyRelu":
                return g.op("Relu", args[0])
            else:
                return _unimplemented("prim::PythonOp",
                                      "unknown node kind: " + name)

        from torch.onnx import register_custom_op_symbolic
        register_custom_op_symbolic('::prim_PythonOp', symbolic_pythonop, 1)

        x = torch.randn(2, 3, 4, requires_grad=True)
        model = MyModule()
        run_model_test(self, model, input=(x, ))
Example #26
    def test_aten_embedding_1(self):
        _onnx_opset_version = 12

        @parse_args("v", "v", "i", "b", "b")
        def embedding(g, weight, indices, padding_idx, scale_grad_by_freq,
                      sparse):
            custom_attributes_json = (
                "{"
                f'"padding_idx":{str(padding_idx)},'
                f'"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},'
                f'"sparse":{str(sparse).lower()}'
                "}")
            output = g.at(
                "embedding",
                weight,
                indices,
                custom_attributes_json_s=custom_attributes_json,
            )
            return output

        register_custom_op_symbolic("::embedding", embedding,
                                    _onnx_opset_version)

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 8)

            def forward(self, x, y):
                res = self.emb(x)
                res = res + y
                return torch.ones(res.shape[0])

        model = Model()
        x = torch.ones(32, dtype=torch.long)
        y = torch.randn(1, 8)
        self.assertONNX(model, (x, y), opset_version=_onnx_opset_version)

        unregister_custom_op_symbolic("::embedding", _onnx_opset_version)
Example #27
def register_onnx_ops():
    def convolution_sampler_ops(g, input1, input2, kH, kW, patchH, patchW,
                                padH, padW, dilationH, dilationW,
                                dilation_patchH, dilation_patchW, dH, dW):
        return g.op("thales::correlation_layer",
                    input1,
                    input2,
                    kH_i=kH.node()['value'].item(),
                    kW_i=kW.node()['value'].item(),
                    patchH_i=patchH.node()['value'].item(),
                    patchW_i=patchW.node()['value'].item(),
                    padH_i=padH.node()['value'].item(),
                    padW_i=padW.node()['value'].item(),
                    dilationH_i=dilationH.node()['value'].item(),
                    dilationW_i=dilationW.node()['value'].item(),
                    dilation_patchH_i=dilation_patchH.node()['value'].item(),
                    dilation_patchW_i=dilation_patchW.node()['value'].item(),
                    dH_i=dH.node()['value'].item(),
                    dW_i=dW.node()['value'].item())

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic("correlation_sampler::correlation_forward",
                                convolution_sampler_ops, 1)
Example #28
def enable_custom_autograd_support():
    # Initialize static objects needed to run custom autograd.Function's.

    from onnxruntime.capi._pybind_state import register_forward_runner, register_backward_runner, unregister_python_functions
    from torch.onnx import register_custom_op_symbolic
    from ._custom_autograd_function_exporter import _export
    from ._custom_autograd_function_runner import call_python_forward_function, call_python_backward_function
    import atexit

    register_forward_runner(call_python_forward_function)
    register_backward_runner(call_python_backward_function)

    # Unregister all python functions automatically upon normal interpreter termination.
    atexit.register(unregister_python_functions)

    try:
        # This is for the latest PyTorch nightly after this commit:
        # https://github.com/pytorch/pytorch/commit/11bc435622e6b7207bbf37ed1aafe999e1f296ec
        register_custom_op_symbolic('prim::PythonOp', _export, 1)
    except Exception:
        # This applies to PyTorch 1.9 and 1.9.1.
        register_custom_op_symbolic('::prim_PythonOp', _export, 1)

    custom_autograd_function_enabler.state = True
Example #29
# For inference of the ONNX model, you will need onnxruntime-gpu 1.6.0 (or a nightly build).

import torch
import numpy as np
import argparse
import transformers
from torch.onnx import register_custom_op_symbolic
from torch.onnx.symbolic_helper import parse_args
from packaging import version

@parse_args('v', 'v', 'v', 'v','v', 'v', 'v', 'i', 'i')
def my_longformer_attention(g, input, weight, bias, mask, global_weight, global_bias, global_mask, num_heads, window):
  return g.op("com.microsoft::LongformerAttention", input, weight, bias, mask, global_weight, global_bias, global_mask, num_heads_i=num_heads, window_i=window)

# namespace is onnxruntime which is registered in longformer_attention.cpp
register_custom_op_symbolic('onnxruntime::LongformerAttention', my_longformer_attention, 9)

# TODO: update the path according to output of "python setup.py install" when your python version is not 3.6
torch.ops.load_library(r'build/lib.linux-x86_64-3.6/longformer_attention.cpython-36m-x86_64-linux-gnu.so')

# mapping from model name to pretrained model name
MODELS = {
    "longformer-base-4096": "allenai/longformer-base-4096",
    "longformer-random-tiny": "patrickvonplaten/longformer-random-tiny"  # A tiny model for debugging
}

is_debug = False

def parse_arguments():
    parser = argparse.ArgumentParser()
Example #30
def _register_custom_op():
    from torch.onnx.symbolic_helper import parse_args, scalar_type_to_onnx
    from torch.onnx.symbolic_opset9 import select, unsqueeze, squeeze, _cast_Long, reshape

    @parse_args('v', 'v', 'f')
    def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
        boxes = unsqueeze(g, boxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op('Constant',
                                    value_t=torch.tensor([sys.maxsize],
                                                         dtype=torch.long))
        iou_threshold = g.op('Constant',
                             value_t=torch.tensor([iou_threshold],
                                                  dtype=torch.float))
        nms_out = g.op('NonMaxSuppression', boxes, scores,
                       max_output_per_class, iou_threshold)
        return squeeze(
            g,
            select(
                g, nms_out, 1,
                g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))),
            1)

    @parse_args('v', 'v', 'f', 'i', 'i', 'i')
    def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width,
                  sampling_ratio):
        batch_indices = _cast_Long(
            g,
            squeeze(
                g,
                select(
                    g, rois, 1,
                    g.op('Constant',
                         value_t=torch.tensor([0], dtype=torch.long))), 1),
            False)
        rois = select(
            g, rois, 1,
            g.op('Constant',
                 value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
        return g.op('RoiAlign',
                    input,
                    rois,
                    batch_indices,
                    spatial_scale_f=spatial_scale,
                    output_height_i=pooled_height,
                    output_width_i=pooled_width,
                    sampling_ratio_i=sampling_ratio)

    @parse_args('v', 'v', 'f', 'i', 'i')
    def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
        roi_pool = g.op('MaxRoiPool',
                        input,
                        rois,
                        pooled_shape_i=(pooled_height, pooled_width),
                        spatial_scale_f=spatial_scale)
        return roi_pool, None

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic('torchvision::nms', symbolic_multi_label_nms,
                                10)
    register_custom_op_symbolic('torchvision::roi_align', roi_align, 10)
    register_custom_op_symbolic('torchvision::roi_pool', roi_pool, 10)
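A hedged end-to-end sketch of how this registration is typically exercised: once _register_custom_op() has run, tracing a module that calls torchvision.ops.nms yields torchvision::nms nodes, which the symbolic above lowers to ONNX NonMaxSuppression (the module and shapes here are illustrative).

# Illustrative usage; assumes torchvision is installed.
import io
import torch
import torchvision

class NMSModule(torch.nn.Module):
    def forward(self, boxes, scores):
        # boxes in (x1, y1, x2, y2) format, as torchvision.ops.nms expects
        return torchvision.ops.nms(boxes, scores, 0.5)

_register_custom_op()

boxes = torch.rand(16, 4)
boxes[:, 2:] += boxes[:, :2]   # ensure x2 >= x1 and y2 >= y1
scores = torch.rand(16)

f = io.BytesIO()
torch.onnx.export(NMSModule(), (boxes, scores), f, opset_version=10)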