Example #1
0
def _make_module(in_shape, ratio):
    """Build an ONNX model containing Add followed by Dropout (opset 10)."""
    # Value infos for the two graph inputs and the single output.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, in_shape)
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, in_shape)
    z = helper.make_tensor_value_info('z', TensorProto.FLOAT, in_shape)

    # Add feeds its result into Dropout through the intermediate tensor 'sum'.
    add_node = onnx.helper.make_node('Add', inputs=['x', 'y'], outputs=['sum'])
    dropout_node = onnx.helper.make_node('Dropout',
                                         inputs=['sum'],
                                         outputs=['z'],
                                         ratio=ratio)

    graph_def = helper.make_graph([add_node, dropout_node], 'test-model',
                                  [x, y], [z])

    # Pin opset 10 so Dropout keeps its attribute-based 'ratio' form.
    opset = onnx.OperatorSetIdProto()
    opset.version = 10
    return helper.make_model(graph_def,
                             producer_name='kendryte',
                             opset_imports=[opset])
Example #2
0
 def graph_def_to_onnx_model(
     cls,
     graph_def,
     input_names=None,
     output_names=None,
     input_shapes=None,
     constants=None,
     value_info=None,
     opset_version=None,
     workspace=None,
     verbose=True,
     enable_onnx_checker=True,
 ):
     """Convert a graph_def into an ONNX ModelProto, optionally validated.

     The requested opset version is normalized via cls._check_opset_version
     before the graph translation runs.
     """
     # Resolve the opset the produced model will advertise.
     opset_id = onnx.OperatorSetIdProto()
     opset_id.domain = ''  # ONNX default domain
     opset_id.version = cls._check_opset_version(opset_version)

     # Translate the graph first, then wrap it in a model proto.
     onnx_graph = cls.graph_def_to_onnx_graph(
         graph_def,
         input_names,
         output_names,
         input_shapes,
         constants,
         value_info,
         opset_id.version,
         workspace,
         verbose,
     )
     model = helper.make_model(
         onnx_graph,
         opset_imports=[opset_id],  # Current supported opset version
         producer_name='onnx-dragon',  # Producer name
     )

     # Optionally run the official ONNX validator on the result.
     if enable_onnx_checker:
         onnx.checker.check_model(model)
     return model
Example #3
0
def _make_module(in_shape, in_type, out_type, op_version):
    """Build an ONNX model that casts `in_type` to `out_type`.

    For integer targets (UINT8/INT32) the input is first multiplied by a
    random constant in [0, 100) so the values span a useful range before
    the Cast node.

    Cleanup vs. original: removed the unused `initializers`/`inputs`
    bookkeeping lists and dead commented-out code; behavior unchanged.
    """
    # Cast-1 expects the 'to' attribute as a string; later opsets use the enum.
    attributes_dict = {'to': str(out_type) if op_version == 1 else out_type}
    nodes = []

    input_vi = helper.make_tensor_value_info('input', in_type, in_shape)
    output_vi = helper.make_tensor_value_info('output', out_type, in_shape)

    input_name = 'input'
    if out_type in [TensorProto.UINT8, TensorProto.INT32]:
        # Random scaling constant, materialized as a Constant node.
        tensor = helper.make_tensor(
            'preprocess',
            in_type,
            dims=in_shape,
            vals=(np.random.rand(*in_shape) * 100).astype(
                onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[in_type]).flatten().tolist(
                ))
        mul = helper.make_node("Constant",
                               inputs=[],
                               outputs=["mul_const"],
                               value=tensor,
                               name='mul_constant')
        nodes.append(mul)
        pre_node = onnx.helper.make_node(
            'Mul',
            inputs=[input_name, 'mul_const'],
            outputs=['end_preprocess'],
        )
        nodes.append(pre_node)
        # The Cast now consumes the pre-processed tensor instead.
        input_name = 'end_preprocess'

    nodes.append(
        onnx.helper.make_node('Cast',
                              inputs=[input_name],
                              outputs=['output'],
                              **attributes_dict))

    graph_def = helper.make_graph(
        nodes,
        'test-cast-model',
        [input_vi],
        [output_vi],
    )

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    model_def = helper.make_model(graph_def,
                                  producer_name='kendryte',
                                  opset_imports=[op])
    return model_def
Example #4
0
def _caffe2_net_to_onnx_model(predict_net, init_net, value_info):
    """Translate a Caffe2 net pair into a checked ONNX model (opset 11)."""
    onnx_graph = caffe2.python.onnx.frontend.caffe2_net_to_onnx_graph(
        predict_net, init_net, value_info)
    # The ONNX checker requires a non-empty graph name.
    if not onnx_graph.name:
        onnx_graph.name = 'Graph'

    # Pin the default-domain opset to 11.
    opset = onnx.OperatorSetIdProto()
    opset.domain = ''
    opset.version = 11
    onnx_model = onnx.helper.make_model(onnx_graph, opset_imports=[opset])
    onnx.checker.check_model(onnx_model)
    return onnx_model
Example #5
0
    def test_pooling(self):
        """Exercise static quantization of pooling ops, alone and after Conv.

        Part 1 builds a graph containing a single MaxPool /
        GlobalAveragePool node whose input 'A' is also an initializer,
        and runs the static quantization check under opsets 12 and 13.
        Part 2 feeds the same pooling op from a Conv output so the
        quantizer must handle the Conv -> pool pattern (default opset).
        """
        for op in ["MaxPool", "GlobalAveragePool"]:
            # --- Part 1: standalone pooling node ---
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT,
                                              [1, 1, 5, 5])
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT,
                                              [1, 1, 5, 5])
            a_value = np.random.randn(1, 1, 5, 5).astype(np.float32)
            # 'A' is provided both as graph input and as an initializer
            # with random contents.
            A_init = helper.make_tensor('A', TensorProto.FLOAT, [1, 1, 5, 5],
                                        a_value.reshape(25).tolist())
            node = onnx.helper.make_node(op, ['A'], ['B'],
                                         name=op,
                                         kernel_shape=[3, 3],
                                         pads=[1, 1, 1, 1])
            graph = helper.make_graph([node], 'test_graph_1', [A], [B],
                                      [A_init])
            q_config = {op: self.q_config}
            # Per-tensor [scale, zero_point] pairs used by the quantizer.
            quantize_params = {
                "A": [np.float32(10.), np.uint8(0)],
                "B": [np.float32(10.), np.uint8(0)]
            }
            quantizable_op_types = [op]
            # Run the identical check under both supported opsets.
            for opset_version in [12, 13]:
                opset = onnx.OperatorSetIdProto()
                opset.version = opset_version
                model = helper.make_model(graph, opset_imports=[opset])
                self.static_test(model, q_config, quantize_params,
                                 quantizable_op_types)

            # --- Part 2: Conv followed by the pooling op ---
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT,
                                              [1, 1, 5, 5])
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT,
                                              [1, 1, 3, 3])
            D = helper.make_tensor_value_info('D', TensorProto.FLOAT,
                                              [1, 1, 5, 5])
            conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                              name='Conv',
                                              kernel_shape=[3, 3],
                                              pads=[1, 1, 1, 1])
            pool_node = onnx.helper.make_node(op, ['C'], ['D'], name=op)
            graph = helper.make_graph([conv_node, pool_node], 'test_graph_1',
                                      [A, B], [D])
            model = helper.make_model(graph)

            q_config = {"Conv": self.q_config, op: self.q_config}
            quantize_params = {
                "A": [np.float32(10.), np.uint8(0)],
                "B": [np.float32(10.), np.uint8(0)],
                "C": [np.float32(10.), np.uint8(0)],
                "D": [np.float32(10.), np.uint8(0)]
            }
            quantizable_op_types = ["Conv", op]
            self.static_test(model, q_config, quantize_params,
                             quantizable_op_types)
Example #6
0
def _make_module(in_shape, axes, op_version):
    """Build an Add -> Unsqueeze ONNX model for the given opset version.

    Unsqueeze-1 and Unsqueeze-11 carry the axes as a node attribute;
    Unsqueeze-13 takes them as a second (initializer) input.
    """
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, in_shape)
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, in_shape)
    add = onnx.helper.make_node(
        'Add',
        inputs=['x', 'y'],
        outputs=['sum'],
    )

    initializers = []

    # Infer the output shape by unsqueezing a dummy tensor.
    out = np.ones(in_shape)
    out_shape = np.expand_dims(out, axis=axes).shape
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           out_shape)

    # unsqueeze-1 and unsqueeze-11: axes is an attribute.
    # BUG FIX: the original condition only matched op_version == 11, so an
    # opset-1 request incorrectly produced the opset-13 input-based form.
    if op_version in (1, 11):
        unsqueeze = onnx.helper.make_node('Unsqueeze',
                                          inputs=['sum'],
                                          outputs=['output'],
                                          axes=axes)
    else:
        # unsqueeze-13: axes arrives as an INT64 initializer input.
        axes_tensor = onnx.helper.make_tensor('axes', onnx.TensorProto.INT64,
                                              [len(axes)],
                                              np.array(axes).astype(np.int64))
        initializers.append(axes_tensor)
        unsqueeze = onnx.helper.make_node(
            'Unsqueeze',
            inputs=['sum', 'axes'],
            outputs=['output'],
        )

    graph_def = helper.make_graph([add, unsqueeze],
                                  'test-model', [x, y], [output],
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    model_def = helper.make_model(graph_def,
                                  producer_name='kendryte',
                                  opset_imports=[op])

    return model_def
Example #7
0
def graph_to_file(graph: xpb2.GraphProto,
                  filename: str,
                  _producer: str = "sclblonnx",
                  onnx_opset_version=12,
                  **kwargs):
    """ graph_to_file stores an onnx graph to a .onnx file

    Stores a graph to a file

    Args:
        graph: An onnx graph
        filename: The filename of the resulting file
        _producer: Optional string with producer name. Default 'sclblonnx'
        onnx_opset_version: Optional version number for ONNX opset. Default 12
    Returns:
        True if successful, False otherwise.
    """
    if not filename:
        _print("Unable to save: Please specify a filename.")
        return False

    if type(graph) is not xpb2.GraphProto:
        _print("Unable to save: Graph is not an ONNX graph")
        # BUG FIX: previously fell through and attempted the save anyway,
        # violating the documented False-on-failure contract (the sibling
        # check() returns False in the same situation).
        return False

    try:
        # Only inject a default opset when the caller did not supply one.
        if 'opset_imports' not in kwargs:
            op = onnx.OperatorSetIdProto()
            op.version = onnx_opset_version
            mod = xhelp.make_model(graph,
                                   producer_name=_producer,
                                   opset_imports=[op],
                                   **kwargs)
        else:
            mod = xhelp.make_model(graph, producer_name=_producer, **kwargs)
    except Exception as e:
        print("Unable to convert graph to model: " + str(e))
        return False

    try:
        xsave(mod, filename, **kwargs)
    except Exception as e:
        print("Unable to save the model: " + str(e))
        return False

    return True
Example #8
0
def _make_module(in_shape, axis, op_version):
    """Build a single-node Softmax ONNX model for the given opset."""
    # Optional attributes: only set 'axis' when the caller provided one.
    softmax_attrs = {}
    if axis is not None:
        softmax_attrs['axis'] = axis

    # Softmax preserves the input shape.
    in_vi = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    out_vi = helper.make_tensor_value_info('output', TensorProto.FLOAT, in_shape)

    softmax_node = onnx.helper.make_node('Softmax',
                                         inputs=['input'],
                                         outputs=['output'],
                                         **softmax_attrs)

    graph_def = helper.make_graph([softmax_node],
                                  'test-model',
                                  [in_vi],
                                  [out_vi],
                                  initializer=[])

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    return helper.make_model(graph_def, producer_name='onnx', opset_imports=[op])
Example #9
0
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto

# Two scalar float inputs multiplied into one scalar output.
INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1])
OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])

mul_node = helper.make_node(
    'Mul',
    ['input1', 'input2'],
    ['output'],
)
graph_def = helper.make_graph(
    [mul_node],
    'mul',
    [INPUT_1, INPUT_2],
    [OUTPUT],
)
# Pin the opset to 12 and serialize the model next to this script.
model_def = helper.make_model(
    graph_def,
    producer_name='mul.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'mul.onnx')
Example #10
0
def _make_module(in_shape, padding, constant_value, mode, op_version,
                 value_format):
    """Build a single Pad-node ONNX model across the supported opsets.

    Pad-1 uses the 'paddings' attribute, Pad-2 renamed it to 'pads',
    and Pad-11/13 take the pads (and the optional constant value) as
    tensor inputs, supplied as initializers or Constant nodes depending
    on `value_format`.
    """
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)

    initializers = None
    nodes = []

    # Only the two spatial dims (H, W) are padded in these tests.
    out_shape = in_shape.copy()
    out_shape[2] += padding[2] + padding[6]
    out_shape[3] += padding[3] + padding[7]

    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           out_shape)

    if op_version in (1, 2):
        # Attribute-based form; the attribute name changed between opsets.
        pad_attrs = {'mode': mode}
        pad_attrs['paddings' if op_version == 1 else 'pads'] = padding
        if constant_value is not None:
            pad_attrs['value'] = constant_value
        node = onnx.helper.make_node('Pad',
                                     inputs=['input'],
                                     outputs=['output'],
                                     **pad_attrs)
    else:
        # opset 11/13: pads (and constant_value) arrive as tensors.
        initializers = []
        pads = helper.make_tensor("pads",
                                  TensorProto.INT64,
                                  dims=[len(padding)],
                                  vals=padding)

        if value_format == 'initializer':
            initializers.append(pads)
        else:
            nodes.append(
                helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['pads'],
                                 value=pads))

        inputs = ['input', 'pads']
        if constant_value is not None:
            cv = helper.make_tensor("constant_value",
                                    TensorProto.FLOAT,
                                    dims=[1],
                                    vals=[constant_value])
            initializers.append(cv)
            inputs.append('constant_value')

        node = onnx.helper.make_node('Pad',
                                     inputs=inputs,
                                     outputs=['output'],
                                     mode=mode)

    nodes.append(node)

    graph_def = helper.make_graph(nodes,
                                  'test-model', [input], [output],
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    return helper.make_model(graph_def,
                             producer_name='kendryte',
                             opset_imports=[op])
Example #11
0
def check(graph: xpb2.GraphProto,
          _producer: str = "sclblonnx",
          _onnx_check: bool = True,
          _sclbl_check: bool = True,
          _verbose: bool = True,
          **kwargs):
    """ check whether or not an existing graph can be converted using the Scailable platform

    We assume that a user will use graph_to_file() in this package to store the model.
    This function validates the graph both with the standard ONNX checker and
    against Scailable's own version/operator/shape constraints.

     Args:
        graph: an ONNX graph
        _producer: String optional
        _onnx_check: Bool, default True. Run ONNX checker.check().
        _sclbl_check: Bool, default True.  Run Scailable checks.
        _verbose: Print user feedback; default True (note, errors are always printed).
        **kwargs

    Returns:
        True if the graph passes all the test. False otherwise.
    """
    # Check if this is a valid graph:
    if type(graph) is not xpb2.GraphProto:
        _print("Graph is not a valid ONNX graph.")
        return False

    # Convert to model:
    try:
        if 'opset_imports' not in kwargs:
            op = onnx.OperatorSetIdProto()
            op.version = 12
            mod = xhelp.make_model(graph,
                                   producer_name=_producer,
                                   opset_imports=[op],
                                   **kwargs)
        else:
            mod = xhelp.make_model(graph, producer_name=_producer, **kwargs)
    except Exception as e:
        _print("Unable to create the model: " + str(e))
        return False

    # Standard ONNX checking:
    # BUG FIX: this branch read "if _onnx_check and False:", making it dead
    # code and silently skipping the checker the docstring promises to run.
    if _onnx_check:
        try:
            checker.check_model(mod, **kwargs)
        except Exception as e:
            _print("Model fails on standard ONNX checker: " + str(e))
            return False

    if _sclbl_check:

        # User feedback
        _print(
            "Running Scailable specific checks for WASM conversion. \nUse _sclbl_check=False to turn off",
            "MSG", (not _verbose))

        # input / output checking:
        if not graph.input:
            _print("This graph does not contain any inputs.")
            return False

        if not graph.output:
            _print("This graph does not contain any outputs.")
            return False

        # Sclbl checking:
        if not glob.ONNX_VERSION_INFO:
            if not _load_version_info():
                _print("Unable to load the ONNX_VERSION INFO.")
                # BUG FIX: without the version info, every lookup below
                # would raise; fail the check instead of crashing.
                return False

        # Check general ONNX version:
        if version.parse(xversion) < version.parse(
                glob.ONNX_VERSION_INFO['onnx_version']['version_min']):
            _print(
                "Your current onnx version is lower then our support minimum. Please update your ONNX to {}"
                .format(glob.ONNX_VERSION_INFO['onnx_version']['version_min']))
            return False

        if version.parse(xversion) > version.parse(
                glob.ONNX_VERSION_INFO['onnx_version']['version_max']):
            _print(
                "Your current onnx version is higher then our support max. Please downgrade your ONNX version to {}"
                .format(glob.ONNX_VERSION_INFO['onnx_version']['version_max']))
            return False

        if mod.ir_version < glob.ONNX_VERSION_INFO['onnx_version'][
                'ir_version_min']:
            _print(
                "Your current IR version is lower then our support minimum. Please update to {}"
                .format(
                    glob.ONNX_VERSION_INFO['onnx_version']['ir_version_min']))
            return False

        if mod.ir_version > glob.ONNX_VERSION_INFO['onnx_version'][
                'ir_version_max']:
            _print(
                "Your current IR version is higher then our support max. Please downgrade to {}"
                .format(
                    glob.ONNX_VERSION_INFO['onnx_version']['ir_version_max']))
            return False

        # Iterate through opset and check:
        for key in mod.opset_import:
            v = key.version
            if v < glob.ONNX_VERSION_INFO['onnx_version']['opset_min']:
                _print(
                    "One or more operators use an opset version that is too low. Please update to {}"
                    .format(
                        glob.ONNX_VERSION_INFO['onnx_version']['opset_min']))
                return False

            if v > glob.ONNX_VERSION_INFO['onnx_version']['opset_max']:
                _print(
                    "One or more operators use an opset version that is too high. Please downgrade to {}"
                    .format(
                        glob.ONNX_VERSION_INFO['onnx_version']['opset_max']))
                return False

        # Check individual nodes:
        not_supported = []
        for n in graph.node:
            op = n.op_type
            if op not in glob.ONNX_VERSION_INFO['operators']:
                not_supported.append(op)
        if not_supported:
            _print("The operator(s) {} are currently not supported.".format(
                not_supported))
            return False

        # Check dynamic
        for inputs in graph.input:
            if not inputs.type.tensor_type.shape.dim:
                _print(
                    "Your graph contains dynamically sized inputs, this is currently not supported."
                )
                return False
            for elem in inputs.type.tensor_type.shape.dim:
                if elem.dim_value == 0 or elem.dim_value == "":
                    _print(
                        "Your graph contains dynamically size inputs, this is currently not supported."
                    )
                    # BUG FIX: previously only printed; a zero-sized dim must
                    # fail the check, consistent with the branch above.
                    return False

    if not _sclbl_check and not _onnx_check:
        _print("Set _sclbl_check or _onnx_check to True to run any checks.")

    _print("Your graph was successfully checked.", "MSG", (not _verbose))
    return True
Example #12
0
def _make_module(in_shape, axis, repeat, op_version):
    """Build a Tile ONNX model for the requested opset.

    Tile-1 takes 'tiles' and 'axis' inputs (fed here by Constant nodes);
    Tile-6/11 take a single INT64 'repeats' initializer input.
    """
    node_inputs = ['input']
    initializers = []
    nodes = []

    in_vi = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)

    if op_version == 1:
        # tiles: number of copies along the chosen axis.
        tiles_tensor = helper.make_tensor('tiles',
                                          TensorProto.FLOAT,
                                          dims=[1],
                                          vals=repeat)
        node_inputs.append('tiles')
        nodes.append(
            helper.make_node('Constant',
                             inputs=[],
                             outputs=['tiles'],
                             value=tiles_tensor))

        # axis: the dimension to repeat along.
        axis_tensor = helper.make_tensor('axis',
                                         TensorProto.FLOAT,
                                         dims=[1],
                                         vals=axis)
        node_inputs.append('axis')
        nodes.append(
            helper.make_node('Constant',
                             inputs=[],
                             outputs=['axis'],
                             value=axis_tensor))
    else:
        # op_version 6/11: per-dimension repeat counts as an initializer.
        repeats_tensor = helper.make_tensor('repeats',
                                            TensorProto.INT64,
                                            dims=[len(repeat)],
                                            vals=repeat)
        initializers.append(repeats_tensor)
        node_inputs.append('repeats')

    # Output shape: single-axis scaling for Tile-1, full np.tile semantics
    # for the later opsets.
    if op_version == 1:
        out_shape = copy.deepcopy(in_shape)
        out_shape[axis[0]] *= repeat[0]
    else:
        out_shape = np.tile(np.ones(in_shape), tuple(repeat)).shape
    out_vi = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           out_shape)

    nodes.append(
        onnx.helper.make_node('Tile', inputs=node_inputs, outputs=['output']))

    graph_def = helper.make_graph(nodes,
                                  'test-model', [in_vi], [out_vi],
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    return helper.make_model(graph_def,
                             producer_name='onnx',
                             opset_imports=[op])
Example #13
0
def _make_module(in_shape, start, end, axes, step, outshape, op_version,
                 value_format):
    """Build a Slice ONNX model for opset 1 (attributes) or 10/11/13 (inputs).

    For the input-based form each of starts/ends/axes/steps is supplied
    either as a graph initializer or via a Constant node, depending on
    `value_format`.

    Fixes vs. original: the axes tensor previously declared
    dims=[len(end)] instead of dims=[len(axes)]; the repeated
    initializer/Constant wiring is factored into one helper.
    """
    input_names = []
    output_names = []
    inputs = []
    outputs = []
    initializers = []
    nodes = []
    attributes_dict = {}

    def _add_int64_input(name, values):
        # Register an INT64 tensor (initializer or Constant node, per
        # value_format) and wire it up as the next Slice input.
        tensor = helper.make_tensor(name,
                                    TensorProto.INT64,
                                    dims=[len(values)],
                                    vals=values)
        if value_format == 'initializer':
            initializers.append(tensor)
        else:
            nodes.append(
                helper.make_node('Constant',
                                 inputs=[],
                                 outputs=[name],
                                 value=tensor))
        input_names.append(name)

    # input
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    input_names.append('input')
    inputs.append(input)

    # output
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           outshape)
    output_names.append('output')
    outputs.append(output)

    if op_version == 1:
        # Slice-1: starts/ends (and optional axes) are node attributes.
        if axes is not None:
            attributes_dict['axes'] = axes
        attributes_dict['starts'] = start
        attributes_dict['ends'] = end
    else:
        # opset 10/11/13: starts/ends are required inputs; axes and steps
        # are optional trailing inputs.
        _add_int64_input('starts', start)
        _add_int64_input('ends', end)
        if axes is not None:
            # BUG FIX: dims now uses len(axes), not len(end).
            _add_int64_input('axes', axes)
        if step is not None:
            _add_int64_input('steps', step)

    slice_node = onnx.helper.make_node('Slice',
                                       inputs=input_names,
                                       outputs=output_names,
                                       **attributes_dict)
    nodes.append(slice_node)

    graph_def = helper.make_graph(nodes,
                                  'test-model',
                                  inputs,
                                  outputs,
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    model_def = helper.make_model(graph_def,
                                  producer_name='kendryte',
                                  opset_imports=[op])

    return model_def
Example #14
0
def _make_module(in_shape, minimum, maximum, op_version, value_format):
    """Build a Clip ONNX model; min/max are attributes before opset 11.

    For opset 11/12/13 the bounds become optional tensor inputs, supplied
    as initializers or Constant nodes depending on `value_format`.
    """
    node_inputs = ['input']
    initializers = []
    nodes = []
    clip_attrs = {}

    in_vi = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    out_vi = helper.make_tensor_value_info('output', TensorProto.FLOAT, in_shape)

    if op_version in (1, 6):
        # Attribute form: only set the bounds the caller provided.
        if minimum is not None:
            clip_attrs['min'] = minimum
        if maximum is not None:
            clip_attrs['max'] = maximum
    else:
        # opset 11/12/13: bounds arrive as optional inputs, min before max.
        for bound_name, bound in (('min', minimum), ('max', maximum)):
            if bound is None:
                continue
            bound_tensor = helper.make_tensor(bound_name,
                                              TensorProto.FLOAT,
                                              dims=[1],
                                              vals=[bound])
            if value_format == 'initializer':
                initializers.append(bound_tensor)
            else:
                nodes.append(
                    helper.make_node('Constant',
                                     inputs=[],
                                     outputs=[bound_name],
                                     value=bound_tensor))
            node_inputs.append(bound_name)

    clip = onnx.helper.make_node('Clip',
                                 inputs=node_inputs,
                                 outputs=['output'],
                                 **clip_attrs)
    nodes.append(clip)

    graph_def = helper.make_graph(nodes,
                                  'test-model',
                                  [in_vi],
                                  [out_vi],
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    return helper.make_model(graph_def,
                             producer_name='kendryte',
                             opset_imports=[op])
Example #15
0
import onnx
from onnx import helper, TensorProto

# Round-trip casts between INT8 and BFLOAT16.
IN8 = helper.make_tensor_value_info('in8', TensorProto.INT8, [3])
IN16 = helper.make_tensor_value_info('in16', TensorProto.BFLOAT16, [3])
OUT8 = helper.make_tensor_value_info('out8', TensorProto.INT8, [3])
OUT16 = helper.make_tensor_value_info('out16', TensorProto.BFLOAT16, [3])

cast_up = helper.make_node('Cast', ['in8'], ['out16'],
                           to=TensorProto.BFLOAT16)
cast_down = helper.make_node('Cast', ['in16'], ['out8'],
                             to=TensorProto.INT8)

graph_def = helper.make_graph(
    [cast_up, cast_down],
    'unstable_types',
    [IN8, IN16],
    [OUT8, OUT16],
)
# Cast with BFLOAT16 requires opset 13.
model_def = helper.make_model(
    graph_def,
    producer_name='unstable_types.py',
    opset_imports=[onnx.OperatorSetIdProto(version=13)])
onnx.save(model_def, 'unstable_types.onnx')
Example #16
0
import onnx

# Re-export the YOLOv4-tiny model with its opset pinned to 12.
model = onnx.load('resource/YOLOv4/yolov4tiny_1x416x416xBGRxByte.onnx')
opset = onnx.OperatorSetIdProto()
opset.version = 12
update_model = onnx.helper.make_model(model.graph, opset_imports=[opset])
onnx.save(update_model,
          'resource/YOLOv4/yolov4tiny_1x416x416xBGRxByte.opset12.onnx')
Example #17
0
def _make_module(in_shape, axis, split, output_size, op_version, value_format):
    """Build an ONNX model containing a single Split node.

    Args:
        in_shape: Shape (flat list of ints) of the float input tensor.
        axis: Split axis, or None to fall back to dimension 0 for the
            output-shape computation (and omit the attribute).
        split: Per-output sizes along the split dimension, or None for
            equal splits of size in_shape[dim] // output_size.
        output_size: Number of Split outputs.
        op_version: ONNX opset version. For 1/2/11 `split` is passed as a
            node attribute; otherwise (opset 13) it is a second int64 input.
        value_format: For opset 13, how the split sizes are supplied:
            'initializer' or a Constant node.

    Returns:
        An onnx ModelProto with producer name 'kendryte'.
    """
    initializers = []
    nodes = []
    attributes_dict = {}

    # Graph input.
    input_vi = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    input_names = ['input']
    inputs = [input_vi]

    # Graph outputs: each keeps the input shape except along the split
    # dimension.  NOTE: the original code deep-copied the (immutable)
    # output name and the freshly built value_info; both copies were
    # pure overhead and are dropped here.
    output_names = []
    outputs = []
    dim = axis if axis is not None else 0
    out_shape = list(in_shape)  # shapes are flat int lists; shallow copy suffices
    for i in range(output_size):
        output_name = 'output_{0}'.format(i)
        output_names.append(output_name)
        out_shape[dim] = split[
            i] if split is not None else in_shape[dim] // output_size
        outputs.append(
            helper.make_tensor_value_info(output_name, TensorProto.FLOAT,
                                          out_shape))

    if axis is not None:
        attributes_dict['axis'] = axis

    if op_version in (1, 2, 11):
        # opset 1/2/11: split sizes are a node attribute.
        if split is not None:
            attributes_dict['split'] = split
    elif split is not None:
        # opset 13: split sizes are a second (int64) input, supplied either
        # as a graph initializer or via a Constant node.
        split_tensor = helper.make_tensor('split',
                                          TensorProto.INT64,
                                          dims=[len(split)],
                                          vals=split)
        if value_format == 'initializer':
            initializers.append(split_tensor)
        else:
            nodes.append(helper.make_node('Constant',
                                          inputs=[],
                                          outputs=['split'],
                                          value=split_tensor))
        input_names.append('split')

    nodes.append(onnx.helper.make_node('Split',
                                       inputs=input_names,
                                       outputs=output_names,
                                       **attributes_dict))

    graph_def = helper.make_graph(nodes,
                                  'test-model',
                                  inputs,
                                  outputs,
                                  initializer=initializers)

    op = onnx.OperatorSetIdProto()
    op.version = op_version
    return helper.make_model(graph_def,
                             producer_name='kendryte',
                             opset_imports=[op])
Example #18
0
def clean(graph: xpb2.GraphProto,
          _optimize: bool = True,
          _simplify: bool = True,
          _remove_initializer: bool = True,
          _producer: str = "sclblonnx",
          _verbose: bool = True,
          **kwargs):
    """ clean cleans an ONNX graph using onnx tooling

    This method will attempt to clean the supplied graph by
    a. Removing initializers from input
    b. Optimizing it using onnxoptimizer.optimize
    c. Simplifying it using onnxsim.simplify

    If one of these fails the method will print an error message and return the unaltered graph.

    Args:
        graph: An ONNX graph
        _optimize: Boolean, default True. Optimize the model using onnxoptimizer.
        _simplify: Boolean, default True. Simplify the model using simplify.
        _remove_initializer: Boolean, default True. Remove initializers from input.
        _producer: Optional string with producer name. Default 'sclblonnx' (used for internal conversion)
        _verbose: Print user feedback; default True (note, errors are always printed).
        **kwargs

    Returns:
        The cleaned ONNX graph, or the old graph if an error occurs.
    """
    # Wrap the graph in a model; default to opset 12 unless the caller
    # supplied explicit opset_imports in kwargs.
    try:
        if 'opset_imports' not in kwargs:
            op = onnx.OperatorSetIdProto()
            op.version = 12
            mod = xhelp.make_model(graph,
                                   producer_name=_producer,
                                   opset_imports=[op],
                                   **kwargs)
        else:
            mod = xhelp.make_model(graph, producer_name=_producer, **kwargs)
    except Exception as e:
        _print("Unable to create the model: " + str(e))
        return graph

    if _optimize:
        try:
            mod = onnxoptimizer.optimize(mod, glob.OPTIMIZER_PASSES, **kwargs)
        except Exception as e:
            _print("Unable to optimize your model: " + str(e))
            return graph

    if _simplify:
        try:
            mod, _ = simplify(mod, **kwargs)
        except Exception as e:
            _print("Unable to simplify your model: " + str(e))
            return graph

    # From: onnxruntime/tools/python/remove_initializer_from_input.py
    # Drop every graph input that is shadowed by an initializer of the
    # same name.  (Loop variable renamed from `input`, which shadowed the
    # builtin; the manual dict-building loop became a comprehension.)
    graph = mod.graph
    if _remove_initializer:
        inputs = graph.input
        name_to_input = {graph_input.name: graph_input for graph_input in inputs}
        for initializer in graph.initializer:
            if initializer.name in name_to_input:
                inputs.remove(name_to_input[initializer.name])

    _print("The graph was successfully cleaned.", "MSG", (not _verbose))
    return graph
Example #19
0
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto

# Length-1 float operands and result for a single Mul node.
lhs_vi = helper.make_tensor_value_info('bar', TensorProto.FLOAT, [1])
rhs_vi = helper.make_tensor_value_info('baz', TensorProto.FLOAT, [1])
result_vi = helper.make_tensor_value_info('foo', TensorProto.FLOAT, [1])

mul_node = helper.make_node('Mul', ['bar', 'baz'], ['foo'])

mul_graph = helper.make_graph(
    [mul_node],
    'mul',
    [lhs_vi, rhs_vi],
    [result_vi],
)
mul_model = helper.make_model(
    mul_graph,
    producer_name='barfoo.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(mul_model, 'barfoo.onnx')
Example #20
0
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto

# Cast a length-7 float tensor to int8 with a single Cast node.
src_vi = helper.make_tensor_value_info('in', TensorProto.FLOAT, [7])
dst_vi = helper.make_tensor_value_info('out', TensorProto.INT8, [7])

cast_node = helper.make_node('Cast', ['in'], ['out'], to=TensorProto.INT8)

cast_graph = helper.make_graph(
    [cast_node],
    'float_to_int8',
    [src_vi],
    [dst_vi],
)
cast_model = helper.make_model(cast_graph, producer_name='float_to_int8.py', opset_imports=[onnx.OperatorSetIdProto(version=13)])
onnx.save(cast_model, 'float_to_int8.onnx')