Example #1
def make_tensor(name, data_type, dims, vals, raw=False):
    '''
    Make a TensorProto with specified arguments.  If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.
    '''
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name

    if data_type == TensorProto.STRING:
        assert not raw, "Can not use raw_data to store string type"
        # string values are stored by the generic field lookup below
        # (string_data); extending the field here as well would duplicate them

    if (data_type == TensorProto.COMPLEX64
            or data_type == TensorProto.COMPLEX128):
        vals = split_complex_to_pairs(vals)
    if raw:
        tensor.raw_data = vals
    else:
        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
        getattr(tensor, field).extend(vals)

    tensor.dims.extend(dims)
    return tensor
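
A minimal usage sketch for the helper above, assuming it is onnx.helper.make_tensor (that import path is the assumption here):

import numpy as np
from onnx import TensorProto, helper

# Typed storage: FLOAT values land in the float_data field.
t_float = helper.make_tensor('const_f', TensorProto.FLOAT, [2, 2],
                             [1.0, 2.0, 3.0, 4.0])
print(t_float.float_data)    # [1.0, 2.0, 3.0, 4.0]

# Raw storage: vals must already be the serialized little-endian bytes.
raw = np.arange(4, dtype=np.int64).tobytes()
t_raw = helper.make_tensor('const_i', TensorProto.INT64, [4], raw, raw=True)
print(len(t_raw.raw_data))   # 32 bytes = 4 * int64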
Example #2
def make_tensor(
        name: Text,
        data_type: int,
        dims: Sequence[int],
        vals: Any,
        raw: bool = False
) -> TensorProto:
    '''
    Make a TensorProto with specified arguments.  If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.

    Arguments:
        name (string): tensor name
        data_type (int): a value such as onnx.TensorProto.FLOAT
        dims (List[int]): shape
        vals: values
        raw (bool): if True, vals contains the serialized content of the tensor,
            otherwise, vals should be a list of values of the type defined by *data_type*

    Returns:
        TensorProto
    '''
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name

    if data_type == TensorProto.STRING:
        assert not raw, "Can not use raw_data to store string type"

    # Check number of vals specified equals tensor size
    expected_size = 1 if (not raw) else (mapping.TENSOR_TYPE_TO_NP_TYPE[data_type].itemsize)
    # Flatten a numpy array if its rank > 1
    if type(vals) is np.ndarray and len(vals.shape) > 1:
        vals = vals.flatten()
    for d in dims:
        expected_size = expected_size * d

    if len(vals) != expected_size:
        raise ValueError("Number of values does not match tensor's size. Expected {}, but got {}."
                         .format(expected_size, len(vals)))

    if raw:
        tensor.raw_data = vals
    else:
        if (data_type == TensorProto.COMPLEX64
                or data_type == TensorProto.COMPLEX128):
            vals = split_complex_to_pairs(vals)
        # float16 is stored as its uint16 bit pattern
        elif data_type == TensorProto.FLOAT16:
            vals = np.array(vals).astype(np.float16).view(dtype=np.uint16).flatten().tolist()
        # bfloat16 is also stored as uint16: the upper 16 bits of the
        # float32 representation (truncation)
        elif data_type == TensorProto.BFLOAT16:
            vals = (np.array(vals).astype(np.float32).view(dtype=np.uint32) >> 16).astype(np.uint16).flatten().tolist()
        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
        getattr(tensor, field).extend(vals)
    tensor.dims.extend(dims)
    return tensor
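
A short sketch of the two behaviors this variant adds, the float16 bit-cast and the size check, assuming the snippet is the onnx.helper.make_tensor of newer releases (so the imports below are that assumption):

import numpy as np
from onnx import TensorProto, helper, numpy_helper

# FLOAT16 values are bit-cast to uint16 and stored in int32_data.
t_half = helper.make_tensor('half', TensorProto.FLOAT16, [2], [1.0, 2.0])
print(t_half.int32_data)               # [15360, 16384] (float16 bit patterns)
print(numpy_helper.to_array(t_half))   # [1. 2.], dtype float16

# The size check rejects value lists that do not match the shape.
try:
    helper.make_tensor('bad', TensorProto.FLOAT, [2, 2], [1.0, 2.0])
except ValueError as err:
    print(err)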
Example #3
def from_array(
        arr,
        name=None):  # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
    """Converts a numpy array to a tensor def.

    Inputs:
        arr: a numpy array.
        name: (optional) the name of the tensor.
    Returns:
        tensor_def: the converted tensor def.
    """
    tensor = TensorProto()
    tensor.dims.extend(arr.shape)
    if name:
        tensor.name = name

    if arr.dtype == object:
        # Special care for strings.
        tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
        # TODO: Introduce full string support.
        # We flatten the array in case a 2-D array was specified; the error
        # below is raised for a 3-D array or any other kind of object.
        # If you need more complex shapes, follow these instructions: unlike
        # other types, where the shape is inferred automatically from nested
        # arrays of values, the only reliable way to feed strings today is to
        # put them into a flat array typed with astype(object) (otherwise the
        # strings may end up with different dtypes depending on their length)
        # and then set the shape with .reshape([x, y, z]).
        flat_array = arr.flatten()
        for e in flat_array:
            if isinstance(e, text_type):
                tensor.string_data.append(e.encode('utf-8'))
            elif isinstance(e, np.ndarray):
                for s in e:
                    if isinstance(s, text_type):
                        tensor.string_data.append(s.encode('utf-8'))
                    elif isinstance(s, bytes):
                        tensor.string_data.append(s)
            elif isinstance(e, bytes):
                tensor.string_data.append(e)
            else:
                raise NotImplementedError(
                    "Unrecognized object in the object array, expect a string, or array of bytes: ",
                    str(type(e)))
        return tensor

    # For numerical types, directly use numpy raw bytes.
    try:
        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
    except KeyError:
        raise RuntimeError("Numpy data type not understood yet: {}".format(
            str(arr.dtype)))
    tensor.data_type = dtype
    tensor.raw_data = arr.tobytes()  # note: tobytes() is only available in numpy >= 1.9
    if sys.byteorder == 'big':
        # Convert endian from big to little
        convert_endian(tensor)

    return tensor
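
A hedged round-trip sketch, assuming the function above is onnx.numpy_helper.from_array and pairing it with numpy_helper.to_array:

import numpy as np
from onnx import numpy_helper

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = numpy_helper.from_array(arr, name='weights')
print(tensor.dims)             # [2, 3]
print(len(tensor.raw_data))    # 24 bytes = 6 * float32

# Round trip back to numpy.
restored = numpy_helper.to_array(tensor)
assert np.array_equal(arr, restored)

# Strings must go through an object array and end up in string_data.
s = numpy_helper.from_array(np.array(['a', 'bc'], dtype=object))
print(s.string_data)           # [b'a', b'bc']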
Example #4
def make_external_tensor(name, data_type, dims, raw_data=None, **kwargs):
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name
    tensor.dims.extend(dims)
    if raw_data is not None:
        tensor.raw_data = raw_data
    external_data_helper.set_external_data(tensor, **kwargs)
    order_repeated_field(tensor.external_data, 'key', kwargs.keys())
    return tensor
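
A sketch of how the helper above might be called; it assumes the surrounding module is importable (set_external_data from onnx.external_data_helper and the order_repeated_field utility), and weights.bin is a hypothetical file name:

import numpy as np
from onnx import TensorProto

data = np.arange(4, dtype=np.float32).tobytes()
w = make_external_tensor('W', TensorProto.FLOAT, [2, 2], raw_data=data,
                         location='weights.bin', offset=0, length=len(data))
print(w.data_location == TensorProto.EXTERNAL)       # True
print([(e.key, e.value) for e in w.external_data])   # location/offset/length entries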
Example #5
def create_onnx_components():
    """创建onnx的基本组件:node, graph, model,可以看到onnx是如何本onnx.proto文件对应的
    参考:https://github.com/onnx/onnx/blob/master/onnx/examples/Protobufs.ipynb
    """
    # ------ Create attributes: just pass a description and a value ------
    arg1 = helper.make_attribute("this is INT", 64)
    
    arg2 = helper.make_attribute("this is float/1", 3.14)
    
    arg3 = helper.make_attribute("this is STRING", "helloworld")
    
    arg4 = helper.make_attribute("this is INTS", [1,2,3,4])
    
    # ------ Create a TensorProto ------
    # make_tensor_value_info needs a name, an element type and a shape; note
    # that it actually builds a ValueInfoProto (type/shape info), not a TensorProto
    tensor0 = helper.make_tensor_value_info('t0', TensorProto.FLOAT, [2, 3])
    
    array1 = np.array([[1,2,3],[4,5,6]])
    tensor1 = numpy_helper.from_array(array1)   # build a TensorProto from a numpy array
    
    with open('ts.pb', 'wb') as f:
        f.write(tensor1.SerializeToString())     # save the TensorProto
    tensor2 = TensorProto()
    with open('ts.pb', 'rb') as f:
        tensor2.ParseFromString(f.read())       # load the TensorProto back
    
    
    # ------ Create nodes ------
    node1 = helper.make_node("Relu", ["X"], ["Y"])  # op_type="Relu"

    node2 = helper.make_node("Conv", ["X", "W"], ["Y"],
                             kernel_shape=[3, 3], strides=[1, 1], pads=[1, 1, 1, 1])
    print(node2)
    print(helper.printable_node(node2))    # the familiar ONNX text form: %Y = Conv[...](%X, %W)
    
    # ------ Create a graph ------
    node_list = []
    arg_list = []
    graph1 = helper.make_graph(
    [
        helper.make_node("FC", ["X", "W1", "B1"], ["H1"]),
        helper.make_node("Relu", ["H1"], ["R1"]),
        helper.make_node("FC", ["R1", "W2", "B2"], ["Y"]),
    ],
    "MLP",
    [
        helper.make_tensor_value_info('X' , TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('W1', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('B1', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('W2', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('B2', TensorProto.FLOAT, [1]),
    ],
    [
        helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1]),
    ])
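
The docstring mentions node, graph, and model, but the snippet stops at the graph. A minimal sketch of the missing step, assuming graph1 from above is in scope (the producer name and file name are arbitrary):

import onnx
from onnx import helper

# Wrap the graph in a ModelProto and print its readable form.
# Note: "FC" is not a standard ONNX operator, so onnx.checker.check_model
# would reject this graph; building and saving it still works.
model = helper.make_model(graph1, producer_name='onnx-example')
print(helper.printable_graph(model.graph))
onnx.save(model, 'mlp.onnx')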
Example #6
    def test_check_string_tensor(self):
        tensor = TensorProto()
        tensor.data_type = TensorProto.STRING
        tensor.dims.append(1)
        tensor.string_data.append('Test'.encode('utf-8'))
        checker.check_tensor(tensor)

        del tensor.string_data[:]
        tensor.raw_data = 'Test'.encode('utf-8')
        # string data should not be stored in raw_data field
        self.assertRaises(checker.ValidationError, checker.check_tensor, tensor)
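
For contrast, a small sketch of the accepted way to build a string tensor, using onnx.helper.make_tensor, which funnels the bytes into string_data:

from onnx import TensorProto, checker, helper

# Strings are carried as bytes in the string_data field, never in raw_data.
t = helper.make_tensor('words', TensorProto.STRING, [2], [b'foo', b'bar'])
checker.check_tensor(t)       # passes
print(t.string_data)          # [b'foo', b'bar']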
Example #7
    def test_get_inputs(self):
        model = OnnxModel(model_proto=ModelProto(
            graph=GraphProto(initializer=[TensorProto(name='y')],
                             input=[
                                 ValueInfoProto(name='x'),
                                 ValueInfoProto(name='y'),
                                 ValueInfoProto(name='z')
                             ])),
                          input_data_formats=[None, None])

        self.assertEqual(model.get_inputs(),
                         [ValueInfoProto(name='x'),
                          ValueInfoProto(name='z')])
Example #8
def load_checkpoint_to_model(path_to_checkpoint, model):
    """Loads the checkpoint to an onnx inference model."""

    # Load the parameters from the checkpoint
    parameters = _internal_load_checkpoint(path_to_checkpoint)

    parameters_dict = {}
    for param in parameters:
        param_proto = TensorProto()
        param_proto.ParseFromString(param)
        parameters_dict[param_proto.name] = param_proto

    for initializer in model.graph.initializer:
        initializer.CopyFrom(parameters_dict[initializer.name])
Example #9
def make_tensor(name: str, vals: np.ndarray) -> ITensorProto:
    """
    Make a TensorProto with specified arguments.  If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.
    """
    vals = vals.astype(np.float32)

    tensor = TensorProto()
    tensor.data_type = DataType.FLOAT
    tensor.name = name
    tensor.raw_data = vals.tobytes()
    tensor.dims.extend(vals.shape)
    return tensor
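
A usage sketch for this simplified variant, assuming the function above is in scope and that DataType.FLOAT is the standard onnx enum value; reading back works with np.frombuffer or onnx.numpy_helper.to_array, since the data sits in raw_data as float32:

import numpy as np
from onnx import numpy_helper

arr = np.array([[1.5, 2.5], [3.5, 4.5]], dtype=np.float64)
t = make_tensor('bias', arr)   # values are cast to float32 and stored raw

restored = np.frombuffer(t.raw_data, dtype=np.float32).reshape(tuple(t.dims))
assert np.allclose(restored, arr.astype(np.float32))
print(numpy_helper.to_array(t))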
Example #10
def make_tensor(
        name,  # type: Text
        data_type,  # type: int
        dims,  # type: Sequence[int]
        vals,  # type: Any
        raw=False  # type: bool
):  # type: (...) -> TensorProto
    '''
    Make a TensorProto with specified arguments.  If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.
    '''
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name

    if data_type == TensorProto.STRING:
        assert not raw, "Can not use raw_data to store string type"

    # Check number of vals specified equals tensor size
    size = 1 if (not raw) else (
        mapping.TENSOR_TYPE_TO_NP_TYPE[data_type].itemsize)
    for d in dims:
        size = size * d
    if (len(vals) != size):
        raise ValueError("Number of values does not match tensor's size.")

    if (data_type == TensorProto.COMPLEX64
            or data_type == TensorProto.COMPLEX128):
        vals = split_complex_to_pairs(vals)

    if raw:
        tensor.raw_data = vals
    else:
        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
        getattr(tensor, field).extend(vals)
    tensor.dims.extend(dims)
    return tensor
Example #11
def from_array(
        arr,
        name=None):  # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
    """Converts a numpy array to a tensor def.

    Inputs:
        arr: a numpy array.
        name: (optional) the name of the tensor.
    Returns:
        tensor_def: the converted tensor def.
    """
    tensor = TensorProto()
    tensor.dims.extend(arr.shape)
    if name:
        tensor.name = name

    if arr.dtype == np.object:
        # Special care for strings.
        tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
        for e in arr:
            if isinstance(e, text_type):
                tensor.string_data.append(e.encode('utf-8'))
            elif isinstance(e, np.ndarray):
                tensor.string_data.append(e.tobytes())
            else:
                raise NotImplementedError(
                    "Unrecognized object in the object array, expect a string, or array of bytes"
                )
        return tensor

    # For numerical types, directly use numpy raw bytes.
    try:
        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
    except KeyError:
        raise RuntimeError("Numpy data type not understood yet: {}".format(
            str(arr.dtype)))
    tensor.data_type = dtype
    tensor.raw_data = arr.tobytes()  # note: tobytes() is only available in numpy >= 1.9

    return tensor
Example #12
def add_onnx_graph(scope, operator, container, onx):
    """
    Adds a whole ONNX graph to an existing one following
    :epkg:`skl2onnx` API assuming this ONNX graph implements
    an `operator <http://onnx.ai/sklearn-onnx/api_summary.html?
    highlight=operator#skl2onnx.common._topology.Operator>`_.

    :param scope: scope (to get unique names)
    :param operator: operator
    :param container: container
    :param onx: ONNX graph
    """
    graph = onx.graph
    name_mapping = {}
    node_mapping = {}
    for node in graph.node:
        name = node.name
        if name is not None:
            node_mapping[node.name] = _clean_initializer_name(node.name, scope)
        for o in node.input:
            name_mapping[o] = _clean_variable_name(o, scope)
        for o in node.output:
            name_mapping[o] = _clean_variable_name(o, scope)
    for o in graph.initializer:
        name_mapping[o.name] = _clean_operator_name(o.name, scope)

    inputs = [_copy_inout(o, scope, name_mapping[o.name]) for o in graph.input]
    outputs = [
        _copy_inout(o, scope, name_mapping[o.name]) for o in graph.output
    ]

    for inp, to in zip(operator.inputs, inputs):
        n = helper.make_node('Identity', [inp.onnx_name], [to.name],
                             name=_clean_operator_name('Identity', scope))
        container.nodes.append(n)

    for inp, to in zip(outputs, operator.outputs):
        n = helper.make_node('Identity', [inp.name], [to.onnx_name],
                             name=_clean_operator_name('Identity', scope))
        container.nodes.append(n)

    for node in graph.node:
        n = helper.make_node(
            node.op_type, [name_mapping[o] for o in node.input],
            [name_mapping[o] for o in node.output],
            name=node_mapping[node.name] if node.name else None,
            domain=node.domain if node.domain else None)
        n.attribute.extend(node.attribute)  # pylint: disable=E1101
        container.nodes.append(n)

    for o in graph.initializer:
        as_str = o.SerializeToString()
        tensor = TensorProto()
        tensor.ParseFromString(as_str)
        tensor.name = name_mapping[o.name]
        container.initializers.append(tensor)

    # opset
    for oimp in onx.opset_import:
        container.node_domain_version_pair_sets.add(
            (oimp.domain, oimp.version))
Example #13
#!/usr/bin/env python

import os
import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet
import onnx
import numpy as np
from onnx import TensorProto
from onnx import numpy_helper

curr_dir = os.path.dirname(__file__)

sym, arg_params, aux_params = onnx_mxnet.import_model(curr_dir + "/model.onnx")

input_tensor = TensorProto()
with open(curr_dir + "/input_0.pb", 'rb') as proto_file:
    input_tensor.ParseFromString(proto_file.read())
input_array = numpy_helper.to_array(input_tensor)

x = mx.nd.array(input_array)

mod = mx.mod.Module(symbol=sym,
                    data_names=['0'],
                    context=mx.cpu(),
                    label_names=None)
mod.bind(for_training=False,
         data_shapes=[('0', (2, 4, 6, 6))],
         label_shapes=None)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
mod.forward(mx.io.DataBatch([x]))
result = mod.get_outputs()[0].asnumpy()
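
To sanity-check the MXNet result, one could compare it against a reference output protobuf; the file name output_0.pb below is an assumption following the usual ONNX test-data layout and may not exist next to this script:

# Hypothetical reference file, following the ONNX test-data naming convention.
expected_tensor = TensorProto()
with open(curr_dir + "/output_0.pb", 'rb') as proto_file:
    expected_tensor.ParseFromString(proto_file.read())
expected = numpy_helper.to_array(expected_tensor)

np.testing.assert_allclose(result, expected, rtol=1e-3, atol=1e-5)
print("Output matches the reference within tolerance.")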