def _make_module(in_shape, op_version: int, inverse: int):
    """Build an ONNX model chaining Mul/Div/Sub/Add nodes via BinaryGraph.

    Args:
        in_shape: shape used for both the graph input and the graph output.
        op_version: ONNX opset version declared on the model.
        inverse: forwarded to BinaryGraph; controls operand order, which is
            significant because a/b != b/a and a-0 != 0-a.

    Returns:
        An onnx ModelProto.
    """
    # NOTE: operand order matters (a/b != b/a; a-0 != 0-a).  The random
    # input data may contain zeros, so constants (never the data) are used
    # as divisors below.
    # Renamed from `input` to avoid shadowing the builtin.
    input_vi = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                             in_shape)
    output_vi = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                              in_shape)

    g = BinaryGraph(inverse)
    x = g.make_node('mul_0', input_vi, vals=[1])
    x = g.make_node('div_0', x, vals=[1])
    x = g.make_node('mul_1', x, vals=[3])
    x = g.make_node('sub_0', x, vals=[0])
    x = g.make_node('add_0', x, vals=[-2])
    x = g.make_node('div_1', x, vals=[1])
    x = g.make_node('sub_1', x, vals=[0])
    x = g.make_node('mul_2', x, vals=[1])
    x = g.make_node('div_2', x, vals=[1])
    x = g.make_node('add_1', x, vals=[-2], final=True)

    graph_def = helper.make_graph(g.nodes,
                                  'test-model', [input_vi], [output_vi],
                                  initializer=g.initializers)
    op = OperatorSetIdProto()
    op.version = op_version
    model_def = helper.make_model(graph_def,
                                  producer_name='kendryte',
                                  opset_imports=[op])
    # Removed leftover debug print(model_def): dumping the full proto to
    # stdout on every call is noisy and serves no functional purpose.
    return model_def
def gen_scale_input(model_path):
    """Build and save a Mul(scale) -> MatMul test model.

    The graph multiplies scalar ``input_0`` by an initializer ``scale``
    (1.0), then MatMuls the result with ``input_1`` of shape [1, K] to
    produce ``output_0`` of shape [K].

    Args:
        model_path: destination path passed to ``save``.
    """
    nodes = [
        helper.make_node("Mul", ["input_0", "scale"], ["scaled_input_0"],
                         "scale input_0"),
        helper.make_node(
            "MatMul",
            ["scaled_input_0", "input_1"],
            ["output_0"],
            "MatMul input_0 and input_1",
        ),
    ]

    initializers = [helper.make_tensor("scale", TensorProto.FLOAT, [1], [1.0])]

    inputs = [
        helper.make_tensor_value_info("input_0", TensorProto.FLOAT, []),
        helper.make_tensor_value_info("input_1", TensorProto.FLOAT, [1, "K"]),
    ]

    outputs = [
        helper.make_tensor_value_info("output_0", TensorProto.FLOAT, ["K"]),
    ]

    onnxdomain = OperatorSetIdProto()
    onnxdomain.version = 14
    onnxdomain.domain = ""
    # Bug fix: `msdomain` was referenced below without ever being defined
    # (NameError at runtime).  Define it following the convention used by
    # the other model generators in this file (com.microsoft, version 1).
    msdomain = OperatorSetIdProto()
    msdomain.version = 1
    msdomain.domain = "com.microsoft"
    opsets = [onnxdomain, msdomain]
    save(model_path, nodes, inputs, outputs, initializers, opsets)
Exemple #3
0
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     """Convert a Caffe2 net to a validated ONNX ModelProto.

     All positional/keyword arguments are forwarded to
     caffe2_net_to_onnx_graph; the default ONNX domain opset is attached
     before the model is checked.
     """
     onnx_opset = OperatorSetIdProto()
     onnx_opset.domain = ''  # default ONNX domain
     onnx_opset.version = cls._target_opset_version
     graph = cls.caffe2_net_to_onnx_graph(*args, **kwargs)
     model = make_model(graph)
     model.opset_import.extend([onnx_opset])
     checker.check_model(model)
     return model
Exemple #4
0
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     """Build a checked ONNX model from a Caffe2 net."""
     opset = OperatorSetIdProto()
     opset.domain = ''  # ONNX default domain
     opset.version = cls.target_opset_version
     graph = cls.caffe2_net_to_onnx_graph(*args, **kwargs)
     model = make_model(
         graph,
         opset_imports=[opset],  # current supported opset version
         producer_name='onnx-caffe2',  # producer name
     )
     checker.check_model(model)
     return model
Exemple #5
0
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     """Translate a Caffe2 net into an ONNX model and validate it."""
     default_opset = OperatorSetIdProto()
     default_opset.domain = ''  # ONNX default domain
     default_opset.version = cls.target_opset_version
     onnx_model = make_model(
         cls.caffe2_net_to_onnx_graph(*args, **kwargs),
         producer_name='onnx-caffe2',  # producer name
         opset_imports=[default_opset],  # current supported opset version
     )
     checker.check_model(onnx_model)
     return onnx_model
Exemple #6
0
 def make_model(self, graph, producer_name="onnx-tests"):
     """Wrap *graph* in a ModelProto using the configured opset.

     When a mapping exists, ir_version is pinned to match the opset;
     any failure while doing so is deliberately ignored.
     """
     opset = OperatorSetIdProto()
     opset.version = self.config.opset
     model_proto = helper.make_model(
         graph, producer_name=producer_name, opset_imports=[opset])
     try:
         # Best-effort: align ir_version with the opset when known.
         pinned = constants.OPSET_TO_IR_VERSION.get(
             self.config.opset, model_proto.ir_version)
         model_proto.ir_version = pinned
     except:  # pylint: disable=bare-except
         pass
     return model_proto
Exemple #7
0
def make_opsetid(domain: Text, version: int) -> OperatorSetIdProto:
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id
    Returns:
        OperatorSetIdProto
    """
    proto = OperatorSetIdProto()
    proto.domain, proto.version = domain, version
    return proto
Exemple #8
0
def make_operatorsetid(
        domain,  # type: Text
        version,  # type: int
):  # type: (...) -> OperatorSetIdProto
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id
    """
    opset_id = OperatorSetIdProto()
    opset_id.version = version
    opset_id.domain = domain
    return opset_id
def GenerateModel(model_name, has_casts=False):
    """Build and save a SimplifiedLayerNorm test model.

    The graph computes A / sqrt(mean(A^2) + 1e-12) * gamma.  When
    has_casts is True the model uses FLOAT16 input/output/gamma and
    inserts Cast nodes (A -> float before the norm, div_out -> float16
    after it) so the cast-wrapped fusion pattern can be exercised.

    Args:
        model_name: file path the ONNX model is written to.
        has_casts: whether to insert Cast nodes and use FLOAT16 tensors.
    """
    nodes = [  # SimplifiedLayerNorm subgraph
        helper.make_node("Pow", ["cast_A" if has_casts else "A", "pow_in_2"],
                         ["pow_out"], "pow"),
        helper.make_node("ReduceMean", ["pow_out"], ["rd2_out"],
                         "reduce",
                         axes=[-1],
                         keepdims=1),
        helper.make_node("Add", ["rd2_out", "const_e12"], ["add1_out"], "add"),
        helper.make_node("Sqrt", ["add1_out"], ["sqrt_out"], "sqrt"),
        helper.make_node("Div", ["cast_A" if has_casts else "A", "sqrt_out"],
                         ["div_out"], "div"),
        helper.make_node("Mul",
                         ["gamma", "cast_div_out" if has_casts else "div_out"],
                         ["C"], "mul"),
    ]

    if has_casts:
        # to=1 is FLOAT, to=10 is FLOAT16 (TensorProto enum values).
        nodes.extend([
            helper.make_node("Cast", ["A"], ["cast_A"], "cast A", to=1),
            helper.make_node("Cast", ["div_out"], ["cast_div_out"],
                             "cast_2",
                             to=10),
        ])

    initializers = [  # initializers
        helper.make_tensor('pow_in_2', TensorProto.FLOAT, [], [2]),
        helper.make_tensor('const_e12', TensorProto.FLOAT, [], [1e-12]),
        helper.make_tensor(
            'gamma', TensorProto.FLOAT16 if has_casts else TensorProto.FLOAT,
            [4], [1, 2, 3, 4]),
    ]

    input_type = TensorProto.FLOAT16 if has_casts else TensorProto.FLOAT
    output_type = TensorProto.FLOAT16 if has_casts else TensorProto.FLOAT

    graph = helper.make_graph(
        nodes,
        "SimplifiedLayerNorm",  #name
        [  # inputs
            helper.make_tensor_value_info('A', input_type, [16, 32, 4]),
        ],
        [  # outputs
            helper.make_tensor_value_info('C', output_type, [16, 32, 4]),
        ],
        initializers)

    onnxdomain = OperatorSetIdProto()
    onnxdomain.version = 12
    # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
    onnxdomain.domain = ""
    msdomain = OperatorSetIdProto()
    msdomain.version = 1
    msdomain.domain = "com.microsoft"
    opsets = [onnxdomain, msdomain]

    model = helper.make_model(graph, opset_imports=opsets)
    onnx.save(model, model_name)
def GenerateModel2(model_name):
    """Build and save a LayerNormWithCast4 test model.

    The graph is an expanded LayerNorm over FLOAT16 input A: cast to
    float, mean-center, variance via Pow/ReduceMean, normalize, cast back
    to FLOAT16 (to=10), then scale by gamma and shift by beta.

    Args:
        model_name: file path the ONNX model is written to.
    """
    nodes = [  # LayerNormWithCast4 subgraph
        helper.make_node("Cast", ["A"], ["cast_A"], "cast1", to=1),
        helper.make_node("ReduceMean", ["cast_A"], ["rd1_out"],
                         "reduce",
                         axes=[-1]),
        # Two identical Sub nodes: the fusion pattern expects separate
        # producers for the Pow branch and the Div branch.
        helper.make_node("Sub", ["cast_A", "rd1_out"], ["sub1_out"], "sub1"),
        helper.make_node("Sub", ["cast_A", "rd1_out"], ["sub2_out"], "sub2"),
        helper.make_node("Pow", ["sub1_out", "pow_in_2"], ["pow_out"], "pow"),
        helper.make_node("ReduceMean", ["pow_out"], ["rd2_out"],
                         "reduce2",
                         axes=[-1]),
        helper.make_node("Add", ["rd2_out", "const_0"], ["add1_out"], "add"),
        helper.make_node("Sqrt", ["add1_out"], ["sqrt_out"], "sqrt"),
        helper.make_node("Div", ["sub2_out", "sqrt_out"], ["div_out"], "div"),
        helper.make_node("Cast", ["div_out"], ["cast_out"], "cast2", to=10),
        helper.make_node("Mul", ["gamma", "cast_out"], ["mul_out"], "mul"),
        helper.make_node("Add", ["beta", "mul_out"], ["C"], "add2"),
    ]

    initializers = [  # initializers
        helper.make_tensor("pow_in_2", TensorProto.FLOAT, [], [2]),
        helper.make_tensor("const_0", TensorProto.FLOAT, [], [0]),
        helper.make_tensor("gamma", TensorProto.FLOAT16, [4], [1, 2, 3, 4]),
        helper.make_tensor("beta", TensorProto.FLOAT16, [4], [1, 2, 3, 4]),
    ]

    graph = helper.make_graph(
        nodes,
        "LayerNormWithCast4",  # name
        [  # inputs
            helper.make_tensor_value_info("A", TensorProto.FLOAT16,
                                          [16, 32, 4]),
        ],
        [  # outputs
            helper.make_tensor_value_info("C", TensorProto.FLOAT16,
                                          [16, 32, 4]),
        ],
        initializers,
    )

    onnxdomain = OperatorSetIdProto()
    onnxdomain.version = 12
    # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
    onnxdomain.domain = ""
    msdomain = OperatorSetIdProto()
    msdomain.version = 1
    msdomain.domain = "com.microsoft"
    opsets = [onnxdomain, msdomain]

    model = helper.make_model(graph, opset_imports=opsets)
    onnx.save(model, model_name)
Exemple #11
0
 def singa_to_onnx_model(cls, inputs, y, model_name="sonnx"):
     """
     get onnx model from singa computational graph
     Args:
         inputs: a list of input tensors (each is initialized with a name)
     Args:
         y: a list of tensors, usually the outputs of the graph
     Args:
         model_name: name given to the generated ONNX graph
     Returns: 
         the onnx model
     """
     opset_id = OperatorSetIdProto()
     opset_id.version = cls._target_opset_version
     # Bug fix: the graph name was hard-coded to "sonnx", silently
     # ignoring the caller-supplied model_name argument.
     model = helper.make_model(cls.singa_to_onnx_graph(
                                   inputs, y, model_name=model_name),
                               producer_name='sonnx',
                               opset_imports=[opset_id])
     # print('The model is:\n{}'.format(model))
     checker.check_model(model)
     return model
Exemple #12
0
def merge_model(yolo_0, yolo_1, name_output, target_opset):
    """Merge two YOLOv3 ONNX models into a single model and save it.

    The nodes, initializers, and inputs of both graphs are concatenated;
    Identity nodes re-expose yolo_0's three outputs under ':01'-suffixed
    names (presumably consumed as inputs by yolo_1's nodes — verify
    against the producing pipeline).  Only yolo_1's outputs become model
    outputs, plus yolo_1's 'image_shape:01' input.

    Args:
        yolo_0: first ONNX ModelProto (backbone / detection heads).
        yolo_1: second ONNX ModelProto (post-processing).
        name_output: file path the merged model is saved to.
        target_opset: opset version declared on the merged model.
    """
    yolo_0_graph = yolo_0.graph
    yolo_1_graph = yolo_1.graph
    from keras2onnx.proto import helper, onnx_proto
    # Create a graph from its main components
    nodes = []
    nodes.extend(yolo_0_graph.node)
    nodes.extend(yolo_1_graph.node)
    nodes.append(helper.make_node('Identity', [yolo_0_graph.output[0].name], ['y1:01']))
    nodes.append(helper.make_node('Identity', [yolo_0_graph.output[1].name], ['y2:01']))
    nodes.append(helper.make_node('Identity', [yolo_0_graph.output[2].name], ['y3:01']))

    model_name = 'yolov3'
    inputs = []
    inputs.extend(yolo_0_graph.input)
    # From yolo_1, only the image-shape input is promoted to a model input;
    # its other inputs are assumed to be fed by yolo_0's outputs.
    for input_ in yolo_1_graph.input:
        if input_.name == 'image_shape:01':
            inputs.extend([input_])
    outputs = []
    outputs.extend(yolo_1_graph.output)
    initializers = []
    initializers.extend(yolo_0_graph.initializer)
    initializers.extend(yolo_1_graph.initializer)

    graph = helper.make_graph(nodes, model_name, inputs, outputs, initializers)

    # Create model
    imp = OperatorSetIdProto()
    imp.version = target_opset
    onnx_model = helper.make_model(graph, opset_imports=[imp])

    # Add extra information
    from keras2onnx.common import utils
    onnx_model.ir_version = onnx_proto.IR_VERSION
    onnx_model.producer_name = utils.get_producer()
    onnx_model.producer_version = utils.get_producer_version()
    onnx_model.domain = utils.get_domain()
    onnx_model.model_version = utils.get_model_version()
    onnx_model.doc_string = ''

    onnx.save_model(onnx_model, name_output)
Exemple #13
0
    def make_model(self, doc, optimize=True):
        """
        Create final ModelProto for onnx from internal graph.
        Args:
            doc: text for doc string of the model
            optimize: optimize graph via onnx
        """
        # (docstring previously documented a nonexistent `output_names`
        # parameter; removed.)
        graph = self.make_graph(doc)
        kwargs = {"producer_name": "tf2onnx", "producer_version": __version__}
        opsets = []
        imp = OperatorSetIdProto()
        imp.version = self._opset
        opsets.append(imp)
        # Extra (e.g. custom-domain) opsets follow the default ONNX one.
        if self._extra_opset is not None:
            opsets.extend(self._extra_opset)
        kwargs["opset_imports"] = opsets
        model_proto = helper.make_model(graph, **kwargs)

        # optimize the model proto
        if optimize:
            model_proto = optimizer.optimize(model_proto)
        return model_proto
Exemple #14
0
def infer_onnx_shape_dtype(node, opset_version, input_shapes, input_dtypes, initializers=None):
    """
    Infer shapes and dtypes for outputs of the node.
    Sometimes, shape inference needs the values of node's inputs, so initializers are used.

    Args:
        node: internal node whose outputs should be inferred.
        opset_version: ONNX opset to declare on the throwaway model.
        input_shapes: shapes of node.input, in the same order.
        input_dtypes: dtypes of node.input, in the same order.
        initializers: optional tensors supplying input values.
    Returns:
        (output_shapes, output_dtypes) lists aligned with node.output,
        or (None, None) if ONNX shape inference fails.
    """

    def build_onnx_op(node):
        """Build onnx op"""
        onnx_node = helper.make_node(node.type, node.input, node.output, name=node.name)
        # deal with attributes
        attr = []
        attr_graphs = node.get_body_graphs()
        if attr_graphs:
            # Body graphs (e.g. for control-flow ops) are deep-copied so
            # converting them to protos does not mutate the originals.
            for attr_name, sub_graph in attr_graphs.items():
                copied_sub_graph = copy.deepcopy(sub_graph)
                graph_proto = copied_sub_graph.make_graph("graph for " + node.name + " " + attr_name)
                attr.append(helper.make_attribute(attr_name, graph_proto))
        attr.extend([a for a in node.attr_onnx.values()])
        if attr:
            onnx_node.attribute.extend(attr)
        return onnx_node

    # Wrap the single node in a throwaway graph/model so onnx's
    # shape_inference can run on it in isolation.
    inputs = []
    outputs = []
    for inp, shape, dtype in zip(node.input, input_shapes, input_dtypes):
        inputs.append(utils.make_onnx_inputs_outputs(inp, dtype, shape))
    # Outputs start as UNDEFINED/unknown; inference fills them in.
    for output in node.output:
        outputs.append(utils.make_onnx_inputs_outputs(output, TensorProto.UNDEFINED, None))
    graph_proto = helper.make_graph([build_onnx_op(node)], "infer-graph", inputs, outputs, initializer=initializers)
    imp = OperatorSetIdProto()
    imp.version = opset_version
    model_proto = helper.make_model(graph_proto, opset_imports=[imp])

    inferred_model = None
    try:
        inferred_model = shape_inference.infer_shapes(model_proto)
    except Exception:  # pylint: disable=broad-except
        logger.warning(
            "ONNX Failed to infer shapes and dtypes for [%s, type: %s]",
            node.name, node.type, exc_info=1
        )
        return None, None

    # Collect inferred types/shapes keyed by output name.
    shapes = {}
    dtypes = {}
    for output in inferred_model.graph.output:
        tensor_type = output.type.tensor_type
        if tensor_type.HasField("elem_type"):
            dtypes[output.name] = tensor_type.elem_type
        else:
            dtypes[output.name] = TensorProto.UNDEFINED
        # 0 in shapes of onnx means unknown which is -1 in our convertor
        if tensor_type.HasField("shape"):
            shapes[output.name] = [
                dim.dim_value if dim.dim_value != 0 else utils.ONNX_UNKNOWN_DIMENSION for dim in tensor_type.shape.dim
            ]
        else:
            shapes[output.name] = None
    # Re-align results with node.output order, defaulting missing entries.
    output_shapes = []
    output_dtypes = []
    for output in node.output:
        if output in shapes:
            output_shapes.append(shapes[output])
        else:
            output_shapes.append(None)
        if output in dtypes:
            output_dtypes.append(dtypes[output])
        else:
            output_dtypes.append(TensorProto.UNDEFINED)
    return output_shapes, output_dtypes
Exemple #15
0
def make_opsetid(domain, version):  # type: (Text, int) -> OperatorSetIdProto
    """Build an OperatorSetIdProto for the given domain/version pair."""
    result = OperatorSetIdProto()
    result.version = version
    result.domain = domain
    return result
Exemple #16
0
# X and Y are tensor value infos defined earlier (outside this snippet).
# Clearing Y's shape leaves the model output rank/shape unspecified so
# shape inference / the runtime determines it.
Y.type.tensor_type.shape.Clear()

# NonZero -> Transpose -> GatherND: selects the non-zero elements of
# 'input' (NonZero yields indices as [rank, n]; Transpose makes them
# [n, rank] as GatherND expects).
nonzero = helper.make_node('NonZero', ['input'], ['nonzero'], name='nonzero')
transpose = helper.make_node('Transpose', ['nonzero'], ['transpose'], name='transpose', perm=[1,0])
gathernd = helper.make_node('GatherND', ['input', 'transpose'], ['output'], name='gathernd')

# Create the graph (GraphProto)
graph_def = helper.make_graph(
    [nonzero, transpose, gathernd],
    'nonzero_shape_setter_model',
    [X],
    [Y]
)

opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
onnxdomain.domain = "" # Empty string implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)

msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = 'com.microsoft'

opsets.append(msdomain)
kwargs={}
kwargs['opset_imports'] = opsets

# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name='onnx-example', **kwargs)
onnx.save(model_def, 'nonzero_shape_setter.onnx')
Exemple #17
0
    def make_model(self, doc, input_names, output_names, optimize=True):
        """
        Create final ModelProto for onnx from internal graph.
        Args:
            optimize: optimize graph via onnx
            doc: text for doc string of the model
            input_names: list of model inputs
            output_names: list of model outputs
        Raises:
            ValueError: if an output dtype is unknown or an initializer's
                recorded shape disagrees with its dims.
        """
        # Fix: removed three leftover debug print(...) statements that
        # polluted stdout on every call; behavior is otherwise unchanged.

        # create output_tensor_values
        output_tensor_values = []
        for name in output_names:
            dtype = self.get_dtype(name)
            if not dtype:
                raise ValueError("cannot found the output dtype for " + name)
            v = helper.make_tensor_value_info(name, dtype, self.get_shape(name))
            output_tensor_values.append(v)

        # update attributes: keep only attributes valid in ONNX
        ops = []
        all_inputs = set()
        for op in self.get_nodes():
            all_inputs |= set(op.input)
            onnx_op = op.op
            del onnx_op.attribute[:]
            attr = [a for a in op.attr.values()
                    if a.name in utils.ONNX_VALID_ATTRIBUTES]
            if attr:
                onnx_op.attribute.extend(attr)
            ops.append(onnx_op)

        # create input_tensor_values, initializers
        # (initializers not consumed by any node are dropped)
        initializers = [i for i in list(self._initializers.values()) if i.name in all_inputs]
        input_with_initializers = []
        for initializer in initializers:
            shape = self.get_shape(initializer.name)
            if shape and list(shape) != initializer.dims:
                raise ValueError("initializer shape is inconsistent")
            val = helper.make_tensor_value_info(initializer.name, initializer.data_type, initializer.dims)
            input_with_initializers.append(val)
        input_with_initializers.extend(self.model_inputs)

        # create model proto
        graph = helper.make_graph(ops, "tf2onnx",
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  doc_string=doc)

        kwargs = {"producer_name": "zjj",
                  "producer_version": __version__}
        opsets = []
        imp = OperatorSetIdProto()
        imp.version = self._opset
        opsets.append(imp)
        if self._extra_opset is not None:
            opsets.extend(self._extra_opset)
        kwargs["opset_imports"] = opsets

        model_proto = helper.make_model(graph, **kwargs)
        # optimize the model proto
        if optimize:
            model_proto = optimizer.optimize(model_proto)
        return model_proto
Exemple #18
0
def make_opsetid(domain: Text, version: int) -> OperatorSetIdProto:
    """Return an OperatorSetIdProto populated with *domain* and *version*."""
    opset = OperatorSetIdProto()
    opset.version = version
    opset.domain = domain
    return opset
Exemple #19
0
def make_opsetid(domain, version):
    """Create an operator-set id proto for the given domain and version."""
    opset_proto = OperatorSetIdProto()
    opset_proto.domain, opset_proto.version = domain, version
    return opset_proto
Exemple #20
0
 def _get_opsets(self):
     """Return the opset_imports list for the configured opset version."""
     opset = OperatorSetIdProto()
     opset.version = self._opset_version
     return [opset]
Exemple #21
0
    def make_model(self, doc, output_names, optimize=True):
        """
        Create final ModelProto for onnx from internal graph.
        Args:
            optimize: optimize graph via onnx
            doc: text for doc string of the model
            output_names: list of model outputs
        Raises:
            ValueError: if an output dtype is unknown or an initializer's
                recorded shape disagrees with its dims.
        """
        self.update_proto()

        # TODO: we'd want to do something like this so that transpose optimizer is active
        # for  all (unit) tests
        # if optimize:
        #    from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
        #    optimizer = TransposeOptimizer(self, False)
        #    optimizer.optimize()

        # create output_tensor_values
        output_tensor_values = []
        for name in output_names:
            # overridden dtypes win over the graph-recorded ones
            if name in self._dtypes_override:
                dtype = self._dtypes_override[name]
            else:
                dtype = self.get_dtype(name)
            if not dtype:
                raise ValueError("cannot found the output dtype for " + name)
            v = helper.make_tensor_value_info(
                name, dtype, utils.make_onnx_shape(self.get_shape(name)))
            output_tensor_values.append(v)

        # update attributes
        ops = []
        all_inputs = set()
        for op in self.get_nodes():
            all_inputs |= set(op.input)
            onnx_op = op.op
            ops.append(onnx_op)

        # create input_tensor_values, initializers
        # if initializer is not used as input by any node, then it will be ignored
        initializers = [
            i for i in list(self._initializers.values())
            if i.name in all_inputs
        ]
        input_with_initializers = []
        for initializer in initializers:
            shape = self.get_shape(initializer.name)
            if shape and list(shape) != initializer.dims:
                raise ValueError("initializer shape is inconsistent for " +
                                 initializer.name)
            val = helper.make_tensor_value_info(
                initializer.name, initializer.data_type,
                utils.make_onnx_shape(initializer.dims))
            input_with_initializers.append(val)

        input_with_initializers.extend(list(self._model_inputs.values()))

        # create model proto
        graph = helper.make_graph(ops,
                                  "tf2onnx",
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  doc_string=doc)

        kwargs = {"producer_name": "tf2onnx", "producer_version": __version__}
        opsets = []
        imp = OperatorSetIdProto()
        imp.version = self._opset
        opsets.append(imp)
        # extra (custom-domain) opsets are appended after the default one
        if self._extra_opset is not None:
            opsets.extend(self._extra_opset)
        kwargs["opset_imports"] = opsets
        model_proto = helper.make_model(graph, **kwargs)

        # optimize the model proto
        if optimize:
            model_proto = optimizer.optimize(model_proto)
        return model_proto
Exemple #22
0
def InferOnnxShapeDtype(node,
                        opset_version,
                        input_shapes,
                        input_dtypes,
                        initializers=None):
    """
    Infer shapes and dtypes for outputs of the node.
    Sometimes, shape inference needs the values of node's inputs, so initializers are used.

    Args:
        node: internal node whose outputs should be inferred.
        opset_version: ONNX opset to declare on the throwaway model.
        input_shapes: shapes of node.input_tensor_names, same order.
        input_dtypes: dtypes of node.input_tensor_names, same order.
        initializers: optional tensors supplying input values.
    Returns:
        (output_shapes, output_dtypes) aligned with
        node.output_tensor_names, or (None, None) on inference failure.
    """
    def BuildOnnxOp(node):
        """Build onnx op"""
        onnx_node = helper.make_node(
            node.op_type,
            node.input_tensor_names,
            node.output_tensor_names,
            name=node.name,
        )

        # # deal with attributes
        # attr = []
        # attr_graphs = node.get_body_graphs()
        # if attr_graphs:
        #     for attr_name, sub_graph in attr_graphs.items():
        #         copied_sub_graph = copy.deepcopy(sub_graph)
        #         graph_proto = copied_sub_graph.MakeGraph(
        #             "graph for " + node.name + " " + attr_name
        #         )
        #         attr.append(helper.make_attribute(attr_name, graph_proto))
        # attr.extend(node.attrs_onnx.values())
        # if attr:
        #     onnx_node.attribute.extend(attr)
        return onnx_node

    # Wrap the single node in a throwaway graph/model so onnx's
    # shape_inference can run on it in isolation.
    inputs = []
    outputs = []
    for inp, shape, dtype in zip(node.input_tensor_names, input_shapes,
                                 input_dtypes):
        inputs.append(util.MakeOnnxInputsOutputs(inp, dtype, shape))
    # Outputs start as UNDEFINED/unknown; inference fills them in.
    for output in node.output_tensor_names:
        outputs.append(
            util.MakeOnnxInputsOutputs(output, TensorProto.UNDEFINED, None))
    graph_proto = helper.make_graph([BuildOnnxOp(node)],
                                    "infer-graph",
                                    inputs,
                                    outputs,
                                    initializer=initializers)
    imp = OperatorSetIdProto()
    imp.version = opset_version
    model_proto = helper.make_model(graph_proto, opset_imports=[imp])

    inferred_model = None
    try:
        inferred_model = shape_inference.infer_shapes(model_proto)
    except Exception:
        # NOTE(review): swallows every inference error with a bare
        # 'error' print — consider logging the exception details.
        print('error')
        return None, None
    # Collect inferred types/shapes keyed by output name.
    shapes = {}
    dtypes = {}
    for output in inferred_model.graph.output:
        tensor_type = output.type.tensor_type
        if tensor_type.HasField("elem_type"):
            dtypes[output.name] = tensor_type.elem_type
        else:
            dtypes[output.name] = TensorProto.UNDEFINED
        # 0 in shapes of onnx means unknown which is -1 in our convertor
        #fixme:how to do if the dim is -1 originally
        if tensor_type.HasField("shape"):
            shapes[output.name] = [
                dim.dim_value
                if dim.dim_value != 0 else util.ONNX_UNKNOWN_DIMENSION
                for dim in tensor_type.shape.dim
            ]
        else:
            shapes[output.name] = None
    # Re-align results with node output order, defaulting missing entries.
    output_shapes = []
    output_dtypes = []
    for output in node.output_tensor_names:
        if output in shapes:
            output_shapes.append(shapes[output])
        else:
            output_shapes.append(None)
        if output in dtypes:
            output_dtypes.append(dtypes[output])
        else:
            output_dtypes.append(TensorProto.UNDEFINED)
    return output_shapes, output_dtypes
Exemple #23
0
    def make_model(self, doc, input_names, output_names, optimize=True):
        """
        Create final ModelProto for onnx from internal graph.
        Args:
            optimize: optimize graph via onnx
            doc: text for doc string of the model
            input_names: list of model inputs
            output_names: list of model outputs
        """

        # create output_tensor_values
        # Outputs with no resolvable node or dtype are silently skipped.
        output_tensor_values = []
        for name in output_names:
            op = self.get_node_by_name(name)
            if op:
                dtype = op.dtype
                if not dtype:
                    continue
                v = helper.make_tensor_value_info(name, dtype, self.get_shape(name))
                output_tensor_values.append(v)

        # update attributes: keep only attributes valid in ONNX
        ops = []
        for op in self.get_nodes():
            onnx_op = op.op
            del onnx_op.attribute[:]
            attr = []
            for a in op.attr.values():
                if a.name in utils.ONNX_VALID_ATTRIBUTES:
                    attr.append(a)
            if attr:
                onnx_op.attribute.extend(attr)
            ops.append(onnx_op)

        # create input_tensor_values, initializers
        initializers = list(self._initializers.values())
        input_with_initializers = []
        for initializer in initializers:
            shape = self.get_shape(initializer.name)
            if shape and list(shape) != initializer.dims:
                raise ValueError("initializer shape is inconsistent")
            val = helper.make_tensor_value_info(initializer.name, initializer.data_type, initializer.dims)
            input_with_initializers.append(val)
        input_with_initializers.extend(self.model_inputs)

        # create model proto
        graph = helper.make_graph(ops, "tf2onnx",
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  doc_string=doc)

        kwargs = {"producer_name": "tf2onnx",
                  "producer_version": __version__}
        if self._opset > 0:
            imp = OperatorSetIdProto()
            imp.version = self._opset
            # NOTE(review): modern onnx.helper.make_model expects
            # "opset_imports" (a list), not "opset" — this likely targets
            # a very old onnx release; confirm against the pinned version.
            kwargs["opset"] = imp

        model_proto = helper.make_model(graph, **kwargs)

        # optimize the model proto
        # (legacy onnx optimizer API: operates on serialized bytes)
        if optimize:
            optimized_model = optimizer.optimize(model_proto.SerializeToString(),
                                                 ["fuse_consecutive_transposes",
                                                  "fuse_transpose_into_gemm",
                                                  "eliminate_nop_transpose"])
            model_proto = ModelProto()
            model_proto.ParseFromString(optimized_model)
        return model_proto
def _make_module(in_shape, op_version: int):
    """Build a hard-swish-like ONNX model:
    clip(x * 100 + 20, 0, 6) * (1/6) + 0.

    Args:
        in_shape: shape for both the graph input and output.
        op_version: ONNX opset version declared on the model.
    Returns:
        An onnx ModelProto.
    """
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           in_shape)
    initializers = []

    # x * 100  (earlier comment said "x*1" but the constant is 100)
    Mul_9 = helper.make_node('Mul', ['input', 'Constant_0'], ['Mul_9'])
    Constant_0 = helper.make_tensor('Constant_0',
                                    TensorProto.FLOAT,
                                    dims=[1],
                                    vals=[100])
    initializers.append(Constant_0)

    # (x * 100) + 20  (earlier comment said "+ 3" but the constant is 20)
    Add_14 = helper.make_node('Add', ['Mul_9', 'Constant_1'], ['tmp_4'])
    Constant_1 = helper.make_tensor('Constant_1',
                                    TensorProto.FLOAT,
                                    dims=[1],
                                    vals=[20])
    initializers.append(Constant_1)

    # clip((x * 100) + 20, 0, 6) — min/max are scalar initializers
    Clip_2 = helper.make_node('Clip', ['tmp_4', 'Constant_2', 'Constant_3'],
                              ['relu6_2.tmp_0'])
    Constant_2 = helper.make_tensor('Constant_2',
                                    TensorProto.FLOAT,
                                    dims=[],
                                    vals=[0])
    Constant_3 = helper.make_tensor('Constant_3',
                                    TensorProto.FLOAT,
                                    dims=[],
                                    vals=[6])
    initializers.extend([Constant_2, Constant_3])

    # clip((x * 100) + 20, 0, 6) * (1/6)
    Mul_10 = helper.make_node('Mul', ['relu6_2.tmp_0', 'Constant_4'],
                              ['Mul_10'])
    Constant_4 = helper.make_tensor('Constant_4',
                                    TensorProto.FLOAT,
                                    dims=[1],
                                    vals=[1 / 6])
    initializers.append(Constant_4)

    # clip((x * 100) + 20, 0, 6) * (1/6) + 0
    Add_15 = helper.make_node('Add', ['Mul_10', 'Constant_5'], ['output'])
    Constant_5 = helper.make_tensor('Constant_5',
                                    TensorProto.FLOAT,
                                    dims=[1],
                                    vals=[0])
    initializers.append(Constant_5)

    graph_def = helper.make_graph([Mul_9, Add_14, Clip_2, Mul_10, Add_15],
                                  'test-model', [input], [output],
                                  initializer=initializers)
    op = OperatorSetIdProto()
    op.version = op_version
    model_def = helper.make_model(graph_def,
                                  producer_name='kendryte',
                                  opset_imports=[op])

    return model_def
Exemple #25
0
def make_opsetid(domain, version):  # type: (Text, int) -> OperatorSetIdProto
    """Construct an OperatorSetIdProto from a domain and a version."""
    opset_id = OperatorSetIdProto()
    opset_id.version = version
    opset_id.domain = domain
    return opset_id