Example #1
def expand_out_dim(
    model: ModelProto,
    dim_idx: int,
    inplace: Optional[bool] = False,
) -> ModelProto:
    """Inserts an extra dimension with extent 1 to each output in the graph.

    Inserts an Unsqueeze node for each output. It can be used as a utility before merging graphs,
    for example when the second one expects a batch dimension.

    Arguments:
        model (ModelProto): Model
        dim_idx (int): Index of the dimension to be inserted.
                       A negative value means counting dimensions from the back.
        inplace (bool): If True, mutates the model directly.
                        Otherwise, a copy will be created
    """
    if type(model) is not ModelProto:
        raise ValueError("model argument is not an ONNX model")

    if not inplace:
        m = ModelProto()
        m.CopyFrom(model)
        model = m

    expand_out_dim_graph(
        model.graph,
        dim_idx,
        inplace=True  # No need to create a copy, since it's a new model
    )
    return model
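A minimal usage sketch for the function above (the file name "model.onnx" is a hypothetical placeholder):

import onnx
from onnx import compose

model = onnx.load("model.onnx")  # hypothetical path
# Prepend a batch dimension of extent 1 to every graph output;
# the original model is left untouched since inplace defaults to False.
expanded = compose.expand_out_dim(model, dim_idx=0)
for out in expanded.graph.output:
    print(out.name, [d.dim_value for d in out.type.tensor_type.shape.dim])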
Example #2
    def test_expand_out_dim(self) -> None:
        '''
        Tests expanding output dimensions. The resulting graph should have the same output names,
        but with one more dimension at the specified index.
        '''
        m1 = _load_model(m1_def)

        def _check_model(m1: ModelProto, m2: ModelProto, dim_idx: int) -> None:
            for out_g2, out_g1 in zip(m2.graph.output, m1.graph.output):
                self.assertEqual(out_g2.name, out_g1.name)
                self.assertEqual(out_g2.type.tensor_type.elem_type,
                                 out_g1.type.tensor_type.elem_type)
                expected_out_shape = _get_shape(out_g1)
                expected_out_shape.insert(dim_idx, 1)
                self.assertEqual(_get_shape(out_g2), expected_out_shape)

        for dim_idx in [0, 2, -1, -3]:
            m2 = compose.expand_out_dim(m1, dim_idx)
            _check_model(m1, m2, dim_idx)

        # Test inplace
        m2 = ModelProto()
        m2.CopyFrom(m1)
        dim_idx = 0
        compose.expand_out_dim(m2, dim_idx, inplace=True)
        _check_model(m1, m2, dim_idx)
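This test relies on helpers (`_load_model`, `m1_def`, `_get_shape`) that are not shown in the snippet. A plausible sketch of `_get_shape`, assuming the test models carry only static output shapes:

from typing import List
from onnx import ValueInfoProto

def _get_shape(value_info: ValueInfoProto) -> List[int]:
    # Collect the static extent of each dimension; symbolic dims read as 0.
    return [d.dim_value for d in value_info.type.tensor_type.shape.dim]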
Example #3
    def __init__(self, model: onnx.ModelProto):
        copy_model = ModelProto()
        copy_model.CopyFrom(model)
        self.model = copy_model

        self.input_tensors = get_input_tensors(model)
        self.value_info = {
            vi.name: vi
            for vi in list(self.model.graph.value_info) +
            list(self.model.graph.input) + list(self.model.graph.output)
        }

        # raise Exception if input_tensor is not defined in model.graph.input
        for input_tensor in self.input_tensors:
            if input_tensor not in [
                    input.name for input in self.model.graph.input
            ]:
                raise Exception(
                    'input_tensor: %s is not defined in model.graph.input' %
                    input_tensor)
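`get_input_tensors` is not defined in this snippet. A hypothetical implementation consistent with how it is used (every returned name is expected to appear in model.graph.input) might collect the tensors consumed by nodes but produced nowhere inside the graph:

from typing import List
import onnx

def get_input_tensors(model: onnx.ModelProto) -> List[str]:
    # Names produced inside the graph: node outputs plus initializers.
    produced = {out for node in model.graph.node for out in node.output}
    initializers = {init.name for init in model.graph.initializer}
    # Anything consumed but not produced must be fed from outside.
    return sorted({
        name
        for node in model.graph.node
        for name in node.input
        if name and name not in produced and name not in initializers
    })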
Example #4
    def _test_add_prefix(self,
                         rename_nodes: bool = False,
                         rename_edges: bool = False,
                         rename_inputs: bool = False,
                         rename_outputs: bool = False,
                         rename_initializers: bool = False,
                         rename_value_infos: bool = False,
                         inplace: bool = False) -> None:
        m1 = _load_model(m1_def)

        prefix = 'pre/'

        if inplace:
            m2 = ModelProto()
            m2.CopyFrom(m1)
            compose.add_prefix(m2,
                               prefix,
                               rename_nodes=rename_nodes,
                               rename_edges=rename_edges,
                               rename_inputs=rename_inputs,
                               rename_outputs=rename_outputs,
                               rename_initializers=rename_initializers,
                               rename_value_infos=rename_value_infos,
                               inplace=True)
        else:
            m2 = compose.add_prefix(m1,
                                    prefix,
                                    rename_nodes=rename_nodes,
                                    rename_edges=rename_edges,
                                    rename_inputs=rename_inputs,
                                    rename_outputs=rename_outputs,
                                    rename_initializers=rename_initializers,
                                    rename_value_infos=rename_value_infos)
        g_in = m1.graph
        g_out = m2.graph

        if rename_edges or rename_inputs or rename_outputs or rename_initializers or rename_value_infos:
            name_mapping = {}

            # Rename inputs/outputs/edges. Propagate name changes from and to edges
            if rename_edges:
                for n in g_in.node:
                    for e in n.input:
                        name_mapping[e] = _prefixed(prefix, e)
                    for e in n.output:
                        name_mapping[e] = _prefixed(prefix, e)
            else:
                if rename_inputs:
                    for elem in g_in.input:
                        name_mapping[elem.name] = _prefixed(prefix, elem.name)
                if rename_outputs:
                    for elem in g_in.output:
                        name_mapping[elem.name] = _prefixed(prefix, elem.name)

            if rename_initializers:
                for init in g_in.initializer:
                    name_mapping[init.name] = _prefixed(prefix, init.name)
                for sparse_init in g_in.sparse_initializer:
                    name_mapping[sparse_init.values.name] = \
                        _prefixed(prefix, sparse_init.values.name)
                    name_mapping[sparse_init.indices.name] = \
                        _prefixed(prefix, sparse_init.indices.name)

            if rename_value_infos:
                for value_info in g_in.output:
                    name_mapping[value_info.name] = _prefixed(
                        prefix, value_info.name)

            for n1, n0 in zip(g_out.node, g_in.node):
                for e1, e0 in zip(n1.input, n0.input):
                    self.assertEqual(name_mapping.get(e0, e0), e1)
                for e1, e0 in zip(n1.output, n0.output):
                    self.assertEqual(name_mapping.get(e0, e0), e1)
            for i1, i0 in zip(g_out.input, g_in.input):
                self.assertEqual(name_mapping.get(i0.name, i0.name), i1.name)
            for o1, o0 in zip(g_out.output, g_in.output):
                self.assertEqual(name_mapping.get(o0.name, o0.name), o1.name)

            for init1, init0 in zip(g_out.initializer, g_in.initializer):
                self.assertEqual(name_mapping.get(init0.name, init0.name),
                                 init1.name)

            for sparse_init1, sparse_init0 in zip(g_out.sparse_initializer,
                                                  g_in.sparse_initializer):
                self.assertEqual(
                    name_mapping.get(sparse_init0.values.name,
                                     sparse_init0.values.name),
                    sparse_init1.values.name)
                self.assertEqual(
                    name_mapping.get(sparse_init0.indices.name,
                                     sparse_init0.indices.name),
                    sparse_init1.indices.name)

            for vi1, vi0 in zip(g_out.value_info, g_in.value_info):
                self.assertEqual(name_mapping.get(vi0.name, vi0.name),
                                 vi1.name)

            if rename_nodes:
                for n1, n0 in zip(g_out.node, g_in.node):
                    self.assertEqual(_prefixed(prefix, n0.name), n1.name)
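`_prefixed` is another helper not shown in the snippet. Given that `add_prefix` leaves empty names untouched, a matching sketch would be:

def _prefixed(prefix: str, name: str) -> str:
    # Mirror add_prefix semantics: empty names stay empty.
    return prefix + name if len(name) > 0 else name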
Example #5
import os

import numpy as np
import onnx
import onnxruntime
from onnx import ModelProto, ValueInfoProto

# 2. Remove their initializers
new_initializers = [
    init
    for init in model.graph.initializer
    if init.name not in nodes_to_remove and init.name not in inputs_to_remove
]

# 3. Remove nodes
new_nodes = [n for n in model.graph.node if n.name not in nodes_to_remove]


# Get output tensor types (to create ValueInfo entries for the new outputs)
# by running the model on dummy input
temp_model = ModelProto()
temp_model.CopyFrom(model)
for i in new_output_names:
    op = ValueInfoProto()
    op.name = i
    temp_model.graph.output.append(op)
onnx.save(temp_model, "__temp.onnx")
sess = onnxruntime.InferenceSession("__temp.onnx")
sess_inps = sess.get_inputs()
input_dict = {}
for i in sess_inps:
    shape = fix_shape(i.shape, batch_size)
    typ = get_np_type_from_onnxruntime(i.type)
    input_dict[i.name] = np.random.rand(*shape).astype(typ)

output_tensors = sess.run(new_output_names, input_dict)
if os.path.exists("__temp.onnx"):
Example #6
def add_prefix(
    model: ModelProto,
    prefix: Text,
    rename_nodes: Optional[bool] = True,
    rename_edges: Optional[bool] = True,
    rename_inputs: Optional[bool] = True,
    rename_outputs: Optional[bool] = True,
    rename_initializers: Optional[bool] = True,
    rename_value_infos: Optional[bool] = True,
    rename_functions: Optional[bool] = True,
    inplace: Optional[bool] = False,
) -> ModelProto:
    """Adds a prefix to names of elements in a graph: nodes, edges, inputs, outputs,
    initializers, sparse initializer, value infos, and local functions.

    It can be used as a utility before merging graphs that have overlapping names.
    Empty names are not prefixed.

    Arguments:
        model (ModelProto): Model
        prefix (Text): Prefix to be added to each name in the graph
        rename_nodes (bool): Whether to prefix node names
        rename_edges (bool): Whether to prefix node edge names
        rename_inputs (bool): Whether to prefix input names
        rename_outputs (bool): Whether to prefix output names
        rename_initializers (bool): Whether to prefix initializer and sparse initializer names
        rename_value_infos (bool): Whether to prefix value info names
        rename_functions (bool): Whether to prefix local function names
        inplace (bool): If True, mutates the model directly.
                        Otherwise, a copy will be created
    """
    if type(model) is not ModelProto:
        raise ValueError("model argument is not an ONNX model")

    if not inplace:
        m = ModelProto()
        m.CopyFrom(model)
        model = m

    add_prefix_graph(
        model.graph,
        prefix,
        rename_nodes=rename_nodes,
        rename_edges=rename_edges,
        rename_inputs=rename_inputs,
        rename_outputs=rename_outputs,
        rename_initializers=rename_initializers,
        rename_value_infos=rename_value_infos,
        inplace=True  # No need to create a copy, since it's a new model
    )

    if rename_functions:
        f_name_map = {}
        for f in model.functions:
            new_f_name = prefix + f.name
            f_name_map[f.name] = new_f_name
            f.name = new_f_name
        # Adjust references to local functions in other local function
        # definitions
        for f in model.functions:
            for n in f.node:
                if n.op_type in f_name_map:
                    n.op_type = f_name_map[n.op_type]
        # Adjust references to local functions in the graph
        for n in model.graph.node:
            if n.op_type in f_name_map:
                n.op_type = f_name_map[n.op_type]

    return model
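A short usage sketch for the function above (the file name is illustrative):

import onnx
from onnx import compose

model = onnx.load("model.onnx")  # hypothetical path
# Returns a prefixed copy; the original model is left untouched.
prefixed = compose.add_prefix(model, prefix="m1/")
print([n.name for n in prefixed.graph.node][:3])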
Example #7
def merge_models(m1: ModelProto,
                 m2: ModelProto,
                 io_map: List[Tuple[Text, Text]],
                 inputs: Optional[List[Text]] = None,
                 outputs: Optional[List[Text]] = None,
                 prefix1: Optional[Text] = None,
                 prefix2: Optional[Text] = None,
                 name: Optional[Text] = None,
                 doc_string: Optional[Text] = None,
                 producer_name: Optional[Text] = 'onnx.compose.merge_models',
                 producer_version: Optional[Text] = "1.0",
                 domain: Optional[Text] = "",
                 model_version: Optional[int] = 1) -> ModelProto:
    """Combines two ONNX models into a single one.

    The combined model is defined by connecting the specified set of outputs/inputs.
    Those inputs/outputs not specified in the io_map argument will remain as
    inputs/outputs of the combined model.

    Both models should have the same IR version and the same operator sets imported.

    Arguments:
        m1 (ModelProto): First model
        m2 (ModelProto): Second model
        io_map (list of pairs of string): The pairs of names [(out0, in0), (out1, in1), ...]
                                          representing outputs of the first graph and inputs of the second
                                          to be connected
        inputs (list of string): Optional list of inputs to be included in the combined graph
                                 By default, all inputs not present in the ``io_map`` argument will be
                                 included in the combined model
        outputs (list of string): Optional list of outputs to be included in the combined graph
                                  By default, all outputs not present in the ``io_map`` argument will be
                                  included in the combined model
        prefix1 (string): Optional prefix to be added to all names in m1
        prefix2 (string): Optional prefix to be added to all names in m2
        name (string): Optional name for the combined graph
                       By default, the name is g1.name and g2.name concatenated with an underscore delimiter
        doc_string (string): Optional docstring for the combined graph
                             If not provided, a default docstring with the concatenation of g1 and g2 docstrings is used
        producer_name (string): Optional producer name for the combined model. Default: 'onnx.compose'
        producer_version (string): Optional producer version for the combined model. Default: "1.0"
        domain (string): Optional domain of the combined model. Default: ""
        model_version (int): Optional version of the graph encoded. Default: 1
    """
    if type(m1) is not ModelProto:
        raise ValueError("m1 argument is not an ONNX model")
    if type(m2) is not ModelProto:
        raise ValueError("m2 argument is not an ONNX model")

    if m1.ir_version != m2.ir_version:
        raise ValueError(
            f"IR version mismatch {m1.ir_version} != {m2.ir_version}."
            " Both models should have have the same IR version")
    ir_version = m1.ir_version

    opset_import_map: MutableMapping[Text, int] = {}
    opset_imports = list(m1.opset_import) + list(m2.opset_import)

    for entry in opset_imports:
        if entry.domain in opset_import_map:
            found_version = opset_import_map[entry.domain]
            if entry.version != found_version:
                raise ValueError(
                    "Can't merge two models with different operator set ids for a given domain. "
                    f"Got: {m1.opset_import} and {m2.opset_import}")
        else:
            opset_import_map[entry.domain] = entry.version

    # Prefixing names in the graph if requested, adjusting io_map accordingly
    if prefix1 or prefix2:
        if prefix1:
            m1_copy = ModelProto()
            m1_copy.CopyFrom(m1)
            m1 = m1_copy
            m1 = add_prefix(m1, prefix=prefix1)
        if prefix2:
            m2_copy = ModelProto()
            m2_copy.CopyFrom(m2)
            m2 = m2_copy
            m2 = add_prefix(m2, prefix=prefix2)
        io_map = [(prefix1 + io[0] if prefix1 else io[0],
                   prefix2 + io[1] if prefix2 else io[1]) for io in io_map]

    graph = merge_graphs(m1.graph,
                         m2.graph,
                         io_map,
                         inputs=inputs,
                         outputs=outputs,
                         name=name,
                         doc_string=doc_string)
    model = helper.make_model(graph,
                              producer_name=producer_name,
                              producer_version=producer_version,
                              domain=domain,
                              model_version=model_version,
                              opset_imports=opset_imports,
                              ir_version=ir_version)

    # Merging model metadata props
    model_props = {}
    for meta_entry in m1.metadata_props:
        model_props[meta_entry.key] = meta_entry.value
    for meta_entry in m2.metadata_props:
        if meta_entry.key in model_props:
            value = model_props[meta_entry.key]
            if value != meta_entry.value:
                raise ValueError(
                    "Can't merge models with different values for the same model metadata property."
                    f" Found: property = {meta_entry.key}, with values {value} and {meta_entry.value}."
                )
        else:
            model_props[meta_entry.key] = meta_entry.value
    helper.set_model_props(model, model_props)

    # Merging functions
    function_overlap = list(
        set([f.name
             for f in m1.functions]) & set([f.name for f in m2.functions]))
    if function_overlap:
        raise ValueError(
            "Can't merge models with overlapping local function names."
            " Found in both graphs: " + ', '.join(function_overlap))
    model.functions.MergeFrom(m1.functions)
    model.functions.MergeFrom(m2.functions)

    checker.check_model(model)
    return model
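A usage sketch connecting one model's output to another's input (file and tensor names are illustrative and must exist in the actual graphs):

import onnx
from onnx import compose

m1 = onnx.load("first.onnx")   # hypothetical paths
m2 = onnx.load("second.onnx")
# Feed m1's output 'features' into m2's input 'input'. Prefixes avoid name
# collisions; io_map names are given unprefixed, since merge_models adjusts
# them after prefixing (see the io_map rewrite in the source above).
combined = compose.merge_models(
    m1, m2,
    io_map=[("features", "input")],
    prefix1="m1/", prefix2="m2/",
)
onnx.save(combined, "combined.onnx")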