Example #1
from onnx import ValueInfoProto

def _copy_value_info_proto(new_name, obj):
    """Returns a copy of a ValueInfoProto under a new name."""
    value_info = ValueInfoProto()
    value_info.name = new_name
    value_info.type.CopyFrom(obj.type)  # pylint: disable=E1101
    if obj.doc_string:  # doc_string lives on ValueInfoProto, not TypeProto
        value_info.doc_string = obj.doc_string
    return value_info
Example #2
from onnx import ValueInfoProto
from onnx.helper import make_graph

def _rename_graph_input(graph, old_name, new_name):
    """
    Renames an input and adds an *Identity* node
    to connect the dots.

    :param graph: ONNX graph
    :param old_name: name of the input to rename
    :param new_name: new name for the input
    :return: modified graph
    """
    inputs = []
    for i in graph.input:
        if old_name != i.name:
            inputs.append(i)
        else:
            value_info = ValueInfoProto()
            value_info.name = new_name
            value_info.type.CopyFrom(i.type)
            if i.doc_string:  # doc_string lives on ValueInfoProto, not TypeProto
                value_info.doc_string = i.doc_string
            inputs.append(value_info)
    nodes = list(graph.node)
    # _make_node is assumed to wrap onnx.helper.make_node; the Identity node
    # feeds the renamed input back under its old name so consumers still work
    nodes.append(_make_node('Identity', [new_name], [old_name]))
    new_graph = make_graph(nodes, graph.name, inputs, graph.output,
                           graph.initializer)
    new_graph.value_info.extend(graph.value_info)
    return new_graph
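
A minimal usage sketch for the helper above, assuming _make_node simply wraps
onnx.helper.make_node (the snippet does not show its definition):

from onnx import TensorProto
from onnx.helper import make_graph, make_node, make_tensor_value_info

_make_node = make_node  # assumption: thin wrapper around make_node

X = make_tensor_value_info('X', TensorProto.FLOAT, [1, 2])
Y = make_tensor_value_info('Y', TensorProto.FLOAT, [1, 2])
graph = make_graph([make_node('Relu', ['X'], ['Y'])], 'demo', [X], [Y])

renamed = _rename_graph_input(graph, 'X', 'X0')
print([i.name for i in renamed.input])                  # ['X0']
print(renamed.node[-1].input, renamed.node[-1].output)  # ['X0'] ['X']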
Example #3
from onnx import ValueInfoProto
from onnx.helper import make_graph

def _rename_graph_output(graph, old_name, new_name):
    """
    Renames an output and adds an *Identity* node
    to connect the dots.

    @param      graph       ONNX graph
    @param      old_name    name of the output to rename
    @param      new_name    new name for the output
    @return                 modified graph
    """
    outputs = []
    for o in graph.output:
        if old_name != o.name:
            outputs.append(o)
        else:
            value_info = ValueInfoProto()
            value_info.name = new_name
            value_info.type.CopyFrom(o.type)  # pylint: disable=E1101
            if o.doc_string:  # doc_string lives on ValueInfoProto, not TypeProto
                value_info.doc_string = o.doc_string
            outputs.append(value_info)
    nodes = list(graph.node)
    nodes.append(_make_node('Identity', [old_name], [new_name]))
    new_graph = make_graph(nodes, graph.name, graph.input, outputs,
                           graph.initializer)
    new_graph.value_info.extend(graph.value_info)  # pylint: disable=E1101
    return new_graph
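
Note that the *Identity* node flips direction between the two helpers: when
renaming an input it consumes the new name and produces the old one, so every
existing consumer keeps resolving; when renaming an output it consumes the old
name and produces the new one, so the producing node is left untouched. A quick
check, reusing the tiny Relu graph from the sketch above:

renamed_out = _rename_graph_output(graph, 'Y', 'Y0')
print([o.name for o in renamed_out.output])                     # ['Y0']
print(renamed_out.node[-1].input, renamed_out.node[-1].output)  # ['Y'] ['Y0']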
Example #4
import numpy as np
import onnx
import onnxruntime as ort
from onnx import ValueInfoProto
from onnx.helper import make_graph

# assumed helpers from the surrounding project, not shown here:
# enumerate_model_node_outputs (same name as sklearn-onnx's onnx_helper
# utility) and make_model_with_graph (presumably wrapping onnx.helper.make_model)

def get_io_shapes(model):
    """Returns a map io_name -> shape."""

    rv = {}

    intermediate_outputs = list(enumerate_model_node_outputs(model))

    initializers = [i.name for i in model.graph.initializer]
    inputs = [i for i in model.graph.input if i.name not in initializers]
    assert len(inputs) == 1

    t = inputs[0].type.tensor_type.elem_type
    assert t == onnx.TensorProto.FLOAT
    dtype = np.float32

    if dtype == np.float32:
        elem_type = onnx.TensorProto.FLOAT
    else:
        assert dtype == np.float64
        elem_type = onnx.TensorProto.DOUBLE

    # create inputs as zero tensors
    input_map = {}

    for inp in inputs:
        shape = tuple(d.dim_value if d.dim_value != 0 else 1
                      for d in inp.type.tensor_type.shape.dim)

        input_map[inp.name] = np.zeros(shape, dtype=dtype)

        # also save its shape
        rv[inp.name] = shape

    new_out = []

    # add all old outputs
    for out in model.graph.output:
        new_out.append(out)

    existing_outputs = {o.name for o in model.graph.output}

    for out_name in intermediate_outputs:
        if out_name in rv or out_name in existing_outputs:
            # inputs and original outputs were already added
            continue

        # create a new output without shape/type information; filling it in
        # would need elem_type from above, but onnxruntime does not require it

        value_info = ValueInfoProto()
        value_info.name = out_name
        new_out.append(value_info)

    # ok run once and get all outputs
    graph = make_graph(model.graph.node, model.graph.name, model.graph.input,
                       new_out, model.graph.initializer)

    # this model is not a valid model since the outputs don't have shape/type
    # info... but it will still execute! skip the check_model step
    new_onnx_model = make_model_with_graph(model, graph, check_model=False)

    sess = ort.InferenceSession(new_onnx_model.SerializeToString())

    res = sess.run(None, input_map)
    names = [o.name for o in sess.get_outputs()]
    out_map = dict(zip(names, res))

    for out_name in intermediate_outputs:
        if out_name in rv:  # inputs were already added
            continue

        rv[out_name] = out_map[out_name].shape

    return rv
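
A hedged usage sketch: 'model.onnx' is a placeholder path, and the assumed
helpers noted above must be importable in scope:

import onnx

model = onnx.load('model.onnx')  # placeholder: any single-input float model
for name, shape in get_io_shapes(model).items():
    print(name, shape)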
Example #5
import numpy as np
import onnx
from onnx import ValueInfoProto
from onnx.helper import make_graph

# make_model_with_graph is again assumed to wrap onnx.helper.make_model

def stan_select_model_inputs_outputs(model, dtype, inputs, outputs, io_shapes):
    """
    A modification of select_model_inputs_outputs from sklearn-onnx.

    Takes a model and changes its inputs and outputs.

    :param model: *ONNX* model
    :param dtype: numpy dtype of the new inputs (np.float32 or np.float64)
    :param inputs: new inputs
    :param outputs: new outputs
    :param io_shapes: map io_name -> shape (e.g. as returned by get_io_shapes)
    :return: modified model

    The function removes unneeded nodes.
    """

    if dtype == np.float32:
        elem_type = onnx.TensorProto.FLOAT
    else:
        assert dtype == np.float64
        elem_type = onnx.TensorProto.DOUBLE

    if inputs is None:
        raise NotImplementedError("Parameter inputs cannot be empty.")
    if outputs is None:
        raise NotImplementedError("Parameter outputs cannot be empty.")

    if not isinstance(inputs, list):
        inputs = [inputs]

    if not isinstance(outputs, list):
        outputs = [outputs]

    ##########

    # keys are (input or node output) names; values: 1 = keep, 0 = delete
    mark_var = {}

    for out in enumerate_model_node_outputs(model):
        mark_var[out] = 0

    for inp in model.graph.input:
        mark_var[inp.name] = 0

    for out in outputs:
        if out not in mark_var:
            raise ValueError(
                "Desired Output '{}' not found in model.".format(out))

    initializers = [i.name for i in model.graph.initializer]

    for inp in inputs:
        if inp not in mark_var:
            raise ValueError(
                "Desired Input '{}' not found in model.".format(inp))

        if inp not in initializers:
            mark_var[inp] = 1

    nodes = list(enumerate(model.graph.node))

    # marks for the node indices; 1 = keep, 0 = delete
    mark_op = {}
    for node in nodes:
        mark_op[node[0]] = 0

    # We mark all the nodes we need to keep.
    nb = 1  # number marked... used as a termination condition

    keep_initializers = []

    while nb > 0:
        nb = 0

        for index, node in nodes:

            if mark_op[index] == 1:  # node was already processed, skip
                continue

            mod = False  # is this a newly-marked node?

            node_initializers = []

            for inp in node.input:
                if inp in outputs:
                    continue

                if inp not in mark_var or mark_var[inp] == 0:
                    node_initializers.append(inp)  # was an initializer
                elif mark_var[inp] == 1:
                    # mark the node because its input was marked
                    mark_op[index] = 1
                    mod = True

            for out in node.output:
                if out in inputs:
                    continue

                if mark_var[out] == 1:
                    # mark the node because the output was marked
                    mark_op[index] = 1
                    mod = True

            if not mod:  # none of the node's inputs were marked, skip it
                continue

            keep_initializers += node_initializers

            nb += 1  # mark the node and all its inputs / outputs

            for out in node.output:
                if mark_var.get(out, 0) == 1:
                    continue

                if out in outputs:
                    continue

                mark_var[out] = 1
                nb += 1

            for inp in node.input:
                if mark_var.get(inp, 0) == 1:
                    continue

                if inp in inputs:
                    continue

                mark_var[inp] = 1
                nb += 1

    # keep exactly the nodes marked with mark_op[index] == 1
    keep_nodes = [node for index, node in nodes if mark_op[index] == 1]

    var_in = []
    for inp in inputs:
        nt = onnx.TypeProto()
        nt.tensor_type.elem_type = elem_type

        # inputs need shape info, which is not in the graph!
        shape = io_shapes[inp]

        for s in shape:
            nt.tensor_type.shape.dim.add()
            nt.tensor_type.shape.dim[-1].dim_value = s

        value_info = ValueInfoProto(type=nt)
        value_info.name = inp

        var_in.append(value_info)

    # add initializers to inputs
    for i in model.graph.input:
        if i.name in keep_initializers:
            var_in.append(i)

    var_out = []
    for out in outputs:
        nt = onnx.TypeProto()
        nt.tensor_type.elem_type = elem_type

        # outputs need shape info, which is not in the graph!
        shape = io_shapes[out]

        for s in shape:
            nt.tensor_type.shape.dim.add()
            nt.tensor_type.shape.dim[-1].dim_value = s

        value_info = ValueInfoProto(type=nt)

        value_info.name = out
        var_out.append(value_info)

    init_out = [
        init for init in model.graph.initializer
        if init.name in keep_initializers
    ]

    graph = make_graph(keep_nodes, model.graph.name, var_in, var_out, init_out)

    #print(f"making model with inputs {inputs} / outputs {outputs} and nodes len: {len(keep_nodes)}")
    onnx_model = make_model_with_graph(model, graph)

    return onnx_model
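
A sketch of slicing out a subgraph, reusing get_io_shapes from Example #4;
the tensor names and the model path are placeholders:

import numpy as np
import onnx

model = onnx.load('model.onnx')      # placeholder path
io_shapes = get_io_shapes(model)

sub = stan_select_model_inputs_outputs(
    model, np.float32,
    inputs=['relu_out'],             # hypothetical intermediate tensor
    outputs=['final_out'],           # hypothetical graph output
    io_shapes=io_shapes)
onnx.save(sub, 'sub.onnx')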