def add_initializers_into_inputs(model: onnx.ModelProto) -> onnx.ModelProto:
    # Due to an ONNX bug (https://github.com/onnx/onnx/issues/2417), missing
    # initializers must be added to the graph inputs.
    input_names = {inp.name for inp in model.graph.input}
    for x in model.graph.initializer:
        if x.name not in input_names:
            shape = onnx.TensorShapeProto()
            for dim in x.dims:
                shape.dim.extend(
                    [onnx.TensorShapeProto.Dimension(dim_value=dim)])
            model.graph.input.extend([
                onnx.ValueInfoProto(
                    name=x.name,
                    type=onnx.TypeProto(tensor_type=onnx.TypeProto.Tensor(
                        elem_type=x.data_type, shape=shape)))
            ])
    return model
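
A minimal usage sketch (the file names here are placeholders, not from the original project): load a model, fold the missing initializers into its inputs, and save the result.

import onnx

model = onnx.load("model.onnx")                 # placeholder input path
model = add_initializers_into_inputs(model)
onnx.save(model, "model_with_inputs.onnx")      # placeholder output path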
Example #2
def _extract_value_info(arr, name):
    # Lists become ONNX sequence types; the element tensor type is taken
    # from the first element (nested lists are not supported).
    if isinstance(arr, list):
        assert arr
        assert not isinstance(arr[0], list)
        value_info_proto = onnx.ValueInfoProto()
        value_info_proto.name = name
        sequence_type_proto = value_info_proto.type.sequence_type
        nested = _extract_value_info(arr[0], name)
        tensor_type = sequence_type_proto.elem_type.tensor_type
        tensor_type.CopyFrom(nested.type.tensor_type)
        return value_info_proto
    else:
        return onnx.helper.make_tensor_value_info(
            name=name,
            elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype],
            shape=arr.shape)
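
A quick check of both branches (shapes are arbitrary). Note that onnx.mapping is deprecated in recent onnx releases in favor of onnx.helper.np_dtype_to_tensor_dtype, so the tensor branch may need an older onnx version as written.

import numpy as np

arr = np.zeros((2, 3), dtype=np.float32)
vi = _extract_value_info(arr, "x")              # tensor_type with shape (2, 3)
seq_vi = _extract_value_info([arr, arr], "xs")  # sequence_type wrapping that tensor_type
print(vi.type.WhichOneof("value"))              # 'tensor_type'
print(seq_vi.type.WhichOneof("value"))          # 'sequence_type'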
Example #3
File: onnx.py Project: toydogcat/torchrt
def _forward(
    model: onnx.ModelProto,
    extra_output_nodes: Optional[List[onnx.NodeProto]] = None
) -> OrderedDict[str, np.ndarray]:
    # expose the outputs of the requested nodes as additional model outputs
    if extra_output_nodes is not None:
        model = deepcopy(model)
        for node in extra_output_nodes:
            for output in node.output:
                value_info = onnx.ValueInfoProto(name=output)
                model.graph.output.append(value_info)

    # create ONNX runtime session
    sess_options = onnxrt.SessionOptions()
    sess_options.graph_optimization_level = (
        onnxrt.GraphOptimizationLevel.ORT_DISABLE_ALL)
    sess_options.log_severity_level = 3
    sess = onnxrt.InferenceSession(
        model.SerializeToString(),
        sess_options=sess_options,
        providers=["CPUExecutionProvider"],
    )

    # get names of input nodes that are not initializers
    input_names = {v.name for v in model.graph.input}
    init_names = {v.name for v in model.graph.initializer}
    input_names = input_names - init_names

    # generate random inputs
    inputs = {}
    for v in model.graph.input:
        name = v.name
        # symbolic dims have dim_value == 0, so shapes must be fully static
        shape = tuple(d.dim_value for d in v.type.tensor_type.shape.dim)
        dtype = _numpy_dtype(v.type.tensor_type.elem_type)
        if name in input_names:
            inputs[name] = np.random.rand(*shape).astype(dtype)

    output_names = [x.name for x in sess.get_outputs()]
    run_options = onnxrt.RunOptions()
    run_options.log_severity_level = 3
    outputs = sess.run(output_names, inputs, run_options=run_options)

    return OrderedDict(zip(output_names, outputs))
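
A usage sketch (the path is a placeholder, and _numpy_dtype is presumably a helper defined elsewhere in the same torchrt file). The random-input generation assumes every input dimension is concrete:

import onnx

model = onnx.load("model.onnx")   # placeholder path
results = _forward(model)         # random inputs, all declared outputs
for name, arr in results.items():
    print(name, arr.shape, arr.dtype)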
Example #4
    def modify_src_model_output(self) -> bool:
        onnx_model = onnx.load(self.src_model_path)

        output_list = [output.name for output in onnx_model.graph.output]

        if self.align_all:
            for node in onnx_model.graph.node:
                for output in node.output:
                    if output in output_list:
                        continue
                    onnx_model.graph.output.extend([onnx.ValueInfoProto(name=output)])

        # strip the ".onnx" suffix (5 chars) and tag the copy as ".hack.onnx"
        model_name = self.src_model_path.split("/")[-1][:-5]
        model_name = model_name + ".hack.onnx"
        self.modify_model_path = os.path.join(self.dump_dir_path, model_name)
        onnx.save(onnx_model, self.modify_model_path)

        return True
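
The filename logic above assumes a '/'-separated path ending in ".onnx"; a platform-independent equivalent (a sketch with hypothetical paths, not the project's code) is:

import os

src_model_path = "/tmp/net.onnx"     # hypothetical source path
dump_dir_path = "/tmp/dump"          # hypothetical dump directory
base = os.path.splitext(os.path.basename(src_model_path))[0]
modify_model_path = os.path.join(dump_dir_path, base + ".hack.onnx")
print(modify_model_path)             # /tmp/dump/net.hack.onnx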
Example #5
def make_kneron_valid_onnx_input(input_init):
    onnx_inputs = []
    for data in input_init:

        if isinstance(data, onnx.TensorProto):
            val = helper.make_tensor_value_info(data.name, data.data_type,
                                                list(data.dims))
            onnx_inputs.append(val)

        elif isinstance(data, onnx.AttributeProto):
            value_info = onnx.ValueInfoProto()
            value_info.name = data.name

            onnx_type = onnx_proto.TypeProto()
            onnx_type.tensor_type.elem_type = data.type
            value_info.type.CopyFrom(onnx_type)

            onnx_inputs.append(value_info)
        else:
            onnx_inputs.append(data)
    return onnx_inputs
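
A usage sketch for the TensorProto branch (contents are arbitrary; this assumes `helper` is onnx.helper and `onnx_proto` supplies a TypeProto, as in the original module):

import numpy as np
from onnx import numpy_helper

weight = numpy_helper.from_array(np.ones((4, 3), dtype=np.float32), name="W")
inputs = make_kneron_valid_onnx_input([weight])
print(inputs[0].name)                          # W
print(inputs[0].type.tensor_type.elem_type)    # 1 == onnx.TensorProto.FLOAT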
Example #6
def getOnnxLayerOutputs(onnx_info):
    print(onnx_info)
    onnx_path = onnx_info[0]
    in_node = onnx_info[1]
    input_data = np.loadtxt(onnx_info[2])
    input_data = input_data.reshape(onnx_info[3]).astype(np.float32)

    model = onnx.load(onnx_path)
    onnx.checker.check_model(model)
    # expose every node output as a graph output, skipping names that are
    # already outputs (duplicate output names break the runtime session)
    existing_outputs = {o.name for o in model.graph.output}
    for node in model.graph.node:
        for output in node.output:
            if output not in existing_outputs:
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
    sess = onnxruntime.InferenceSession(model.SerializeToString())
    outputs = [x.name for x in sess.get_outputs()]
    res = sess.run(outputs, {in_node: input_data})
    res = OrderedDict(zip(outputs, res))

    output_names = list(res.keys())
    output_names.sort()
    print("onnx num of layers: {}".format(len(output_names)))

    return res
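
The function takes a 4-tuple of (model path, input node name, path to a flat text dump of the input, input shape); all values below are placeholders:

onnx_info = ("model.onnx", "input", "input.txt", (1, 3, 224, 224))
layer_outputs = getOnnxLayerOutputs(onnx_info)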
Example #7
def add_initializers_into_inputs(model: onnx.ModelProto) -> onnx.ModelProto:
    """add initializers into inputs of a model.

    Args:
        model (onnx.ModelProto): Input ONNX model.

    Returns:
        onnx.ModelProto: Updated ONNX model.
    """
    input_names = {inp.name for inp in model.graph.input}
    for x in model.graph.initializer:
        if x.name not in input_names:
            shape = onnx.TensorShapeProto()
            for dim in x.dims:
                shape.dim.extend(
                    [onnx.TensorShapeProto.Dimension(dim_value=dim)])
            model.graph.input.extend([
                onnx.ValueInfoProto(
                    name=x.name,
                    type=onnx.TypeProto(tensor_type=onnx.TypeProto.Tensor(
                        elem_type=x.data_type, shape=shape)))
            ])
    return model
Example #8
def get_intermediate_output_statistics(model,
                                       numpy_tensor,
                                       channelwise=False,
                                       debug=None):
    graph = model.graph
    output_needed_module = {}
    output_needed_all_input = {}
    for idx, node in enumerate(graph.node):
        outputs = node.output
        inputs = node.input
        # assumes the graph input is literally named 'input'
        # (matching the ort_session.run call below)
        if 'input' in inputs:
            for out in outputs:
                output_needed_module[out] = {idx}
                output_needed_all_input[out] = set(inputs)
        else:
            s = set()
            s_i = set()
            for in_ in inputs:
                s |= output_needed_module.get(in_, set())
                s_i |= output_needed_all_input.get(in_, set())
            for out in outputs:
                output_needed_module[out] = s | {idx}
                output_needed_all_input[out] = s_i | set(inputs)

    output_statistics = {}
    if not channelwise:
        statistic = {
            'shape': numpy_tensor.shape,
            'min': np.min(numpy_tensor),
            'max': (np.max(numpy_tensor) if np.max(numpy_tensor) > 0
                    else np.abs(np.min(numpy_tensor))),
            '99.9': np.percentile(numpy_tensor, 99.9)
        }
    else:
        axis_args = (0, 2, 3) if len(numpy_tensor.shape) == 4 else 0
        statistic = {
            'shape': numpy_tensor.shape,
            'min': np.min(numpy_tensor, axis=axis_args),
            'max': np.max(numpy_tensor, axis=axis_args),
            '99.9': np.percentile(numpy_tensor, 99.9, axis=axis_args)
        }
    output_statistics['input'] = statistic
    print("\nGetting intermediate output statistics...\n")
    for out in tqdm.tqdm(output_needed_module.keys()):
        keep_nodes = [graph.node[i] for i in list(output_needed_module[out])]
        keep_initializer = [
            init for init in graph.initializer
            if init.name in list(output_needed_all_input[out])
        ]
        var_out = []
        value_info = onnx.ValueInfoProto()
        value_info.name = out
        var_out.append(value_info)
        new_graph = onnx.helper.make_graph(keep_nodes, graph.name, graph.input,
                                           var_out, keep_initializer)
        tmp_model = onnx.helper.make_model(new_graph)
        tmp_model.ir_version = model.ir_version
        tmp_model.producer_name = model.producer_name
        tmp_model.producer_version = model.producer_version
        tmp_model.domain = model.domain
        tmp_model.model_version = model.model_version
        tmp_model.doc_string = model.doc_string
        # copy metadata props from the source model, if any
        if len(model.metadata_props) > 0:
            values = {p.key: p.value for p in model.metadata_props}
            onnx.helper.set_model_props(tmp_model, values)
        # fix opset import
        for oimp in model.opset_import:
            op_set = tmp_model.opset_import.add()
            op_set.domain = oimp.domain
            op_set.version = oimp.version

        ort_session = ort.InferenceSession(tmp_model.SerializeToString())
        outputs = ort_session.run(None, {'input': numpy_tensor})
        if debug is not None:
            # print(out,outputs[0].reshape(1,-1)[0,10:20])
            debug[out] = outputs[0]
        if not channelwise:
            statistic = {
                'shape': outputs[0].shape,
                'min': np.min(outputs[0]),
                'max': (np.max(outputs[0]) if np.max(outputs[0]) > 0
                        else np.abs(np.min(outputs[0]))),
                '99.9': (np.percentile(outputs[0], 99.9)
                         if np.percentile(outputs[0], 99.9) > 0
                         else np.abs(np.min(outputs[0])))
            }
        else:
            axis_args = (0, 2, 3) if len(outputs[0].shape) == 4 else 0
            statistic = {
                'shape': outputs[0].shape,
                'min': np.min(outputs[0], axis=axis_args),
                'max': np.max(outputs[0], axis=axis_args),
                '99.9': np.percentile(outputs[0], 99.9, axis=axis_args)
            }
            # print(np.max(statistic['max']),np.max(outputs[0]))
        output_statistics[out] = statistic
    print("Finished getting intermediate output statistics!")
    if debug is not None:
        return output_statistics, debug
    else:
        return output_statistics
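
A usage sketch (the function hard-codes the input name 'input'; the path and shape are placeholders):

import numpy as np
import onnx

model = onnx.load("model.onnx")                        # placeholder path
x = np.random.rand(1, 3, 224, 224).astype(np.float32)
stats = get_intermediate_output_statistics(model, x, channelwise=True)
for name, s in stats.items():
    print(name, s['shape'])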
Example #9
    def __init__(self, proto, parent):
        super().__init__(proto, parent)

        self.id = self.get_root().alloc_graph_id()

        self.initializer = [ConnxTensorProto(p, self) for p in proto.initializer]
        self.sparse_initializer = [ConnxSparseTensorProto(p, self) for p in proto.sparse_initializer]
        self.input = []
        self.output = []
        self.value_info = [ConnxValueInfoProto(p, self) for p in proto.value_info]

        # Back each initializer with a value_info entry
        for initializer in self.initializer:
            value_info = self.get_value_info(initializer.proto.name)

            if value_info is None:
                value_info_proto = onnx.ValueInfoProto()
                value_info_proto.name = initializer.proto.name
                value_info_proto.type.tensor_type.elem_type = initializer.proto.data_type

                for i in range(len(initializer.proto.dims)):
                    dim = onnx.TensorShapeProto.Dimension()
                    dim.dim_value = initializer.proto.dims[i]
                    value_info_proto.type.tensor_type.shape.dim.append(dim)

                value_info = ConnxValueInfoProto(value_info_proto, self)
                value_info.initializer = initializer
                self.value_info.append(value_info)

        # Back each sparse_initializer with a value_info entry
        for sparse_initializer in self.sparse_initializer:
            value_info = self.get_value_info(sparse_initializer.proto.name)

            if value_info is None:
                value_info_proto = onnx.ValueInfoProto()
                value_info_proto.name = sparse_initializer.proto.name
                value_info_proto.type.tensor_type.elem_type = sparse_initializer.proto.data_type

                for i in range(len(sparse_initializer.proto.dims)):
                    dim = onnx.TensorShapeProto.Dimension()
                    dim.dim_value = sparse_initializer.proto.dims[i]
                    value_info_proto.type.tensor_type.shape.dim.append(dim)

                value_info = ConnxValueInfoProto(value_info_proto, self)
                value_info.sparse_initializer = sparse_initializer
                self.value_info.append(value_info)

        # Back each graph input with a value_info entry
        for input in proto.input:
            value_info = self.get_value_info(input.name)

            if value_info is None:
                value_info = ConnxValueInfoProto(input, self)
                self.value_info.append(value_info)

            self.input.append(value_info)

        # Back each graph output with a value_info entry
        for output in proto.output:
            value_info = self.get_value_info(output.name)

            if value_info is None:
                value_info = ConnxValueInfoProto(output, self)
                self.value_info.append(value_info)

            self.output.append(value_info)

        # Back each node input/output with a value_info entry
        for node in proto.node:
            for name in itertools.chain(node.input, node.output):
                value_info = self.get_value_info(name)

                if value_info is None:
                    value_info = ConnxValueInfoProto(None, self, name=name)
                    self.value_info.append(value_info)

        # Assign IDs first to value_info entries backed by an initializer or
        # sparse_initializer
        id = 1
        for value_info in self.value_info:
            if value_info.initializer is not None:
                value_info.id = id
                value_info.initializer.id = id
                id += 1
            elif value_info.sparse_initializer is not None:
                value_info.id = id
                value_info.sparse_initializer.id = id
                id += 1

        # Then assign IDs to value_info entries without an initializer or
        # sparse_initializer
        for value_info in self.value_info:
            if value_info.initializer is None and value_info.sparse_initializer is None:
                value_info.id = id
                id += 1

        self.node = [ConnxNodeProto(p, self) for p in proto.node]
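
The initializer-to-value_info mirroring above can be reproduced without the Connx wrapper classes; a standalone sketch:

import onnx

def value_info_from_initializer(init: onnx.TensorProto) -> onnx.ValueInfoProto:
    # Mirror an initializer's name, dtype, and dims into a ValueInfoProto.
    vi = onnx.ValueInfoProto()
    vi.name = init.name
    vi.type.tensor_type.elem_type = init.data_type
    for d in init.dims:
        dim = onnx.TensorShapeProto.Dimension()
        dim.dim_value = d
        vi.type.tensor_type.shape.dim.append(dim)
    return vi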
Example #10
    def new_empty_tensor_with_value(self, value):
        '''
        Generate an empty tensor whose dtype/shape come from a Value.
        Used for graph inputs and outputs.
        '''

        if isinstance(value, values.TensorValue):
            dtype = np.float32
            if value.dtype is not None:
                dtype = value.dtype

            # Type estimation is not always correct, so the shape is left
            # undefined even when value.shape is known.
            shape = None
            return self.new_empty_tensor(
                shape, dtype, value2onnx_parameter[value].onnx_name)

        if isinstance(value, values.BoolValue):
            # np.bool was removed in NumPy 1.24; np.bool_ is the scalar type
            return self.new_empty_tensor(None, np.bool_,
                                         value2onnx_parameter[value].onnx_name)

        if isinstance(value, (values.ListValue, values.TupleValue)):
            # both map to a sequence of float tensors
            vi = onnx.ValueInfoProto()
            vi.name = value2onnx_parameter[value].onnx_name
            vi.type.sequence_type.elem_type.tensor_type.elem_type = onnx.TensorProto.FLOAT
            self.generator.onnx_tensors[vi.name] = vi
            return vi

        if isinstance(value, values.NumberValue):
            if value.dtype is not None:
                return self.new_empty_tensor(
                    None, value.dtype, value2onnx_parameter[value].onnx_name)
            elif value.internal_value is not None:
                if isinstance(value.internal_value, int):
                    dtype = np.array(value.internal_value).dtype
                    return self.new_empty_tensor(
                        None, dtype, value2onnx_parameter[value].onnx_name)
                if isinstance(value.internal_value, float):

                    if config.float_restrict:
                        dtype = np.array(value.internal_value).dtype
                    else:
                        dtype = np.float32

                    return self.new_empty_tensor(
                        None, dtype, value2onnx_parameter[value].onnx_name)

        return self.new_empty_tensor(None, np.float32,
                                     value2onnx_parameter[value].onnx_name)
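
The ListValue/TupleValue branch builds a sequence-typed ValueInfoProto by hand; the same protobuf structure can be built standalone (the name below is hypothetical):

import onnx

vi = onnx.ValueInfoProto()
vi.name = "my_sequence"  # hypothetical tensor-sequence name
vi.type.sequence_type.elem_type.tensor_type.elem_type = onnx.TensorProto.FLOAT
print(vi.type.WhichOneof("value"))  # 'sequence_type'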
Example #11
def _make_value_info(name):
    vi = onnx.ValueInfoProto()
    vi.name = name
    return vi
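
Several of the examples above append such name-only ValueInfoProtos to graph.output to expose intermediate tensors; onnxruntime generally tolerates the missing type information when the tensor is produced by a node. A minimal check (the tensor name is hypothetical):

vi = _make_value_info("conv1_out")
print(vi.name, vi.type.WhichOneof("value"))  # conv1_out None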
Example #12
def _onnx_derivative_fw(onx, weights, inputs, options):
    """
    Implements a gradient based on class `OrtModuleGraphBuilder`.
    """
    if weights is None:
        inits = get_train_initializer(onx)
        weights = list(inits)
    builder = OrtModuleGraphBuilder()

    config = OrtModuleGraphBuilderConfiguration()
    config.initializer_names = weights
    config.initializer_names_to_train = weights
    if inputs is None:
        inputs_name = _default_inputs(onx)
        if len(inputs_name) > 0:
            config.input_names_require_grad = inputs_name
    config.build_gradient_graph = True

    p = TrainingGraphTransformerConfiguration()
    config.graph_transformer_config = p

    builder.initialize(onx.SerializeToString(), config)
    builder.build()
    train_onnx_model_serialized = builder.get_model()
    # optimized_pre_grad_model = builder.get_inference_optimized_model()
    grad_yield = onnx.load(BytesIO(train_onnx_model_serialized))
    if options & DerivativeOptions.KeepYieldOp:
        if options != DerivativeOptions.KeepYieldOp:
            raise ValueError(
                "Option KeepYieldOp cannot be combined with any other.")
        return grad_yield

    yields_op = [(index, node)
                 for index, node in enumerate(grad_yield.graph.node)
                 if node.op_type == 'YieldOp']
    if len(yields_op) == 0:
        raise RuntimeError(  # pragma: no cover
            "No YieldOp was found. The input graph must be wrong.")

    other_nodes = [(index, node)
                   for index, node in enumerate(grad_yield.graph.node)
                   if node.op_type != 'YieldOp']
    inputs = list(grad_yield.graph.input)
    if options & DerivativeOptions.KeepOutputs:
        outputs = list(grad_yield.graph.output)
    else:
        original = set(i.name for i in onx.graph.output)
        outputs = [
            o for o in grad_yield.graph.output if o.name not in original
        ]
    map_out = {o.name: o for o in onx.graph.output}
    for index, yn in yields_op:
        if len(yn.input) != 1 or len(yn.output) != 1:
            raise NotImplementedError(  # pragma: no cover
                f"Unexpected configuration for YieldOp node {yn!r}.")
        if yn.input[0] not in map_out:
            raise RuntimeError(  # pragma: no cover
                f"Unable to find output {yn.input[0]!r} in {list(map_out)!r}.")
        if not (options & DerivativeOptions.FillGrad):  # pylint: disable=C0325
            out = map_out[yn.input[0]]
            new_input = onnx.ValueInfoProto()
            new_input.name = yn.output[0]
            new_input.doc_string = "from yieldop"
            new_input.type.CopyFrom(out.type)
            inputs.append(new_input)
        else:
            if not (options & DerivativeOptions.KeepOutputs):  # pylint: disable=C0325
                raise ValueError(  # pragma: no cover
                    "FillGrad should be set with KeepOutputs.")
            name = f"{yn.input[0]}_shape"
            node = make_node('Shape', [yn.input[0]], [name])
            other_nodes.append((index + 0.1, node))
            out = map_out[yn.input[0]]
            elem_type = out.type.tensor_type.elem_type
            node = make_node('ConstantOfShape', [name], [yn.output[0]],
                             value=make_tensor("value", elem_type, (1, ), [1]))
            other_nodes.append((index + 0.2, node))
        if options & DerivativeOptions.KeepOutputs:
            # Keeps output from the original graph.
            outputs.append(out)

    # Final graph.
    other_nodes.sort()
    other_nodes = [o[1] for o in other_nodes]
    graph = make_graph(other_nodes, grad_yield.graph.name, inputs, outputs,
                       list(grad_yield.graph.initializer))
    new_model = make_model(graph)
    new_model.ir_version = grad_yield.ir_version
    new_model.producer_name = grad_yield.producer_name
    new_model.producer_version = grad_yield.producer_version
    new_model.domain = grad_yield.domain
    new_model.model_version = grad_yield.model_version
    new_model.doc_string = grad_yield.doc_string
    # value_info lives on the graph (ModelProto has no such field), and
    # make_model copied 'graph', so extend the new model's graph directly
    new_model.graph.value_info.extend(grad_yield.graph.value_info)
    del new_model.opset_import[:]
    for oimp in grad_yield.opset_import:
        op_set = new_model.opset_import.add()
        op_set.domain = oimp.domain
        op_set.version = oimp.version

    return onnx_remove_node(new_model)
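
A hedged usage sketch (this helper requires a training-enabled onnxruntime build; DerivativeOptions is the option enum defined alongside it, and the path is a placeholder):

import onnx

onx = onnx.load("model.onnx")  # placeholder path
grad = _onnx_derivative_fw(onx, weights=None, inputs=None,
                           options=DerivativeOptions.KeepYieldOp)
onnx.save(grad, "model_grad.onnx")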