Example #1
    def cpu_infer(self, case_dir: str, model_file: bytes):
        # create session
        try:
            print('[onnx]: using simplified model')
            sess = ort.InferenceSession(model_file)
        except Exception as e:
            print(e)
            try:
                print('[onnx]: using origin model')
                model_file = os.path.join(case_dir, 'test.onnx')
                sess = ort.InferenceSession(model_file)
            except Exception as e:
                print(e)
                print('[onnx]: using converted model')
                onnx_model = onnx.load(model_file)
                onnx_model = version_converter.convert_version(onnx_model, 8)
                model_file = os.path.join(case_dir, 'converted.onnx')
                onnx.save_model(onnx_model, model_file)
                sess = ort.InferenceSession(model_file)

        input_dict = {}
        for input in self.inputs:
            input_dict[input['name']] = input['data']

        outputs = sess.run(None, input_dict)
        i = 0
        for output in outputs:
            bin_file = os.path.join(case_dir, f'cpu_result_{i}.bin')
            text_file = os.path.join(case_dir, f'cpu_result_{i}.txt')
            self.output_paths.append((bin_file, text_file))
            output.tofile(bin_file)
            self.totxtfile(text_file, output)
            i += 1
Example #2
def update_onnx_opset(model_path: pathlib.Path,
                      opset: int,
                      out_path: pathlib.Path = None,
                      logger: logging.Logger = None):
    """
    Helper to update the opset of a model using onnx version_converter. Target opset must be greater than current opset.
    :param model_path: Path to model to update
    :param opset: Opset to update model to
    :param out_path: Optional output path for updated model to be saved to.
    :param logger: Optional logger for diagnostic output
    :returns: Updated onnx.ModelProto
    """

    model_path_str = str(model_path.resolve(strict=True))
    if logger:
        logger.info("Updating %s to opset %d", model_path_str, opset)

    model = onnx.load(model_path_str)

    new_model = version_converter.convert_version(model, opset)

    if out_path:
        onnx.save(new_model, str(out_path))
        if logger:
            logger.info("Saved updated model to %s", out_path)

    return new_model
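A minimal usage sketch of the helper above (the file names, target opset, and logger setup are assumptions for illustration, not part of the original example):

import logging
import pathlib

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("opset_update")

# Hypothetical paths and opset; update_onnx_opset is the helper defined above.
updated = update_onnx_opset(pathlib.Path("model_opset9.onnx"),
                            opset=13,
                            out_path=pathlib.Path("model_opset13.onnx"),
                            logger=logger)
print(updated.opset_import[0].version)  # expected to print 13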
Example #3
def benchmark_onnxruntime(path_to_model,
                          repeat=1000,
                          number=1,
                          warmup=100,
                          quantize=False):
    """
    Parameters
    ----------
    path_to_model: str
        Path to an ONNX model file.
    repeat: int
        Repetition of experiment. Default: 1000
    number: int
        Number of forward passes in each experiment. Default: 1
    warmup: int
        Number of disregarded experiments. Default: 100
    quantize: bool
        Dynamically quantize the model with default parameters.

    Returns
    -------
    info: dict
        Information about the size and min, max, mean, std of the time
        of the experiments.
    """
    assert repeat >= 2 * warmup

    if quantize:
        import onnx
        from onnx import version_converter
        from onnxruntime.quantization import quantize_dynamic

        orig_model = onnx.load(path_to_model)
        if orig_model.opset_import[0].version < 11:
            converted_model = version_converter.convert_version(orig_model, 11)
            path_to_model = '/tmp/model_conv.onnx'
            with open(path_to_model, 'wb') as f:
                f.write(converted_model.SerializeToString())
            del orig_model, converted_model
        path_to_quant_model = "/tmp/model_quant.onnx"
        model = quantize_dynamic(path_to_model, path_to_quant_model)
        size = os.path.getsize(path_to_quant_model)
        sess = ort.InferenceSession(path_to_quant_model)
    else:
        size = os.path.getsize(path_to_model)
        sess = ort.InferenceSession(path_to_model)

    inputs = {
        x.name: np.random.randn(*get_shape(x)).astype(get_type(x))
        for x in sess.get_inputs()
    }

    def _benchmark():
        output = sess.run(None, inputs)

    res = dict(size=size, input_size=[tuple(x.shape) for x in inputs.values()])
    res.update(benchmark_speed(_benchmark, repeat, number, warmup))
    return res
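A hedged usage sketch for the benchmark helper above (the model path and parameter values are assumptions; the returned dict also carries whatever timing statistics benchmark_speed produces):

# Hypothetical call; "resnet18.onnx" is a placeholder model path.
stats = benchmark_onnxruntime("resnet18.onnx", repeat=500, number=1, warmup=50)
print(stats["size"], stats["input_size"])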
Example #4
def onnx_version_conversion():
    """onnx的版本转换,采用onnx的verson converter
    参考:https://github.com/onnx/tutorials/blob/master/tutorials/VersionConversion.md
    """
    model = onnx.load("old_model_v9.onnx")
    onnx.checker.check_model(model)  # 检查IR已经很好的形成
    from onnx import version_converter
    converted_model = version_converter.convert_version(model, 8)  # 这里把onnx模型从version9转到version8
    onnx.save(converted_model, "new_model_v8.onnx")
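A hedged follow-up (not in the original snippet): re-running the checker on the converted model and reading its opset confirms the downgrade produced a well-formed opset-8 graph.

# Assumed verification step, mirroring the check done on the original model.
onnx.checker.check_model(converted_model)
print(converted_model.opset_import[0].version)  # expected: 8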
Example #5
    def cpu_infer(self, case_dir: str, model_file: bytes, type: str,
                  mode: str):
        # create session
        try:
            print('[onnx]: using simplified model')
            sess = ort.InferenceSession(model_file)
        except Exception as e:
            print(e)
            try:
                print('[onnx]: using origin model')
                model_file = os.path.join(case_dir, 'test.onnx')
                sess = ort.InferenceSession(model_file)
            except Exception as e:
                print(e)
                print('[onnx]: using converted model')
                onnx_model = onnx.load(model_file)
                onnx_model = version_converter.convert_version(onnx_model, 8)
                model_file = os.path.join(case_dir, 'converted.onnx')
                onnx.save_model(onnx_model, model_file)
                sess = ort.InferenceSession(model_file)

        if mode is "dataset":
            for input in self.inputs:
                input_dict = {}
                for in_data in input['data']:
                    topk = []
                    input_dict[input['name']] = self.transform_input(
                        self.data_pre_process(in_data[0]), "float32", "CPU")
                    outputs = sess.run(None, input_dict)
                    for output in outputs:
                        topk.append((in_data[1], get_topK('cpu', 1,
                                                          output)[0]))
                    if os.path.exists(os.path.join(case_dir,
                                                   "cpu_dataset.txt")):
                        os.remove(os.path.join(case_dir, "cpu_dataset.txt"))
                    self.output_paths.append(
                        (os.path.join(case_dir, 'cpu_result_0.bin'),
                         os.path.join(case_dir, 'cpu_result_0.txt')))
                    with open(self.output_paths[-1][1], 'a') as f:
                        for i in range(len(topk)):
                            f.write(topk[i][0].split('/')[-1] + " " +
                                    str(topk[i][1]) + '\n')
        else:
            input_dict = {}
            for input in self.inputs:
                input_dict[input['name']] = self.transform_input(
                    self.data_pre_process(input['data']), "float32", "CPU")

            outputs = sess.run(None, input_dict)
            i = 0
            for output in outputs:
                bin_file = os.path.join(case_dir, f'cpu_result_{i}.bin')
                text_file = os.path.join(case_dir, f'cpu_result_{i}.txt')
                self.output_paths.append((bin_file, text_file))
                output.tofile(bin_file)
                self.totxtfile(text_file, output)
                i += 1
Example #6
    def preprocess_model(self,
                         onnx_model,
                         fix_bn=True,
                         convert_version=True,
                         simplify=True,
                         import_test=True):
        args = {
            'fix_bn': fix_bn,
            'convert_version': convert_version,
            'simplify': simplify,
            'import_test': import_test
        }
        try:
            if fix_bn:
                # fix https://github.com/onnx/models/issues/242
                for node in onnx_model.graph.node:
                    if (node.op_type == "BatchNormalization"):
                        for attr in node.attribute:
                            if (attr.name == "spatial"):
                                attr.i = 1

            if convert_version:
                current_version = onnx_model.opset_import[0].version
                for i in range(current_version, 8):
                    onnx_model = version_converter.convert_version(
                        onnx_model, i + 1)

            if simplify:
                onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
                input_shapes = {}
                for input in self.inputs:
                    input_shapes[input['name']] = input['shape']

                onnx_model, check = onnxsim.simplify(onnx_model,
                                                     input_shapes=input_shapes)
                assert check, "Simplified ONNX model could not be validated"

            print('[info]: preprocess ONNX model success: ', args)
            return onnx_model
        except Exception as e:
            print('[info]: preprocess ONNX model failed: ', args)
            print(e)
            # traceback.print_exc()
            return None
Example #7
File: compose_test.py  Project: onnx/onnx
    def test_error_opset_import_mismatch(self) -> None:
        '''
        Tests that providing models with different operator set imported produces an error
        '''
        m1, m2 = _load_model(m1_def), _load_model(m2_def)
        m1 = helper.make_model(m1.graph,
                               producer_name='test',
                               opset_imports=[helper.make_opsetid("", 10)])
        m2 = helper.make_model(m2.graph,
                               producer_name='test',
                               opset_imports=[helper.make_opsetid("", 15)])

        io_map = [("B00", "B01"), ("B10", "B11"), ("B20", "B21")]
        self.assertRaises(ValueError, compose.merge_models, m1, m2, io_map)

        # Converting to the same Operator set version, should work
        m1 = version_converter.convert_version(m1, 15)
        m3 = compose.merge_models(m1, m2, io_map=io_map)
        checker.check_model(m3)
Example #8
    def transform(self, model: onnx.ModelProto) -> onnx.ModelProto:
        # https://github.com/onnx/onnx/issues/2873#issuecomment-652541006
        #
        # > There is an underlying issue in version converter. It relies on the
        # > C++ IR which I believe has not been updated after IR v3. Because of
        # > this I think it expects initializers also be added as graph inputs.
        # > If you try to change the version of your model to IRv3 or create a
        # > model with initializers also as inputs then I think this will work.
        model = include_initializer_to_graph_input(model)

        version = int(model.opset_import[0].version)
        if version != __OPSET_VERSION__:
            try:
                model = version_converter.convert_version(
                    model, __OPSET_VERSION__)
                check_model(model)
            except Exception:
                raise Exception(
                    f"Can't convert the model (ONNX opset {version}) to ONNX opset {__OPSET_VERSION__}"
                )
        return model
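The helper include_initializer_to_graph_input is not shown in this snippet; based on the issue comment quoted above, a minimal sketch of what it likely does (an assumption, not the original implementation) could be:

import onnx
from onnx import helper

def include_initializer_to_graph_input(model: onnx.ModelProto) -> onnx.ModelProto:
    # Register every initializer that is not already a graph input as an input
    # value_info, which the quoted comment says the version converter expects.
    existing = {inp.name for inp in model.graph.input}
    for init in model.graph.initializer:
        if init.name not in existing:
            vi = helper.make_tensor_value_info(init.name, init.data_type, list(init.dims))
            model.graph.input.append(vi)
    return model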
Example #9
def transformModel_onnx(model_file, inference_file, tensor_size):
    #model = torchvision.models.vgg16(pretrained=True)
    model = torch.load(model_file)
    model = model.cuda()

    dummy_input = Variable(torch.randn(1, 3, 224, 224)).cuda()

    #pytorch2onnx
    torch.onnx.export(model,
                      dummy_input,
                      inference_file,
                      export_params=True,
                      verbose=True,
                      training=False)

    original_model = onnx.load(inference_file)  #opset9

    onnx.checker.check_model(original_model)
    print('The model is checked!')

    #opset9 -> opset7
    converted_model = version_converter.convert_version(original_model, 7)
    onnx.checker.check_model(converted_model)
    print('The model is checked!')
Example #10
    return add_const_value_infos_to_graph(model.graph)


def summarize_model(input: ModelProto):
    return f'Inputs {len(input.graph.input)} Nodes {len(input.graph.node)} Initializer {len(input.graph.initializer)} Value info {len(input.graph.value_info)}'


model = onnx.load('C:\\Users\\agibs\\Downloads\\V9\\V9\\best_bracket.onnx')
kotlin_model = onnx.load(
    'C:\\Users\\agibs\\Documents\\GitHub\\dl4j-PR-split\\deeplearning4j\\nd4j\\samediff-import\\samediff-import-onnx\\input-adjusted-model.onnx'
)
input_names_2 = [node.name for node in kotlin_model.graph.node]
input_init__names_2 = [
    initializer.name for initializer in kotlin_model.graph.initializer
]
#model = onnx.shape_inference.infer_shapes(model)
add_value_info_for_constants(model)
input_names = [node.name for node in model.graph.node]
input_init__names = [
    initializer.name for initializer in model.graph.initializer
]
input_val_info__names = [
    value_info.name for value_info in model.graph.value_info
]
converted_model = version_converter.convert_version(kotlin_model, 13)
converted_input_val_info__names = [
    value_info.name for value_info in converted_model.graph.value_info
]
converted_node_names = [node.name for node in converted_model.graph.node]
onnx.save(converted_model, 'output.onnx')
print('Converted model')
Example #11
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import onnx
from onnx import version_converter, helper


model_path = '../../pretrain_models/yolov3.onnx'
original_model = onnx.load(model_path)

converted_model = version_converter.convert_version(original_model, 8)
onnx.save(converted_model,
          '../../pretrain_models/yolov3_version_8.onnx')
Example #12
    def _test_op_upgrade(
        self,
        op: Text,
        from_opset: int,
        input_shapes: List[Union[List[Optional[int]], Text]] = [[3, 4, 5]],
        output_shapes: List[List[Optional[int]]] = [[3, 4, 5]],
        input_types: Union[List[Any], None] = None,
        output_types: Union[List[Any], None] = None,
        initializer: List[Any] = [],
        attrs: Dict[Text, Any] = {},
        seq_inputs: List[int] = [],
        seq_outputs: List[int] = [],
        optional_inputs: List[int] = [],
        optional_outputs: List[int] = []
    ) -> None:
        global tested_ops
        tested_ops.append(op)

        n_inputs = len(input_shapes)
        letters = list(string.ascii_lowercase)[:n_inputs]
        input_names = [
            letter if shape != '' else '' for (letter, shape) in zip(letters, input_shapes)
        ]
        if input_types is None:
            input_types = [TensorProto.FLOAT] * n_inputs
        is_sequence = [0 if id not in seq_inputs else 1 for id in range(n_inputs)]
        is_optional = [0 if id not in optional_inputs else 1 for id in range(n_inputs)]
        # turn empty strings into [0] to ease type analysis, even though those entries
        # will be ignored
        input_shapes_cast = cast(List[List[int]],
                [[0] if isinstance(shape, str) else shape for shape in input_shapes]
        )
        inputs: List[ValueInfoProto] = []
        for (name, ttype, shape, is_seq, is_opt) in \
                zip(input_names, input_types, input_shapes_cast, is_sequence, is_optional):
            if name != '':
                if is_seq:
                    inputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
                elif is_opt:
                    type_proto = helper.make_tensor_type_proto(ttype, shape)
                    optional_type_proto = helper.make_optional_type_proto(type_proto)
                    inputs += [helper.make_value_info(name, optional_type_proto)]
                else:
                    inputs += [helper.make_tensor_value_info(name, ttype, shape)]

        n_outputs = len(output_shapes)
        output_names = list(string.ascii_lowercase)[n_inputs:n_inputs + n_outputs]
        if output_types is None:
            output_types = [TensorProto.FLOAT] * n_outputs
        is_sequence = [0 if id not in seq_outputs else 1 for id in range(n_outputs)]
        is_optional = [0 if id not in optional_outputs else 1 for id in range(n_outputs)]
        output_shapes_cast = cast(List[List[int]],
                [[0] if isinstance(shape, str) else shape for shape in output_shapes]
        )
        outputs: List[ValueInfoProto] = []
        for (name, ttype, shape, is_seq, is_opt) in \
                zip(output_names, output_types, output_shapes_cast, is_sequence, is_optional):
            if is_seq:
                outputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
            elif is_opt:
                type_proto = helper.make_tensor_type_proto(ttype, shape)
                optional_type_proto = helper.make_optional_type_proto(type_proto)
                outputs += [helper.make_value_info(name, optional_type_proto)]
            else:
                outputs += [helper.make_tensor_value_info(name, ttype, shape)]

        node = helper.make_node(op, input_names, output_names, **attrs)
        graph = helper.make_graph([node], op, inputs, outputs, initializer)
        original = helper.make_model(
            graph,
            producer_name='test',
            opset_imports=[helper.make_opsetid('', from_opset)]
        )
        onnx.checker.check_model(original)
        shape_inference.infer_shapes(original, strict_mode=True)

        converted = version_converter.convert_version(original, latest_opset)
        onnx.checker.check_model(converted)
        shape_inference.infer_shapes(converted, strict_mode=True)
Example #13
### 4b. Cleanup ONNX

import onnx
import onnx.utils
from onnx import version_converter, optimizer
from onnxsim import simplify

# load the model
model_path = './cifar10-resnet20.onnx'
optimized_model = onnx.load(model_path)

# set ONNX version and ONNX ir_version.
# Scailable currently supports ONNX version <= 1.3,
# with ONNX IR version <= 3 and ONNX Operator Set <= 8
optimized_model = version_converter.convert_version(optimized_model, 7)
optimized_model.ir_version = 3

# optimize, simplify and polish the model
optimized_model = optimizer.optimize(optimized_model)
optimized_model, check = simplify(optimized_model)
optimized_model = onnx.utils.polish_model(optimized_model)

# save optimized model
onnx.save(optimized_model, model_path)

### 4c. Test ONNX

import onnxruntime
from onnx import numpy_helper
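The snippet ends after the imports for section 4c; a hedged sketch of how such a test might continue (the input shape and dtype are assumptions about the cifar10-resnet20 model):

import numpy as np

# Assumed continuation: run the cleaned-up model once through onnxruntime.
sess = onnxruntime.InferenceSession(model_path)
input_meta = sess.get_inputs()[0]
dummy = np.random.randn(1, 3, 32, 32).astype(np.float32)  # assumed CIFAR-10 input shape
outputs = sess.run(None, {input_meta.name: dummy})
print([o.shape for o in outputs])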
Example #14
def convert(onnx_model_filename,
            save_dir,
            model_basename='model.py',
            model_func_name='inference',
            embed_params=False,
            onnx_opset_version=9,
            onnx_opset_pedantic=True,
            onnx_skip_version_conversion=False,
            debug=False,
            **kwargs):
    """
    convert an ONNX model to Paddle fluid Python code and desc pb
    """

    import onnx

    from onnx.checker import ValidationError
    from onnx.checker import check_model
    from onnx.utils import polish_model
    from onnx.version_converter import convert_version

    from .onnx_utils import DEFAULT_OP_DOMAIN
    from .onnx_utils import graph_ops, graph_weights
    from .onnx_utils import inferred_model_value_info
    from .onnx_utils import optimize_model_skip_op_for_inference
    from .onnx_utils import optimize_model_strip_initializer
    from .onnx_utils import optimize_model_cast, optimize_model_slice
    from .writer import Program, Writer
    from .writer import make_var_name

    logger = logging.getLogger('convert')

    # prepare onnx model
    logger.info('loading model: %s ...', onnx_model_filename)
    onnx_model = onnx.load(onnx_model_filename)
    try:
        logger.info('checking model ...')
        check_model(onnx_model)
        if onnx_skip_version_conversion:  # WORKAROUND: RuntimeError: No Adapter For OP
            logger.debug('assumed opset version: %d', onnx_opset_version)
            logger.warning(
                'opset conversion skipped: onnx_skip_version_conversion is ON')
        else:
            logger.debug('using opset version: %d', onnx_opset_version)
            onnx_model = convert_version(onnx_model, onnx_opset_version)
        onnx_model = polish_model(onnx_model)
    except ValidationError as e:
        if onnx_opset_pedantic:
            raise e
        else:
            logger.warning('onnx_opset_pedantic is OFF, so the ONNX model '
                           'sanity checking error is suppressed')
            logger.warning('value_info inference may be incomplete')
    # onnx model optimization
    logger.info('model has %d ops', len(onnx_model.graph.node))
    logger.info('optimizing model ...')
    onnx_model = optimize_model_skip_op_for_inference(onnx_model)
    onnx_model = optimize_model_strip_initializer(onnx_model)
    onnx_model = optimize_model_cast(onnx_model)
    onnx_model = optimize_model_slice(onnx_model)

    # prepare filesystem
    shutil.rmtree(save_dir, ignore_errors=True)
    shutil.os.makedirs(save_dir, exist_ok=True)
    logger.info('folder %s cleared', save_dir)

    # DEBUG:
    if debug:
        model = onnx.shape_inference.infer_shapes(onnx_model)
        debug_model_filename, _ = shutil.os.path.splitext(onnx_model_filename)
        onnx.save(model, debug_model_filename + '.optimized_and_inferred.onnx')
        # onnx.save(model, '/tmp/export/optimized_and_inferred.onnx')

    # I/O instances
    onnx_graph = onnx_model.graph
    fluid_program = Program()
    fluid_writer = Writer()

    # model components
    #    graph_name = onnx_graph.name
    graph_inputs = [value.name for value in onnx_graph.input]
    graph_outputs = [value.name for value in onnx_graph.output]
    graph_params = []
    graph_value_infos = inferred_model_value_info(onnx_model)

    # prepare additional value_info
    # for weights
    for name, weight in graph_weights(onnx_graph):
        value_info = graph_value_infos[name]
        value_info['embeded_as'] = []
        value_info['get_weight'] = (lambda w: lambda: w.tolist())(
            weight)  # lazy getter

    logger.info('conversion started')
    # op set conversion
    #    topo = 'backward' if embed_params else 'forward'
    topo = 'forward'
    for name, domain, op_type, inputs, outputs, attrs in graph_ops(
            onnx_graph, topo=topo):
        logger.debug('translating op %s %s::%s ...', name, domain, op_type)
        if domain == DEFAULT_OP_DOMAIN:
            domain = ''
        try:
            fluid_writer.emit_op(
                fluid_program,
                name,
                domain,
                op_type,
                inputs,
                outputs,
                attrs,
                graph_value_infos,
                embed_params=embed_params,
            )
        except BaseException as e:
            logger.fatal('conversion failed for:\n\t%s -> %s::%s -> %s', inputs,
                         domain, op_type, outputs)
            raise e
    op_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d ops in, %d ops out', len(onnx_graph.node),
                len(fluid_program.op_descs))

    # weight writer
    for name, weight in graph_weights(onnx_graph):
        graph_params.append(name)
        value_info = graph_value_infos[name]
        var_names = value_info.get('embeded_as', [])
        if var_names:
            if len(var_names) > 1:
                logger.info(
                    'weight %s is shared between ops, more disk space will be consumed',
                    name)
            logger.debug('saving weight %s(%s[%d], %dB) as %s ...', name,
                         weight.dtype, weight.size, weight.nbytes, var_names)
            for var_name in var_names:  # multiple references
                fluid_writer.write_weight(
                    weight, shutil.os.path.join(save_dir, var_name))
        else:
            logger.debug('saving weight %s(%s[%d], %dB) to %s ...', name,
                         weight.dtype, weight.size, weight.nbytes,
                         make_var_name(name))
            fluid_writer.write_weight(
                weight, shutil.os.path.join(save_dir, make_var_name(name)))
        fluid_writer.emit_param(fluid_program, name, value_info)
    param_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d weights converted', len(graph_params))

    # input writer
    external_inputs = []
    for name in graph_inputs:
        if name not in graph_params:
            value_info = graph_value_infos[name]
            assert value_info['external']
            external_inputs.append(name)
    fluid_writer.emit_inputs(
        fluid_program, external_inputs, graph_value_infos,
        remove_batch=False)  # TODO:
    input_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d inputs converted', len(external_inputs))

    # output writer
    external_outputs = []
    for name in graph_outputs:
        if name not in graph_params:
            value_info = graph_value_infos[name]
            assert value_info['external']
            external_outputs.append(name)
    fluid_writer.emit_outputs(fluid_program, external_outputs)
    output_codes = [''] + fluid_program.codes  # add an empty line
    fluid_program.codes = []
    logger.info('%d outputs converted', len(external_outputs))

    # code generation
    header_codes = fluid_writer.header_code(
        model_func_name, 'From: {}'.format(onnx_model_filename))
    code_filename = shutil.os.path.join(save_dir, model_basename)
    fluid_writer.write_code_file(code_filename, header_codes, input_codes,
                                 param_codes, op_codes, output_codes)
    logger.info('code saved to %s, factory function: %s', code_filename,
                model_func_name)

    # desc generation
    desc_filename = shutil.os.path.join(save_dir, '__model__')
    fluid_writer.write_desc_file(
        desc_filename,
        op_descs=fluid_program.op_descs,
        var_descs=fluid_program.var_descs,
    )
    logger.info('program saved to %s', desc_filename)

    logger.info('conversion finished')
Example #15
except onnx.checker.ValidationError as e:
    print('The model is invalid: %s' % e)
else:
    print('The model is valid!')

if args.VERBOSE:
    print('The model before conversion:\n{}'.format(original_model))

if args.no_convert:
    quit()

# Opset version supported by current onnx-mlir
# Should be consistent with gen_onnx_mlir.py
current_onnx_mlir_support_version = 13

converted_model = version_converter.convert_version(
    original_model, current_onnx_mlir_support_version)

if args.VERBOSE:
    print('The model after conversion:\n{}'.format(converted_model))

if args.save:
    inputFile = args.model
    if inputFile.endswith('-opset-' + str(current_onnx_mlir_support_version) +
                          '.onnx'):
        print('Converted model is not saved due to name conflict')
    else:
        outFile = inputFile[:inputFile.rfind(".onnx")] + '-opset-' + str(
            current_onnx_mlir_support_version) + '.onnx'
        onnx.save(converted_model, outFile)
        print('The converted model is saved to ' + outFile)
Example #16
def convert(onnx_model_filename,
            save_dir,
            model_basename='model.py',
            model_func_name='inference',
            embed_params=False,
            onnx_opset_version=None,
            onnx_opset_pedantic=True,
            onnx_skip_optimization=False,
            debug=False,
            **kwargs):
    """
    convert an ONNX model to Paddle fluid Python code and desc pb
    """

    assert isinstance(onnx_model_filename, str)
    assert isinstance(save_dir, str)
    assert isinstance(model_basename, str)
    assert isinstance(model_func_name, str)
    assert onnx_opset_version is None or isinstance(onnx_opset_version, int)

    import onnx

    from onnx.checker import ValidationError
    from onnx.checker import check_model
    from onnx.version_converter import convert_version

    from .onnx_utils import DEFAULT_OP_DOMAIN
    from .onnx_utils import graph_ops, graph_weights
    from .onnx_utils import inferred_model_value_info
    from .onnx_utils import polish_model, optimize_model_strip_initializer
    from .writer import Program, Writer

    logger = logging.getLogger('onnx2fluid')

    # prepare onnx model
    logger.info('loading model: %s ...', onnx_model_filename)
    onnx_model = onnx.load(onnx_model_filename)

    try:
        logger.info('checking model ...')
        check_model(onnx_model)
        if onnx_opset_version is None:  # WORKAROUND: RuntimeError: No Adapter For OP
            logger.warning(
                'opset conversion skipped: onnx_opset_version not specified')
            logger.info('assumed opset version: %d',
                        DEFAULT_ONNX_OPSET_VERSION)
        else:
            logger.info('using opset version: %d', onnx_opset_version)
            onnx_model = convert_version(onnx_model, onnx_opset_version)
    except ValidationError as e:
        if onnx_opset_pedantic:
            raise e
        else:
            logger.warning('onnx_opset_pedantic is OFF, so the ONNX model '
                           'sanity checking error is suppressed')
            logger.warning('value_info inference may be incomplete')

    # onnx model optimization
    logger.info('model has %d ops', len(onnx_model.graph.node))
    if onnx_skip_optimization:
        logger.info('stripping model ...')
        onnx_model = optimize_model_strip_initializer(onnx_model)
    else:
        logger.info('optimizing model ...')
        onnx_model = polish_model(onnx_model, checking=onnx_opset_pedantic)

    # prepare filesystem
    shutil.rmtree(save_dir, ignore_errors=True)
    shutil.os.makedirs(save_dir, exist_ok=True)
    logger.info('folder %s cleared', save_dir)

    # DEBUG:
    if debug:
        debug_model_filename, _ = shutil.os.path.splitext(onnx_model_filename)
        onnx.save(onnx_model, debug_model_filename + '.polished.onnx')

    # I/O instances
    onnx_graph = onnx_model.graph
    fluid_program = Program()
    fluid_writer = Writer()

    # model components
    inp_vars = [make_var_name(value.name) for value in onnx_graph.input]
    out_vars = [make_var_name(value.name) for value in onnx_graph.output]
    par_vars = []
    value_infos = inferred_model_value_info(onnx_model)
    value_infos = {
        make_var_name(key): value
        for key, value in value_infos.items()
    }

    # prepare additional value_info
    # for weights
    for name, weight in graph_weights(onnx_graph):
        var_name = make_var_name(name)
        value_info = value_infos[var_name]
        value_info['lod'] = []
        value_info['embedded_as'] = []
        value_info['get_weight'] = (lambda w: lambda: w.tolist())(
            weight)  # lazy getter

    logger.info('conversion started')
    # op set conversion
    #    topo = 'backward' if embed_params else 'forward'
    topo = 'forward'
    for name, domain, op_type, inputs, outputs, attrs in graph_ops(onnx_graph,
                                                                   topo=topo):
        op_name = make_var_name(name)
        inputs = list(map(make_var_name, inputs))
        outputs = list(map(make_var_name, outputs))
        logger.debug('translating op %s(%s) %s::%s ...', name, op_name, domain,
                     op_type)
        if domain == DEFAULT_OP_DOMAIN:
            domain = ''
        try:
            fluid_writer.emit_op(
                fluid_program,
                op_name,
                domain,
                op_type,
                inputs,
                outputs,
                attrs,
                value_infos,
                embed_params=embed_params,
            )
        except BaseException as e:
            logger.fatal('conversion failed for:\n\t%s -> %s::%s -> %s',
                         inputs, domain, op_type, outputs)
            raise e
    op_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d ops in, %d ops out', len(onnx_graph.node),
                len(fluid_program.op_descs))

    # type-shape info copy
    for var_name, value_info in value_infos.items():
        fluid_program.VarTypeShapeInfo(var_name,
                                       value_info,
                                       remove_batch=False)  #
    bad_vars = []
    for var_name, var_desc in fluid_program.var_descs.items():
        if not var_desc.type.lod_tensor.HasField('tensor'):
            bad_vars.append(var_name)
    if bad_vars:
        logger.warning('type-shape not inferred for var %s ...',
                       ', '.join(bad_vars[:5]))
        logger.warning('this causes little problem for PaddlePaddle, '
                       'but Paddle Mobile may not infer correctly')
        logger.warning('please consider running validation with -i '
                       'to invoke type-shape inference in PaddlePaddle')

    # weight writer
    for name, weight in graph_weights(onnx_graph):
        var_name = make_var_name(name)
        par_vars.append(var_name)
        value_info = value_infos[var_name]
        embedded_names = value_info.get('embedded_as', [])
        if embedded_names:
            if len(embedded_names) > 1:
                logger.info(
                    'weight %s is shared between ops, more disk space will be consumed',
                    name)
            logger.debug('saving weight %s(%s[%d], %dB) as %s ...', name,
                         weight.dtype, weight.size, weight.nbytes,
                         embedded_names)
            for embedded_name in embedded_names:  # multiple references
                fluid_writer.write_weight(weight,
                                          shutil.os.path.join(
                                              save_dir, embedded_name),
                                          lod=value_info['lod'])
        else:
            logger.debug('saving weight %s(%s[%d], %dB) to %s ...', name,
                         weight.dtype, weight.size, weight.nbytes, var_name)
            fluid_writer.write_weight(weight,
                                      shutil.os.path.join(save_dir, var_name),
                                      lod=value_info['lod'])
        fluid_writer.emit_param(fluid_program, var_name, value_info)
    param_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d weights converted', len(par_vars))

    # input writer
    external_inputs = []
    for var_name in inp_vars:
        if var_name not in par_vars:
            value_info = value_infos[var_name]
            assert value_info['external']
            external_inputs.append(var_name)
    fluid_writer.emit_inputs(fluid_program,
                             external_inputs,
                             value_infos,
                             remove_batch=False)  # TODO:
    input_codes = fluid_program.codes
    fluid_program.codes = []
    logger.info('%d inputs converted', len(external_inputs))

    # output writer
    external_outputs = []
    for var_name in out_vars:
        if var_name not in par_vars:
            value_info = value_infos[var_name]
            assert value_info['external']
            external_outputs.append(var_name)
    fluid_writer.emit_outputs(fluid_program, external_outputs)
    output_codes = [''] + fluid_program.codes  # add an empty line
    fluid_program.codes = []
    logger.info('%d outputs converted', len(external_outputs))

    # code generation
    header_codes = fluid_writer.header_code(
        model_func_name,
        'From: {}'.format(onnx_model_filename),
    )
    code_filename = shutil.os.path.join(save_dir, model_basename)
    fluid_writer.write_code_file(
        code_filename,
        header_codes,
        input_codes,
        param_codes,
        op_codes,
        output_codes,
    )
    logger.info('code saved to %s, factory function: %s', code_filename,
                model_func_name)

    # desc generation
    desc_filename = shutil.os.path.join(save_dir, '__model__')
    fluid_writer.write_desc_file(
        desc_filename,
        op_descs=fluid_program.op_descs,
        var_descs=list(fluid_program.var_descs.values()),
    )
    logger.info('program saved to %s', desc_filename)

    logger.info('conversion finished')
Example #17
                # Ref attrs refer to other attrs, so we don't need to do anything
                if attr.ref_attr_name != "":
                    continue

                if attr.type == onnx.AttributeProto.GRAPH:
                    add_const_value_infos_to_graph(attr.g)
                if attr.type == onnx.AttributeProto.GRAPHS:
                    for g in attr.graphs:
                        add_const_value_infos_to_graph(g)


    return add_const_value_infos_to_graph(model.graph)

model_path = 'C:/Users/AkiyaSouken/Documents/GitHub/NNEngine/Source/ThirdParty/Models/movenet_lightning_v4_1x192x192xBGRxByte.onnx'
targetVersion = 12
model_path_after = model_path + '.op12.onnx'

# Preprocessing: load the model to be converted.
original_model = onnx.load(model_path)
#print('The model before conversion:\n{}'.format(original_model))

#model = onnx.shape_inference.infer_shapes(original_model)
add_value_info_for_constants(original_model)

# A full list of supported adapters can be found here:
# https://github.com/onnx/onnx/blob/master/onnx/version_converter.py#L21
# Apply the version conversion on the original model
converted_model = version_converter.convert_version(original_model, targetVersion) 
#print('The model after conversion:\n{}'.format(converted_model))

onnx.save(converted_model, model_path_after)
Example #18

if __name__ == '__main__':
    logging.basicConfig(
        format=
        '[%(levelname)8s]%(name)s::%(funcName)s:%(lineno)04d: %(message)s',
        level=logging.DEBUG,
    )

    from onnx.version_converter import convert_version

    model = onnx.load('/tmp/export.onnx')
    print_pb_structure(model, loop_iterative=False)

    check_model(model)
    model = convert_version(model, 9)
    model = polish_model(model)

    onnx.save(model, '/tmp/export.polished.onnx')

    graph = model.graph
    value_info = inferred_model_value_info(model)

    name = graph.name
    inputs = [value.name for value in graph.input]
    outputs = [value.name for value in graph.output]
    weights = []

    logger.info('ops:')
    for name, domain, op_type, _, _, attrs in graph_ops(graph, topo='forward'):
        logger.info('- \t%s %s::%s: %s', name, domain, op_type, attrs)
Example #19
def test_type_cast_UINT8ToINT32():

    # Build an onnx proto with a single constant node
    # Create output tensors of the type to cast
    X = helper.make_tensor_value_info('X', TensorProto.UINT8, [3, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [3, 2])

    values = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.uint8)
    # We use ConstantOfShape even if it is not following the onnx
    # spec because we need an op that can store several actual values
    # in the graph to make sure the type conversion is successful. Constant
    # would not work here because it has only been available since opset 12.
    # Constant-9 is not compatible with the default opset-version 11
    node_def = onnx.helper.make_node('ConstantOfShape',
                                     inputs=['X'],
                                     outputs=['Y'],
                                     value=onnx.helper.make_tensor(
                                         name='const_tensor',
                                         data_type=onnx.TensorProto.UINT8,
                                         dims=values.shape,
                                         vals=values.flatten().astype(
                                             np.uint8).tobytes(),
                                         raw=True))

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node_def],
        'test-model',
        [X],
        [Y],
    )

    # Create the model (ModelProto)
    onnx_model = helper.make_model(graph_def)

    # Make sure the opset version is version 9 (by default it would be 11
    # which would crash subsequent function calls)
    onnx_model = version_converter.convert_version(onnx_model, 9)

    # Compile the model to an onnx graph
    onnx.save_model(onnx_model, "type_test.onnx")

    # Load proto into a graph transformer and apply cast
    graph_transformer = popart.GraphTransformer("type_test.onnx")
    graph_transformer.convertUINT8ToINT32()

    # Retrieve modified graph proto
    proto = graph_transformer.getModelProto()
    popart.Builder(proto).saveModelProto("type_test_modified.onnx")

    # Load the model as an onnx model again
    # modified_onnx_model = onnx.load(proto)
    modified_onnx_model = onnx.load("type_test_modified.onnx")

    # Make sure the graph is still good
    onnx.checker.check_model(modified_onnx_model)

    # Get only the first input of the input array (there should only be one)
    i = modified_onnx_model.graph.input[0]
    o = modified_onnx_model.graph.output[0]

    input_type = i.type.tensor_type
    output_type = o.type.tensor_type

    # Make sure shapes remain untouched
    assert (input_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (output_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (input_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (input_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"

    # Test whether the new tensor has the right size
    assert (len(
        modified_onnx_model.graph.node[0].attribute[0].t.int32_data) == len(
            onnx_model.graph.node[0].attribute[0].t.raw_data)
            ), "Wrong number of Bytes in casted version."

    # Retrieve the two constant tensors and compare the values
    assert np.allclose(
        modified_onnx_model.graph.node[0].attribute[0].t.int32_data,
        values.flatten()), "Data was not conserved by cast"
Example #20
def test_type_cast_BFloatToFloat():

    # Build an onnx proto with a single constant node
    # Create output tensors of the type to cast
    X = helper.make_tensor_value_info('X', TensorProto.BFLOAT16, [3, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.BFLOAT16, [3, 2])

    # Define the target values as float32 and cast to bytes
    float_values = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]).astype(np.float32)
    float_bytes = float_values.tobytes()

    # Reinterpret byte string as int16 values. That way we have split the floats
    # in 2 sets of 16bits
    int16_values = np.frombuffer(float_bytes, dtype=np.uint16)

    # Keep only the second 2 bytes of each float (tobytes() is little-endian,
    # so the low-order fractional bytes come first), i.e. every other int16
    # and convert back to bytes. We should now have some bfloat which values
    # are close enough to the original floats (precision loss of around 5e-3)
    bfloat_as_int16 = int16_values[1::2]
    bfloat = bfloat_as_int16.tobytes()

    # This data is generated to check against to make sure that we actually get
    # the same "truncated" data with our method
    bfloat_values = np.frombuffer(bfloat, dtype=np.uint16)
    int16_from_bfloat = bfloat_values
    for i in range(6):
        int16_from_bfloat = np.insert(int16_from_bfloat, 5 - i, 0)

    float_again_bytes = np.array(int16_from_bfloat).tobytes()
    float_again = np.frombuffer(float_again_bytes, dtype=np.float32)

    node_def = onnx.helper.make_node('ConstantOfShape',
                                     inputs=['X'],
                                     outputs=['Y'],
                                     value=onnx.helper.make_tensor(
                                         name='const_tensor',
                                         data_type=onnx.TensorProto.BFLOAT16,
                                         dims=[3, 2],
                                         vals=bfloat,
                                         raw=True))

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node_def],
        'test-model',
        [X],
        [Y],
    )

    # Create the model (ModelProto)
    onnx_model = helper.make_model(graph_def)

    # Make sure the opset version is version 9 (by default it would be 11
    # which would crash subsequent function calls)
    onnx_model = version_converter.convert_version(onnx_model, 9)

    # Compile the model to an onnx graph
    onnx.save_model(onnx_model, "type_test.onnx")

    # Load proto into a graph transformer and apply cast
    graph_transformer = popart.GraphTransformer("type_test.onnx")
    graph_transformer.convertBFloats16ToFloat32()

    # Retrieve modified graph proto
    proto = graph_transformer.getModelProto()
    popart.Builder(proto).saveModelProto("type_test_modified.onnx")

    # Load the model as an onnx model again
    # modified_onnx_model = onnx.load(proto)
    modified_onnx_model = onnx.load("type_test_modified.onnx")

    # Make sure the graph is still good
    onnx.checker.check_model(modified_onnx_model)

    # Get only the first input of the input array (there should only be one)
    i = modified_onnx_model.graph.input[0]
    o = modified_onnx_model.graph.output[0]

    input_type = i.type.tensor_type
    output_type = o.type.tensor_type

    # Make sure shapes remain untouched
    assert (input_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (output_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (input_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (input_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"

    # Test whether the new tensor has the right size
    assert (len(
        modified_onnx_model.graph.node[0].attribute[0].t.float_data) == 6
            ), "Wrong number of Bytes in casted version."

    # Retrieve the two constant tensors and compare the values
    assert np.allclose(
        modified_onnx_model.graph.node[0].attribute[0].t.float_data,
        float_values,
        rtol=1e-2), "Data was not conserved by cast"
    assert np.allclose(
        modified_onnx_model.graph.node[0].attribute[0].t.float_data,
        float_again), "Data was not conserved by cast"
Example #21
def test_type_cast_DoubleToFloat():

    # Build an onnx proto with a single constant node
    # Create output tensors of the type to cast
    X = helper.make_tensor_value_info('X', TensorProto.DOUBLE, [3, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.DOUBLE, [3, 2])

    values = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]).astype(np.double)
    node_def = onnx.helper.make_node(
        'ConstantOfShape',
        inputs=['X'],
        outputs=['Y'],
        value=onnx.helper.make_tensor(name='const_tensor',
                                      data_type=onnx.TensorProto.DOUBLE,
                                      dims=values.shape,
                                      vals=values.flatten().astype(np.double),
                                      raw=False))

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node_def],
        'test-model',
        [X],
        [Y],
    )

    # Create the model (ModelProto)
    onnx_model = helper.make_model(graph_def)

    # Make sure the opset version is version 9 (by default it would be 11
    # which would crash subsequent function calls)
    onnx_model = version_converter.convert_version(onnx_model, 9)

    # Compile the model to an onnx graph
    onnx.save_model(onnx_model, "type_test.onnx")

    # Load proto into a graph transformer and apply cast
    graph_transformer = popart.GraphTransformer("type_test.onnx")
    graph_transformer.convertDoublesToFloats()

    # Retrieve modified graph proto
    proto = graph_transformer.getModelProto()
    popart.Builder(proto).saveModelProto("type_test_modified.onnx")

    # Load the model as an onnx model again
    # modified_onnx_model = onnx.load(proto)
    modified_onnx_model = onnx.load("type_test_modified.onnx")

    # Make sure the graph is still good
    onnx.checker.check_model(modified_onnx_model)

    # Get only the first input of the input array (there should only be one)
    i = modified_onnx_model.graph.input[0]
    o = modified_onnx_model.graph.output[0]

    input_type = i.type.tensor_type
    output_type = o.type.tensor_type

    # Make sure shapes remain untouched
    assert (input_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (output_type.HasField("shape")
            ), "Modified graph output has no shape attribute"
    assert (input_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (input_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[0].dim_value == 3
            ), "Dimensions were not conserved by cast"
    assert (output_type.shape.dim[1].dim_value == 2
            ), "Dimensions were not conserved by cast"

    # Test whether the new tensor has the right size
    assert (len(
        modified_onnx_model.graph.node[0].attribute[0].t.float_data) == len(
            onnx_model.graph.node[0].attribute[0].t.double_data)
            ), "Wrong number of Bytes in casted version."

    # Retrieve the two constant tensors and compare the values
    assert np.allclose(
        modified_onnx_model.graph.node[0].attribute[0].t.float_data,
        values.flatten()), "Data was not conserved by cast"