Example #1
import logging

def export_onnx_model(model, save_file, opset_version=10):
    if model.__class__.__name__ == "FastSCNN" or (
            model.model_type == "detector"
            and model.__class__.__name__ != "YOLOv3"):
        logging.error(
            "Only image classification models, detection models (YOLOv3) and "
            "semantic segmentation models (except FastSCNN) can be exported to ONNX"
        )
    try:
        import paddle2onnx
    except ImportError:
        logging.error(
            "You need to install paddle2onnx first: pip install paddle2onnx==0.4"
        )

    import paddle2onnx as p2o

    if p2o.__version__ != '0.4':
        logging.error(
            "You need to install paddle2onnx==0.4, but the installed version is {}"
            .format(p2o.__version__))

    if opset_version == 10 and model.__class__.__name__ == "YOLOv3":
        logging.warning(
            "Exporting for OpenVINO by default; the multiclass_nms output in the "
            "exported ONNX model will contain the background class. If you need "
            "ONNX output fully consistent with Paddle, please export with "
            "paddle2onnx directly.")

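    # MultiClassNMS4OpenVINO is a custom multiclass_nms op mapper assumed to be
    # defined elsewhere in this file (it adapts NMS output for OpenVINO).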
    p2o.register_op_mapper('multiclass_nms', MultiClassNMS4OpenVINO)

    p2o.program2onnx(model.test_prog,
                     scope=model.scope,
                     save_file=save_file,
                     opset_version=opset_version)
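
A minimal usage sketch for this helper. The loader and paths below are assumptions; any PaddleX-style model object exposing model_type, test_prog and scope would do:

# Hypothetical usage, assuming a PaddleX-style trained model directory.
import paddlex as pdx

model = pdx.load_model('output/yolov3_mobilenetv1/best_model')  # hypothetical path
export_onnx_model(model, save_file='yolov3.onnx', opset_version=10)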
Example #2
    def export_onnx_model(self, dirname: str, **kwargs):
        '''
        Export the model to ONNX format.

        Args:
            dirname(str): The directory to save the onnx model.
            **kwargs(dict|optional): Other export configuration options kept for compatibility; some may be
                removed in the future. Don't use them unless necessary. Refer to
                https://github.com/PaddlePaddle/paddle2onnx for more information.
        '''
        feed_dict, fetch_dict, program = self.context(for_test=True,
                                                      trainable=False)
        inputs = {var.name for var in feed_dict.values()}
        if self.type == 'CV/classification':
            outputs = [fetch_dict['class_probs']]
        else:
            outputs = {var.name for var in fetch_dict.values()}
            outputs = [program.global_block().vars[key] for key in outputs]

        save_file = os.path.join(dirname, '{}.onnx'.format(self.name))
        paddle2onnx.program2onnx(program=program,
                                 scope=paddle.static.global_scope(),
                                 feed_var_names=inputs,
                                 target_vars=outputs,
                                 save_file=save_file,
                                 **kwargs)
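
A hedged usage sketch; this method appears to belong to a PaddleHub Module, so the loader and module name below are assumptions:

# Hypothetical usage via a PaddleHub-style module handle.
import paddlehub as hub

module = hub.Module(name='resnet50_vd_imagenet_ssld')  # hypothetical module name
module.export_onnx_model(dirname='./onnx_model')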
Example #3
File: module.py  Project: dlfming/dd_demo
    def export_onnx_model(self, dirname: str, **kwargs):
        '''
        Export the model to ONNX format.

        Args:
            dirname(str): The directory to save the onnx model.
            **kwargs(dict|optional): Other export configuration options kept for compatibility; some may be
                removed in the future. Don't use them unless necessary. Refer to
                https://github.com/PaddlePaddle/paddle2onnx for more information.
        '''
        if not self._pretrained_model_path:
            raise NotImplementedError

        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)

        model_filename = None
        params_filename = None

        if os.path.exists(os.path.join(self._pretrained_model_path, 'model')):
            model_filename = 'model'

        if os.path.exists(os.path.join(self._pretrained_model_path, 'params')):
            params_filename = 'params'

        if os.path.exists(os.path.join(self._pretrained_model_path, '__params__')):
            params_filename = '__params__'

        program, inputs, outputs = paddle.fluid.io.load_inference_model(
            dirname=self._pretrained_model_path,
            model_filename=model_filename,
            params_filename=params_filename,
            executor=exe)

        save_file = os.path.join(dirname, '{}.onnx'.format(self.name))
        paddle2onnx.program2onnx(
            program=program,
            scope=paddle.static.global_scope(),
            feed_var_names=inputs,
            target_vars=outputs,
            save_file=save_file,
            **kwargs
        )
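
A hedged usage sketch for this variant; the module name is an assumption, and the comments describe the directory layout the method probes:

# Hypothetical usage. self._pretrained_model_path is expected to hold a
# static-graph inference model, e.g.:
#   pretrained_model/
#       model        (program file)
#       __params__   (combined parameters; takes precedence over 'params')
import paddlehub as hub

module = hub.Module(name='some_static_graph_module')  # hypothetical name
module.export_onnx_model(dirname='./onnx_model')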
Example #4
import logging

def program2onnx(model_dir,
                 save_file,
                 model_filename=None,
                 params_filename=None,
                 opset_version=9,
                 enable_onnx_checker=False):
    try:
        import paddle
    except ImportError:
        logging.error(
            "paddlepaddle is not installed; run \"pip install paddlepaddle\"")

    v0, v1, v2 = paddle.__version__.split('.')
    if v0 == '0' and v1 == '0' and v2 == '0':
        logging.warning("You are use develop version of paddlepaddle")
    elif int(v0) <= 1 and int(v1) < 8:
        raise ImportError("paddlepaddle>=1.8.0 is required")

    import paddle2onnx as p2o
    import paddle.fluid as fluid
    # convert a model saved with 'paddle.fluid.io.save_inference_model'
    if hasattr(paddle, 'enable_static'):
        paddle.enable_static()
    exe = fluid.Executor(fluid.CPUPlace())
    if model_filename is None and params_filename is None:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir, exe)
    else:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir,
            exe,
            model_filename=model_filename,
            params_filename=params_filename)
    p2o.program2onnx(
        program,
        fluid.global_scope(),
        save_file,
        feed_var_names=feed_var_names,
        target_vars=fetch_vars,
        opset_version=opset_version,
        enable_onnx_checker=enable_onnx_checker)
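
A minimal invocation sketch; the directory and file names are assumptions (any directory produced by paddle.fluid.io.save_inference_model works):

# Hypothetical usage: combined program/parameter files under ./inference_model.
program2onnx(model_dir='./inference_model',
             save_file='./model.onnx',
             model_filename='__model__',    # hypothetical file name
             params_filename='__params__',  # hypothetical file name
             opset_version=11,
             enable_onnx_checker=True)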
Example #5
import logging

def program2onnx(model_dir,
                 save_file,
                 model_filename=None,
                 params_filename=None,
                 opset_version=9,
                 enable_onnx_checker=False,
                 operator_export_type="ONNX",
                 input_shape_dict=None,
                 output_names=None,
                 auto_update_opset=True):
    try:
        import paddle
    except ImportError:
        logging.error(
            "paddlepaddle is not installed; run \"pip install paddlepaddle\"")

    v0, v1, v2 = paddle.__version__.split('.')
    if v0 == '0' and v1 == '0' and v2 == '0':
        logging.warning("You are use develop version of paddlepaddle")
    elif int(v0) <= 1 and int(v1) < 8:
        raise ImportError("paddlepaddle>=1.8.0 is required")

    import paddle2onnx as p2o
    import paddle.fluid as fluid
    # convert a model saved with 'paddle.fluid.io.save_inference_model'
    if hasattr(paddle, 'enable_static'):
        paddle.enable_static()
    exe = fluid.Executor(fluid.CPUPlace())
    if model_filename is None and params_filename is None:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir, exe)
    else:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir,
            exe,
            model_filename=model_filename,
            params_filename=params_filename)

    OP_WITHOUT_KERNEL_SET = {
        'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
        'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
        'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
        'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
        'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
        'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
        'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
        'copy_cross_scope'
    }
    if input_shape_dict is not None:
        p2o.process_old_ops_desc(program)
        paddle_version = paddle.__version__
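        # Assumption inferred from the arithmetic below: program.desc._version()
        # packs the saving Paddle version as major * 10**6 + minor * 10**3 + patch.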
        model_version = program.desc._version()
        major_ver = model_version // 1000000
        minor_ver = (model_version - major_ver * 1000000) // 1000
        patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
        model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
        if model_version != paddle_version:
            logging.warning(
                "The model was saved with paddlepaddle v{}, but your current "
                "paddlepaddle version is {}; this mismatch may cause errors. It is "
                "recommended to reinstall the paddlepaddle version the model was "
                "saved with.".format(model_version, paddle_version))

        for k, v in input_shape_dict.items():
            program.blocks[0].var(k).desc.set_shape(v)
        for i in range(len(program.blocks[0].ops)):
            if program.blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
                continue
            program.blocks[0].ops[i].desc.infer_shape(program.blocks[0].desc)
    p2o.program2onnx(program,
                     fluid.global_scope(),
                     save_file,
                     feed_var_names=feed_var_names,
                     target_vars=fetch_vars,
                     opset_version=opset_version,
                     enable_onnx_checker=enable_onnx_checker,
                     operator_export_type=operator_export_type,
                     auto_update_opset=auto_update_opset,
                     output_names=output_names)
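
A hedged sketch of the extra options in this variant; the feed/output names and shape are assumptions:

# Hypothetical usage: pin a dynamic input shape and rename outputs before export.
program2onnx(model_dir='./inference_model',
             save_file='./model.onnx',
             opset_version=11,
             input_shape_dict={'image': [1, 3, 224, 224]},  # hypothetical feed name
             output_names=['score_map'],                    # hypothetical output name
             auto_update_opset=True)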
Example #6
    def export_onnx_model(self,
                          dirname: str,
                          input_spec: List[paddle.static.InputSpec] = None,
                          include_sub_modules: bool = True,
                          **kwargs):
        '''
        Export the model to ONNX format.

        Args:
            dirname(str): The directory to save the onnx model.
            input_spec(list): Describes the input of the saved model's forward method, which can be described by
                InputSpec or example Tensor. If None, all input variables of the original Layer's forward method
                would be the inputs of the saved model. Default None.
            include_sub_modules(bool): Whether to export sub modules. Defaults to True.
            **kwargs(dict|optional): Other export configuration options kept for compatibility; some may be
                removed in the future. Don't use them unless necessary. Refer to
                https://github.com/PaddlePaddle/paddle2onnx for more information.
        '''
        if include_sub_modules:
            for key, _sub_module in self.sub_modules().items():
                try:
                    sub_dirname = os.path.normpath(os.path.join(dirname, key))
                    _sub_module.export_onnx_model(
                        sub_dirname,
                        include_sub_modules=include_sub_modules,
                        **kwargs)
                except Exception:
                    utils.record_exception(
                        'Failed to export sub module {}'.format(
                            _sub_module.name))

        if isinstance(self, paddle.nn.Layer):
            save_file = os.path.join(dirname, self.name)
            if not input_spec:
                if hasattr(self, 'input_spec'):
                    input_spec = self.input_spec
                else:
                    _type = self.type.lower()
                    if _type.startswith('cv/image'):
                        input_spec = [
                            paddle.static.InputSpec(
                                shape=[None, 3, None, None], dtype='float32')
                        ]
                    else:
                        raise RuntimeError(
                            'Module {} lacks `input_spec`, please specify it when calling `export_onnx_model`.'
                            .format(self.name))

            paddle.onnx.export(self,
                               save_file,
                               input_spec=input_spec,
                               **kwargs)
            return

        if not self._pretrained_model_path:
            raise RuntimeError(
                'Module {} does not support exporting models in ONNX format.'.
                format(self.name))
        elif not os.path.exists(self._pretrained_model_path):
            log.logger.warning(
                'The model path of Module {} does not exist.'.format(
                    self.name))
            return

        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)

        model_filename = None
        params_filename = None

        if os.path.exists(os.path.join(self._pretrained_model_path, 'model')):
            model_filename = 'model'

        if os.path.exists(os.path.join(self._pretrained_model_path, 'params')):
            params_filename = 'params'

        if os.path.exists(
                os.path.join(self._pretrained_model_path, '__params__')):
            params_filename = '__params__'

        save_file = os.path.join(dirname, '{}.onnx'.format(self.name))

        program, inputs, outputs = paddle.fluid.io.load_inference_model(
            dirname=self._pretrained_model_path,
            model_filename=model_filename,
            params_filename=params_filename,
            executor=exe)

        paddle2onnx.program2onnx(program=program,
                                 scope=paddle.static.global_scope(),
                                 feed_var_names=inputs,
                                 target_vars=outputs,
                                 save_file=save_file,
                                 **kwargs)
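
A hedged usage sketch for the dygraph (paddle.nn.Layer) branch; the module name is an assumption, with input_spec passed explicitly as the docstring describes:

# Hypothetical usage: export a dygraph module with an explicit input spec.
import paddle
import paddlehub as hub

module = hub.Module(name='some_dygraph_module')  # hypothetical name
module.export_onnx_model(
    dirname='./onnx_model',
    input_spec=[paddle.static.InputSpec(shape=[None, 3, 224, 224], dtype='float32')],
    include_sub_modules=False)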