Example #1
 def build_from_dygraph(layer, input_spec=None, output_spec=None):
     from paddle.nn import Layer
     from paddle.fluid import core
     from paddle.fluid.framework import Variable
     from paddle.fluid import dygraph
     from paddle2onnx.graph import dygraph_helper as dg_helper
     if isinstance(layer, dygraph.TranslatedLayer):
         program = layer.program()
         parameters_dict = {}
         pruned_vars = program.global_block().vars
         for param in layer.parameters():
             if param.name.endswith('feed') or param.name.endswith('fetch'):
                 continue
             if not param.persistable:
                 continue
             if param.name in pruned_vars:
                 parameters_dict[param.name] = {
                     'data': np.array(param.value().get_tensor()),
                     'dtype': param.dtype,
                     'shape': param.shape
                 }
         if input_spec is not None:
             logging.warning(
                 "Although input_spec is specified, TranslatedLayer is not support prune. An Complete network will be exported."
             )
             input_spec = layer._input_spec()
         if output_spec is not None:
             logging.warning(
                 "Although output_spec is specified, TranslatedLayer is not support prune. An Complete network will be exported."
             )
         feed_var_names = [ipt.name for ipt in layer._input_spec()]
         fetch_vars = [
             program.global_block().var(opt.name)
             for opt in layer._output_spec()
         ]
         graph = PaddleGraph(program, parameters_dict, feed_var_names,
                             fetch_vars)
         return graph
     elif isinstance(layer, Layer):
         program, feed_var_names, fetch_vars = dg_helper.get_program(
             layer, input_spec, output_spec)
         parameters_dict = {}
         pruned_vars = program.global_block().vars
         for param in layer.parameters():
             if param.name.endswith('feed') or param.name.endswith('fetch'):
                 continue
             if not param.persistable:
                 continue
             if param.name in pruned_vars:
                 parameters_dict[param.name] = {
                     'data': np.array(param.value().get_tensor()),
                     'dtype': param.dtype,
                     'shape': param.shape
                 }
         graph = PaddleGraph(program, parameters_dict, feed_var_names,
                             fetch_vars)
         return graph
     else:
         raise TypeError(
             "The input Layer should be 'Layer' or 'TranslatedLayer', but received  type is %s."
             % type(layer))
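
Usage sketch (illustrative): a minimal example of calling build_from_dygraph on a hand-written paddle.nn.Layer; SimpleNet and the InputSpec shape are made up for this sketch.

import paddle
from paddle.static import InputSpec

class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)

net = SimpleNet()
# Trace the dygraph layer into a static program and wrap it as a PaddleGraph.
spec = [InputSpec(shape=[None, 4], dtype='float32', name='x')]
graph = build_from_dygraph(net, input_spec=spec)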
Example #2
 def opset_7(cls, graph, node, **kw):
     equal_val = None
     if node.input_dtype('X', 0) in [paddle.float32, paddle.float64]:
         warning_info = "Operator 'not_equal' only support input with dtype of int/bool, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
             node.input_dtype('X', 0))
         logging.warning(warning_info)
         x_node = graph.make_node('Cast',
                                  inputs=node.input('X'),
                                  to=dtypes.ONNX.INT32)
         y_node = graph.make_node('Cast',
                                  inputs=node.input('Y'),
                                  to=dtypes.ONNX.INT32)
         equal_val = graph.make_node('Equal',
                                     inputs=[x_node, y_node],
                                     outputs=node.output('Out'))
     else:
         equal_val = graph.make_node(
             'Equal',
             inputs=[node.input('X', 0),
                     node.input('Y', 0)],
             outputs=node.output('Out'))
     k_node = graph.make_node('Cast',
                              inputs=[equal_val],
                              to=dtypes.ONNX.INT64)
     const = graph.make_node('Constant', dtype=dtypes.ONNX.INT64, value=1)
     sub_ = graph.make_node('Sub', inputs=[const, k_node])
     graph.make_node('Cast',
                     inputs=[sub_],
                     outputs=node.output('Out'),
                     to=dtypes.ONNX.BOOL)
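
Since opset 7 has no NotEqual operator, the snippet composes one: Equal, a Cast to INT64, a Sub from a constant 1 to flip the bit, and a final Cast back to BOOL. A plain NumPy sketch of the same arithmetic (illustrative, not part of paddle2onnx):

import numpy as np

x = np.array([1, 2, 3], dtype=np.int32)
y = np.array([1, 0, 3], dtype=np.int32)

equal = (x == y).astype(np.int64)      # Equal -> Cast to INT64
not_equal = (1 - equal).astype(bool)   # Sub(1, equal) -> Cast to BOOL
print(not_equal)                       # [False  True False]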
Example #3
    def run_pass(cls, onnx_graph):
        renamer = {}
        tensor_names = set()
        for name, node in onnx_graph.parameters.items():
            output = node.output
            for opt in output:
                assert opt not in tensor_names, "There are duplicate names in parameters."
                tensor_names.add(opt)

        for ipt in onnx_graph.input_nodes:
            assert ipt.name not in tensor_names, "There are duplicate names in exported parameters and inputs."
            tensor_names.add(ipt.name)

        for name, node in onnx_graph.node_map.items():
            inputs = node.inputs
            outputs = node.outputs
            update_node = False
            for idx in range(len(inputs)):
                ipt = inputs[idx]
                if ipt not in renamer:
                    continue
                updated_name = renamer[ipt]
                while updated_name in renamer:
                    updated_name = renamer[updated_name]
                inputs[idx] = updated_name
                update_node = True

            for idx in range(len(outputs)):
                opt = outputs[idx]
                if opt not in tensor_names:
                    tensor_names.add(opt)
                    continue
                renamed_tensor_name = opt
                while renamed_tensor_name in renamer:
                    renamed_tensor_name = renamer[renamed_tensor_name]
                new_name = cls.generate_new_name(renamed_tensor_name)
                logging.warning("[Renamer Pass] Will rename {}, to {}".format(
                    renamed_tensor_name, new_name))
                outputs[idx] = new_name
                update_node = True
                renamer[renamed_tensor_name] = new_name

            if update_node:
                node.set_inputs(inputs)
                node.set_outputs(outputs)
                onnx_graph.update_node(node)

        for opt in onnx_graph.output_nodes:
            if opt.name not in renamer:
                continue
            updated_name = renamer[opt.name]
            while updated_name in renamer:
                updated_name = renamer[updated_name]
            opt.name = updated_name

        return onnx_graph
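
The while loops here follow rename chains: a tensor that has been renamed more than once resolves to its newest name before it is used. A standalone sketch of that chain resolution (resolve is a hypothetical helper, not part of the pass):

def resolve(name, renamer):
    # Follow the rename chain until the newest name is reached.
    while name in renamer:
        name = renamer[name]
    return name

renamer = {'x': 'x.rename.0', 'x.rename.0': 'x.rename.1'}
assert resolve('x', renamer) == 'x.rename.1'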
Example #4
    def opset_10(cls, graph, node, **kw):
        if node.input_shape("BBoxes", 0)[0] != 1:
            logging.warning(
                "Due to the operator:{}, the converted ONNX model will only supports input[batch_size] == 1."
                .format(node.type))
        scores = node.input('Scores', 0)
        bboxes = node.input('BBoxes', 0)
        num_class = node.input_shape('Scores', 0)[1]
        if len(node.input_shape('Scores', 0)) == 2:
            # inputs: scores & bboxes are LoD tensors
            scores = graph.make_node('Transpose', inputs=[scores], perm=[1, 0])
            scores = mapper_helper.unsqueeze_helper(graph, scores, [0])

            scores_list = mapper_helper.split_helper(graph,
                                                     scores,
                                                     axis=1,
                                                     split=[1] * num_class,
                                                     outputs=num_class)

            bboxes = graph.make_node('Transpose',
                                     inputs=bboxes,
                                     perm=[1, 0, 2])
            bboxes_list = mapper_helper.split_helper(graph,
                                                     bboxes,
                                                     axis=0,
                                                     split=[1] * num_class,
                                                     outputs=num_class)

            bbox_ids = []
            for i in range(num_class):
                bbox_id = cls.nms(graph,
                                  node,
                                  scores_list[i],
                                  bboxes_list[i],
                                  class_id=i)
                bbox_ids.append(bbox_id)
            bbox_ids = graph.make_node('Concat', inputs=bbox_ids, axis=0)
            const_shape = graph.make_node('Constant',
                                          dtype=dtypes.ONNX.INT64,
                                          value=[1, -1, 4])
            bboxes = graph.make_node('Reshape', inputs=[bboxes, const_shape])
            cls.keep_top_k(graph,
                           node,
                           bbox_ids,
                           scores,
                           bboxes,
                           is_lod_input=True)
        else:
            bbox_ids = cls.nms(graph, node, scores, bboxes)
            cls.keep_top_k(graph, node, bbox_ids, scores, bboxes)
Example #5
def get_all_registered_ops(save_file=None):
    ops = list(OpMapper.OPSETS.keys())
    logging.warning("The number of all registered OPs is: {}".format(len(ops)))
    if save_file is None:
        return
    with open(save_file, "w") as f:
        logging.warning(
            "All registered OPs will be written to the file: {}".format(
                save_file))
        f.write("Total OPs num: {} \n".format(len(ops)))
        for index in range(len(ops)):
            op = ops[index]
            f.write(str(index + 1) + ". " + op + "\n")
        return
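
Usage sketch (illustrative; the file name is made up):

# Only log how many OPs are registered.
get_all_registered_ops()

# Also dump the numbered list of OPs to a text file.
get_all_registered_ops(save_file="registered_ops.txt")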
Example #6
    def check_support_status(node_map, opset_version, for_check=False):
        op_mapping_status = {
            OP_MAPPING_NO_REGISTER: [],
            OP_MAPPING_NO_VERSION: [],
        }
        for name, node in list(node_map.items()):
            if node.type in OpMapper.REGISTER_CUSTOM_PADDLE_OP:
                continue
            if node.type not in OpMapper.OPSETS:
                op_mapping_status[OP_MAPPING_NO_REGISTER].append(node)
            else:
                opsets = OpMapper.OPSETS[node.type]
                versions = list(opsets.keys())
                convert_version = get_max_support_version(
                    versions, opset_version)
                if convert_version == -1:
                    op_mapping_status[OP_MAPPING_NO_VERSION].append(node)

        if len(op_mapping_status[OP_MAPPING_NO_REGISTER]) > 0:
            unsupported_op_types = set([
                node.type for node in op_mapping_status[OP_MAPPING_NO_REGISTER]
            ])
            error_info = "\nThere are {} ops that are not supported yet\n".format(
                len(unsupported_op_types))
            for op_type in unsupported_op_types:
                error_info += "=========== {} ===========\n".format(op_type)
            raise NotImplementedError(error_info)

        if len(op_mapping_status[OP_MAPPING_NO_VERSION]) > 0:
            unsupported_op_types = set([
                node.type for node in op_mapping_status[OP_MAPPING_NO_VERSION]
            ])

            recommend_opset_version = -1
            for op_type in unsupported_op_types:
                opsets = OpMapper.OPSETS[op_type]
                if min(opsets.keys()) > recommend_opset_version:
                    recommend_opset_version = min(opsets.keys())
            warning_info = "\nThere are {} ops that are not supported in opset version {}, please set opset version >= {}.\n".format(
                len(unsupported_op_types), opset_version,
                recommend_opset_version)

            for op_type in unsupported_op_types:
                warning_info += "=========== {} ===========\n".format(op_type)
            if for_check:
                logging.warning(warning_info)
                return recommend_opset_version
            raise NotImplementedError(warning_info)
        return opset_version
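
Usage sketch (illustrative; paddle_graph stands for any PaddleGraph with a node_map attribute): the third argument switches between raising on unsupported ops and merely reporting a workable opset version.

# Strict mode: raises NotImplementedError if an op has no mapper for opset 9.
opset = OpMapper.check_support_status(paddle_graph.node_map, 9)

# Check mode: logs a warning and returns the minimum opset version that works.
recommended = OpMapper.check_support_status(paddle_graph.node_map, 9, True)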
Example #7
 def get_recommend_opset_version(node_map, opset_version):
     recommend_opset_version = OpMapper.check_support_status(
         node_map, opset_version, True)
     for name, node in list(node_map.items()):
         if node.type in OpMapper.REGISTER_CUSTOM_PADDLE_OP:  # for a custom op, get the recommended opset version of its subgraph
             custom_paddle_op = OpMapper.REGISTER_CUSTOM_PADDLE_OP[
                 node.type](node)
             custom_paddle_graph = custom_paddle_op.get_paddle_graph()
             custom_recommend_opset_version = OpMapper.check_support_status(
                 custom_paddle_graph.node_map, opset_version, True)
             recommend_opset_version = max(recommend_opset_version,
                                           custom_recommend_opset_version)
     if opset_version != recommend_opset_version:
         warning_info = "\n======================\n"
         warning_info += "\nFor a successful conversion, set the recommended opset version: {}\n".format(
             recommend_opset_version)
         warning_info += "\n======================\n"
         logging.warning(warning_info)
     return recommend_opset_version
Example #8
 def opset_7(cls, graph, node, **kw):
     if node.input_dtype('X', 0) in [paddle.float32, paddle.float64]:
         warning_info = "Operator 'Equal' only support input with dtype of int/bool, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
             node.input_dtype('X', 0))
         logging.warning(warning_info)
         x_node = graph.make_node('Cast',
                                  inputs=node.input('X'),
                                  to=dtypes.ONNX.INT32)
         y_node = graph.make_node('Cast',
                                  inputs=node.input('Y'),
                                  to=dtypes.ONNX.INT32)
         onnx_node = graph.make_node('Equal',
                                     inputs=[x_node, y_node],
                                     outputs=node.output('Out'))
     else:
         onnx_node = graph.make_node(
             'Equal',
             inputs=[node.input('X', 0),
                     node.input('Y', 0)],
             outputs=node.output('Out'))
Example #9
def program2onnx(model_dir,
                 save_file,
                 model_filename=None,
                 params_filename=None,
                 opset_version=9,
                 enable_onnx_checker=False):
    try:
        import paddle
    except ImportError:
        logging.error(
            "paddlepaddle is not installed; run \"pip install paddlepaddle\".")
        raise

    v0, v1, v2 = paddle.__version__.split('.')
    if v0 == '0' and v1 == '0' and v2 == '0':
        logging.warning("You are use develop version of paddlepaddle")
    elif int(v0) <= 1 and int(v1) < 8:
        raise ImportError("paddlepaddle>=1.8.0 is required")

    import paddle2onnx as p2o
    # convert a model saved with 'paddle.fluid.io.save_inference_model'
    if hasattr(paddle, 'enable_static'):
        paddle.enable_static()
    exe = fluid.Executor(fluid.CPUPlace())
    if model_filename is None and params_filename is None:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir, exe)
    else:
        [program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
            model_dir,
            exe,
            model_filename=model_filename,
            params_filename=params_filename)
    p2o.program2onnx(
        program,
        fluid.global_scope(),
        save_file,
        feed_var_names=feed_var_names,
        target_vars=fetch_vars,
        opset_version=opset_version,
        enable_onnx_checker=enable_onnx_checker)
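
Usage sketch (illustrative; the directory, file names, and opset version are made up):

program2onnx(model_dir="./inference_model",
             save_file="model.onnx",
             model_filename="model.pdmodel",
             params_filename="model.pdiparams",
             opset_version=11,
             enable_onnx_checker=True)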
Example #10
    def nms(cls, graph, node, scores, bboxes, class_id=None):
        normalized = node.attr('normalized')
        nms_top_k = node.attr('nms_top_k')
        if node.type == 'matrix_nms':
            iou_threshold = 0.5
            logging.warning(
                "Operator: {} is not supported completely, so traditional"
                " NMS (iou_threshold={}) is used instead, which introduces some difference."
                .format(node.type, str(iou_threshold)))
        else:
            iou_threshold = node.attr('nms_threshold')
        if nms_top_k == -1:
            nms_top_k = 100000

        # convert the Paddle attributes to ONNX tensors
        score_threshold = graph.make_node(
            'Constant',
            dtype=dtypes.ONNX.FLOAT,
            value=[float(node.attr('score_threshold'))])
        iou_threshold = graph.make_node('Constant',
                                        dtype=dtypes.ONNX.FLOAT,
                                        value=[float(iou_threshold)])
        nms_top_k = graph.make_node('Constant',
                                    dtype=dtypes.ONNX.INT64,
                                    value=[np.int64(nms_top_k)])

        # the paddle data format is x1,y1,x2,y2
        kwargs = {'center_point_box': 0}

        if normalized:
            select_bbox_indices = graph.make_node('NonMaxSuppression',
                                                  inputs=[
                                                      bboxes, scores,
                                                      nms_top_k, iou_threshold,
                                                      score_threshold
                                                  ])
        else:
            value_one = graph.make_node('Constant',
                                        dims=[1],
                                        dtype=dtypes.ONNX.FLOAT,
                                        value=1.0)
            new_bboxes = graph.make_node('Split',
                                         inputs=[bboxes],
                                         outputs=4,
                                         axis=2,
                                         split=[1, 1, 1, 1])
            new_xmax = graph.make_node('Add',
                                       inputs=[new_bboxes[2], value_one])
            new_ymax = graph.make_node('Add',
                                       inputs=[new_bboxes[3], value_one])
            new_bboxes = graph.make_node(
                'Concat',
                inputs=[new_bboxes[0], new_bboxes[1], new_xmax, new_ymax],
                axis=2)
            select_bbox_indices = graph.make_node('NonMaxSuppression',
                                                  inputs=[
                                                      new_bboxes, scores,
                                                      nms_top_k, iou_threshold,
                                                      score_threshold
                                                  ])

        if class_id is not None and class_id != 0:
            class_id = graph.make_node('Constant',
                                       dtype=dtypes.ONNX.INT64,
                                       value=[0, class_id, 0])
            class_id = graph.make_node('Unsqueeze',
                                       inputs=[class_id],
                                       axes=[0])
            select_bbox_indices = graph.make_node(
                'Add', inputs=[select_bbox_indices, class_id])

        return select_bbox_indices
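
In the non-normalized branch, boxes are in pixel coordinates where Paddle treats the width as xmax - xmin + 1, so 1.0 is added to xmax and ymax before NMS. A NumPy illustration of that adjustment (shapes and values are made up):

import numpy as np

boxes = np.array([[[10., 20., 30., 40.]]])   # [batch, num_boxes, 4], x1,y1,x2,y2
x1, y1, x2, y2 = np.split(boxes, 4, axis=2)  # mirrors the ONNX Split above
adjusted = np.concatenate([x1, y1, x2 + 1.0, y2 + 1.0], axis=2)
print(adjusted)                              # [[[10. 20. 31. 41.]]]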
Example #11
def program2onnx(model_dir,
                 save_file,
                 model_filename=None,
                 params_filename=None,
                 opset_version=9,
                 enable_onnx_checker=False,
                 operator_export_type="ONNX",
                 input_shape_dict=None,
                 output_names=None,
                 auto_update_opset=True):
    try:
        import paddle
    except ImportError:
        logging.error(
            "paddlepaddle is not installed; run \"pip install paddlepaddle\".")
        raise

    v0, v1, v2 = paddle.__version__.split('.')
    if v0 == '0' and v1 == '0' and v2 == '0':
        logging.warning("You are use develop version of paddlepaddle")
    elif int(v0) <= 1 and int(v1) < 8:
        raise ImportError("paddlepaddle>=1.8.0 is required")

    import paddle2onnx as p2o
    # convert a model saved with 'paddle.fluid.io.save_inference_model'
    if hasattr(paddle, 'enable_static'):
        paddle.enable_static()
    exe = fluid.Executor(fluid.CPUPlace())
    if model_filename is None and params_filename is None:
        [program, feed_var_names,
         fetch_vars] = fluid.io.load_inference_model(model_dir, exe)
    else:
        [program, feed_var_names, fetch_vars
         ] = fluid.io.load_inference_model(model_dir,
                                           exe,
                                           model_filename=model_filename,
                                           params_filename=params_filename)

    OP_WITHOUT_KERNEL_SET = {
        'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
        'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
        'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
        'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
        'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
        'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
        'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
        'copy_cross_scope'
    }
    if input_shape_dict is not None:
        import paddle2onnx
        paddle2onnx.process_old_ops_desc(program)
        paddle_version = paddle.__version__
        model_version = program.desc._version()
        major_ver = model_version // 1000000
        minor_ver = (model_version - major_ver * 1000000) // 1000
        patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
        model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
        if model_version != paddle_version:
            logging.warning(
                "The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model"
                .format(model_version, paddle_version))

        for k, v in input_shape_dict.items():
            program.blocks[0].var(k).desc.set_shape(v)
        for i in range(len(program.blocks[0].ops)):
            if program.blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
                continue
            program.blocks[0].ops[i].desc.infer_shape(program.blocks[0].desc)
    p2o.program2onnx(program,
                     fluid.global_scope(),
                     save_file,
                     feed_var_names=feed_var_names,
                     target_vars=fetch_vars,
                     opset_version=opset_version,
                     enable_onnx_checker=enable_onnx_checker,
                     operator_export_type=operator_export_type,
                     auto_update_opset=auto_update_opset,
                     output_names=output_names)
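
Usage sketch (illustrative; the input name "x" and its shape are made up): input_shape_dict overwrites a declared input shape and re-runs shape inference before conversion, which is useful when the saved model has a dynamic dimension.

program2onnx(model_dir="./inference_model",
             save_file="model.onnx",
             model_filename="model.pdmodel",
             params_filename="model.pdiparams",
             opset_version=11,
             input_shape_dict={"x": [1, 3, 224, 224]})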
Example #12
def run_convert(model, input_shape_dict=None, scope=None, opset_version=9):
    paddle_version = paddle.__version__
    if isinstance(model, paddle.static.Program):
        process_old_ops_desc(model)
        if input_shape_dict is not None:
            model_version = model.desc._version()
            major_ver = model_version // 1000000
            minor_ver = (model_version - major_ver * 1000000) // 1000
            patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
            model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
            if model_version != paddle_version:
                logging.warning(
                    "The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model"
                    .format(model_version, paddle_version))
            for k, v in input_shape_dict.items():
                model.blocks[0].var(k).desc.set_shape(v)
            for i in range(len(model.blocks[0].ops)):
                if model.blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
                    continue
                model.blocks[0].ops[i].desc.infer_shape(model.blocks[0].desc)
        if scope is None:
            scope = paddle.static.global_scope()
        input_names = list()
        output_vars = list()
        for i in range(len(model.blocks[0].ops)):
            if model.blocks[0].ops[i].type == "feed":
                input_names.append(model.blocks[0].ops[i].output("Out")[0])
            if model.blocks[0].ops[i].type == "fetch":
                output_vars.append(model.blocks[0].var(
                    model.blocks[0].ops[i].input("X")[0]))
        return program2onnx(model,
                            scope,
                            save_file=None,
                            feed_var_names=input_names,
                            target_vars=output_vars,
                            opset_version=opset_version,
                            enable_onnx_checker=True)
    elif isinstance(model, paddle.jit.TranslatedLayer):
        process_old_ops_desc(model.program())
        model_version = model.program().desc._version()
        major_ver = model_version // 1000000
        minor_ver = (model_version - major_ver * 1000000) // 1000
        patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
        model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
        if model_version != paddle_version:
            logging.warning(
                "The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model"
                .format(model_version, paddle_version))

        if input_shape_dict is not None:
            for k, v in input_shape_dict.items():
                model.program().blocks[0].var(k).desc.set_shape(v)
            for i in range(len(model.program().blocks[0].ops)):
                if model.program(
                ).blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
                    continue
                model.program().blocks[0].ops[i].desc.infer_shape(
                    model.program().blocks[0].desc)
        return dygraph2onnx(model, save_file=None, opset_version=opset_version)
    else:
        raise Exception(
            "Only support model loaded from paddle.static.load_inference_model() or paddle.jit.load()"
        )
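
Usage sketch (illustrative; the path and input name are made up): run_convert passes save_file=None, so the converted model is returned in memory rather than written to disk (assuming program2onnx/dygraph2onnx return the ONNX proto in that case).

import paddle

layer = paddle.jit.load("./inference_model/model")  # a TranslatedLayer
onnx_model = run_convert(layer,
                         input_shape_dict={"x": [1, 3, 224, 224]},
                         opset_version=11)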