Example #1
def main():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])
    init_model(config, model, logger)
    model.eval()

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])

    if config['Architecture']['algorithm'] == "SRN":
        max_text_length = config['Architecture']['Head']['max_text_length']
        other_shape = [
            paddle.static.InputSpec(shape=[None, 1, 64, 256], dtype='float32'),
            [
                paddle.static.InputSpec(shape=[None, 256, 1], dtype="int64"),
                paddle.static.InputSpec(shape=[None, max_text_length, 1],
                                        dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64")
            ]
        ]
        model = to_static(model, input_spec=other_shape)
    else:
        infer_shape = [3, -1, -1]
        if config['Architecture']['model_type'] == "rec":
            infer_shape = [3, 32, -1]  # for rec model, H must be 32
            if 'Transform' in config['Architecture'] and config[
                    'Architecture']['Transform'] is not None and config[
                        'Architecture']['Transform']['name'] == 'TPS':
                logger.info(
                    'When TPS is present in the network, variable-length input is not supported; the input width must match the training width'
                )
                infer_shape[-1] = 100
        model = to_static(model,
                          input_spec=[
                              paddle.static.InputSpec(shape=[None] +
                                                      infer_shape,
                                                      dtype='float32')
                          ])

    paddle.jit.save(model, save_path)
    logger.info('inference model is saved to {}'.format(save_path))
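A quick way to sanity-check an export like the one above is to reload it with paddle.jit.load and run a dummy input. This is a minimal sketch, assuming the rec branch was taken (infer_shape = [3, 32, -1]) and that save_inference_dir was './output'; the path prefix here is hypothetical:

import numpy as np
import paddle

# Reload the exported model via the same path prefix passed to paddle.jit.save.
loaded = paddle.jit.load('./output/inference')  # assumed save_inference_dir
loaded.eval()

# A 32-pixel-high, variable-width dummy input matching the InputSpec above.
x = paddle.to_tensor(np.random.rand(1, 3, 32, 100).astype('float32'))
print(loaded(x).shape)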
Example #2
def export_single_model(model, arch_config, save_path, logger):
    if arch_config["algorithm"] == "SRN":
        max_text_length = arch_config["Head"]["max_text_length"]
        other_shape = [
            paddle.static.InputSpec(shape=[None, 1, 64, 256], dtype="float32"),
            [
                paddle.static.InputSpec(shape=[None, 256, 1], dtype="int64"),
                paddle.static.InputSpec(shape=[None, max_text_length, 1],
                                        dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64")
            ]
        ]
        model = to_static(model, input_spec=other_shape)
    elif arch_config["algorithm"] == "SAR":
        other_shape = [
            paddle.static.InputSpec(
                shape=[None, 3, 48, 160], dtype="float32"),
        ]
        model = to_static(model, input_spec=other_shape)
    else:
        infer_shape = [3, -1, -1]
        if arch_config["model_type"] == "rec":
            infer_shape = [3, 32, -1]  # for rec model, H must be 32
            if "Transform" in arch_config and arch_config[
                    "Transform"] is not None and arch_config["Transform"][
                        "name"] == "TPS":
                logger.info(
                    "When TPS is present in the network, variable-length input is not supported; the input width must match the training width"
                )
                infer_shape[-1] = 100
            if arch_config["algorithm"] == "NRTR":
                infer_shape = [1, 32, 100]
        elif arch_config["model_type"] == "table":
            infer_shape = [3, 488, 488]
        model = to_static(
            model,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[None] + infer_shape, dtype="float32")
            ])

    paddle.jit.save(model, save_path)
    logger.info("inference model is saved to {}".format(save_path))
    return
Example #3
def main():
    args = parse_args()
    cfg, model_name = _trim(get_config(args.config, show=False), args)
    print(f"Building model({model_name})...")
    model = build_model(cfg)
    assert osp.isfile(
        args.pretrained_params
    ), f"pretrained params ({args.pretrained_params}) is not a file path."

    if not os.path.isdir(args.output_path):
        os.makedirs(args.output_path)

    print(f"Loading params from ({args.pretrained_params})...")
    params = paddle.load(args.pretrained_params)
    model.set_dict(params)
    model.eval()

    model = to_static(model,
                      input_spec=[
                          paddle.static.InputSpec(shape=[
                              None, args.num_seg, 3, args.img_size,
                              args.img_size
                          ],
                                                  dtype='float32'),
                      ])
    paddle.jit.save(model, osp.join(args.output_path, model_name))
    print(
        f"model ({model_name}) has been saved to ({args.output_path}).")
Example #4
def main():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])
    init_model(config, model, logger)
    model.eval()

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])

    infer_shape = [3, int(FLAGS.height), int(FLAGS.width)]
    if config['Architecture']['model_type'] == "rec":
        infer_shape = [3, 32, -1]  # for rec model, H must be 32

    model = to_static(
        model,
        input_spec=[
            paddle.static.InputSpec(
                shape=[None] + infer_shape, dtype='float32')
        ])
    paddle.jit.save(model, save_path)
    logger.info('inference model is saved to {}'.format(save_path))
Example #5
def dygraph_to_static(model, save_dir, cfg):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    image_shape = None
    if 'inputs_def' in cfg['TestReader']:
        inputs_def = cfg['TestReader']['inputs_def']
        image_shape = inputs_def.get('image_shape', None)
    if image_shape is None:
        image_shape = [3, None, None]
    # Save infer cfg
    dump_infer_config(cfg, os.path.join(save_dir, 'infer_cfg.yml'),
                      image_shape, model)

    input_spec = [{
        "image":
        InputSpec(shape=[None] + image_shape, name='image'),
        "im_shape":
        InputSpec(shape=[None, 2], name='im_shape'),
        "scale_factor":
        InputSpec(shape=[None, 2], name='scale_factor')
    }]

    export_model = to_static(model, input_spec=input_spec)
    # save Model
    paddle.jit.save(export_model, os.path.join(save_dir, 'model'))
Example #6
def apply_to_static(config, model):
    support_to_static = config['Global'].get('to_static', False)

    if support_to_static:
        specs = None
        if 'image_shape' in config['Global']:
            specs = [InputSpec([None] + config['Global']['image_shape'])]
        model = to_static(model, input_spec=specs)
        logger.info("Successfully to apply @to_static with specs: {}".format(
            specs))
    return model
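For reference, a hedged usage sketch of apply_to_static with a hypothetical minimal config dict; the logger used inside the function is assumed to come from the surrounding module:

import paddle

# Hypothetical config enabling static-graph conversion with a fixed image shape.
config = {'Global': {'to_static': True, 'image_shape': [3, 224, 224]}}
model = paddle.vision.models.resnet18()
model = apply_to_static(config, model)  # requires a module-level logger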
Example #7
def main():
    args = parse_args()

    net = eval("backbones.{}".format(args.network))()
    load_dygraph_pretrain(net, path=args.pretrained_model)
    net.eval()

    net = to_static(net,
                    input_spec=[
                        paddle.static.InputSpec(shape=[None, 3, 112, 112],
                                                dtype='float32')
                    ])
    paddle.jit.save(net, os.path.join(args.output_path, "inference"))
Example #8
def main():
    args = parse_args()

    net = architectures.__dict__[args.model]
    model = Net(net, args.class_dim, args.model)
    load_dygraph_pretrain(model.pre_net,
                          path=args.pretrained_model,
                          load_static_weights=args.load_static_weights)
    model.eval()

    model = to_static(model,
                      input_spec=[
                          paddle.static.InputSpec(
                              shape=[None, 3, args.img_size, args.img_size],
                              dtype='float32')
                      ])
    paddle.jit.save(model, os.path.join(args.output_path, "inference"))
Example #9
def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"):
    assert mode in ALL_MODES
    from paddle.jit import to_static
    from paddle.nn import Layer
    new_function = to_static(function)
    if isinstance(function, Layer):
        mock_func = new_function.forward
    else:
        mock_func = new_function
    mock_func._cuda_graph_capture_mode = mode
    if memory_pool == "default":
        mock_func._cuda_graph_pool_id = 0
    elif memory_pool == "new":
        mock_func._cuda_graph_pool_id = CoreCUDAGraph.gen_new_memory_pool_id()
    else:
        if isinstance(memory_pool, Layer):
            mock_func._cuda_graph_pool_id = memory_pool.forward._cuda_graph_pool_id
        else:
            mock_func._cuda_graph_pool_id = memory_pool._cuda_graph_pool_id
    return new_function
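A minimal usage sketch for wrap_cuda_graph, assuming a CUDA build of Paddle and that ALL_MODES and CoreCUDAGraph are imported from Paddle's CUDA-graph utilities as in the surrounding module:

import paddle

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(8, 8)

    def forward(self, x):
        return self.fc(x)

# Convert to static graph and flag it for thread-local CUDA graph capture.
net = wrap_cuda_graph(TinyNet(), mode="thread_local", memory_pool="default")
out = net(paddle.randn([4, 8]))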
Example #10
def main():
    args = parse_args()
    cfg, model_name = trim_config(get_config(args.config, show=False))
    print(f"Building model({model_name})...")
    model = build_model(cfg.MODEL)
    assert osp.isfile(
        args.pretrained_params
    ), f"pretrained params ({args.pretrained_params}) is not a file path."

    if not os.path.isdir(args.output_path):
        os.makedirs(args.output_path)

    print(f"Loading params from ({args.pretrained_params})...")
    params = paddle.load(args.pretrained_params)
    model.set_dict(params)

    model.eval()

    input_spec = get_input_spec(cfg.INFERENCE, model_name)
    model = to_static(model, input_spec=input_spec)
    paddle.jit.save(model, osp.join(args.output_path, model_name))
    print(
        f"model ({model_name}) has been saved to ({args.output_path}).")
Example #11
def export(args):

    paddle.set_device('cpu')
    test_reader = None
    if args.data == "cifar10":
        class_dim = 10
        image_shape = [3, 224, 224]
    elif args.data == "imagenet":
        class_dim = 1000
        image_shape = [3, 224, 224]
    else:
        raise ValueError("{} is not supported.".format(args.data))
    assert args.model in model_list, "{} is not in lists: {}".format(args.model,
                                                                     model_list)
    # model definition
    net = models.__dict__[args.model](pretrained=False, num_classes=class_dim)

    pruner = paddleslim.dygraph.L1NormFilterPruner(net, [1] + image_shape)
    params = get_pruned_params(args, net)
    ratios = {}
    for param in params:
        ratios[param] = args.pruned_ratio
    print(f"ratios: {ratios}")
    pruner.prune_vars(ratios, [0])

    param_state_dict = paddle.load(args.checkpoint + ".pdparams")
    net.set_dict(param_state_dict)

    net.eval()
    model = to_static(
        net,
        input_spec=[
            paddle.static.InputSpec(
                shape=[None] + image_shape, dtype='float32', name="image")
        ])
    paddle.jit.save(model, args.output_path)
Example #12
def main(config, device, logger, vdl_writer):

    global_config = config['Global']

    # build dataloader
    valid_dataloader = build_dataloader(config, 'Eval', device, logger)

    # build post process
    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])

    flops = paddle.flops(model, [1, 3, 640, 640])
    logger.info(f"FLOPs before pruning: {flops}")

    from paddleslim.dygraph import FPGMFilterPruner
    model.train()
    pruner = FPGMFilterPruner(model, [1, 3, 640, 640])

    # build metric
    eval_class = build_metric(config['Metric'])

    def eval_fn():
        metric = program.eval(model, valid_dataloader, post_process_class,
                              eval_class)
        logger.info(f"metric['hmean']: {metric['hmean']}")
        return metric['hmean']

    params_sensitive = pruner.sensitive(eval_func=eval_fn,
                                        sen_file="./sen.pickle",
                                        skip_vars=[
                                            "conv2d_57.w_0",
                                            "conv2d_transpose_2.w_0",
                                            "conv2d_transpose_3.w_0"
                                        ])

    logger.info(
        "The sensitivity analysis results of the model parameters have been saved to sen.pickle"
    )
    # calculate the pruned params' ratios
    params_sensitive = pruner._get_ratios_by_loss(params_sensitive, loss=0.02)
    for key in params_sensitive.keys():
        logger.info(f"{key}, {params_sensitive[key]}")

    plan = pruner.prune_vars(params_sensitive, [0])

    flops = paddle.flops(model, [1, 3, 640, 640])
    logger.info(f"FLOPs after pruning: {flops}")

    # load pretrain model
    pre_best_model_dict = init_model(config, model, logger, None)
    metric = program.eval(model, valid_dataloader, post_process_class,
                          eval_class)
    logger.info(f"metric['hmean']: {metric['hmean']}")

    # start export model
    from paddle.jit import to_static

    infer_shape = [3, -1, -1]
    if config['Architecture']['model_type'] == "rec":
        infer_shape = [3, 32, -1]  # for rec model, H must be 32

        if 'Transform' in config['Architecture'] and config['Architecture'][
                'Transform'] is not None and config['Architecture'][
                    'Transform']['name'] == 'TPS':
            logger.info(
                'When TPS is present in the network, variable-length input is not supported; the input width must match the training width'
            )
            infer_shape[-1] = 100
    model = to_static(model,
                      input_spec=[
                          paddle.static.InputSpec(shape=[None] + infer_shape,
                                                  dtype='float32')
                      ])

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])
    paddle.jit.save(model, save_path)
    logger.info('inference model is saved to {}'.format(save_path))
Example #13
def apply_to_static(config, model):
    support_to_static = config.get('to_static', False)
    if support_to_static:
        specs = create_input_specs()
        model = to_static(model, input_spec=specs)
    return model
Example #14
def test_raise_error(self):
    with self.assertRaises(Exception):
        to_static(main_func)()
Example #15
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.nn import Layer
from paddle.static import InputSpec
from paddle.jit import to_static
import sys


class AbsNet(paddle.nn.Layer):
    def __init__(self):
        super(AbsNet, self).__init__()

    def forward(self, x):
        x = paddle.abs(x)
        return x


if __name__ == '__main__':
    # build network
    model = AbsNet()
    # save inferencing format model
    net = to_static(model,
                    input_spec=[InputSpec(shape=[None, 1, 28, 28], name='x')])
    paddle.jit.save(net, sys.argv[1])
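As a follow-up sketch, assuming the script above was invoked with a path prefix such as './abs_net', the saved model can be loaded back and run:

import paddle

loaded = paddle.jit.load('./abs_net')  # same prefix passed to paddle.jit.save
x = paddle.randn([1, 1, 28, 28])
print(loaded(x).shape)  # abs() is elementwise, so the shape is unchanged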
Example #16
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# url: https://aistudio.baidu.com/aistudio/projectdetail/3756986?forkThirdPart=1
from net import EfficientNet
from paddle.jit import to_static
from paddle.static import InputSpec
import paddle
import sys

model = EfficientNet.from_name('efficientnet-b4')
net = to_static(
    model, input_spec=[InputSpec(
        shape=[None, 3, 256, 256], name='x')])
paddle.jit.save(net, sys.argv[1])