Example #1
def main():
    import sys, os
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
    import torch
    from torchocr.models import build_model
    from torchocr.utils.config_util import Config
    from torchocr.utils.checkpoints import load_checkpoint, save_checkpoint

    cfg_path = 'config/det/dbnet/61_hw_repb2_dbnet.py'
    model_path = 'work_dirs/61_hw_repb2_dbnet/det.pth'

    cfg = Config.fromfile(cfg_path)
    cfg.model.pretrained = None

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # build model
    train_model = build_model(cfg.model)
    load_checkpoint(train_model, model_path, map_location=device)
    train_model = train_model.to(device)

    # rebuild the model with the backbone in its re-parameterized (deploy) structure
    cfg.model.backbone.is_deploy = True
    deploy_model = build_model(cfg.model)
    deploy_model = deploy_model.to(device)

    # whole_model_convert is the RepVGG re-parameterization helper: it folds the
    # training-time branches into the deploy model's weights
    deploy_weights = whole_model_convert(train_model, deploy_model)
    save_checkpoint(deploy_weights, filepath='db_repvgg.pth')
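For completeness, a minimal sketch (not taken from the project, but built from the same config path and helpers shown above) of how the converted deploy checkpoint could then be loaded back for inference:

def load_deploy_model():
    import torch
    from torchocr.models import build_model
    from torchocr.utils.config_util import Config
    from torchocr.utils.checkpoints import load_checkpoint

    cfg = Config.fromfile('config/det/dbnet/61_hw_repb2_dbnet.py')
    cfg.model.pretrained = None
    cfg.model.backbone.is_deploy = True  # build the re-parameterized (inference) structure

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = build_model(cfg.model)
    load_checkpoint(model, 'db_repvgg.pth', map_location=device)
    return model.to(device).eval()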
Example #2
    def __init__(self, cfg, args):
        self.postprocess = build_postprocess(cfg.postprocess)
        self.mode = args.mode
        if self.mode == 'torch':
            model_path = args.model_path
            # for recognition models: set the head's class count from the character set
            if hasattr(self.postprocess, 'character'):
                char_num = len(getattr(self.postprocess, 'character'))
                cfg.model.head.n_class = char_num
            self.model = build_model(cfg.model)
            self.device = torch.device(
                'cuda:0' if torch.cuda.is_available() else 'cpu')
            load_checkpoint(self.model, model_path, map_location=self.device)
            self.model.to(self.device)
            self.model.eval()
        elif self.mode == 'onnx':
            onnx_path = args.onnx_path
            self.model = ONNXModel(onnx_path)
        elif self.mode == 'engine':
            engine_path = args.engine_path
            self.model = TRTModel(engine_path)

        # pre-processing: build the resize op for recognition input
        self.resize = RecResizeImg(image_shape=[3, 32, 800],
                                   infer_mode=False,
                                   character_type='ch')
Example #3
def main():
    args = parse_args()
    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    # common configuration
    global_config = cfg.options

    # build model
    model = build_model(cfg.model)
    device, gpu_ids = select_device(global_config.gpu_ids)
    load_checkpoint(model, args.model_path, map_location=device)
    model = model.to(device)

    model.device = device

    eval_dataset = build_dataset(cfg.test_data.dataset)
    eval_loader = build_dataloader(eval_dataset, loader_cfg=cfg.test_data.loader)
    # build postprocess
    postprocess = build_postprocess(cfg.postprocess)
    # build metric
    metric = build_metrics(cfg.metric)

    result_metric = eval(model, eval_loader, postprocess, metric)
    print(result_metric)
Example #4
    def __init__(self, cfg, model_path):
        self.model = build_model(cfg.model)
        load_checkpoint(self.model, model_path)
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)
        self.model.eval()
        self.postprocess = build_postprocess(cfg.postprocess)

        self.resize = RecResizeImg(image_shape=[3, 32, 100],
                                   infer_mode=False,
                                   character_type='ch')
Example #5
    def __init__(self, cfg, model_path):

        self.model = build_model(cfg.model)
        load_checkpoint(self.model, model_path)
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)
        self.model.eval()
        self.postprocess = build_postprocess(cfg.postprocess)
        self.normalize = NormalizeImage(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])
        self.to_chw = ToCHWImage()
        self.resize = DetResizeForTest([736, 1280])
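To make the role of these transform objects concrete, here is a minimal, self-contained sketch of the same resize -> normalize -> CHW -> batch pipeline written with plain cv2/numpy, so no project-internal call signatures have to be assumed; the sizes and mean/std values are the ones used in the examples above (channel-order handling is omitted):

import cv2
import numpy as np
import torch

def preprocess_det(img_path, device, size=(1280, 736),
                   mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    img = cv2.imread(img_path)                     # HWC, uint8
    img = cv2.resize(img, size)                    # fixed-size resize (DetResizeForTest analogue)
    img = img.astype(np.float32) / 255.0
    img = (img - np.array(mean, np.float32)) / np.array(std, np.float32)  # NormalizeImage analogue
    img = img.transpose(2, 0, 1)                   # ToCHWImage analogue: HWC -> CHW
    return torch.from_numpy(np.ascontiguousarray(img)).unsqueeze(0).to(device)  # add batch dim

# tensor = preprocess_det('demo.jpg', torch.device('cpu'))
# with torch.no_grad():
#     preds = model(tensor)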
Example #6
    def init_weights(self, pretrained=None):
        if pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
        elif isinstance(pretrained, str):
            load_checkpoint(self, pretrained)
        else:
            raise TypeError('pretrained must be a str or None')
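The Conv2d branch above is hand-written fan-out Kaiming (He) initialization: n = k*k*out_channels and std = sqrt(2/n). With torch.nn.init the same distribution can be requested directly; a small equivalent sketch (init_conv_bn is a hypothetical helper name, not from the project):

import torch.nn as nn

def init_conv_bn(module):
    # same distributions as above: weight ~ N(0, 2 / (k*k*out_channels)), BN weight=1, bias=0
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)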
Example #7
    def resume(self,
               checkpoint,
               resume_optimizer=True,
               map_location='default'):
        if map_location == 'default':
            if torch.cuda.is_available():
                device_id = torch.cuda.current_device()
                checkpoint = load_checkpoint(
                    self.model,
                    checkpoint,
                    map_location=lambda storage, loc: storage.cuda(device_id))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(checkpoint,
                                              map_location=map_location)

        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']

        if 'optimizer' in checkpoint and resume_optimizer:
            # the trainer may hold a single optimizer or a dict of optimizers
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(
                        checkpoint['optimizer'][k])
            else:
                raise TypeError(
                    'Optimizer should be dict or torch.optim.Optimizer but got {}'
                    .format(type(self.optimizer)))
        self.logger_info('resumed epoch {}, iter {}'.format(
            self.epoch, self.iter))
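resume() expects the checkpoint dict to carry a 'meta' entry with epoch/iter and, optionally, an 'optimizer' state. A minimal sketch of the matching save side; the key names 'meta', 'iter' and 'optimizer' come from the code above, while the 'state_dict' key and the torch.save call are assumptions:

import torch

def save_resume_checkpoint(model, optimizer, epoch, it, path='latest.pth'):
    checkpoint = {
        'meta': {'epoch': epoch, 'iter': it},   # read back into self._epoch / self._iter
        'state_dict': model.state_dict(),       # 'state_dict' key is assumed, not shown in the snippet
        'optimizer': optimizer.state_dict(),    # or {name: opt.state_dict(), ...} for a dict of optimizers
    }
    torch.save(checkpoint, path)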
Example #8
File: test.py  Project: hua1024/OpenOCR
def main():
    args = parse_args()
    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    # set pretrained model to None
    cfg.model.pretrained = None

    # build postprocess
    postprocess = build_postprocess(cfg.postprocess)
    # for recognition models: set the head's class count from the character set
    if hasattr(postprocess, 'character'):
        char_num = len(getattr(postprocess, 'character'))
        cfg.model.head.n_class = char_num

    eval_dataset = build_dataset(cfg.test_data.dataset)
    eval_loader = build_dataloader(eval_dataset,
                                   loader_cfg=cfg.test_data.loader)
    # build metric
    metric = build_metrics(cfg.metric)

    mode = args.mode
    if mode == 'torch':
        # build model
        model = build_model(cfg.model)
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        load_checkpoint(model, args.model_path, map_location=device)
        if args.simple:
            (filepath, filename) = os.path.split(args.model_path)
            simple_model_path = os.path.join(filepath,
                                             'sim_{}'.format(filename))
            save_checkpoint(model, simple_model_path)

        model = model.to(device)
        model.device = device
        result_metric = eval(model, eval_loader, postprocess, metric)

    elif mode == 'engine':

        engine_path = args.engine_path
        model = TRTModel(engine_path)
        result_metric = engine_eval(model, eval_loader, postprocess, metric)

    print(result_metric)
Example #9
    def __init__(self, cfg, args):

        self.mode = args.mode
        self.postprocess = build_postprocess(cfg.postprocess)
        if self.mode == 'torch':
            model_path = args.model_path
            self.model = build_model(cfg.model)
            self.device = torch.device(
                'cuda:0' if torch.cuda.is_available() else 'cpu')
            load_checkpoint(self.model, model_path, map_location=self.device)
            self.model.to(self.device)
            self.model.eval()
        elif self.mode == 'onnx':
            onnx_path = args.onnx_path
            self.model = ONNXModel(onnx_path)
        elif self.mode == 'engine':
            engine_path = args.engine_path
            self.model = TRTModel(engine_path)

        self.normalize = NormalizeImage(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])
        self.to_chw = ToCHWImage()
        self.resize = DetResizeForTest(short_size=736, mode='db')
Example #10
    def load_checkpoint(self, filename, map_location='cpu', strict=False):
        self.logger_info('load checkpoint from {}'.format(filename))
        return load_checkpoint(self.model, filename, map_location, strict,
                               self.logger)
Example #11
def main():
    args = parse_args()
    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)
    # set pretrained model to None
    cfg.model.pretrained = None

    # build postprocess
    postprocess = build_postprocess(cfg.postprocess)
    # for recognition models: set the head's class count from the character set
    if hasattr(postprocess, 'character'):
        char_num = len(getattr(postprocess, 'character'))
        cfg.model.head.n_class = char_num

    # build the model from the config
    model = build_model(cfg.model)

    # load the weights, move the model to the device and switch to eval mode
    model_path = args.weights
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    load_checkpoint(model, model_path, map_location=device)
    model = model.to(device)
    model.eval()

    onnx_output = args.onnx_output
    input_shape = args.input_shape
    input_shape = eval(input_shape)  # parse the shape string from the command line into a tuple
    mode = args.mode

    # transform torch model to onnx model
    input_names = ['input']
    output_names = ['output']

    # input shape
    input_data = torch.randn(input_shape).to(device)

    if args.is_dynamic:
        if mode == 'rec':
            # recognition: only the batch dimension is dynamic
            dynamic_axes = {"input": {0: "batch_size"}, "output": {0: "batch_size"}}
        elif mode == 'det':
            # detection: batch size, height and width are all dynamic
            dynamic_axes = {"input": {0: "batch_size", 2: 'height', 3: 'width'},
                            "output": {0: "batch_size", 2: 'height', 3: 'width'}}
    else:
        dynamic_axes = None

    onnx_model_name = torch2onnx(
        model=model,
        dummy_input=input_data,
        onnx_model_name=onnx_output,
        input_names=input_names,
        output_names=output_names,
        opset_version=12,
        is_dynamic=args.is_dynamic,
        dynamic_axes=dynamic_axes
    )

    onnx_model = onnx.load(onnx_model_name)
    # check that the model converted fine
    onnx.checker.check_model(onnx_model)
    onnx.helper.printable_graph(onnx_model.graph)
    print("Model was successfully converted to ONNX format.")
    print("It was saved to", onnx_model_name)