Example #1
def main():
    args = parse_args()
    out_name = args.out

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')

    runner = InferenceRunner(deploy_cfg, common_cfg)
    assert runner.use_gpu, 'Please use a valid GPU to export the model.'
    runner.load_checkpoint(args.checkpoint)

    image = cv2.imread(args.image)

    aug = runner.transform(image=image, label='')
    image, label = aug['image'], aug['label']
    image = image.unsqueeze(0).cuda()
    dummy_input = (image, runner.converter.test_encode(['']))
    model = runner.model.cuda().eval()
    need_text = runner.need_text
    if not need_text:
        dummy_input = dummy_input[0]

    if args.onnx:
        runner.logger.info('Convert to onnx model')
        torch2onnx(model, dummy_input, out_name)
    else:
        max_batch_size = args.max_batch_size
        max_workspace_size = args.max_workspace_size
        fp16_mode = args.fp16
        int8_mode = args.int8
        int8_calibrator = None
        if int8_mode:
            runner.logger.info('Convert to trt engine with int8')
            if args.calibration_images:
                runner.logger.info('Use calibration with mode {} and data {}'
                                   .format(args.calibration_mode,
                                           args.calibration_images))
                dataset = CalibDataset(args.calibration_images, runner.converter,
                                       runner.transform, need_text)
                int8_calibrator = CALIBRATORS[args.calibration_mode](
                    dataset=dataset)
            else:
                runner.logger.info('Use default calibration mode and data')
        elif fp16_mode:
            runner.logger.info('Convert to trt engine with fp16')
        else:
            runner.logger.info('Convert to trt engine with fp32')
        trt_model = torch2trt(
            model, dummy_input, max_batch_size=max_batch_size,
            max_workspace_size=max_workspace_size, fp16_mode=fp16_mode,
            int8_mode=int8_mode, int8_calibrator=int8_calibrator)
        save(trt_model, out_name)
    runner.logger.info(
        'Converted successfully, saved model to {}'.format(out_name))
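The script above relies on a parse_args() helper that is not shown. Below is a minimal argparse sketch; the option names, positional/optional split, and defaults are all assumptions inferred from the attributes accessed in main(), not the project's actual CLI.
import argparse

def parse_args():
    # Hypothetical reconstruction of the CLI used by main() above.
    parser = argparse.ArgumentParser(description='Export a model to ONNX or a TensorRT engine')
    parser.add_argument('config', help='config file path')
    parser.add_argument('checkpoint', help='checkpoint file path')
    parser.add_argument('image', help='sample image used to build the dummy input')
    parser.add_argument('out', help='output file name')
    parser.add_argument('--onnx', action='store_true', help='export ONNX instead of a TensorRT engine')
    parser.add_argument('--max_batch_size', type=int, default=1)
    parser.add_argument('--max_workspace_size', type=int, default=1)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--int8', action='store_true')
    parser.add_argument('--calibration_images', default=None, help='directory of images for INT8 calibration')
    parser.add_argument('--calibration_mode', default=None, help='key into CALIBRATORS (actual choices not shown in this listing)')
    return parser.parse_args()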
Example #2
def get_model(file_config, weights):
    cfg = Config.fromfile(file_config)
    deploy_cfg = cfg['deploy']

    model = build_from_cfg(deploy_cfg['model'], MODELS)

    checkpoint = torch.load(weights, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])

    return model
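A hypothetical call site for get_model; the paths are placeholders, and moving the model to GPU/eval mirrors how the export script above treats runner.model.
# Placeholder paths, not files shipped with this listing.
model = get_model('configs/small_satrn.py', 'checkpoints/small_satrn.pth')
model = model.cuda().eval()  # same device/eval handling as in the export script above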
Example #3
    def __init__(self, config, weights) -> None:
        super().__init__()
        self.config = config
        self.weights = weights
        cfg = Config.fromfile(self.config)

        deploy_cfg = cfg['deploy']
        common_cfg = cfg.get('common')
        cfg['batch_max_length'] = 40
        runner = InferenceRunner(deploy_cfg, common_cfg)
        runner.load_checkpoint(self.weights)
        self.runner = runner
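The class this __init__ belongs to is not shown in the snippet. A hypothetical convenience method is sketched below, assuming the wrapped InferenceRunner is callable on an image and returns (pred_str, probs) as in the inference examples further down.
    def predict(self, image):
        # Hypothetical helper: delegate to the wrapped runner, which in the
        # inference examples below returns a predicted string and its probabilities.
        pred_str, probs = self.runner(image)
        return pred_str, probs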
Example #4
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')
    if torch.cuda.is_available():
        device = torch.cuda.current_device()
        deploy_cfg['gpu_id'] = str(device)
    else:
        raise AssertionError('Please use a GPU to export the model.')

    runner = InferenceRunner(deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)

    C, H, W = [int(_.strip()) for _ in args.dummy_input_shape.split(',')]
    # np.random.random_integers is deprecated/removed; randint's upper bound is exclusive.
    dummy_image = np.random.randint(0, 256, (H, W, C)).astype(np.uint8)

    aug = runner.transform(image=dummy_image, label='')
    image, label = aug['image'], aug['label']
    image = image.unsqueeze(0).cuda()
    dummy_input = (image, runner.converter.test_encode(['']))
    model = runner.model.cuda().eval()
    need_text = runner.need_text
    if not need_text:
        dummy_input = dummy_input[0]

    if args.dynamic_shape:
        print(f'Convert to ONNX with dynamic input shape and opset version '
              f'{args.opset_version}')
    else:
        print(f'Convert to ONNX with constant input shape '
              f'{args.dummy_input_shape} and opset version '
              f'{args.opset_version}')

    torch2onnx(
        model,
        dummy_input,
        args.out,
        verbose=args.verbose,
        dynamic_shape=args.dynamic_shape,
        opset_version=args.opset_version,
        do_constant_folding=args.do_constant_folding,
    )

    runner.logger.info(
        f'Converted successfully, saved ONNX file: {os.path.abspath(args.out)}')
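Not part of the original script, but a quick way to sanity-check the exported file is to run it once through onnxruntime. A sketch assuming the image-only case (need_text is False); the path and input shape are placeholders and must match args.out and args.dummy_input_shape.
import numpy as np
import onnxruntime as ort

# Placeholders: use the file written to args.out and a shape matching args.dummy_input_shape.
sess = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name
dummy = np.random.rand(1, 3, 32, 100).astype(np.float32)  # NCHW; shape is a placeholder
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])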
Example #5
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    test_cfg = cfg['test']
    deploy_cfg = cfg['deploy']
    common_cfg = cfg['common']

    runner = TestRunner(test_cfg, deploy_cfg, common_cfg)
    assert runner.use_gpu, 'Please use gpu for benchmark.'
    runner.load_checkpoint(args.checkpoint)

    # image = Image.open(args.image)
    image = cv2.imread(args.image)
    aug = runner.transform(image=image, label='1')
    image, dummy_label = aug['image'], aug['label']
    image = image.unsqueeze(0)
    input_len = runner.converter.test_encode(1)[0]
    model = runner.model
    need_text = runner.need_text
    if need_text:
        shape = tuple(image.shape), tuple(input_len.shape)
    else:
        shape = tuple(image.shape)

    dtypes = args.dtypes
    iters = args.iters
    int8_calibrator = None
    if args.calibration_images:
        calib_dataset = CalibDataset(args.calibration_images, runner.converter,
                                     runner.transform, need_text)
        int8_calibrator = [CALIBRATORS[mode](dataset=calib_dataset)
                           for mode in args.calibration_modes]
    # assert isinstance(runner.test_dataloader['all'], tud.DataLoader), \
    #     "Only suppor single dataloader in training phase. " \
    #     "Check the type of dataset please. " \
    #     "If the type of dataset is list, then the type of build datalodaer will be dict." \
    #     "If the type of dataset is torch.utils.data.Dataset, " \
    #     "the type of build dataloader will be torch.utils.data.Dataloader. " \
    #     "If you wanna combine different dataset, consider using ConcatDataset in your config file please."

    # dataset = runner.test_dataloader['all'].dataset
    # dataset = MetricDataset(dataset, runner.converter, need_text)
    metric = Metric(runner.metric, runner.converter)
    benchmark(model, shape, dtypes=dtypes, iters=iters,
              int8_calibrator=int8_calibrator, metric=metric)
Example #6
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    test_cfg = cfg['test']
    deploy_cfg = cfg['deploy']
    common_cfg = cfg['common']

    runner = TestRunner(test_cfg, deploy_cfg, common_cfg)
    assert runner.use_gpu, 'Please use gpu for benchmark.'
    runner.load_checkpoint(args.checkpoint)

    image = Image.open(args.image)
    image, dummy_label = runner.transform(image, '')
    image = image.unsqueeze(0)
    input_len = runner.converter.test_encode(1)[0]
    model = runner.model
    need_text = runner.need_text
    if need_text:
        shape = tuple(image.shape), tuple(input_len.shape)
    else:
        shape = tuple(image.shape)

    dtypes = args.dtypes
    iters = args.iters
    int8_calibrator = None
    if args.calibration_images:
        calib_dataset = CalibDataset(args.calibration_images, runner.converter,
                                     runner.transform, need_text)
        int8_calibrator = [
            CALIBRATORS[mode](dataset=calib_dataset)
            for mode in args.calibration_modes
        ]
    dataset = runner.test_dataloader.dataset
    dataset = MetricDataset(dataset, runner.converter, need_text)
    metric = Metric(runner.metric, runner.converter)
    benchmark(model,
              shape,
              dtypes=dtypes,
              iters=iters,
              int8_calibrator=int8_calibrator,
              dataset=dataset,
              metric=metric)
Example #7
def load_model():
    cfg_path = os.path.join(os.path.dirname(__file__),
                            "vedastr/configs/small_satrn.py")
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')

    home = os.path.expanduser("~")  # assumption: `home` is not defined anywhere in this snippet
    checkpoint = os.path.join(home, "OCR", "vedastr", "small_satrn.pth")
    if not os.path.exists(checkpoint):
        if not os.path.exists(os.path.dirname(checkpoint)):
            os.makedirs(os.path.dirname(checkpoint))
        download_drive_file(file_id="1bcKtEcYGIOehgPfGi_TqPkvrm6rjOUKR",
                            output=checkpoint)

    runner = InferenceRunner(deploy_cfg, common_cfg)
    runner.load_checkpoint(checkpoint)

    # print("model loaded..")
    return runner
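A hypothetical way to use the returned runner, following the cv2-based inference example further down; the image path is a placeholder.
import cv2

runner = load_model()
image = cv2.imread('demo.jpg')                  # placeholder path
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert to RGB, as in the inference example below
pred_str, probs = runner(image)
print(pred_str)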
Example #8
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    _, fullname = os.path.split(cfg_path)
    fname, ext = os.path.splitext(fullname)

    root_workdir = cfg.pop('root_workdir')
    workdir = os.path.join(root_workdir, fname)
    os.makedirs(workdir, exist_ok=True)

    train_cfg = cfg['train']
    deploy_cfg = cfg['deploy']
    common_cfg = cfg['common']
    common_cfg['workdir'] = workdir

    runner = TrainRunner(train_cfg, deploy_cfg, common_cfg)
    runner()
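The scripts in this listing read a handful of top-level keys from the config file loaded via Config.fromfile(). A heavily simplified, hypothetical skeleton is shown below; real vedastr configs define far more (model, transforms, dataloaders, etc.), so treat this only as a map of the keys accessed above.
# Hypothetical config skeleton; only the keys read by the scripts in this listing are shown.
root_workdir = 'workdir'   # popped by the train/test scripts to build a per-config workdir

common = dict()            # shared settings; the scripts insert 'workdir' here at runtime
deploy = dict()            # deployment/inference settings; some scripts set 'gpu_id' here
train = dict()             # training settings consumed by TrainRunner
test = dict()              # test settings consumed by TestRunner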
Example #9
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')

    runner = DeployRunner(deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)
    if os.path.isfile(args.image):
        images = [args.image]
    else:
        images = [
            os.path.join(args.image, name) for name in os.listdir(args.image)
        ]
    for img in images:
        image = Image.open(img)
        pred_str, probs = runner(image)
        runner.logger.info('predicted string: {} \t for {}'.format(pred_str, img))
Example #10
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    _, fullname = os.path.split(cfg_path)
    fname, ext = os.path.splitext(fullname)

    root_workdir = cfg.pop('root_workdir')
    workdir = os.path.join(root_workdir, fname)
    os.makedirs(workdir, exist_ok=True)

    test_cfg = cfg['test']
    deploy_cfg = cfg['deploy']
    common_cfg = cfg['common']
    common_cfg['workdir'] = workdir
    deploy_cfg['gpu_id'] = args.gpus.replace(" ", "")

    runner = TestRunner(test_cfg, deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)
    runner()
Example #11
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')
    deploy_cfg['gpu_id'] = args.gpus.replace(" ", "")

    runner = InferenceRunner(deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)
    if os.path.isfile(args.image):
        images = [args.image]
    else:
        images = [
            os.path.join(args.image, name) for name in os.listdir(args.image)
        ]
    for img in images:
        image = cv2.imread(img)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        pred_str, probs = runner(image)
        runner.logger.info('predicted string: {} \t for {}'.format(pred_str, img))
Example #12
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    test_cfg = cfg['test']
    deploy_cfg = cfg['deploy']
    common_cfg = cfg['common']
    if torch.cuda.is_available():
        device = torch.cuda.current_device()
        deploy_cfg['gpu_id'] = str(device)
    else:
        raise AssertionError('Please use gpu for benchmark.')

    runner = TestRunner(test_cfg, deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)

    # image = Image.open(args.image)
    C, H, W = [int(_.strip()) for _ in args.dummy_input_shape.split(',')]
    # np.random.random_integers is deprecated/removed; randint's upper bound is exclusive.
    dummy_image = np.random.randint(0, 256, (H, W, C)).astype(np.uint8)

    aug = runner.transform(image=dummy_image, label='')
    image, dummy_label = aug['image'], aug['label']
    image = image.unsqueeze(0)
    input_len = runner.converter.test_encode(1)[0]
    model = runner.model
    need_text = runner.need_text
    if need_text:
        shape = tuple(image.shape), tuple(input_len.shape)
    else:
        shape = tuple(image.shape)

    dtypes = args.dtypes
    iters = args.iters
    int8_calibrator = None
    if args.calibration_images:
        calib_dataset = CalibDataset(
            args.calibration_images,
            runner.converter,
            runner.transform,
            need_text
        )
        int8_calibrator = [
            CALIBRATORS[mode](dataset=calib_dataset)
            for mode in args.calibration_modes
        ]

    if isinstance(runner.test_dataloader, dict):
        target_key = list(runner.test_dataloader.keys())[0]
        runner.logger.info(
            f'There are multiple test datasets; using {target_key}'
        )
        dataset = runner.test_dataloader[target_key].dataset
    else:
        dataset = runner.test_dataloader.dataset
    dataset = MetricDataset(dataset, runner.converter, need_text)
    metric = Metric(runner.metric, runner.converter)
    benchmark(
        model,
        shape,
        iters=iters,
        metric=metric,
        dtypes=dtypes,
        dataset=dataset,
        int8_calibrator=int8_calibrator,
    )