Example #1
def main():
    args = parse_args()

    # The following ANSI text style strings are taken from the colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in the future. '
    msg += blue_text + 'Please use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    if args.device == 'cpu':
        args.device = None

    cfg = Config.fromfile(args.model_config)

    # build the model
    if args.model_type == 'det':
        if args.backend == 'TensorRT':
            model = TensorRTDetector(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeDetector(args.model_file, cfg, 0)
    else:
        if args.backend == 'TensorRT':
            model = TensorRTRecognizer(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeRecognizer(args.model_file, cfg, 0)

    # build the dataloader
    samples_per_gpu = 1
    cfg = disable_text_recog_aug_test(cfg)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=False,
                                   shuffle=False)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {}
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
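
The main() above only reads attributes from args; the parser itself is not part of this listing. Below is a minimal sketch of what parse_args() could look like, inferred purely from the attributes the function uses (model_config, model_file, model_type, backend, device, eval). The flag names, defaults, and help texts are assumptions, not the project's actual definitions.

import argparse

def parse_args():
    # Hypothetical parser reconstructed from the attributes main() reads;
    # the real deploy_test.py may define these arguments differently.
    parser = argparse.ArgumentParser(description='Test a deployed model.')
    parser.add_argument('model_config', help='Path to the model config file.')
    parser.add_argument('model_file', help='Path to the ONNX or TensorRT model.')
    parser.add_argument('model_type', choices=['det', 'recog'],
                        help='Detection or recognition model.')
    parser.add_argument('backend', choices=['TensorRT', 'ONNXRuntime'],
                        help='Inference backend.')
    parser.add_argument('--eval', nargs='+',
                        help='Evaluation metrics passed to dataset.evaluate().')
    parser.add_argument('--device', default='cuda:0',
                        help='Inference device; "cpu" disables GPU placement.')
    return parser.parse_args()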
Example #2
File: deploy_test.py  Project: xyzhu8/mmocr
def main():
    args = parse_args()
    if args.device == 'cpu':
        args.device = None

    cfg = Config.fromfile(args.model_config)

    # build the model
    if args.model_type == 'det':
        if args.backend == 'TensorRT':
            model = TensorRTDetector(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeDetector(args.model_file, cfg, 0)
    else:
        if args.backend == 'TensorRT':
            model = TensorRTRecognizer(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeRecognizer(args.model_file, cfg, 0)

    # build the dataloader
    samples_per_gpu = 1
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=False,
                                   shuffle=False)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {}
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
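
The key-popping loop above is needed because the evaluation section of an MMCV-style config mixes EvalHook scheduling options with keyword arguments for dataset.evaluate(), and only the latter may be forwarded. An illustrative config entry follows; the values are assumptions for demonstration, not taken from the project.

# Illustrative config entry; values are assumptions for demonstration.
evaluation = dict(
    interval=1,             # EvalHook option: evaluate every epoch -> popped
    save_best='hmean-iou',  # EvalHook option: best-checkpoint rule -> popped
    rule='greater',         # EvalHook option: comparison rule -> popped
    metric='hmean-iou',     # accepted by dataset.evaluate(); --eval overrides it
)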
Example #3
def pytorch2onnx(model: nn.Module,
                 model_type: str,
                 img_path: str,
                 verbose: bool = False,
                 show: bool = False,
                 opset_version: int = 11,
                 output_file: str = 'tmp.onnx',
                 verify: bool = False,
                 dynamic_export: bool = False,
                 device_id: int = 0):
    """Export PyTorch model to ONNX model and verify the outputs are same
    between PyTorch and ONNX.

    Args:
        model (nn.Module): PyTorch model we want to export.
        model_type (str): Model type, detection or recognition model.
        img_path (str): We need to use this input to execute the model.
        opset_version (int): The onnx op version. Default: 11.
        verbose (bool): Whether print the computation graph. Default: False.
        show (bool): Whether visialize final results. Default: False.
        output_file (string): The path to where we store the output ONNX model.
            Default: `tmp.onnx`.
        verify (bool): Whether compare the outputs between PyTorch and ONNX.
            Default: False.
        dynamic_export (bool): Whether apply dynamic export.
            Default: False.
        device_id (id): Device id to place model and data.
            Default: 0
    """
    device = torch.device(type='cuda', index=device_id)
    model.to(device).eval()
    _convert_batchnorm(model)

    # prepare inputs
    mm_inputs = _prepare_data(cfg=model.cfg, imgs=img_path)
    imgs = mm_inputs.pop('img')
    img_metas = mm_inputs.pop('img_metas')

    if isinstance(imgs, list):
        imgs = imgs[0]

    img_list = [img[None, :].to(device) for img in imgs]

    origin_forward = model.forward
    if model_type == 'det':
        model.forward = partial(model.simple_test,
                                img_metas=img_metas,
                                rescale=True)
    else:
        model.forward = partial(model.forward,
                                img_metas=img_metas,
                                return_loss=False,
                                rescale=True)

    # PyTorch 1.3 has bugs in some ONNX symbolic ops; work around them by
    # registering replacement symbolic functions
    register_extra_symbolics(opset_version)
    dynamic_axes = None
    if dynamic_export and model_type == 'det':
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'output': {
                0: 'batch',
                2: 'height',
                3: 'width'
            }
        }
    elif dynamic_export and model_type == 'recog':
        dynamic_axes = {
            'input': {
                0: 'batch',
                3: 'width'
            },
            'output': {
                0: 'batch',
                1: 'seq_len',
                2: 'num_classes'
            }
        }
    with torch.no_grad():
        torch.onnx.export(model, (img_list[0], ),
                          output_file,
                          input_names=['input'],
                          output_names=['output'],
                          export_params=True,
                          keep_initializers_as_inputs=False,
                          verbose=verbose,
                          opset_version=opset_version,
                          dynamic_axes=dynamic_axes)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        import onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        scale_factor = (0.5, 0.5) if model_type == 'det' else (1, 0.5)
        if dynamic_export:
            # scale image for dynamic shape test
            img_list = [
                nn.functional.interpolate(_, scale_factor=scale_factor)
                for _ in img_list
            ]
            if model_type == 'det':
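                # Note: scale_factor here is a 2-tuple, so `scale_factor * 2`
                # below repeats it into a 4-element (w, h, w, h) factor that
                # matches the scale_factor array in img_metas; it does not
                # double the values.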
                img_metas[0][0]['scale_factor'] = img_metas[0][0][
                    'scale_factor'] * (scale_factor * 2)

        # check the numerical value
        # get pytorch output
        with torch.no_grad():
            model.forward = origin_forward
            pytorch_out = model.simple_test(img_list[0],
                                            img_metas[0],
                                            rescale=True)

        # get onnx output
        if model_type == 'det':
            onnx_model = ONNXRuntimeDetector(output_file, model.cfg, device_id)
        else:
            onnx_model = ONNXRuntimeRecognizer(output_file, model.cfg,
                                               device_id)
        onnx_out = onnx_model.simple_test(img_list[0],
                                          img_metas[0],
                                          rescale=True)

        # compare results
        same_diff = 'same'
        if model_type == 'recog':
            for onnx_result, pytorch_result in zip(onnx_out, pytorch_out):
                if onnx_result['text'] != pytorch_result[
                        'text'] or not np.allclose(
                            np.array(onnx_result['score']),
                            np.array(pytorch_result['score']),
                            rtol=1e-4,
                            atol=1e-4):
                    same_diff = 'different'
                    break
        else:
            for onnx_result, pytorch_result in zip(
                    onnx_out[0]['boundary_result'],
                    pytorch_out[0]['boundary_result']):
                if not np.allclose(np.array(onnx_result),
                                   np.array(pytorch_result),
                                   rtol=1e-4,
                                   atol=1e-4):
                    same_diff = 'different'
                    break
        print('The outputs are {} between PyTorch and ONNX'.format(same_diff))

        if show:
            onnx_img = onnx_model.show_result(img_path,
                                              onnx_out[0],
                                              out_file='onnx.jpg',
                                              show=False)
            pytorch_img = model.show_result(img_path,
                                            pytorch_out[0],
                                            out_file='pytorch.jpg',
                                            show=False)
            if onnx_img is None:
                onnx_img = cv2.imread(img_path)
            if pytorch_img is None:
                pytorch_img = cv2.imread(img_path)

            cv2.imshow('PyTorch', pytorch_img)
            cv2.imshow('ONNXRuntime', onnx_img)
            cv2.waitKey()
    return
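
A minimal usage sketch for pytorch2onnx(). The config path, checkpoint path, and demo image below are placeholders, and the build_detector/load_checkpoint loading pattern is an assumption mirroring Example #4, not code from this project.

from mmcv import Config
from mmcv.runner import load_checkpoint
from mmocr.models import build_detector

cfg = Config.fromfile('path/to/recog_config.py')  # placeholder path
model = build_detector(cfg.model, None, None)
model.cfg = cfg  # pytorch2onnx reads model.cfg in _prepare_data()
load_checkpoint(model, 'path/to/checkpoint.pth', map_location='cpu')

# Requires a CUDA device: pytorch2onnx moves the model to cuda:device_id.
pytorch2onnx(model, model_type='recog', img_path='path/to/demo_image.jpg',
             output_file='recog.onnx', dynamic_export=True, verify=True)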
Example #4
def test_recognizer_wrapper():
    try:
        import onnxruntime as ort  # noqa: F401
        import tensorrt as trt
        from mmcv.tensorrt import (onnx2trt, save_trt_engine)
    except ImportError:
        pytest.skip('ONNXRuntime or TensorRT is not available.')

    cfg = dict(
        label_convertor=dict(
            type='CTCConvertor',
            dict_type='DICT36',
            with_unknown=False,
            lower=True),
        model=dict(
            type='CRNNNet',
            preprocessor=None,
            backbone=dict(
                type='VeryDeepVgg', leaky_relu=False, input_channels=1),
            encoder=None,
            decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=True),
            loss=dict(type='CTCLoss'),
            label_convertor=dict(
                type='CTCConvertor',
                dict_type='DICT36',
                with_unknown=False,
                lower=True),
            pretrained=None),
        train_cfg=None,
        test_cfg=None)

    cfg = mmcv.Config(cfg)

    pytorch_model = build_detector(cfg.model, None, None)

    # prepare data
    inputs = torch.rand(1, 1, 32, 32)
    img_metas = [{
        'img_shape': [1, 1, 32, 32],
        'ori_shape': [1, 1, 32, 32],
        'pad_shape': [1, 1, 32, 32],
        'filename': None,
        'scale_factor': np.array([1, 1, 1, 1])
    }]

    pytorch_model.forward = partial(
        pytorch_model.forward,
        img_metas=img_metas,
        return_loss=False,
        rescale=True)
    with tempfile.TemporaryDirectory() as tmpdirname:
        onnx_path = f'{tmpdirname}/tmp.onnx'
        with torch.no_grad():
            torch.onnx.export(
                pytorch_model,
                inputs,
                onnx_path,
                input_names=['input'],
                output_names=['output'],
                export_params=True,
                keep_initializers_as_inputs=False,
                verbose=False,
                opset_version=11)

        # TensorRT part
        def get_GiB(x: int):
            """return x GiB."""
            return x * (1 << 30)

        trt_path = onnx_path.replace('.onnx', '.trt')
        min_shape = [1, 1, 32, 32]
        max_shape = [1, 1, 32, 32]
        # create trt engine and wrapper
        opt_shape_dict = {'input': [min_shape, min_shape, max_shape]}
        max_workspace_size = get_GiB(1)
        trt_engine = onnx2trt(
            onnx_path,
            opt_shape_dict,
            log_level=trt.Logger.ERROR,
            fp16_mode=False,
            max_workspace_size=max_workspace_size)
        save_trt_engine(trt_engine, trt_path)
        print(f'Successfully created TensorRT engine: {trt_path}')

        wrap_onnx = ONNXRuntimeRecognizer(onnx_path, cfg, 0)
        wrap_trt = TensorRTRecognizer(trt_path, cfg, 0)

    assert isinstance(wrap_onnx, ONNXRuntimeRecognizer)
    assert isinstance(wrap_trt, TensorRTRecognizer)

    with torch.no_grad():
        onnx_outputs = wrap_onnx.simple_test(inputs, img_metas, rescale=False)
        trt_outputs = wrap_trt.simple_test(inputs, img_metas, rescale=False)

    assert isinstance(onnx_outputs[0], dict)
    assert isinstance(trt_outputs[0], dict)
    assert 'text' in onnx_outputs[0]
    assert 'text' in trt_outputs[0]
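
Because min_shape == max_shape above, the resulting engine is static. opt_shape_dict maps each input name to [min_shape, opt_shape, max_shape], so a dynamic-width engine for recognition could plausibly be configured as below; the concrete shapes are assumptions, and the ONNX model must have been exported with a dynamic width axis for this to work.

# Dynamic-width variant (illustrative shapes only).
min_shape = [1, 1, 32, 32]   # narrowest input the engine will accept
opt_shape = [1, 1, 32, 100]  # shape TensorRT optimizes for
max_shape = [1, 1, 32, 320]  # widest input the engine will accept
opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}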
Example #5
def onnx2tensorrt(onnx_file: str,
                  model_type: str,
                  trt_file: str,
                  config: dict,
                  input_config: dict,
                  fp16: bool = False,
                  verify: bool = False,
                  show: bool = False,
                  workspace_size: int = 1,
                  verbose: bool = False):
    import tensorrt as trt
    min_shape = input_config['min_shape']
    max_shape = input_config['max_shape']
    # create trt engine and wrapper
    opt_shape_dict = {'input': [min_shape, min_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_file,
        opt_shape_dict,
        log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
        fp16_mode=fp16,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')

    if verify:
        mm_inputs = _prepare_input_img(input_config['input_path'],
                                       config.data.test.pipeline)

        imgs = mm_inputs.pop('img')
        img_metas = mm_inputs.pop('img_metas')

        if isinstance(imgs, list):
            imgs = imgs[0]

        img_list = [img[None, :] for img in imgs]
        # update img_meta
        img_list, img_metas = _update_input_img(img_list, img_metas)

        # Get results from ONNXRuntime
        if model_type == 'det':
            onnx_model = ONNXRuntimeDetector(onnx_file, config, 0)
        else:
            onnx_model = ONNXRuntimeRecognizer(onnx_file, config, 0)
        onnx_out = onnx_model.simple_test(img_list[0],
                                          img_metas[0],
                                          rescale=True)

        # Get results from TensorRT
        if model_type == 'det':
            trt_model = TensorRTDetector(trt_file, config, 0)
        else:
            trt_model = TensorRTRecognizer(trt_file, config, 0)
        img_list[0] = img_list[0].to(torch.device('cuda:0'))
        trt_out = trt_model.simple_test(img_list[0],
                                        img_metas[0],
                                        rescale=True)

        # compare results
        same_diff = 'same'
        if model_type == 'recog':
            for onnx_result, trt_result in zip(onnx_out, trt_out):
                if onnx_result['text'] != trt_result['text'] or \
                     not np.allclose(
                            np.array(onnx_result['score']),
                            np.array(trt_result['score']),
                            rtol=1e-4,
                            atol=1e-4):
                    same_diff = 'different'
                    break
        else:
            for onnx_result, trt_result in zip(onnx_out[0]['boundary_result'],
                                               trt_out[0]['boundary_result']):
                if not np.allclose(np.array(onnx_result),
                                   np.array(trt_result),
                                   rtol=1e-4,
                                   atol=1e-4):
                    same_diff = 'different'
                    break
        print('The outputs are {} between TensorRT and ONNX'.format(same_diff))

        if show:
            onnx_img = onnx_model.show_result(input_config['input_path'],
                                              onnx_out[0],
                                              out_file='onnx.jpg',
                                              show=False)
            trt_img = trt_model.show_result(input_config['input_path'],
                                            trt_out[0],
                                            out_file='tensorrt.jpg',
                                            show=False)
            if onnx_img is None:
                onnx_img = cv2.imread(input_config['input_path'])
            if trt_img is None:
                trt_img = cv2.imread(input_config['input_path'])

            cv2.imshow('TensorRT', trt_img)
            cv2.imshow('ONNXRuntime', onnx_img)
            cv2.waitKey()
    return
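
A usage sketch for onnx2tensorrt(). The input_config keys (min_shape, max_shape, input_path) are exactly the ones the function reads; the concrete shapes, paths, and config file are placeholders.

from mmcv import Config

config = Config.fromfile('path/to/det_config.py')  # placeholder path
input_config = dict(
    min_shape=[1, 3, 256, 256],
    max_shape=[1, 3, 1024, 1824],
    input_path='path/to/demo_image.jpg',
)
onnx2tensorrt('model.onnx', 'det', 'model.trt', config, input_config,
              fp16=False, verify=True, workspace_size=1)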