Example #1
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    inference_cfg = cfg['inference']
    common_cfg = cfg.get('common')

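    # InferenceRunner is assumed to wire up the model, transforms and logger
    # from the 'inference' and 'common' config sections.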
    runner = InferenceRunner(inference_cfg, common_cfg)
    assert runner.use_gpu, 'Please use a valid GPU to export the model.'
    runner.load_checkpoint(args.checkpoint)
    model = runner.model

    shape = [int(s) for s in args.dummy_input_shape.split(',')]
    dummy_input = torch.randn(1, *shape)

    if args.dynamic_shape:
        print(f'Convert to Onnx with dynamic input shape and '
              f'opset version {args.opset_version}')
    else:
        print(f'Convert to Onnx with constant input shape '
              f'{args.dummy_input_shape} and '
              f'opset version {args.opset_version}')
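    # Trace the model with the dummy input and serialize the graph to ONNX.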
    torch2onnx(model,
               dummy_input,
               args.out,
               dynamic_shape=args.dynamic_shape,
               opset_version=args.opset_version,
               do_constant_folding=args.do_constant_folding,
               verbose=args.verbose)
    print(
        f'Converted successfully, saved onnx file: {os.path.abspath(args.out)}')
Example #2
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    if torch.cuda.is_available():
        device = torch.cuda.current_device()
    else:
        device = 'cpu'

    model = build_detector(cfg.model)
    load_weights(model, args.checkpoint)
    model.to(device)
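    # forward_impl is assumed to be the raw tensor-in/tensor-out forward;
    # swapping it in lets tracing bypass the train/test wrapper logic.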
    model.forward = model.forward_impl

    shape = [int(s) for s in args.dummy_input_shape.split(',')]
    dummy_input = torch.randn(1, *shape)

    if args.dynamic_shape:
        print(f'Convert to Onnx with dynamic input shape and '
              f'opset version {args.opset_version}')
    else:
        print(f'Convert to Onnx with constant input shape '
              f'{args.dummy_input_shape} and '
              f'opset version {args.opset_version}')
    torch2onnx(model,
               dummy_input,
               args.out,
               dynamic_shape=args.dynamic_shape,
               opset_version=args.opset_version,
               do_constant_folding=args.do_constant_folding,
               verbose=args.verbose)
    print(
        f'Converted successfully, saved onnx file: {os.path.abspath(args.out)}')
Example #3
def main():
    args = parse_args()
    out_name = args.out

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    inference_cfg = cfg['inference']
    common_cfg = cfg.get('common')

    runner = InferenceRunner(inference_cfg, common_cfg)
    assert runner.use_gpu, 'Please use a valid GPU to export the model.'
    runner.load_checkpoint(args.checkpoint)

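    # Run a real image through the inference transform so the dummy input
    # matches the shape and normalization the deployed model will see.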
    image = cv2.imread(args.image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    h, w, c = image.shape
    dummy_mask = np.zeros((h, w))
    image = runner.transform(image=image, masks=[dummy_mask])['image']

    dummy_input = image.unsqueeze(0).cuda()
    model = runner.model.cuda().eval()

    if args.onnx:
        runner.logger.info('Convert to onnx model')
        torch2onnx(model, dummy_input, out_name)
    else:
        max_batch_size = args.max_batch_size
        max_workspace_size = args.max_workspace_size
        fp16_mode = args.fp16
        int8_mode = args.int8
        int8_calibrator = None
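        # INT8 quantization needs a calibrator that feeds representative
        # images to TensorRT while it collects activation ranges.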
        if int8_mode:
            runner.logger.info('Convert to trt engine with int8')
            if args.calibration_images:
                runner.logger.info(
                    'Use calibration with mode {} and data {}'.format(
                        args.calibration_mode, args.calibration_images))
                dataset = CalibDataset(args.calibration_images,
                                       runner.transform)
                int8_calibrator = CALIBRATORS[args.calibration_mode](
                    dataset=dataset)
            else:
                runner.logger.info('Use default calibration mode and data')
        elif fp16_mode:
            runner.logger.info('Convert to trt engine with fp16')
        else:
            runner.logger.info('Convert to trt engine with fp32')
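        # Build and optimize the TensorRT engine from the traced model.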
        trt_model = torch2trt(model,
                              dummy_input,
                              max_batch_size=max_batch_size,
                              max_workspace_size=max_workspace_size,
                              fp16_mode=fp16_mode,
                              int8_mode=int8_mode,
                              int8_calibrator=int8_calibrator)
        save(trt_model, out_name)
    runner.logger.info(
        'Converted successfully, saved model to {}'.format(out_name))
Example #4
def main():
    args = parse_args()
    out_name = args.out

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')

    runner = InferenceRunner(deploy_cfg, common_cfg)
    assert runner.use_gpu, 'Please use a valid GPU to export the model.'
    runner.load_checkpoint(args.checkpoint)

    image = cv2.imread(args.image)

    aug = runner.transform(image=image, label='')
    image, label = aug['image'], aug['label']
    image = image.unsqueeze(0).cuda()
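    # Recognition models that need text take an encoded label as a second
    # input; pass an encoded empty string alongside the image.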
    dummy_input = (image, runner.converter.test_encode(['']))
    model = runner.model.cuda().eval()
    need_text = runner.need_text
    if not need_text:
        dummy_input = dummy_input[0]

    if args.onnx:
        runner.logger.info('Convert to onnx model')
        torch2onnx(model, dummy_input, out_name)
    else:
        max_batch_size = args.max_batch_size
        max_workspace_size = args.max_workspace_size
        fp16_mode = args.fp16
        int8_mode = args.int8
        int8_calibrator = None
        if int8_mode:
            runner.logger.info('Convert to trt engine with int8')
            if args.calibration_images:
                runner.logger.info('Use calibration with mode {} and data {}'
                                   .format(args.calibration_mode,
                                           args.calibration_images))
                dataset = CalibDataset(args.calibration_images, runner.converter,
                                       runner.transform, need_text)
                int8_calibrator = CALIBRATORS[args.calibration_mode](
                    dataset=dataset)
            else:
                runner.logger.info('Use default calibration mode and data')
        elif fp16_mode:
            runner.logger.info('Convert to trt engine with fp16')
        else:
            runner.logger.info('Convert to trt engine with fp32')
        trt_model = torch2trt(
            model, dummy_input, max_batch_size=max_batch_size,
            max_workspace_size=max_workspace_size, fp16_mode=fp16_mode,
            int8_mode=int8_mode, int8_calibrator=int8_calibrator)
        save(trt_model, out_name)
    runner.logger.info(
        'Converted successfully, saved model to {}'.format(out_name))
Example #5
def main():
    args = parse_args()

    cfg_path = args.config
    cfg = Config.fromfile(cfg_path)

    deploy_cfg = cfg['deploy']
    common_cfg = cfg.get('common')
    if torch.cuda.is_available():
        device = torch.cuda.current_device()
        deploy_cfg['gpu_id'] = str(device)
    else:
        raise AssertionError('Please use a GPU to export the model.')

    runner = InferenceRunner(deploy_cfg, common_cfg)
    runner.load_checkpoint(args.checkpoint)

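    # Fabricate a random uint8 image of the requested shape; pixel content
    # is irrelevant for tracing, only the shape matters.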
    C, H, W = [int(s.strip()) for s in args.dummy_input_shape.split(',')]
    dummy_image = np.random.randint(0, 256, (H, W, C), dtype=np.uint8)

    aug = runner.transform(image=dummy_image, label='')
    image, label = aug['image'], aug['label']
    image = image.unsqueeze(0).cuda()
    dummy_input = (image, runner.converter.test_encode(['']))
    model = runner.model.cuda().eval()
    need_text = runner.need_text
    if not need_text:
        dummy_input = dummy_input[0]

    if args.dynamic_shape:
        print(f'Convert to Onnx with dynamic input shape and '
              f'opset version {args.opset_version}')
    else:
        print(f'Convert to Onnx with constant input shape'
              f' {args.dummy_input_shape} and opset version '
              f'{args.opset_version}')

    torch2onnx(
        model,
        dummy_input,
        args.out,
        verbose=args.verbose,
        dynamic_shape=args.dynamic_shape,
        opset_version=args.opset_version,
        do_constant_folding=args.do_constant_folding,
    )

    runner.logger.info(
        f'Converted successfully, saved onnx file: {os.path.abspath(args.out)}')
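
Note: each example above calls a parse_args() helper that is not shown. Below is a minimal sketch of what such a parser could look like for the ONNX-export variants (Examples #1, #2 and #5). The flag names are inferred from the args.* attribute accesses in the code above; the defaults and help strings are assumptions, so the real scripts may differ.

import argparse


def parse_args():
    # Hypothetical reconstruction: flag names mirror the args.* attributes
    # used above (argparse maps '--dummy-input-shape' to
    # args.dummy_input_shape); defaults are placeholders, not the originals.
    parser = argparse.ArgumentParser(description='Export a model to ONNX')
    parser.add_argument('config', help='config file path')
    parser.add_argument('checkpoint', help='checkpoint file path')
    parser.add_argument('out', help='output ONNX file path')
    parser.add_argument('--dummy-input-shape', default='3,224,224',
                        help='comma-separated C,H,W of the dummy input')
    parser.add_argument('--dynamic-shape', action='store_true',
                        help='export with dynamic input shape')
    parser.add_argument('--opset-version', type=int, default=11,
                        help='ONNX opset version to export with')
    parser.add_argument('--do-constant-folding', action='store_true',
                        help='fold constant subgraphs during export')
    parser.add_argument('--verbose', action='store_true',
                        help='print the exported graph')
    return parser.parse_args()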