Example #1
    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 show_score=False):
        # Initialize the single-stage detector first, then the mixin that
        # adds score visualization.
        SingleStageTextDetector.__init__(self, backbone, neck, bbox_head,
                                         train_cfg, test_cfg, pretrained)
        TextDetectorMixin.__init__(self, show_score)
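
Both parents are initialized explicitly rather than via super(), so each base class receives exactly the arguments it expects. A minimal, self-contained sketch of this pattern with hypothetical stand-in classes (not the mmocr API):

class BaseDetector:
    def __init__(self, backbone, neck):
        self.backbone = backbone
        self.neck = neck

class ScoreMixin:
    def __init__(self, show_score):
        self.show_score = show_score

class MyDetector(ScoreMixin, BaseDetector):
    def __init__(self, backbone, neck, show_score=False):
        # Call each base __init__ explicitly, as in Example #1.
        BaseDetector.__init__(self, backbone, neck)
        ScoreMixin.__init__(self, show_score)

det = MyDetector('resnet', 'fpn', show_score=True)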
Example #2
    def __init__(self,
                 onnx_file: str,
                 cfg: Any,
                 device_id: int,
                 show_score: bool = False):
        SingleStageTextDetector.__init__(self, cfg.model.backbone,
                                         cfg.model.neck, cfg.model.bbox_head,
                                         cfg.model.train_cfg,
                                         cfg.model.test_cfg,
                                         cfg.model.pretrained)
        TextDetectorMixin.__init__(self, show_score)
        import os.path as osp
        import warnings

        import onnxruntime as ort

        # Get the path of the custom-op library shipped with mmcv, if any.
        ort_custom_op_path = ''
        try:
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with ONNXRuntime '
                          'from source.')
        session_options = ort.SessionOptions()
        # register custom op for onnxruntime
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = ort.get_device() == 'GPU'
        if is_cuda_available:
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})

        sess.set_providers(providers, options)

        self.sess = sess
        self.device_id = device_id
        self.io_binding = sess.io_binding()
        self.output_names = [out.name for out in sess.get_outputs()]
        for name in self.output_names:
            self.io_binding.bind_output(name)
        self.cfg = cfg
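
A hedged sketch of how such a session might be driven after construction; the model path 'model.onnx', the input name 'input', and the input shape are assumptions, not values taken from the original code:

import numpy as np
import onnxruntime as ort

# Assumed model path and input name; replace with the real export.
sess = ort.InferenceSession('model.onnx',
                            providers=['CPUExecutionProvider'])
io_binding = sess.io_binding()

# Bind a dummy CPU input tensor (the shape is illustrative only).
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
io_binding.bind_cpu_input('input', dummy)

# Bind every output by name, as the constructor above does.
for out in sess.get_outputs():
    io_binding.bind_output(out.name)

# Run with the I/O binding and copy the results back to CPU.
sess.run_with_iobinding(io_binding)
outputs = io_binding.copy_outputs_to_cpu()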
Example #3
    def __init__(self,
                 trt_file: str,
                 cfg: Any,
                 device_id: int,
                 show_score: bool = False):
        # Drop the registry 'type' key so the config can be unpacked
        # directly into the constructor.
        if 'type' in cfg.model:
            cfg.model.pop('type')
        SingleStageTextDetector.__init__(self, **(cfg.model))
        TextDetectorMixin.__init__(self, show_score)
        import warnings

        from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin

        # Load mmcv's TensorRT plugin library so custom ops resolve.
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with TensorRT '
                          'from source.')
        model = TRTWrapper(trt_file,
                           input_names=['input'],
                           output_names=['output'])

        self.model = model
        self.device_id = device_id
        self.cfg = cfg
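
A hedged sketch of calling a wrapper built this way, assuming mmcv's TRTWrapper dict-in/dict-out calling convention; the engine path 'detector.trt' and the input shape are placeholders, since both are fixed at engine export time:

import torch
from mmcv.tensorrt import TRTWrapper

# Placeholder engine path; names match the constructor above.
model = TRTWrapper('detector.trt',
                   input_names=['input'],
                   output_names=['output'])

# Illustrative input shape; must match the exported engine.
dummy = torch.randn(1, 3, 640, 640).cuda()
with torch.no_grad():
    trt_outputs = model({'input': dummy})
pred = trt_outputs['output']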
Example #4
    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 text_repr_type='quad',
                 show_score=False,
                 init_cfg=None):
        # Here the mixin is initialized first, then the Mask R-CNN base.
        TextDetectorMixin.__init__(self, show_score)
        MaskRCNN.__init__(self,
                          backbone=backbone,
                          neck=neck,
                          rpn_head=rpn_head,
                          roi_head=roi_head,
                          train_cfg=train_cfg,
                          test_cfg=test_cfg,
                          pretrained=pretrained,
                          init_cfg=init_cfg)
        # Text regions may be represented as quadrilaterals or polygons.
        assert text_repr_type in ['quad', 'poly']
        self.text_repr_type = text_repr_type
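
The assert restricts text regions to 4-point quadrilaterals ('quad') or arbitrary polygons ('poly'). A small, assertion-free equivalent of that check (the error message is my own wording, not mmocr code):

def validate_text_repr_type(text_repr_type):
    # 'quad' = 4-point quadrilaterals, 'poly' = arbitrary polygons.
    # Raising ValueError keeps the check active under `python -O`,
    # where plain asserts are stripped.
    if text_repr_type not in ('quad', 'poly'):
        raise ValueError(
            f"text_repr_type must be 'quad' or 'poly', "
            f"got {text_repr_type!r}")
    return text_repr_type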