Example no. 1
    def __init__(self,
                 ann_file,
                 pipeline,
                 pre_pipeline,
                 dicm2png_cfg,
                 data_root=None,
                 image_path='/cluster/home/it_stu167/wwj/adrenal/x/',
                 label_path='/cluster/home/it_stu167/wwj/adrenal/y/',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 ratio=0.5):
        self.data_path = data_root
        self.classes = ['__background__', 'anomoly', 'adrenal']
        self.num_classes = len(self.classes)
        self.load_annotations(ann_file)
        self.img_ids = [a['filename'] for a in self.ann]
        self.cat_ids = self.classes
        self.cfg = Config(dicm2png_cfg)
        self.pipeline = Compose(pipeline)
        self.pre_pipeline = Compose(pre_pipeline)
        self.img_path = image_path
        self.seg_prefix = seg_prefix
        self.ratio = ratio
        self.label_path = label_path
        self.proposals = None
        if proposal_file is not None:
            self.proposals = None
        self.slice_num = 3
        # self.slice_num = self.cfg.NUM_SLICES # 3
        self.is_train = not test_mode
Example no. 2
    def __init__(self,
                 ann_file,
                 pipeline,
                 pre_pipeline,
                 dicm2png_cfg,
                 data_root=None,
                 image_path='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False):
        self.data_path = data_root
        self.classes = ['__background__', 'lesion']
        self.num_classes = len(self.classes)
        self.load_annotations(ann_file)
        self.img_ids = [a['filename'] for a in self.ann]
        self.cat_ids = self.classes
        # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index()
        # self.num_images = len(self.image_fn_list)
        self.cfg = Config(dicm2png_cfg)
        self.pipeline = Compose(pipeline)
        self.pre_pipeline = Compose(pre_pipeline)
        self.img_path = image_path
        self.seg_prefix = seg_prefix
        self.proposals = None
        if proposal_file is not None:
            self.proposals = None
        self.slice_num = self.cfg.NUM_SLICES
        self.is_train = not test_mode

        if self.is_train:
            self._set_group_flag()
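
The constructor above calls _set_group_flag() in training mode. A minimal sketch of what that hook typically does, modeled on mmdet's CustomDataset._set_group_flag (the self.img_infos records with 'width'/'height' keys are an assumption about the parent class):

import numpy as np

def _set_group_flag(self):
    # Bucket images by aspect ratio so the GroupSampler can batch
    # landscape (flag 1) and portrait/square (flag 0) images separately.
    self.flag = np.zeros(len(self), dtype=np.uint8)
    for i in range(len(self)):
        img_info = self.img_infos[i]
        if img_info['width'] / img_info['height'] > 1:
            self.flag[i] = 1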
Example no. 3
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.

    Returns:
        The detection results: one batched result if ``img`` is a list,
        otherwise the result for the single image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    if isinstance(img, list):
        batch = []
        batch_meta = []
        for img_ in img:
            test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
            test_pipeline = Compose(test_pipeline)
            # prepare data
            data = dict(img=img_)
            data = test_pipeline(data)
            data = scatter(collate([data], samples_per_gpu=1), [device])[0]
            batch.append(data['img'][0])
            batch_meta.append(data['img_meta'][0][0])

        batch = torch.cat(batch)
        with torch.no_grad():
            result = model(return_loss=False,
                           rescale=True,
                           img=[batch],
                           img_meta=[batch_meta])
        return result

    else:
        test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
        test_pipeline = Compose(test_pipeline)
        # prepare data
        data = dict(img=img)
        data = test_pipeline(data)
        data = scatter(collate([data], samples_per_gpu=1), [device])[0]
        # forward the model
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        return result
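
A minimal usage sketch for the batched branch above, assuming the standard mmdet loader; the config and checkpoint paths are placeholders:

from mmdet.apis import init_detector

model = init_detector('configs/faster_rcnn_r50_fpn_1x.py',
                      'checkpoints/faster_rcnn_r50.pth', device='cuda:0')
# Passing a list of images takes the batched path above and returns one
# batched result; a single image takes the else branch.
result = inference_detector(model, ['demo/a.jpg', 'demo/b.jpg'])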
Example no. 4
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The detection result for the image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if mmdet.CPU_ONLY:
        # just get the actual data from DataContainer
        data['img_meta'] = data['img_meta'][0].data
    else:
        # scatter to multiple GPUs
        data = scatter(data, [device])[0]

    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
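
mmdet.CPU_ONLY is not an upstream mmdet attribute; a plausible sketch of how such a project-specific flag could be set before calling the function above:

import torch
import mmdet

# Assumed convention: fall back to the CPU-only path when CUDA is absent.
mmdet.CPU_ONLY = not torch.cuda.is_available()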
Example no. 5
def inference_detector(cfg, model, img):
    """Inference image(s) with the detector.

    Args:
        cfg (mmcv.Config): The test config.
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The detection result for the image.
    """
    # build the data pipeline
    # We don't want to crop bottom
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[2:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)

    return result
Example no. 6
    def __init__(self,
                 ann_file,
                 pipeline,
                 load_and_dump_config_name: str = 'load_and_dump_config',
                 composer_config_name: str = 'composer_config',
                 generated_objects_fields: Tuple[str, str] = ('bboxes',
                                                              'labels'),
                 test_mode=False):
        self._load_config_filename = ann_file
        self._test_mode = test_mode
        self._load_and_dump_config_name = load_and_dump_config_name
        self._composer_config_name = composer_config_name
        self._pipeline = Compose(pipeline)
        self._categories_dict = {}
        self._generated_objects_fields = generated_objects_fields
        self._generated_objects_default_field = (
            self._generated_objects_fields == ('bboxes', 'labels'))
        self._trassir_composer: TrassirComposer = self.load_trassir_composer(
            self._load_config_filename)

        if not self._test_mode:
            self._set_group_flag()

        # Needed for the COCO wrapper (CocoMapEval)
        self._coco = None
        self._img_ids = None
        self._cat_ids = None
Example no. 7
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The detection result for the image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)

    # scatter expects an integer GPU id; extract it from the torch.device
    device = int(str(device).split(":")[-1])
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
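
The string parsing above recovers the integer GPU id that scatter expects; torch.device exposes the same value directly through its index attribute, as this sketch shows:

import torch

device = torch.device('cuda:0')
# Equivalent to int(str(device).split(':')[-1]) for CUDA devices.
assert device.index == 0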
Example no. 8
    def backbone(self, images, **kwargs):
        r"""Returns list of backbone features and transformed images as well as meta info.
        """
        from mmdet.apis.inference import inference_detector, LoadImage
        from mmdet.datasets.pipelines import Compose
        from mmcv.parallel import collate, scatter
        model = self.module
        cfg = model.cfg
        device = next(model.parameters()).device
        test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
        test_pipeline = Compose(test_pipeline)
        results = []
        for img in images:
            data = dict(img=img)
            data = test_pipeline(data)
            data = scatter(collate([data], samples_per_gpu=1), [device])[0]
            img = data['img'][0]
            img_meta = data['img_meta'][0]
            data['img'] = img
            data['img_meta'] = img_meta
            data['feats'] = model.extract_feat(img)
            results.append(data)
            #print(img.shape, img_meta)

        #return model.backbone(images.tensors), images, original_image_sizes
        return results
Example no. 9
async def async_inference_detector(model, img):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str | ndarray): Either image files or loaded images.

    Returns:
        Awaitable detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # prepare data
    if isinstance(img, np.ndarray):
        # directly add img
        data = dict(img=img)
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    else:
        # add information into dict
        data = dict(img_info=dict(filename=img), img_prefix=None)
    # build the data pipeline
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    result = await model.aforward_test(rescale=True, **data)
    return result
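
A hedged usage sketch for the coroutine above; model stands for a detector loaded elsewhere (e.g. with init_detector) and the image path is a placeholder:

import asyncio

# asyncio.run drives the coroutine to completion from synchronous code.
result = asyncio.run(async_inference_detector(model, 'demo/demo.jpg'))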
Example no. 10
class CutConfig(object):
    # process module
    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='CutROI', training=False),
        dict(type='CutImage', training=False, window=(1000, 1000), step=(500, 500), order_index=False,
             is_keep_none=True)
    ]
    compose = Compose(train_pipeline)

    # data module
    img_dir = "/home/lifeng/undone-work/DefectNet/tools/data/tile/raw/tile_round1_testA_20201231/testA_imgs"
    test_file = "/home/lifeng/undone-work/dataset/detection/tile/annotations/instance_testA.json"
    save_file = "/home/lifeng/undone-work/DetCompetition/mmdet-v2/work_dirs/tile/baseline_cut_1000x1000/do_submit_testA.json"
    original_coco = COCO(test_file)
    label2name = {x['id']: x['name'] for x in original_coco.dataset['categories']}
    main_thread_lock = threading.Lock()
    save_results = []
    num_workers = 7
    process_cnt = 0

    # inference module
    device = 'cuda:0'
    config_file = '/home/lifeng/undone-work/DefectNet/configs/tile/baseline_model_2000x2000.py'
    checkpoint_file = '/data/liphone/detcomp/mmdet-v2/tile/baseline_cut_1000x1000/epoch_12.pth'
    model = init_detector(config_file, checkpoint_file, device=device)
Example no. 11
    def __init__(self, datadir, pipeline, stage='train', repeat=1):
        self.images = get_spec_files(datadir, ext=IMG_EXT, oswalk=True)
        if stage == 'train':
            self.images = self.images * repeat
        self.pipeline = Compose(pipeline)
        self.flag = np.zeros(len(self), 'u1')
        self.stage = stage
Example no. 12
def inference_recognizer(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.

    Returns:
        If imgs is a str, a generator will be returned, otherwise return the
        detection results directly.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    mask_h, mask_w = cfg.data.test.pipeline[1]['img_scale']
    text_max_len = cfg.text_max_len + 2
    data = dict(img=img,
                target_variable=torch.zeros(text_max_len, 1, dtype=torch.long),
                mask=np.zeros((1, mask_h, mask_w)))
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device.index])[0]
    # forward the model
    with torch.no_grad():
        pre, score = model(return_loss=False, rescale=True, **data)
    return pre, score
Example no. 13
def inference_detector2(model, img_path):
    """Preprocess ``img_path`` and return the image tensor and its metas."""
    cfg = model.cfg
    device = next(model.parameters()).device  # model device

    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img_path)
    data = test_pipeline(data)

    data = collate([data], samples_per_gpu=1)

    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        # Use torchvision ops for CPU mode instead
        for m in model.modules():
            if isinstance(m, (RoIPool, RoIAlign)):
                if not m.aligned:
                    # aligned=False is not implemented on CPU
                    # set use_torchvision on-the-fly
                    m.use_torchvision = True
        warnings.warn('We set use_torchvision=True in CPU mode.')
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data
    imgs = data['img'][0]
    img_metas = data['img_metas'][0]
    return imgs, img_metas
Example no. 14
def inference_detector(model, img, cfg, device):
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)

    device = torch.device(device)

    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)

    tensor = data['img'][0].unsqueeze(0).to(device)
    img_metas = data['img_metas']
    scale_factor = img_metas[0].data['scale_factor']
    scale_factor = torch.tensor(scale_factor,
                                dtype=torch.float32,
                                device=device)

    with torch.no_grad():
        result = model(tensor)
        result = list(result)
        result[1] = result[1] / scale_factor

    return result
Example no. 15
def get_fake_input(cfg, orig_img_shape=(128, 128, 3), device='cuda'):
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=np.zeros(orig_img_shape, dtype=np.uint8))
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    return data
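
A hedged usage sketch: a dummy batch like this is typically fed to tracing or export utilities; cfg is assumed to come from a loaded model and the shape is a placeholder:

data = get_fake_input(cfg, orig_img_shape=(800, 800, 3), device='cuda')
fake_img = data['img'][0]  # tensor shaped like the pipeline's real input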
Example no. 16
def export_onnx(model, img, onnx_save_path, verbose=False, output_names=None):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.

    Returns:
        If imgs is a str, a generator will be returned, otherwise return the
        detection results directly.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]

    if hasattr(model, 'forward_export'):
        model.forward = model.forward_export
    else:
        raise NotImplementedError(
            'Export to ONNX is currently not supported with {}'.format(
                model.__class__.__name__))

    torch.onnx.export(
        model,  # model being run
        data['img'][0],  # model input (or a tuple for multiple inputs)
        onnx_save_path,  # where to save the model (can be a file or file-like object)
        verbose=verbose,
        output_names=output_names)
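
A hedged usage sketch for the exporter above; the paths and output names are placeholders, and the model is assumed to implement forward_export:

export_onnx(model, 'demo/demo.jpg', 'detector.onnx',
            verbose=True, output_names=['dets', 'labels'])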
Example no. 17
    def infer(self, img):
        '''
        Args:
            img: image path or a loaded image array

        Returns:
            list: per-class boxes kept after score thresholding
        '''

        test_pipeline = [LoadImage()] + self.config.test_pipeline
        test_pipeline = Compose(test_pipeline)
        # prepare data
        data = dict(img=img)
        data = test_pipeline(data)
        data = scatter(collate([data], samples_per_gpu=1), [torch.device(self.device)])[0]
        # forward the model
        with torch.no_grad():
            result = self.deviceLocator(return_loss=False, rescale=True, **data)

        # filter boxes according to the per-class score thresholds
        filterResult = []
        for cls, bbs in enumerate(result[0]):  # result: tuple (bboxes, segms)
            thr = self.clsThr[cls]
            filterResult.append(bbs[bbs[:, -1] >= thr])

        return filterResult
Example no. 18
def inference_detector_feats(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The outputs of ``model.simple_test`` for the image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    img_metas = [i.data for i in data['img_metas'][0]]
    imgs = data['img']
    # forward the model
    with torch.no_grad():
        # return  model.aug_test_featmap( imgs, img_metas,rescale=True)
        return model.simple_test(imgs, img_metas, rescale=True)
Example no. 19
def text_model_inference(model, input_sentence):
    """Inference text(s) with the entity recognizer.

    Args:
        model (nn.Module): The loaded recognizer.
        input_sentence (str): A text entered by the user.

    Returns:
        result (dict): Predicted results.
    """

    assert isinstance(input_sentence, str)

    cfg = model.cfg
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = {'text': input_sentence, 'label': {}}

    # build the data pipeline
    data = test_pipeline(data)
    if isinstance(data['img_metas'], dict):
        img_metas = data['img_metas']
    else:
        img_metas = data['img_metas'].data

    assert isinstance(img_metas, dict)
    img_metas = {
        'input_ids': img_metas['input_ids'].unsqueeze(0),
        'attention_masks': img_metas['attention_masks'].unsqueeze(0),
        'token_type_ids': img_metas['token_type_ids'].unsqueeze(0),
        'labels': img_metas['labels'].unsqueeze(0)
    }
    # forward the model
    with torch.no_grad():
        result = model(None, img_metas, return_loss=False)
    return result
Example no. 20
    def __init__(self,
                 transforms,
                 img_scale,
                 pts_scale_ratio,
                 flip=False,
                 flip_direction='horizontal',
                 pcd_horizontal_flip=False,
                 pcd_vertical_flip=False):
        self.transforms = Compose(transforms)
        self.img_scale = img_scale if isinstance(img_scale,
                                                 list) else [img_scale]
        self.pts_scale_ratio = pts_scale_ratio \
            if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]

        assert mmcv.is_list_of(self.img_scale, tuple)
        assert mmcv.is_list_of(self.pts_scale_ratio, float)

        self.flip = flip
        self.pcd_horizontal_flip = pcd_horizontal_flip
        self.pcd_vertical_flip = pcd_vertical_flip

        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip and not any([(t['type'] == 'RandomFlip3D'
                                    or t['type'] == 'RandomFlip')
                                   for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')
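
A hedged config sketch for the test-time-augmentation wrapper above, following the mmdet3d MultiScaleFlipAug3D convention; the inner transforms are placeholders:

test_time_aug = dict(
    type='MultiScaleFlipAug3D',
    img_scale=(1333, 800),
    pts_scale_ratio=1.0,
    flip=False,
    transforms=[
        dict(type='Resize', keep_ratio=True),
        dict(type='RandomFlip3D'),
    ])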
Example no. 21
def get_data(img, cfg, device):
    # import ipdb; ipdb.set_trace()
    test_pipeline = Compose([LoadImage()] + cfg.test_pipeline[1:])
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    return data
Example no. 22
def get_img_meta(model, idx, dataset_train):
    cfg = model.cfg

    # get gt_ann
    #dataset_train = build_dataset(cfg.data.train)
    ann_info = dataset_train.get_ann_info(idx)
    img_path = '{}{}'.format(cfg.data.train.img_prefix,
                             ann_info['seg_map'].replace('png', 'jpg'))
    gt_bboxes = ann_info['bboxes']
    gt_labels = ann_info['labels']
    gt_segms = ann_info['masks']

    # get img_meta
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img_path)
    data = test_pipeline(data)
    # dict_keys(['img_meta', 'img'])
    img_metas = data['img_meta'][0].data
    # dict_keys(['filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg'])
    img_tensor = data['img'][0].data

    # get FPN feat_size
    c, h, w = img_tensor.shape
    input_img_feat = img_tensor.view(1, c, h, w)
    device = input_img_feat.device
    fpn_feats = model.extract_feat(input_img_feat)

    return img_path, fpn_feats, gt_bboxes, gt_labels, img_metas, gt_segms
Example no. 23
async def async_inference_detector(model, img):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        Awaitable detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    result = await model.aforward_test(rescale=True, **data)
    return result
Example no. 24
def inference_detector(model, img, cfg, device):
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)

    device = torch.device(device)

    if isinstance(img, np.ndarray):
        # directly add img
        data = dict(img=img)
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    else:
        # add information into dict
        data = dict(img_info=dict(filename=img), img_prefix=None)

    test_pipeline = cfg.data.test.pipeline
    test_pipeline = Compose(test_pipeline)

    # prepare data
    data = test_pipeline(data)

    tensor = data['img'][0].unsqueeze(0).to(device)
    img_metas = data['img_metas']
    scale_factor = img_metas[0].data['scale_factor']
    scale_factor = torch.tensor(scale_factor,
                                dtype=torch.float32,
                                device=device)

    with torch.no_grad():
        result = model(tensor)
        result = list(result)
        result[1] = result[1] / scale_factor

    return result
Example no. 25
    def load_transformed_gt_info(self):
        CLASSES = self.CLASSES
        img_infos = self.img_infos
        transforms = Compose([
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='Resize', img_scale=[self._resize], keep_ratio=False)
        ])
        gt_boxes_all = dict()
        for idx in range(len(img_infos)):
            img_info = img_infos[idx]
            ann_info = self.get_ann_info(idx)
            results = dict(img_info=img_info, ann_info=ann_info)
            if self.proposals is not None:
                results['proposals'] = self.proposals[idx]
            self.pre_pipeline(results)
            results = transforms(results)
            gt_boxes = results['gt_bboxes']
            labels = ann_info['labels']
            assert len(gt_boxes) == len(labels)
            for i, label in enumerate(labels):
                w = gt_boxes[i][2] - gt_boxes[i][0]
                h = gt_boxes[i][3] - gt_boxes[i][1]
                classname = CLASSES[label - 1]
                if classname not in gt_boxes_all:
                    gt_boxes_all[classname] = []
                gt_boxes_all[classname].append([w, h])

        return gt_boxes_all
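
A hedged usage sketch: the per-class width/height lists returned above can be aggregated, e.g. to inspect scale statistics before choosing anchors (dataset is assumed to be an instance of the class):

import numpy as np

gt_boxes_all = dataset.load_transformed_gt_info()
for classname, whs in gt_boxes_all.items():
    whs = np.array(whs)
    print(classname, 'mean w/h:', whs.mean(axis=0))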
Example no. 26
def get_Image_ready(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The preprocessed data dict, ready to be fed to the model.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        # Use torchvision ops for CPU mode instead
        for m in model.modules():
            if isinstance(m, (RoIPool, RoIAlign)):
                if not m.aligned:
                    # aligned=False is not implemented on CPU
                    # set use_torchvision on-the-fly
                    m.use_torchvision = True
        warnings.warn('We set use_torchvision=True in CPU mode.')
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data
    return data
Example no. 27
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The detection result for the image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # print(data['img'].size())
    # x=data
    # for index,layer in model._modules.items():
    #     x=layer(x)
    #     if (index == 5):
    #         print(x)
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
Example no. 28
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file or a loaded image.

    Returns:
        The detection result for the image.
    """
    cfg = model.cfg
    data = dict(img_info=dict(filename=img), img_prefix=None)
    # build the data pipeline
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        data['img'][0] = data['img'][0].cuda()
        data['img_metas'] = data['img_metas'][0].data
    else:
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data

    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)[0]
    return result
Example no. 29
def prefetch_img_metas(cfg, ori_wh):
    w, h = ori_wh
    cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = {'img': np.zeros((h, w, 3), dtype=np.uint8)}
    data = test_pipeline(data)
    img_metas = data['img_metas'][0].data
    return img_metas
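
A hedged usage sketch: with a fixed camera resolution the metas can be computed once and reused for every frame; model is assumed to be a loaded detector:

import cv2

cap = cv2.VideoCapture(0)
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
img_metas = prefetch_img_metas(model.cfg, (w, h))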
Example no. 30
def ImagePreprocess(image, cfg):
    data = dict(img=image)
    cfg = cfg.copy()
    # set loading pipeline type
    cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data['img'] = torch.stack(data['img'], dim=0)
    return data
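
A hedged usage sketch; the image path is a placeholder and model is assumed loaded:

import mmcv

image = mmcv.imread('demo/demo.jpg')
data = ImagePreprocess(image, model.cfg)
print(data['img'].shape)  # e.g. torch.Size([1, 3, H, W])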