Example #1
from mmcv.parallel import collate, scatter
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def get_data(img, cfg, device):
    # Build the test-time pipeline, replacing the dataset-specific loading step with LoadImage.
    test_pipeline = Compose([LoadImage()] + cfg.test_pipeline[1:])
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    return data
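A minimal usage sketch for the helper above, assuming a detector built with mmdet's init_detector, placeholder config/checkpoint paths, and a config that exposes a top-level test_pipeline key:

from mmdet.apis import init_detector

# Placeholder paths; substitute your own config and checkpoint.
model = init_detector('configs/example.py', 'checkpoints/example.pth', device='cuda:0')
data = get_data('demo.jpg', model.cfg, next(model.parameters()).device)
# data['img'][0] is the preprocessed, batched image tensor ready for the model.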
Example #2
    def backbone(self, images, **kwargs):
        r"""Returns list of backbone features and transformed images as well as meta info.
        """
        from mmdet.apis.inference import inference_detector, LoadImage
        from mmdet.datasets.pipelines import Compose
        from mmcv.parallel import collate, scatter
        model = self.module
        cfg = model.cfg
        device = next(model.parameters()).device
        test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
        test_pipeline = Compose(test_pipeline)
        results = []
        for img in images:
            data = dict(img=img)
            data = test_pipeline(data)
            data = scatter(collate([data], samples_per_gpu=1), [device])[0]
            img = data['img'][0]
            img_meta = data['img_meta'][0]
            # Unwrap the single-scale lists produced by the test pipeline so
            # downstream code sees a plain tensor and meta dict.
            data['img'] = img
            data['img_meta'] = img_meta
            data['feats'] = model.extract_feat(img)
            results.append(data)

        return results
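Assuming the surrounding wrapper stores an mmdet detector as self.module (the wrapper itself is not shown), a hypothetical call looks like:

results = wrapper.backbone(['demo.jpg'])  # `wrapper` is a hypothetical instance
feats = results[0]['feats']               # FPN feature maps from model.extract_feat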
Example #3
import warnings

from mmcv.parallel import collate, scatter
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose
# RoIPool/RoIAlign with the `use_torchvision` switch come from mmdet 1.x ops.
from mmdet.ops import RoIAlign, RoIPool


def inference_detector2(model, img_path):
    cfg = model.cfg
    device = next(model.parameters()).device  # model device

    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img_path)
    data = test_pipeline(data)

    data = collate([data], samples_per_gpu=1)

    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        # Use torchvision ops for CPU mode instead
        for m in model.modules():
            if isinstance(m, (RoIPool, RoIAlign)):
                if not m.aligned:
                    # aligned=False is not implemented on CPU
                    # set use_torchvision on-the-fly
                    m.use_torchvision = True
        warnings.warn('We set use_torchvision=True in CPU mode.')
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data
    imgs = data['img'][0]
    img_metas = data['img_metas'][0]
    return imgs, img_metas
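The returned pair can be fed straight into the detector's test-mode forward (assuming `model` was built with init_detector); a hedged sketch:

imgs, img_metas = inference_detector2(model, 'demo.jpg')
with torch.no_grad():
    result = model(return_loss=False, rescale=True, img=[imgs], img_metas=[img_metas])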
Example #4
import mmcv
import torch
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def inference_detector(model, img, cfg, device):
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)

    device = torch.device(device)

    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)

    tensor = data['img'][0].unsqueeze(0).to(device)
    img_metas = data['img_metas']
    scale_factor = img_metas[0].data['scale_factor']
    scale_factor = torch.tensor(scale_factor,
                                dtype=torch.float32,
                                device=device)

    with torch.no_grad():
        result = model(tensor)
        result = list(result)
        # Map the predicted boxes back to the original image scale.
        result[1] = result[1] / scale_factor

    return result
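Because this variant forwards a bare tensor, it suits a traced or otherwise exported detector whose raw outputs include boxes at index 1 (as the rescaling above assumes); a hypothetical driver:

scripted = torch.jit.load('weights/detector_scripted.pt').to('cuda:0')  # hypothetical export
scripted.eval()
result = inference_detector(scripted, 'demo.jpg', 'configs/example.py', 'cuda:0')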
Example #5
import numpy as np
from mmcv.parallel import collate, scatter
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def get_fake_input(cfg, orig_img_shape=(128, 128, 3), device='cuda'):
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=np.zeros(orig_img_shape, dtype=np.uint8))
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    return data
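get_fake_input is handy when only tensor shapes matter, e.g. for tracing or warm-up. A sketch, assuming a detector already built with init_detector:

fake = get_fake_input(model.cfg, orig_img_shape=(128, 128, 3), device='cuda')
with torch.no_grad():
    feats = model.extract_feat(fake['img'][0])  # run the backbone on the dummy batch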
Example #6
import torch
from mmcv.parallel import collate, scatter
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def inference_detector(model, img):
    cfg = model.cfg
    device = next(model.parameters()).device
    test_pipeline = [LoadImage()] + cfg.test_pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
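Driving the function above end to end might look like this (paths are placeholders; the config is assumed to expose cfg.test_pipeline as in the snippet):

from mmdet.apis import init_detector

model = init_detector('configs/example.py', 'checkpoints/example.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # a loaded ndarray also works via LoadImage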
Example #7
    def __init__(self, path, cfg, device='cpu'):
        self.path = path
        self.video = cv2.VideoCapture(self.path)
        assert self.video.isOpened()
        self.video.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        self.cfg = cfg
        self.device = device

        # build the data pipeline
        self.test_pipeline = [LoadImage()] + self.cfg.test.pipeline[1:]
        self.test_pipeline = Compose(self.test_pipeline)
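A hypothetical companion method, sketching how a frame might be pulled from the capture and pushed through the pipeline (the method name and return value are assumptions):

    def read_frame(self):
        # Grab one frame from the capture and preprocess it (hypothetical helper).
        ok, frame = self.video.read()
        if not ok:
            return None
        data = dict(img=frame)
        return self.test_pipeline(data)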
Example #8
    def preprocess(self, img):
        """Use mmdetection utilities to preprocess an image.

        :param img: A numpy array holding the test image
        :return: The preprocessed image tensor
        """
        test_pipeline = [LoadImage()] + self.cfg.data.test.pipeline[1:]
        test_pipeline = Compose(test_pipeline)
        # prepare data
        data = dict(img=img)
        data = test_pipeline(data)

        return data['img'][0]
Example #9
    def cpu_inference_detector(self, model, img):
        cfg = model.cfg
        # build the data pipeline
        test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
        test_pipeline = Compose(test_pipeline)
        # prepare data
        data = dict(img=img)
        data = test_pipeline(data)
        data = collate([data], samples_per_gpu=1)
        # stay on CPU: unwrap the DataContainer instead of scattering to a GPU
        data['img_meta'] = data['img_meta'][0].data
        # forward the model
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        return result
Example #10
import numpy as np
from mmcv.parallel import collate, scatter
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def fake_data(model, input=(3, 800, 1333)):
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    if isinstance(input, (list, tuple)):
        # reverse (C, H, W) to channel-last for a fake image array
        img = np.random.random(input[::-1])
    elif isinstance(input, str):
        img = input
    else:
        raise TypeError('input must be a shape tuple or an image path, '
                        'but got {}'.format(type(input)))
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    return data
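A hedged benchmarking sketch built on fake_data; it assumes the model was created with init_detector and that the test pipeline tolerates a float image (cast to uint8 first if yours does not):

data = fake_data(model)  # random input at the default (3, 800, 1333) resolution
with torch.no_grad():
    result = model(return_loss=False, rescale=True, **data)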
Example #11
import cv2
import mmcv
import onnxruntime as rt
import torch
# show_result_ins and get_classes come from the SOLO fork of mmdetection.
from mmdet.apis import show_result_ins
from mmdet.core import get_classes
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def run_on_onnxruntime():
    config_file = '../configs/solov2/solov2_light_448_r34_fpn_8gpu_3x.py'
    onnx_file = 'weights/SOLOv2_light_R34.onnx'
    input_names = ['input']
    # output_names = ['C0', 'C1', 'C2', 'C3', 'C4']
    # output_names = ['cate_pred_0', 'cate_pred_1', 'cate_pred_2', 'cate_pred_3', 'cate_pred_4',
    #                 'kernel_pred_0', 'kernel_pred_1', 'kernel_pred_2', 'kernel_pred_3', 'kernel_pred_4',
    #                 'seg_pred']  # Origin
    output_names = ['cate_pred', 'kernel_pred', 'seg_pred']  # after adding permute & concat
    if isinstance(config_file, str):
        cfg = mmcv.Config.fromfile(config_file)
    elif not isinstance(config_file, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config_file)))

    # 1. Preprocess
    # input demo img size 427x640 --> resized 448x671 --> pad 448x672
    img = 'images/demo.jpg'
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)

    # 2. Run inference on onnxruntime
    print("Load onnx model from {}.".format(onnx_file))
    sess = rt.InferenceSession(onnx_file)
    tic = cv2.getTickCount()
    onnx_output = sess.run(output_names, {input_names[0]: data['img'][0].unsqueeze(0).cpu().numpy()})
    print('-----> onnxruntime inference time: {}ms'.format((cv2.getTickCount() - tic) * 1000 / cv2.getTickFrequency()))

    # 3. Get seg
    # Use the SOLOv2 head defined in PyTorch for mask decoding, matrix NMS and post-processing.
    from mmdet.models.anchor_heads.solov2_head import SOLOv2Head
    solov2_head = SOLOv2Head(num_classes=81,
                             in_channels=256,
                             num_grids=[40, 36, 24, 16, 12],
                             strides=[8, 8, 16, 32, 32],
                             ins_out_channels=128,
                             loss_ins=cfg.model.bbox_head.loss_ins,
                             loss_cate=cfg.model.bbox_head.loss_cate)
    cate_preds = [torch.from_numpy(x) for x in onnx_output[:1]]
    kernel_preds = [torch.from_numpy(x) for x in onnx_output[1:2]]
    seg_pred = torch.from_numpy(onnx_output[2])
    result = solov2_head.get_seg(cate_preds, kernel_preds, seg_pred, [data['img_meta'][0].data], cfg.test_cfg, rescale=True)
    show_result_ins(img, result, get_classes('coco'), score_thr=0.25, out_file="images/demo_out_onnxrt_solov2.jpg")
    print('Script done!')
Example #12
    def __init__(self, image_paths, config, opt_shape_param):
        r"""
        datas used to calibrate int8 model
        feed to int8_calib_dataset
        Args:
            image_paths (list[str]): image paths to calib
            config (str|dict): config of mmdetection model
            opt_shape_param: same as mmdet2trt
        """
        if isinstance(config, str):
            config = mmcv.Config.fromfile(config)

        self.cfg = config
        self.image_paths = image_paths
        self.opt_shape = opt_shape_param[0][1]

        test_pipeline = [LoadImage()] + config.data.test.pipeline[1:]
        self.test_pipeline = Compose(test_pipeline)
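To act as a calibration dataset the class also needs __len__ and __getitem__; a minimal sketch, assuming the calibrator consumes one preprocessed image tensor per index (the exact interface mmdet2trt expects may differ):

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Run the mmdet test pipeline on a single calibration image.
        data = dict(img=self.image_paths[idx])
        data = self.test_pipeline(data)
        return data['img'][0]  # preprocessed CHW image tensor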
Example #13
import cv2
import mmcv
import torch
# show_result_ins and get_classes come from the SOLO fork of mmdetection.
from mmdet.apis import show_result_ins
from mmdet.core import get_classes
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose


def run_on_tensorrt():
    from deploy import common

    config_file = '../configs/solov2/solov2_light_448_r34_fpn_8gpu_3x.py'
    onnx_file = 'weights/SOLOv2_light_R34.onnx'
    input_names = ['input']
    # output_names = ['C0', 'C1', 'C2', 'C3', 'C4']
    # output_names = ['cate_pred_0', 'cate_pred_1', 'cate_pred_2', 'cate_pred_3', 'cate_pred_4',
    #                 'kernel_pred_0', 'kernel_pred_1', 'kernel_pred_2', 'kernel_pred_3', 'kernel_pred_4',
    #                 'seg_pred']  # Origin
    output_names = ['cate_pred', 'kernel_pred', 'seg_pred']  # after adding permute & concat
    if isinstance(config_file, str):
        cfg = mmcv.Config.fromfile(config_file)
    elif not isinstance(config_file, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config_file)))

    # 1. Preprocess
    # input demo img size 427x640 --> resized 448x671 --> pad 448x672
    img = 'images/demo.jpg'
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)

    # 2. Run inference on trt
    print("Load onnx model from {}.".format(onnx_file))
    image_shape = data['img_meta'][0].data['pad_shape']
    input_shapes = ((1, image_shape[2], image_shape[0], image_shape[1]),)  # explicit shape
    # input_shapes = ((1, 3, 448, 448), (1, 3, 608, 608), (1, 3, 768, 768))  # dynamic shape
    # shape_matrix = [
    # [1, 40, 40, 80],
    # [1, 36, 36, 80],
    # [1, 24, 24, 80],
    # [1, 16, 16, 80],
    # [1, 12, 12, 80],
    # [1, 128, 40, 40],
    # [1, 128, 36, 36],
    # [1, 128, 24, 24],
    # [1, 128, 16, 16],
    # [1, 128, 12, 12],
    # [1, 128, image_shape[0]//4, image_shape[1]//4]
    # ]
    shape_matrix = [
        [3872, 80],
        [3872, 128],
        [1, 128, image_shape[0] // 4, image_shape[1] // 4]
    ]
    with common.get_engine(onnx_file, onnx_file.replace(".onnx", ".engine"),
                           input_shapes=input_shapes, force_rebuild=False) \
            as engine, engine.create_execution_context() as context:
        # Note: we only allocate device memory here to speed things up.
        # DYNAMIC shape
        # context.active_optimization_profile = 0
        # [context.set_binding_shape(x, tuple(y)) for x, y in enumerate(shape_matrix)]
        # inputs, outputs, bindings, stream = common.allocate_buffersV2(engine, context)
        # EXPLICIT shape
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)

        # Speed test: cpu(0.976s) vs gpu(0.719s)
        # ==> Set host input to the data.
        # The common.do_inference function will copy the input to the GPU before executing.
        inputs[0].host = data['img'][0].unsqueeze(0).cpu().numpy()  # for torch.Tensor
        # ==> Or set the device input to the data directly;
        # in this mode, common.do_inference should not copy inputs.host to inputs.device anymore.
        # c_type_pointer = ctypes.c_void_p(int(inputs[0].device))
        # x.cpu().numpy().copy_to_external(c_type_pointer)
        tic = cv2.getTickCount()
        trt_outputs = common.do_inferenceV2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream,
                                            batch_size=1, h_=image_shape[0], w_=image_shape[1])
        print('-----> tensorRT inference time: {}ms'.format((cv2.getTickCount() - tic) * 1000 / cv2.getTickFrequency()))

    # 3. Get seg
    # Use the SOLOv2 head defined in PyTorch for mask decoding, matrix NMS and post-processing.
    from mmdet.models.anchor_heads.solov2_head import SOLOv2Head
    solov2_head = SOLOv2Head(num_classes=81,
                             in_channels=256,
                             num_grids=[40, 36, 24, 16, 12],
                             strides=[8, 8, 16, 32, 32],
                             ins_out_channels=128,
                             loss_ins=cfg.model.bbox_head.loss_ins,
                             loss_cate=cfg.model.bbox_head.loss_cate)
    # TODO: the tensorrt output order differs from pytorch?
    # Original head outputs:
    # ids = [8, 9, 7, 6, 5, 3, 4, 2, 1, 0, 10]
    # ids = [9, 8, 7, 5, 6, 4, 3, 2, 0, 1, 10]
    # After adding permute & concat:
    ids = [1, 0, 2]

    cate_preds = [torch.from_numpy(trt_outputs[x]).reshape(y) for x, y in zip(ids[:1], shape_matrix[:1])]
    kernel_preds = [torch.from_numpy(trt_outputs[x]).reshape(y) for x, y in zip(ids[1:2], shape_matrix[1:2])]
    seg_pred = torch.from_numpy(trt_outputs[2]).reshape(shape_matrix[2])
    result = solov2_head.get_seg(cate_preds, kernel_preds, seg_pred, [data['img_meta'][0].data], cfg.test_cfg, rescale=True)
    show_result_ins(img, result, get_classes('coco'), score_thr=0.25, out_file="images/demo_out_trt_solov2.jpg")
    print('Script done!')
Example #14
import numpy as np
import torch
from torchvision import transforms
from models import Yolov4  # from the pytorch-YOLOv4 repo
from mmdet.apis import init_detector
from mmdet.apis.inference import LoadImage
from mmdet.datasets.pipelines import Compose

nor = transforms.Normalize([123.675 / 255., 116.28 / 255., 103.53 / 255.],
                           [58.395 / 255., 57.12 / 255., 57.375 / 255.])

model1 = Yolov4(yolov4conv137weight=None, n_classes=80, inference=True)
pretrained_dict = torch.load('checkpoints/yolov4.pth', map_location=torch.device('cuda'))
model1.load_state_dict(pretrained_dict)
model1.eval().cuda()

config = './mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint = './checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
meta = [{
    'filename': '../images/6.png',
    'ori_filename': '../images/6.png',
    'ori_shape': (500, 500, 3),
    'img_shape': (800, 800, 3),
    'pad_shape': (800, 800, 3),
    'scale_factor': np.array([1.6, 1.6, 1.6, 1.6], dtype=np.float32),
    'flip': False,
    'flip_direction': None,
    'img_norm_cfg': {
        'mean': np.array([123.675, 116.28, 103.53], dtype=np.float32),
        'std': np.array([58.395, 57.12, 57.375], dtype=np.float32),
        'to_rgb': True,
    },
}]
model2 = init_detector(config, checkpoint, device='cuda:0')

cfg = model2.cfg
device = next(model2.parameters()).device  # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)

def get_mask(image, meta, pixels):
    mask = torch.zeros((1, 3, 500, 500)).cuda()
    bbox, label = model2(return_loss=False, rescale=True, img=image, img_metas=meta)
    bbox = bbox[bbox[:, 4] > 0.3]  # keep detections scoring above 0.3
    num = min(bbox.shape[0], 10)   # cap at 10 boxes
    if num == 0:
        return mask.float().cuda()
    lp = int(pixels / (3 * num))
    for i in range(num):
        xc = int((bbox[i, 0] + bbox[i, 2]) / 2)
        yc = int((bbox[i, 1] + bbox[i, 3]) / 2)
        w = int(bbox[i, 2] - bbox[i, 0])