The examples below all target the MMSegmentation 0.x API: init_segmentor(config, checkpoint, device=...) builds a segmentor from a config file and a checkpoint, and inference_segmentor(model, img) returns a list with one segmentation map per input (see Code Example #23 for the imports).

Code Example #1
def main():
    parser = ArgumentParser()
    # img_path, config and ckpt are module-level defaults defined elsewhere
    parser.add_argument('--img', help='Image file', default=img_path)
    parser.add_argument('--config', help='Config file', default=config)
    parser.add_argument('--checkpoint', help='Checkpoint file', default=ckpt)
    parser.add_argument(
        '--device', default='cuda:1', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default=None,
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    if args.img == '':
        # img_dir is a module-level name; get_list_file_in_folder is sketched after this example
        list_img = sorted(get_list_file_in_folder(img_dir))
        for img_ in list_img:
            img = os.path.join(img_dir, img_)
            print(img)
            result = inference_segmentor(model, img)
            show_result_pyplot(model, img, result, get_palette(args.palette))
    else:
        result = inference_segmentor(model, args.img)
        show_result_pyplot(model, args.img, result, get_palette(args.palette))
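get_list_file_in_folder is not defined in the snippet; a minimal sketch of what it presumably does (the extension filter is an assumption):

import os

def get_list_file_in_folder(folder, exts=('.jpg', '.png')):
    # return image file names (not full paths) directly under `folder`
    return [f for f in os.listdir(folder) if f.lower().endswith(exts)]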
Code Example #2
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image directory')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='ade',
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    demo_im_names = os.listdir(args.img)
    random.shuffle(demo_im_names)
    for im_name in demo_im_names:
        if im_name.endswith(('.png', '.jpg')):
            full_name = os.path.join(args.img, im_name)
            result = inference_segmentor(model, full_name)
            # show the results with a hard-coded six-colour palette
            pl = [[220, 220, 220], [17, 142, 35], [152, 251, 152],
                  [0, 60, 100], [70, 130, 180], [220, 20, 20]]
            show_result_pyplot(model, full_name, result, pl)
Code Example #3
    def get_score(self, idx, left):
        ''' idx : index string
            left : string indicating the left/right camera
        return:
            a tensor H * W * 4 (deeplab) / 5 (deeplabv3plus); for each pixel
            the 4/5 scores sum to 1
        '''
        output_reassign_softmax = None
        if self.seg_net_index == 0:
            filename = self.root_split_path + left + ('%s.png' % idx)
            input_image = Image.open(filename)
            preprocess = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])

            input_tensor = preprocess(input_image)
            input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model

            # move the input to the GPU if available (the model is assumed
            # to already be on the same device)
            if torch.cuda.is_available():
                input_batch = input_batch.to('cuda')

            with torch.no_grad():
                output = self.model(input_batch)['out'][0]

            output_permute = output.permute(1, 2, 0)
            output_probability, output_predictions = output_permute.max(2)

            other_object_mask = ~((output_predictions == 0) | (output_predictions == 2) | (output_predictions == 7) | (output_predictions == 15))
            detect_object_mask = ~other_object_mask
            sf = torch.nn.Softmax(dim=2)

            # bicycle = 2 car = 7 person = 15 background = 0
            output_reassign = torch.zeros(output_permute.size(0), output_permute.size(1), 4)
            output_reassign[:, :, 0] = detect_object_mask * output_permute[:, :, 0] + other_object_mask * output_probability  # background
            output_reassign[:, :, 1] = output_permute[:, :, 2]  # bicycle
            output_reassign[:, :, 2] = output_permute[:, :, 7]  # car
            output_reassign[:, :, 3] = output_permute[:, :, 15]  # person
            output_reassign_softmax = sf(output_reassign).cpu().numpy()

        elif self.seg_net_index == 1:
            filename = self.root_split_path + left + ('%s.png' % idx)
            result = inference_segmentor(self.model, filename)
            # person 11, rider 12, vehicle 13/14/15/16, bike 17/18
            output_permute = torch.tensor(result[0]).permute(1, 2, 0)  # H, W, 19
            sf = torch.nn.Softmax(dim=2)

            output_reassign = torch.zeros(output_permute.size(0), output_permute.size(1), 5)
            output_reassign[:, :, 0], _ = torch.max(output_permute[:, :, :11], dim=2)  # background
            output_reassign[:, :, 1], _ = torch.max(output_permute[:, :, [17, 18]], dim=2)  # bicycle
            output_reassign[:, :, 2], _ = torch.max(output_permute[:, :, [13, 14, 15, 16]], dim=2)  # car
            output_reassign[:, :, 3] = output_permute[:, :, 11]  # person
            output_reassign[:, :, 4] = output_permute[:, :, 12]  # rider
            output_reassign_softmax = sf(output_reassign).cpu().numpy()
        
        elif self.seg_net_index == 2:
            filename = self.root_split_path + "score_hma/" + left + ('%s.npy' % idx)
            output_reassign_softmax = np.load(filename)

        return output_reassign_softmax
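The remapping idea above, reduced to a self-contained sketch on dummy logits (this simplification drops the background-mask reassignment; the VOC-style class ids match the comment in the code):

import torch

# dummy per-pixel logits: H=2, W=2, 21 VOC-style classes
logits = torch.randn(2, 2, 21)

# keep background (0), bicycle (2), car (7) and person (15), then renormalise
kept = logits[:, :, [0, 2, 7, 15]]
scores = torch.softmax(kept, dim=2)  # each pixel's four scores sum to 1
print(scores.sum(dim=2))             # ~1.0 everywhere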
Code Example #4
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes',
        help='Color palette used for segmentation map')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)
    # show the results
    show_result_pyplot(
        model,
        args.img,
        result,
        get_palette(args.palette),
        opacity=args.opacity)
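This matches MMSegmentation's stock image_demo.py; a typical invocation (the paths are placeholders):

# python image_demo.py demo.png \
#     configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \
#     checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \
#     --palette cityscapes --opacity 0.5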
Code Example #5
    def _inference(self, data):
        img = data["input_img"]
        data = img
        target_l = 1024
        x, y, c = data.shape
        label = np.zeros((x, y))
        # number of 1024x1024 tiles in each direction (ceil division)
        x_num = (x // target_l + 1) if x % target_l else x // target_l
        y_num = (y // target_l + 1) if y % target_l else y // target_l
        for i in range(x_num):
            for j in range(y_num):
                x_s, x_e = i * target_l, (i + 1) * target_l
                y_s, y_e = j * target_l, (j + 1) * target_l
                img = data[x_s:x_e, y_s:y_e, :]
                if np.mean(img) == 0:
                    # all-black tile: skip inference, mark everything as class 1
                    out_l = np.ones((img.shape[0], img.shape[1]))
                else:
                    out_l = inference_segmentor(self.model, img)[0]
                label[x_s:x_e, y_s:y_e] = out_l.astype(np.int8)
        # encode each row as the start/end columns of its runs of class-0 pixels
        _label = label.astype(np.int8).tolist()
        n_cols = len(_label[0])
        o_stack = []
        for row in _label:
            out_s = {"s": [], "e": []}
            j = 0
            while j < n_cols:
                if row[j] == 0:
                    out_s["s"].append(str(j))
                    while j < n_cols and row[j] == 0:
                        j += 1
                    out_s["e"].append(str(j))
                j += 1
            o_stack.append(out_s)
        return {"result": o_stack}
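The final loop run-length encodes each row: it records the start and end columns of every run of class-0 pixels. A tiny illustration:

row = [0, 0, 1, 1, 0, 1]
# runs of zeros: columns [0, 2) and [4, 5)
# encoded as {"s": ["0", "4"], "e": ["2", "5"]}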
Code Example #6
def main():
    # An earlier variant used the cityscapes config:
    # config_file = 'mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py'
    # checkpoint_file = '../mmsegmentation/work_dirs/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/latest.pth'
    # model = init_segmentor(config_file, checkpoint_file)
    # result = inference_segmentor(model, 'demo.png')
    config_file = 'config_huawei/deeplabv3plus512x512.py'
    checkpoint_file = '/home/admins/qyl/huawei_compete/mmsegmentation/work_dirs/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/model_best.pth'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(config_file, checkpoint_file, device='cuda')

    # test a single image and show the results
    img_path = './demo_test/182_7_26.png'  # or img = mmcv.imread(img), which will only load it once
    imgPil = Image.open(img_path)
    imgPil = np.array(imgPil)
    print('imgPil.shape:', imgPil.shape)
    img_pil2cv = cv2.cvtColor(imgPil, cv2.COLOR_RGB2BGR)
    print('img_pil2cv.shape:', img_pil2cv.shape)
    result = inference_segmentor(model, imgPil)
    model.show_result(imgPil, result, out_file='result1126_rgb.jpg')
Code Example #7
def process_image(transform, processing_model, img):
    tracks = []
    try:
        model = processing_model

        result = inference_segmentor(model, img)
        img = model.show_result(img, result, palette=None, show=False)

    except Exception as e:
        print(traceback.format_exc())
        print("SegFormer Exception", e)

    return tracks, img
Code Example #8
def inference_one_image(model, image, project_type, model_type):
    img = mmcv.imread(image)
    result = inference_segmentor(model, img)
    result = result[0]

    pred = np.zeros(result.shape, dtype=np.uint16)

    # opt is a module-level options namespace (a sketch follows this example);
    # opt.matches lists the predicted class ids to keep
    for idx, match in enumerate(opt.matches):
        pred[result == match] = (idx + 1) * 100

    save_path = f'../../submission/{project_type}/{model_type}/results'
    os.makedirs(save_path, exist_ok=True)

    out_file = f"{save_path}/{Path(image).stem}.png"
    cv.imwrite(out_file, pred)
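opt is not shown; it is presumably built from argparse at module level. A minimal sketch (the class ids are illustrative only):

from types import SimpleNamespace

# hypothetical: keep predicted class ids 1, 2 and 3, written as grey
# levels 100, 200 and 300 in the 16-bit output image
opt = SimpleNamespace(matches=[1, 2, 3])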
Code Example #9
def main():
    
    args = get_parser().parse_args()
    img_path = args.img_path
    save_path = args.save_path
    
    config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
    checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
    model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
    
    for path in tqdm(os.listdir(img_path)):
        if path.endswith('.jpg'):
            fullpath = os.path.join(img_path, path)
            result = inference_segmentor(model, fullpath)[0].astype(np.uint8)
            
            seg_path = os.path.join(save_path, os.path.splitext(path)[0] + '.png')
            # write as PNG: lossy JPEG compression would corrupt the label ids
            cv2.imwrite(seg_path, result)
Code Example #10
def val_one_image(model, image_id):
    ori_image = f"{opt.image_path}/{image_id}.tif"
    ori_gt = f"{opt.label_path}/{image_id}.png"

    img = mmcv.imread(ori_image)
    gt_img = mmcv.imread(ori_gt)
    result = inference_segmentor(model, img)
    result = result[0]

    pred = cvt_id2color(result)
    gt = cvt_id2color(gt_img[..., 0])

    # mmcv.imread returns BGR; reverse the channel order for matplotlib
    plt.imshow(img[:, :, ::-1])
    plt.show()
    plt.imshow(gt[:, :, ::-1])
    plt.show()
    plt.imshow(pred[:, :, ::-1])
    plt.show()
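cvt_id2color is not shown; it presumably maps a 2-D array of class ids to a colour image via a palette lookup. A minimal sketch (the palette values are placeholders):

import numpy as np

# hypothetical palette: one BGR triple per class id
ID2COLOR = np.array([[0, 0, 0], [0, 0, 255], [0, 255, 0]], dtype=np.uint8)

def cvt_id2color(label_map):
    # fancy-index the palette with each pixel's class id
    return ID2COLOR[label_map]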
Code Example #11
def main(args):
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        tmp_res = requests.post(url, image)
    content = tmp_res.content
    if args.result_image:
        with open(args.result_image, 'wb') as out_image:
            out_image.write(content)
        plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
        plt.show()
    else:
        plt.imshow(plt.imread(BytesIO(content)))
        plt.show()
    # compare the served result with local inference
    model = init_segmentor(args.config, args.checkpoint, args.device)
    image = mmcv.imread(args.img)
    result = inference_segmentor(model, image)
    plt.imshow(result[0])
    plt.show()
Code Example #12
def evaluate_dataset(checkpoint,
                     device,
                     config,
                     data_dir="/rd-temp/mohan/iccv09Data",
                     split_file="/rd-temp/mohan/iccv09Data/splits/val.txt",
                     **kwargs):
    # initiate model (`config` must be an mmcv Config object here,
    # since `config.num_classes` is read at the end of this function)
    model = init_segmentor(config, checkpoint, device=device)

    # assign config to model (required)
    model.config = config

    result_list = []
    gt_lst = []
    raw_image_ids = []

    with open(split_file, "r") as f:
        for line in f.readlines():
            raw_image_ids.append(line.strip())

    with tqdm(total=len(raw_image_ids)) as pbar:

        for image_id in raw_image_ids:
            # inference
            img = mmcv.imread(f"{data_dir}/images/{image_id}.jpg")
            result = inference_segmentor(model, img)

            # prepare image data and ground truth
            gt_image_path = f"{data_dir}/labels/{image_id}.regions.txt"
            seg_map = np.loadtxt(gt_image_path).astype(np.uint8)

            result_list.extend(result)
            gt_lst.append(seg_map)

            pbar.update(1)

    all_acc, acc, iou = mmseg.core.evaluation.mean_iou(
        results=result_list,
        gt_seg_maps=gt_lst,
        num_classes=config.num_classes,
        ignore_index=kwargs.get("ignore_index", 255),
        label_map=kwargs.get("label_map", {}))

    return (all_acc, acc, iou, iou[~np.isnan(iou)].mean())
Code Example #13
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='drive',
                        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)
    # show the results
    img = model.show_result(args.img, result, show=False)
    cv2.imwrite('out.jpg', img)
    show_result_pyplot(model, args.img, result)
Code Example #14
    def get_score(self, idx, left):
        filename = self.root_split_path + left + ('%s.png' % idx)
        result = inference_segmentor(self.model, filename)
        # person 11, rider 12, vehicle 13/14/15/16, bike 17/18
        output_permute = torch.tensor(result[0]).permute(1, 2, 0)  # H, W, 19
        sf = torch.nn.Softmax(dim=2)

        output_reassign = torch.zeros(output_permute.size(0),
                                      output_permute.size(1), 5)
        output_reassign[:, :, 0], _ = torch.max(output_permute[:, :, :11],
                                                dim=2)  # background
        output_reassign[:, :, 1], _ = torch.max(output_permute[:, :, [17, 18]],
                                                dim=2)  # bicycle
        output_reassign[:, :, 2], _ = torch.max(
            output_permute[:, :, [13, 14, 15, 16]], dim=2)  # car
        output_reassign[:, :, 3] = output_permute[:, :, 11]  # person
        output_reassign[:, :, 4] = output_permute[:, :, 12]  # rider
        output_reassign_softmax = sf(output_reassign)
        return output_reassign_softmax
Code Example #15
def main():
    parser = ArgumentParser()
    # parser.add_argument('--img', default="Image_20200925100338349.bmp", help='Image file')
    parser.add_argument('--img', default="star.png", help='Image file')
    # parser.add_argument('--img', default="demo.png", help='Image file')
    parser.add_argument(
        '--config',
        # default="../configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary.py",
        default="../configs/danet/danet_r50-d8_512x1024_40k_cityscapes_custom.py",
        # default="../configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py",
        help='Config file')
    parser.add_argument(
        '--checkpoint',
        # default="../tools/work_dirs/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary/iter_200.pth",
        default="../tools/work_dirs/danet_r50-d8_512x1024_40k_cityscapes_custom/iter_4000.pth",
        # default="../checkpoints/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth",
        help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes_custom',
        # default='cityscapes',
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)

    # io.imsave("result.png", result[0])
    # io.imshow(result[0])
    # io.show()
    # show the results
    show_result_pyplot(model, args.img, result, get_palette(args.palette))
    """
def test_test_time_augmentation_on_cpu():
    config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
    config = mmcv.Config.fromfile(config_file)

    # Remove pretrain model download for testing
    config.model.pretrained = None
    # Replace SyncBN with BN to inference on CPU
    norm_cfg = dict(type='BN', requires_grad=True)
    config.model.backbone.norm_cfg = norm_cfg
    config.model.decode_head.norm_cfg = norm_cfg
    config.model.auxiliary_head.norm_cfg = norm_cfg

    # Enable test time augmentation
    config.data.test.pipeline[1].flip = True

    checkpoint_file = None
    model = init_segmentor(config, checkpoint_file, device='cpu')

    img = mmcv.imread(osp.join(osp.dirname(__file__), 'data/color.jpg'),
                      'color')
    result = inference_segmentor(model, img)
    assert result[0].shape == (288, 512)
Code Example #17
def inference_model(config_name, checkpoint, args, logger=None):
    cfg = Config.fromfile(config_name)
    if args.aug:
        tta_step = cfg.data.test.pipeline[1]
        if 'flip' in tta_step and 'img_scale' in tta_step:
            # enable multi-scale + flip test-time augmentation
            tta_step.img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
            tta_step.flip = True
        else:
            if logger is not None:
                logger.error(f'{config_name}: unable to start aug test')
            else:
                print(f'{config_name}: unable to start aug test', flush=True)

    model = init_segmentor(cfg, checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)

    # show the results
    if args.show:
        show_result_pyplot(model, args.img, result)
    return result
Code Example #18
def main():
    parser = ArgumentParser()
    parser.add_argument('video', help='Video file or webcam id')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='cityscapes',
                        help='Color palette used for segmentation map')
    parser.add_argument('--show',
                        action='store_true',
                        help='Whether to show draw result')
    parser.add_argument('--show-wait-time',
                        default=1,
                        type=int,
                        help='Wait time after imshow')
    parser.add_argument('--output-file',
                        default=None,
                        type=str,
                        help='Output video file path')
    parser.add_argument('--output-fourcc',
                        default='MJPG',
                        type=str,
                        help='Fourcc of the output video')
    parser.add_argument('--output-fps',
                        default=-1,
                        type=int,
                        help='FPS of the output video')
    parser.add_argument('--output-height',
                        default=-1,
                        type=int,
                        help='Frame height of the output video')
    parser.add_argument('--output-width',
                        default=-1,
                        type=int,
                        help='Frame width of the output video')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    assert args.show or args.output_file, \
        'At least one output should be enabled.'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    # build input video
    cap = cv2.VideoCapture(args.video)
    assert cap.isOpened()
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_fps = cap.get(cv2.CAP_PROP_FPS)

    # init output video
    writer = None
    output_height = None
    output_width = None
    if args.output_file is not None:
        fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc)
        output_fps = args.output_fps if args.output_fps > 0 else input_fps
        output_height = args.output_height if args.output_height > 0 else int(
            input_height)
        output_width = args.output_width if args.output_width > 0 else int(
            input_width)
        writer = cv2.VideoWriter(args.output_file, fourcc, output_fps,
                                 (output_width, output_height), True)

    # start looping
    try:
        while True:
            flag, frame = cap.read()
            if not flag:
                break

            # test a single image
            result = inference_segmentor(model, frame)

            # blend raw image and prediction
            draw_img = model.show_result(frame,
                                         result,
                                         palette=get_palette(args.palette),
                                         show=False,
                                         opacity=args.opacity)

            if args.show:
                cv2.imshow('video_demo', draw_img)
                cv2.waitKey(args.show_wait_time)
            if writer:
                if draw_img.shape[:2] != (output_height, output_width):
                    draw_img = cv2.resize(draw_img, (output_width, output_height))
                writer.write(draw_img)
    finally:
        if writer:
            writer.release()
        cap.release()
Code Example #19
# build the model from a config file and a checkpoint file
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')

#########tiff
# for img_fn in enumerate_files("C:/_koray/test_data/space/test"):
for img_fn in enumerate_files(
        "C:/_koray/test_data/space/SN7_buildings_test_public/test_public/L15-0369E-1244N_1479_3214_13/images_masked"
):

    # tiff = tiff_helper.open_tiff(img_fn)
    # img = cv2.imread(img_fn)
    # cv2.imshow("aaa", img)
    # cv2.waitKey(0)

    # enumerate_files and display are the author's own helpers;
    # a sketch of enumerate_files follows this example
    result = inference_segmentor(model, img_fn)
    # model.show_result(img_fn, result, show=True, wait_time=1000)
    display(False, img_fn, result, wait=1000)

# #########screen capture
# for frame in ScreenCapturer(bbox=(400, 200, 400 + 1200, 200 + 1000)).get_frames():
#     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#     result = inference_segmentor(model, frame)
#     # model.show_result(frame, result, show=True, wait_time=1)
#     display(True, frame, result)

##########youtube
# url = "https://youtu.be/ZORzsubQA_M"
# for frame in YoutubeVideoSource(url).get_frames():
#     result = inference_segmentor(model, frame)
#     model.show_result(frame, result, show=True, wait_time=1)
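enumerate_files is not shown; a minimal sketch of what it presumably does (the sorting is an assumption):

import os

def enumerate_files(folder):
    # yield the full path of every file directly under `folder`
    for name in sorted(os.listdir(folder)):
        full = os.path.join(folder, name)
        if os.path.isfile(full):
            yield full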
Code Example #20
def predict_save(model, img: str, out_path=None):
    result = inference_segmentor(model, img)
    if out_path:
        model.show_result(img, result, palette=PALETTE, out_file=out_path)
    else:
        model.show_result(img, result, palette=PALETTE, show=True)
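Examples #20 and #21 both rely on a module-level PALETTE; a minimal sketch, assuming three classes (the colours are placeholders):

# hypothetical palette: one RGB triple per class
PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70]]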
Code Example #21
def plot_result(model, img: str):
    result = inference_segmentor(model, img)
    show_result_pyplot(model, img, result, palette=PALETTE)
Code Example #22
def predict(img: str):
    # model is a module-level segmentor initialised elsewhere
    if isinstance(img, list):
        img = img[0]
    result = inference_segmentor(model, img)
    return result
Code Example #23
from mmseg.apis import inference_segmentor, init_segmentor
import mmcv

config_file = 'config.py'
checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'

config = mmcv.Config.fromfile(config_file)
print(config.keys())

model = init_segmentor(config, checkpoint_file, device='cuda:1')

# test a single image
img = 'sample/demo.png'
result = inference_segmentor(model, img)
Code Example #24

    def inference(self, data, *args, **kwargs):
        # run the segmentor over a batch of images, one result per image
        results = [inference_segmentor(self.model, img) for img in data]
        return results