Example No. 1
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes',
        help='Color palette used for segmentation map')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)
    # show the results
    show_result_pyplot(
        model,
        args.img,
        result,
        get_palette(args.palette),
        opacity=args.opacity)
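All of the demos in this collection call the MMSegmentation 0.x inference API; the import block below is a sketch of what they assume, since the scraped snippets omit their imports.

# Imports assumed by this and the following examples (mmseg 0.x API).
from argparse import ArgumentParser

from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette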
Example No. 2
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='ade',
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    demo_im_names = os.listdir(args.img)
    random.shuffle(demo_im_names)
    for im_name in demo_im_names:
        if im_name.lower().endswith(('.png', '.jpg')):
            full_name = os.path.join(args.img, im_name)
            result = inference_segmentor(model, full_name)
            # show the results with a hand-written six-class palette
            palette = [[220, 220, 220], [17, 142, 35], [152, 251, 152],
                       [0, 60, 100], [70, 130, 180], [220, 20, 20]]
            show_result_pyplot(model, full_name, result, palette)
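Because the palette here is supplied by hand, it is easy to end up with fewer colors than classes. A small sanity check (a sketch, relying on the model.CLASSES metadata that init_segmentor reads from the checkpoint) could be added inside the loop:

            # Sketch: a hand-written palette needs one RGB triple per class.
            assert len(palette) == len(model.CLASSES), \
                f'palette has {len(palette)} colors for {len(model.CLASSES)} classes'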
Example No. 3
def main():
    parser = ArgumentParser()
    parser.add_argument('--img', help='Image file', default=img_path)
    parser.add_argument('--config', help='Config file', default=config)
    parser.add_argument('--checkpoint', help='Checkpoint file', default=ckpt)
    parser.add_argument(
        '--device', default='cuda:1', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default=None,
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    if args.img == '':
        list_img = get_list_file_in_folder(img_dir)
        list_img = sorted(list_img)
        for img_ in list_img:
            img = os.path.join(img_dir, img_)
            print(img)
            result = inference_segmentor(model, img)
            show_result_pyplot(model, img, result, get_palette(args.palette))
    else:
        result = inference_segmentor(model, args.img)
        show_result_pyplot(model, args.img, result, get_palette(args.palette))
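This example leans on a get_list_file_in_folder helper (plus module-level img_path, config, ckpt and img_dir defaults) that the snippet does not define; a plausible reconstruction of the helper, offered purely as an assumption, is:

import os

# Hypothetical helper assumed by Example No. 3 (not part of the original source).
def get_list_file_in_folder(folder, extensions=('.jpg', '.jpeg', '.png')):
    return [f for f in os.listdir(folder) if f.lower().endswith(extensions)]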
Example No. 4
    def __init__(self):
        self.root_split_path = "../LidarDetector/data/kitti/training/"
        self.save_path = "../LidarDetector/data/kitti/training/painted_lidar_mm/"
        config_file = 'mmseg/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
        checkpoint_file = 'mmseg/checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'
        self.model = init_segmentor(config_file,
                                    checkpoint_file,
                                    device='cuda:1')
Example No. 5
    def __init__(self, model_name, model_path):
        self.model_name = model_name
        self.model_path = model_path

        config_file = '/home/mind/model/deeplabv3plus512x512.py'
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = init_segmentor(config_file,
                                    self.model_path,
                                    device=self.device)
Example No. 6
def init_model(transform):

    # use the format <version>-<resolution>-<dataset> (e.g. b1-512-ade) in the
    # -t parameter to select a config/checkpoint pair
    (version, resolution, datasetType) = transform.split('-')
    config = (f"local_configs/segformer/{version.upper()}/"
              f"segformer.{version}.{resolution}x{resolution}.{datasetType}.160k.py")
    checkpoint = f"checkpoints/segformer.{version}.{resolution}x{resolution}.{datasetType}.160k.pth"

    # build the model from a config file and a checkpoint file
    model = init_segmentor(config, checkpoint, device='cuda:0')

    return model, None
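A usage sketch for the transform-string convention described in the comment (version-resolution-dataset); the resolved paths depend on the SegFormer repository layout and are assumptions here:

# "b1-512-ade" -> local_configs/segformer/B1/segformer.b1.512x512.ade.160k.py
model, _ = init_model('b1-512-ade')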
Example No. 7
def main():
    # # Define the config and checkpoint file paths
    # config_file = 'mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py'
    # checkpoint_file = '../mmsegmentation/work_dirs/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/latest.pth'
    #
    # # build the model from a config file and a checkpoint file
    # model = init_segmentor(config_file, checkpoint_file)
    #
    # # test a single image and show the results
    # img_path = 'demo.png'  # or img = mmcv.imread(img), which will only load it once
    # img=cv2.imread(img_path)
    # print('img_cv.shape:',img.shape)
    # # imgPil=Image.open(img_path)
    # # imgPil=np.array(imgPil)
    # # print('imgPil.shape:',imgPil.shape)
    # # img_pil2cv = cv2.cvtColor(imgPil, cv2.COLOR_RGB2BGR)
    # # print('img_pil2cv.shape:',img_pil2cv.shape)
    # result = inference_segmentor(model, img_path)
    # print(result[0])
    # #result=result[0]
    # #result=result.astype(np.int8)
    # #print(type(result[0][0]))
    # #print(result)
    # # visualize the results in a new window
    # #model.show_result(img, result, show=True)
    # # or save the visualization results to image files
    # #model.show_result(img, result, out_file='resultimg_pil2cv.jpg')
    config_file = 'config_huawei/deeplabv3plus512x512.py'
    checkpoint_file = '/home/admins/qyl/huawei_compete/mmsegmentation/work_dirs/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/model_best.pth'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(config_file, checkpoint_file, device='cuda')

    # test a single image and show the results
    img_path = './demo_test/182_7_26.png'  # or img = mmcv.imread(img), which will only load it once
    # img = cv2.imread(img_path)
    # print('img_cv.shape:', img.shape)
    # img = mmcv.imread(img_path)
    # print(img.shape)
    imgPil = Image.open(img_path)
    imgPil = np.array(imgPil)
    print('imgPil.shape:', imgPil.shape)
    img_pil2cv = cv2.cvtColor(imgPil, cv2.COLOR_RGB2BGR)
    print('img_pil2cv.shape:', img_pil2cv.shape)
    result = inference_segmentor(model, imgPil)
    # result = result[0]
    # result = result.astype(np.int8)
    # print(type(result[0][0]))
    # print(result)
    model.show_result(imgPil, result, out_file='result1126_rgb.jpg')
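One caveat on this example: mmcv and mmseg treat in-memory arrays as BGR (the OpenCV convention), so passing the PIL-derived RGB array straight into inference_segmentor feeds the network channel-swapped data. A sketch of the safer path, reusing the conversion the example already computes:

# Convert PIL's RGB layout to the BGR layout mmseg expects before inference.
img_pil2cv = cv2.cvtColor(np.array(Image.open(img_path)), cv2.COLOR_RGB2BGR)
result = inference_segmentor(model, img_pil2cv)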
Example No. 8
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    print(f'Config:\n{cfg.pretty_text}')
    # dump config
    cfg.dump('example.py')
    # dump models graph
    if args.graph:
        model = init_segmentor(args.config, device='cpu')
        print(f'Model graph:\n{str(model)}')
        with open('example-graph.txt', 'w') as f:
            f.write(str(model))
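This example calls a parse_args helper that the snippet omits; a hypothetical reconstruction consistent with the flags it reads (args.config, args.options, args.graph) might look like:

import argparse

from mmcv import DictAction

# Hypothetical parse_args for Example No. 8 (a sketch, not the original).
def parse_args():
    parser = argparse.ArgumentParser(description='Print and dump a config')
    parser.add_argument('config', help='Config file path')
    parser.add_argument('--options', nargs='+', action=DictAction,
                        help='Override settings in the config, key=value style')
    parser.add_argument('--graph', action='store_true',
                        help='Also dump the model graph to a text file')
    return parser.parse_args()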
Example No. 9
    def initialize(self, context):
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
        self.manifest = context.manifest

        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_segmentor(self.config_file, checkpoint, self.device)
        self.initialized = True
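This initialize belongs to a TorchServe-style handler; for context, a companion handle method (an assumption, not shown in the source) would typically route request bytes through the model roughly as follows:

    # Sketch of a matching TorchServe entry point (hypothetical).
    def handle(self, data, context):
        img = mmcv.imfrombytes(data[0].get('data') or data[0].get('body'))
        results = inference_segmentor(self.model, img)
        return [result.tolist() for result in results]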
Example No. 10
def get_model(model_path, epoch_name):
    project_type = model_path.split('/')[-2]
    model_type = model_path.split('/')[-1]

    config_file = f'../../configs/{project_type}/{model_type}.py'
    checkpoint_file = f'/fengyouliang/model_output/mmseg_work_dirs/{project_type}/{model_type}/{epoch_name}.pth'

    print(f"project name: {project_type}")
    print(f"model type: {model_type}")
    print(f"loading config form {config_file}")
    print(f"loading checkpoint form {checkpoint_file}")
    print('init model right now')
    start = time.time()
    model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
    end = time.time()
    print(f'loading done! time: {end - start: .4f}s')
    return model
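A usage sketch for get_model; the path layout is the helper's own convention (the last two components are read as project/model), and the concrete names below are placeholders:

model = get_model('/fengyouliang/model_output/mmseg_work_dirs/my_project/pspnet_r50',
                  epoch_name='latest')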
Example No. 11
def main():
    
    args = get_parser().parse_args()
    img_path = args.img_path
    save_path = args.save_path
    
    config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
    checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
    model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
    
    for path in tqdm(os.listdir(img_path)):
        if path.endswith('.jpg'):
            fullpath = os.path.join(img_path, path)
            result = inference_segmentor(model, fullpath)[0].astype(np.uint8)
            
            seg_path = os.path.join(save_path, path)
            cv2.imwrite(seg_path, result, [cv2.IMWRITE_JPEG_QUALITY, 75])
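A caveat on the output format here: JPEG is lossy, so writing raw class indices with IMWRITE_JPEG_QUALITY 75 will corrupt labels around region boundaries. When the masks need to be read back, a lossless PNG target (sketch) is safer:

            # Sketch: store the label map losslessly instead of as JPEG.
            seg_path = os.path.join(save_path, os.path.splitext(path)[0] + '.png')
            cv2.imwrite(seg_path, result)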
Example No. 12
def main(args):
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        tmp_res = requests.post(url, image)
    content = tmp_res.content
    if args.result_image:
        with open(args.result_image, 'wb') as out_image:
            out_image.write(content)
        plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
        plt.show()
    else:
        plt.imshow(plt.imread(BytesIO(content)))
        plt.show()
    model = init_segmentor(args.config, args.checkpoint, args.device)
    image = mmcv.imread(args.img)
    result = inference_segmentor(model, image)
    plt.imshow(result[0])
    plt.show()
Example No. 13
def evaluate_dataset(checkpoint,
                     device,
                     config,
                     data_dir="/rd-temp/mohan/iccv09Data",
                     split_file="/rd-temp/mohan/iccv09Data/splits/val.txt",
                     **kwargs):
    # initiate model
    model = init_segmentor(config, checkpoint, device=device)

    # assign config to model (Required)
    model.config = config

    result_list = []
    gt_lst = []
    raw_image_ids = []

    with open(split_file, "r") as f:
        for line in f.readlines():
            raw_image_ids.append(line.strip())

    with tqdm(total=len(raw_image_ids)) as pbar:

        for image_id in raw_image_ids:
            # inference
            img = mmcv.imread(f"{data_dir}/images/{image_id}.jpg")
            result = inference_segmentor(model, img)

            # prepare image data and ground truth
            gt_image_path = f"{data_dir}/labels/{image_id}.regions.txt"
            seg_map = np.loadtxt(gt_image_path).astype(np.uint8)

            result_list.extend(result)
            gt_lst.append(seg_map)

            pbar.update(1)

    all_acc, acc, iou = mmseg.core.evaluation.mean_iou(
        results=result_list,
        gt_seg_maps=gt_lst,
        num_classes=config.num_classes,
        ignore_index=kwargs.get("ignore_index", 255),
        label_map=kwargs.get("label_map", {}))

    return (all_acc, acc, iou, iou[~np.isnan(iou)].mean())
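A usage sketch for evaluate_dataset; note that config must arrive as an mmcv Config object, since the function reads config.num_classes directly (which this sketch assumes is defined at the top level of that config):

cfg = mmcv.Config.fromfile('configs/stanford_bg.py')  # placeholder path
all_acc, acc, iou, miou = evaluate_dataset(
    checkpoint='work_dirs/latest.pth', device='cuda:0', config=cfg)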
Example No. 14
    def __init__(self, seg_net_index):
        self.root_split_path = TRAINING_PATH
        self.save_path = TRAINING_PATH + "painted_lidar/"
        if not os.path.exists(self.save_path):
            os.mkdir(self.save_path)

        self.seg_net_index = seg_net_index
        self.model = None
        if seg_net_index == 0:
            print(f'Using Segmentation Network -- {SEG_NET_OPTIONS[seg_net_index]}')
            self.model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
            self.model.eval()
            if torch.cuda.is_available():
                self.model.to('cuda')
        elif seg_net_index == 1:
            print(f'Using Segmentation Network -- {SEG_NET_OPTIONS[seg_net_index]}')
            config_file = './mmseg/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
            checkpoint_file = './mmseg/checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'
            self.model = init_segmentor(config_file, checkpoint_file, device='cuda:0') # TODO edit here if you want to use different device
Example No. 15
def run_submission(model_path, epoch_name):
    project_type = model_path.split('/')[-2]
    model_type = model_path.split('/')[-1]

    config_file = f'../../configs/{project_type}/{model_type}.py'
    checkpoint_file = f'/fengyouliang/model_output/mmseg_work_dirs/{project_type}/{model_type}/{epoch_name}.pth'

    print(f"project name: {project_type}")
    print(f"model type: {model_type}")
    print(f"loading config form {config_file}")
    print(f"loading checkpoint form {checkpoint_file}")
    print('init model right now')
    model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
    print('start inference')

    all_test_images = glob.glob(f"{opt.test_path}/*.tif")
    pbar = tqdm(all_test_images, total=len(all_test_images))

    for image_file in pbar:
        pbar.set_description(Path(image_file).name)
        inference_one_image(model, image_file, project_type, model_type)
Example No. 16
def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='drive',
                        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)
    # show the results
    img = model.show_result(args.img, result, show=False)
    cv2.imwrite('out.jpg', img)
    show_result_pyplot(model, args.img, result)
Example No. 17
def main():
    parser = ArgumentParser()
    # parser.add_argument('--img', default="Image_20200925100338349.bmp", help='Image file')
    parser.add_argument('--img', default="star.png", help='Image file')
    # parser.add_argument('--img', default="demo.png", help='Image file')
    parser.add_argument(
        '--config',
        # default="../configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary.py",
        default=
        "../configs/danet/danet_r50-d8_512x1024_40k_cityscapes_custom.py",
        # default="../configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py",
        help='Config file')
    parser.add_argument(
        '--checkpoint',
        # default="../tools/work_dirs/deeplabv3_r50-d8_512x1024_40k_cityscapes_custom_binary/iter_200.pth",
        default=
        "../tools/work_dirs/danet_r50-d8_512x1024_40k_cityscapes_custom/iter_4000.pth",
        # default="../checkpoints/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth",
        help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes_custom',
        # default='cityscapes',
        help='Color palette used for segmentation map')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)

    # io.imsave("result.png", result[0])
    # io.imshow(result[0])
    # io.show()
    # show the results
    show_result_pyplot(model, args.img, result, get_palette(args.palette))
    """
def test_test_time_augmentation_on_cpu():
    config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
    config = mmcv.Config.fromfile(config_file)

    # Remove pretrain model download for testing
    config.model.pretrained = None
    # Replace SyncBN with BN to inference on CPU
    norm_cfg = dict(type='BN', requires_grad=True)
    config.model.backbone.norm_cfg = norm_cfg
    config.model.decode_head.norm_cfg = norm_cfg
    config.model.auxiliary_head.norm_cfg = norm_cfg

    # Enable test time augmentation
    config.data.test.pipeline[1].flip = True

    checkpoint_file = None
    model = init_segmentor(config, checkpoint_file, device='cpu')

    img = mmcv.imread(osp.join(osp.dirname(__file__), 'data/color.jpg'),
                      'color')
    result = inference_segmentor(model, img)
    assert result[0].shape == (288, 512)
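The assertion pins the flip-TTA output to the test image's spatial size. Extending the same idea to multi-scale TTA only needs one more tweak on the same MultiScaleFlipAug pipeline step, sketched here (the next example applies exactly this):

    # Sketch: multi-scale + flip TTA on the same pipeline step.
    config.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    config.data.test.pipeline[1].flip = True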
Example No. 19
def inference_model(config_name, checkpoint, args, logger=None):
    cfg = Config.fromfile(config_name)
    if args.aug:
        if ('flip' in cfg.data.test.pipeline[1]
                and 'img_scale' in cfg.data.test.pipeline[1]):
            cfg.data.test.pipeline[1].img_ratios = [
                0.5, 0.75, 1.0, 1.25, 1.5, 1.75
            ]
            cfg.data.test.pipeline[1].flip = True
        else:
            if logger is not None:
                logger.error(f'{config_name}: unable to start aug test')
            else:
                print(f'{config_name}: unable to start aug test', flush=True)

    model = init_segmentor(cfg, checkpoint, device=args.device)
    # test a single image
    result = inference_segmentor(model, args.img)

    # show the results
    if args.show:
        show_result_pyplot(model, args.img, result)
    return result
Example No. 20
    def __init__(self, args):
        self.args = args
        self.device = torch.device(args.device)

        # image transform
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
        ])

        # dataset and dataloader
        val_dataset = get_segmentation_dataset(cfg.DATASET.NAME, split='val', mode='testval', transform=input_transform)
        val_sampler = make_data_sampler(val_dataset, False, args.distributed)

        #####################
        # BATCH SIZE is always 1

        val_batch_sampler = make_batch_data_sampler(val_sampler, images_per_batch=cfg.TEST.BATCH_SIZE, drop_last=False)
        self.val_loader = data.DataLoader(dataset=val_dataset,
                                          batch_sampler=val_batch_sampler,
                                          num_workers=cfg.DATASET.WORKERS,
                                          pin_memory=True)
        self.classes = val_dataset.classes

        ### Create network ###

        # Segmentron model
        # self.model = get_segmentation_model().to(self.device)

        # MMSeg model
        mmseg_config_file = "mmseg-configs/deeplabv3plus_r101-d8_512x512_80k_ade20k.py"
        mmseg_pretrained = "pretrained_weights/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth"
        self.model = init_segmentor(mmseg_config_file, mmseg_pretrained)

        self.model.to(self.device)
        self.metric = SegmentationMetric(val_dataset.num_class, args.distributed)
Example No. 21
             color=[0, 0, 255],
             font_scale=1,
             thickness=1,
             back_color=[0, 0, 0])

    cv2.imshow(window_name, img)
    global _display_inited
    if not _display_inited:
        _display_inited = True
        # cv2.moveWindow(window_name, -1800, 50) #home
        # cv2.moveWindow(window_name, -1800, 220) #office
    cv2.waitKey(wait)


# build the model from a config file and a checkpoint file
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')

#########tiff
# for img_fn in enumerate_files("C:/_koray/test_data/space/test"):
for img_fn in enumerate_files(
        "C:/_koray/test_data/space/SN7_buildings_test_public/test_public/L15-0369E-1244N_1479_3214_13/images_masked"
):

    # tiff = tiff_helper.open_tiff(img_fn)
    # img = cv2.imread(img_fn)
    # cv2.imshow("aaa", img)
    # cv2.waitKey(0)

    result = inference_segmentor(model, img_fn)
    # model.show_result(img_fn, result, show=True, wait_time=1000)
    display(False, img_fn, result, wait=1000)
Example No. 22
height, width = 480, 640
video_name = "sample1"
# height, width = 720, 1280
video_path = str(data_path / "origin" / f"{video_name}.mp4")
resized_video_path = str(data_path / f"{video_name}_{height}x{width}.mp4")
resized_frames_path = data_path / f"{video_name}_{height}x{width}"
if not resized_frames_path.exists():
    resized_frames_path.mkdir()

frames_path = resized_frames_path / "img_dir"

cityscapes_path = repo_path.parent / "data" / "cityscapes"
device = "cuda"
batch_size = 2

seg_model = init_segmentor(config_file, checkpoint_file, device=device)
crf = CRF(n_spatial_dims=2, returns="log-proba").to(device)

cfg = seg_model.cfg
train_dataset = CityscapesDataset(data_root=cityscapes_path,
                                  pipeline=cfg.data.train.pipeline,
                                  img_dir=cfg.data.train.img_dir,
                                  ann_dir=cfg.data.train.ann_dir,
                                  test_mode=False)

val_dataset = CityscapesDataset(data_root=cityscapes_path,
                                pipeline=cfg.data.val.pipeline,
                                img_dir=cfg.data.val.img_dir,
                                ann_dir=cfg.data.val.ann_dir,
                                test_mode=False)
Example No. 23
    def __init__(self, args):
        self.args = args
        self.device = torch.device(args.device)

        self.prefix = "ADE_cce_alpha={}".format(cfg.TRAIN.ALPHA)
        self.writer = SummaryWriter(log_dir=f"iccv_tensorboard/{self.prefix}")
        self.writer_noisy = SummaryWriter(
            log_dir=f"iccv_tensorboard/{self.prefix}-foggy")

        # image transform
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
        ])
        # dataset and dataloader
        train_data_kwargs = {
            'transform': input_transform,
            'base_size': cfg.TRAIN.BASE_SIZE,
            'crop_size': cfg.TRAIN.CROP_SIZE
        }
        val_data_kwargs = {
            'transform': input_transform,
            'base_size': cfg.TRAIN.BASE_SIZE,
            'crop_size': cfg.TEST.CROP_SIZE
        }
        train_dataset = get_segmentation_dataset(cfg.DATASET.NAME,
                                                 split='train',
                                                 mode='train',
                                                 **train_data_kwargs)
        val_dataset = get_segmentation_dataset(cfg.DATASET.NAME,
                                               split='val',
                                               mode="val",
                                               **val_data_kwargs)

        self.classes = val_dataset.classes
        self.iters_per_epoch = len(train_dataset) // (args.num_gpus *
                                                      cfg.TRAIN.BATCH_SIZE)
        self.max_iters = cfg.TRAIN.EPOCHS * self.iters_per_epoch

        self.ece_evaluator = ECELoss(n_classes=len(self.classes))
        self.cce_evaluator = CCELoss(n_classes=len(self.classes))

        train_sampler = make_data_sampler(train_dataset,
                                          shuffle=True,
                                          distributed=args.distributed)
        train_batch_sampler = make_batch_data_sampler(train_sampler,
                                                      cfg.TRAIN.BATCH_SIZE,
                                                      self.max_iters,
                                                      drop_last=True)
        val_sampler = make_data_sampler(val_dataset, False, args.distributed)
        val_batch_sampler = make_batch_data_sampler(val_sampler,
                                                    cfg.TEST.BATCH_SIZE,
                                                    drop_last=False)

        self.train_loader = data.DataLoader(dataset=train_dataset,
                                            batch_sampler=train_batch_sampler,
                                            num_workers=cfg.DATASET.WORKERS,
                                            pin_memory=True)
        self.val_loader = data.DataLoader(dataset=val_dataset,
                                          batch_sampler=val_batch_sampler,
                                          num_workers=cfg.DATASET.WORKERS,
                                          pin_memory=True)

        # DEFINE data for noisy
        # val_dataset_noisy = get_segmentation_dataset(cfg.DATASET.NOISY_NAME, split='val', mode="val", **train_data_kwargs)
        # self.val_loader_noisy = data.DataLoader(dataset=val_dataset_noisy,
        #                                   batch_sampler=val_batch_sampler,
        #                                   num_workers=cfg.DATASET.WORKERS,
        #                                   pin_memory=True)

        # create network
        # self.model = get_segmentation_model().to(self.device)
        mmseg_config_file = cfg.MODEL.MMSEG_CONFIG
        mmseg_pretrained = cfg.TRAIN.PRETRAINED_MODEL_PATH
        self.model = init_segmentor(mmseg_config_file, mmseg_pretrained)
        self.model.to(self.device)

        for params in self.model.backbone.parameters():
            params.requires_grad = False

        # print params and flops
        if get_rank() == 0:
            try:
                show_flops_params(copy.deepcopy(self.model), args.device)
            except Exception as e:
                logging.warning('get flops and params error: {}'.format(e))

        if cfg.MODEL.BN_TYPE not in ['BN']:
            logging.info(
                'Batch norm type is {}, convert_sync_batchnorm is not effective'
                .format(cfg.MODEL.BN_TYPE))
        elif args.distributed and cfg.TRAIN.SYNC_BATCH_NORM:
            self.model = nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
            logging.info('SyncBatchNorm is effective!')
        else:
            logging.info('Not use SyncBatchNorm!')

        # create criterion
        # self.criterion = get_segmentation_loss(cfg.MODEL.MODEL_NAME, use_ohem=cfg.SOLVER.OHEM,
        #                                        aux=cfg.SOLVER.AUX, aux_weight=cfg.SOLVER.AUX_WEIGHT,
        #                                        ignore_index=cfg.DATASET.IGNORE_INDEX).to(self.device)
        self.criterion = get_segmentation_loss(
            cfg.MODEL.MODEL_NAME,
            use_ohem=cfg.SOLVER.OHEM,
            aux=cfg.SOLVER.AUX,
            aux_weight=cfg.SOLVER.AUX_WEIGHT,
            ignore_index=cfg.DATASET.IGNORE_INDEX,
            n_classes=len(train_dataset.classes),
            alpha=cfg.TRAIN.ALPHA).to(self.device)

        # optimizer, for model just includes encoder, decoder(head and auxlayer).
        self.optimizer = get_optimizer_mmseg(self.model)

        # lr scheduling
        self.lr_scheduler = get_scheduler(self.optimizer,
                                          max_iters=self.max_iters,
                                          iters_per_epoch=self.iters_per_epoch)

        # resume checkpoint if needed
        self.start_epoch = 0
        if args.resume and os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
            logging.info('Resuming training, loading {}...'.format(
                args.resume))
            resume_state = torch.load(args.resume)
            self.model.load_state_dict(resume_state['state_dict'])
            self.start_epoch = resume_state['epoch']
            logging.info('resume train from epoch: {}'.format(
                self.start_epoch))
            if (resume_state['optimizer'] is not None
                    and resume_state['lr_scheduler'] is not None):
                logging.info(
                    'resume optimizer and lr scheduler from resume state..')
                self.optimizer.load_state_dict(resume_state['optimizer'])
                self.lr_scheduler.load_state_dict(resume_state['lr_scheduler'])

        # evaluation metrics
        self.metric = SegmentationMetric(train_dataset.num_class,
                                         args.distributed)
        self.best_pred_miou = 0.0
        self.best_pred_cces = 1e15
Example No. 24
def main():
    parser = ArgumentParser()
    parser.add_argument('video', help='Video file or webcam id')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='cityscapes',
                        help='Color palette used for segmentation map')
    parser.add_argument('--show',
                        action='store_true',
                        help='Whether to show draw result')
    parser.add_argument('--show-wait-time',
                        default=1,
                        type=int,
                        help='Wait time after imshow')
    parser.add_argument('--output-file',
                        default=None,
                        type=str,
                        help='Output video file path')
    parser.add_argument('--output-fourcc',
                        default='MJPG',
                        type=str,
                        help='Fourcc of the output video')
    parser.add_argument('--output-fps',
                        default=-1,
                        type=int,
                        help='FPS of the output video')
    parser.add_argument('--output-height',
                        default=-1,
                        type=int,
                        help='Frame height of the output video')
    parser.add_argument('--output-width',
                        default=-1,
                        type=int,
                        help='Frame width of the output video')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    assert args.show or args.output_file, \
        'At least one output should be enabled.'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    # build input video
    cap = cv2.VideoCapture(args.video)
    assert cap.isOpened()
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_fps = cap.get(cv2.CAP_PROP_FPS)

    # init output video
    writer = None
    output_height = None
    output_width = None
    if args.output_file is not None:
        fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc)
        output_fps = args.output_fps if args.output_fps > 0 else input_fps
        output_height = args.output_height if args.output_height > 0 else int(
            input_height)
        output_width = args.output_width if args.output_width > 0 else int(
            input_width)
        writer = cv2.VideoWriter(args.output_file, fourcc, output_fps,
                                 (output_width, output_height), True)

    # start looping
    try:
        while True:
            flag, frame = cap.read()
            if not flag:
                break

            # test a single image
            result = inference_segmentor(model, frame)

            # blend raw image and prediction
            draw_img = model.show_result(frame,
                                         result,
                                         palette=get_palette(args.palette),
                                         show=False,
                                         opacity=args.opacity)

            if args.show:
                cv2.imshow('video_demo', draw_img)
                cv2.waitKey(args.show_wait_time)
            if writer:
                if (draw_img.shape[0] != output_height
                        or draw_img.shape[1] != output_width):
                    draw_img = cv2.resize(draw_img,
                                          (output_width, output_height))
                writer.write(draw_img)
    finally:
        if writer:
            writer.release()
        cap.release()
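One caveat for the 'webcam id' option: cv2.VideoCapture only opens a camera when given an int, while argparse delivers the string '0', which OpenCV treats as a filename. A small guard (sketch) before building cap would handle both cases:

    # Sketch: map a numeric CLI argument to an int camera index for OpenCV.
    video = int(args.video) if args.video.isdigit() else args.video
    cap = cv2.VideoCapture(video)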