Example #1
def test_encoder_decoder():

    # test 1 decode head, w/o aux head
    cfg = dict(type='EncoderDecoder',
               backbone=dict(type='ExampleBackbone'),
               decode_head=dict(type='ExampleDecodeHead'))
    test_cfg = mmcv.Config(dict(mode='whole'))
    segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg)
    _segmentor_forward_train_test(segmentor)

    # test slide mode
    test_cfg = mmcv.Config(dict(mode='slide', crop_size=(3, 3), stride=(2, 2)))
    segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg)
    _segmentor_forward_train_test(segmentor)

    # test 1 decode head, 1 aux head
    cfg = dict(type='EncoderDecoder',
               backbone=dict(type='ExampleBackbone'),
               decode_head=dict(type='ExampleDecodeHead'),
               auxiliary_head=dict(type='ExampleDecodeHead'))
    test_cfg = mmcv.Config(dict(mode='whole'))
    segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg)
    _segmentor_forward_train_test(segmentor)

    # test 1 decode head, 2 aux heads
    cfg = dict(type='EncoderDecoder',
               backbone=dict(type='ExampleBackbone'),
               decode_head=dict(type='ExampleDecodeHead'),
               auxiliary_head=[
                   dict(type='ExampleDecodeHead'),
                   dict(type='ExampleDecodeHead')
               ])
    test_cfg = mmcv.Config(dict(mode='whole'))
    segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg)
    _segmentor_forward_train_test(segmentor)
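
The tests above call a `_segmentor_forward_train_test` helper that is not part of this excerpt. A minimal sketch, assuming `torch`, `torch.nn as nn`, and the `_demo_mm_inputs` fixture sketched later under Example #13:

def _segmentor_forward_train_test(segmentor):
    # Cascade segmentors keep their heads in an nn.ModuleList.
    if isinstance(segmentor.decode_head, nn.ModuleList):
        num_classes = segmentor.decode_head[-1].num_classes
    else:
        num_classes = segmentor.decode_head.num_classes

    # batch_size=2 so BatchNorm layers see more than one sample
    mm_inputs = _demo_mm_inputs(input_shape=(2, 3, 4, 4),
                                num_classes=num_classes)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_semantic_seg = mm_inputs['gt_semantic_seg']

    # training forward returns a dict of losses
    losses = segmentor.forward(
        imgs, img_metas, gt_semantic_seg=gt_semantic_seg, return_loss=True)
    assert isinstance(losses, dict)

    # test-time forward expects lists of images and metas
    with torch.no_grad():
        segmentor.eval()
        img_list = [img[None, :] for img in imgs]
        img_meta_list = [[img_meta] for img_meta in img_metas]
        segmentor.forward(img_list, img_meta_list, return_loss=False)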
Example #2
def load_swin(cfg, ck_path):
    # make temp_cfg
    temp_cfg = copy.deepcopy(cfg)
    temp_cfg.model.decode_head.num_classes = 150
    temp_cfg.model.auxiliary_head.num_classes = 150

    # make original swin
    pre_trained = build_segmentor(temp_cfg.model,
                                  train_cfg=None,
                                  test_cfg=None)
    load_checkpoint(pre_trained, ck_path, map_location='cpu')

    model = build_segmentor(cfg.model, train_cfg=None, test_cfg=None)

    # make pretrained state dict
    pretrained_dict = pre_trained.backbone.state_dict()
    model_dict = model.backbone.state_dict()
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    del pre_trained
    torch.cuda.empty_cache()
    model.backbone.load_state_dict(model_dict)
    return model
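
A hypothetical invocation of `load_swin` (both paths are placeholders; `copy`, `torch`, `mmcv`, `build_segmentor` and `load_checkpoint` are assumed to be imported):

cfg = mmcv.Config.fromfile('configs/swin/upernet_swin_tiny_ade20k.py')  # placeholder config
model = load_swin(cfg, 'checkpoints/upernet_swin_tiny_ade20k.pth')      # placeholder checkpoint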
Example #3
    def __init__(
        self,
        teacher_cfg,
        student_cfg,
        distill_cfg=None,
        teacher_pretrained=None,
    ):

        super(SegmentationDistiller, self).__init__()

        self.teacher = build_segmentor(teacher_cfg.model,
                                       train_cfg=teacher_cfg.get('train_cfg'),
                                       test_cfg=teacher_cfg.get('test_cfg'))
        self.init_weights_teacher(teacher_pretrained)

        self.teacher.eval()
        self.student = build_segmentor(student_cfg.model,
                                       train_cfg=student_cfg.get('train_cfg'),
                                       test_cfg=student_cfg.get('test_cfg'))

        self.distill_losses = nn.ModuleDict()

        self.distill_cfg = distill_cfg
        student_modules = dict(self.student.named_modules())
        teacher_modules = dict(self.teacher.named_modules())

        def register_hooks(student_module, teacher_module):
            def hook_teacher_forward(module, input, output):

                self.register_buffer(teacher_module, output)

            def hook_student_forward(module, input, output):

                self.register_buffer(student_module, output)

            return hook_teacher_forward, hook_student_forward

        for item_loc in distill_cfg:

            student_module = 'student_' + item_loc.student_module.replace(
                '.', '_')
            teacher_module = 'teacher_' + item_loc.teacher_module.replace(
                '.', '_')

            self.register_buffer(student_module, None)
            self.register_buffer(teacher_module, None)

            hook_teacher_forward, hook_student_forward = register_hooks(
                student_module, teacher_module)
            teacher_modules[item_loc.teacher_module].register_forward_hook(
                hook_teacher_forward)
            student_modules[item_loc.student_module].register_forward_hook(
                hook_student_forward)

            for item_loss in item_loc.methods:
                loss_name = item_loss.name
                self.distill_losses[loss_name] = build_distill_loss(item_loss)
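
The excerpt shows only the constructor. A hedged sketch of how a `forward_train` might consume the hook-filled buffers and apply the distillation losses (names follow the constructor above; the real implementation is not shown):

    def forward_train(self, img, img_metas, gt_semantic_seg):
        # The teacher forward fills the teacher_* buffers through the hooks.
        with torch.no_grad():
            self.teacher.forward_dummy(img)
        # The student forward fills the student_* buffers and yields task losses.
        losses = self.student.forward_train(img, img_metas, gt_semantic_seg)
        for item_loc in self.distill_cfg:
            student_feat = getattr(
                self, 'student_' + item_loc.student_module.replace('.', '_'))
            teacher_feat = getattr(
                self, 'teacher_' + item_loc.teacher_module.replace('.', '_'))
            for item_loss in item_loc.methods:
                losses[item_loss.name] = self.distill_losses[item_loss.name](
                    student_feat, teacher_feat)
        return losses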
Example #4
def test_cascade_encoder_decoder():

    # test 1 decode head, w/o aux head
    cfg = ConfigDict(
        type='CascadeEncoderDecoder',
        num_stages=2,
        backbone=dict(type='ExampleBackbone'),
        decode_head=[
            dict(type='ExampleDecodeHead'),
            dict(type='ExampleCascadeDecodeHead')
        ])
    cfg.test_cfg = ConfigDict(mode='whole')
    segmentor = build_segmentor(cfg)
    _segmentor_forward_train_test(segmentor)

    # test slide mode
    cfg.test_cfg = ConfigDict(mode='slide', crop_size=(3, 3), stride=(2, 2))
    segmentor = build_segmentor(cfg)
    _segmentor_forward_train_test(segmentor)

    # test 1 decode head, 1 aux head
    cfg = ConfigDict(
        type='CascadeEncoderDecoder',
        num_stages=2,
        backbone=dict(type='ExampleBackbone'),
        decode_head=[
            dict(type='ExampleDecodeHead'),
            dict(type='ExampleCascadeDecodeHead')
        ],
        auxiliary_head=dict(type='ExampleDecodeHead'))
    cfg.test_cfg = ConfigDict(mode='whole')
    segmentor = build_segmentor(cfg)
    _segmentor_forward_train_test(segmentor)

    # test 1 decode head, 2 aux heads
    cfg = ConfigDict(
        type='CascadeEncoderDecoder',
        num_stages=2,
        backbone=dict(type='ExampleBackbone'),
        decode_head=[
            dict(type='ExampleDecodeHead'),
            dict(type='ExampleCascadeDecodeHead')
        ],
        auxiliary_head=[
            dict(type='ExampleDecodeHead'),
            dict(type='ExampleDecodeHead')
        ])
    cfg.test_cfg = ConfigDict(mode='whole')
    segmentor = build_segmentor(cfg)
    _segmentor_forward_train_test(segmentor)
Example #5
def main():

    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = Config.fromfile(args.config)
    model = build_segmentor(cfg.model,
                            train_cfg=cfg.train_cfg,
                            test_cfg=cfg.test_cfg).cuda()
    model.eval()

    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
        split_line, input_shape, flops, params))
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
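
The script assumes an argparse helper roughly like the following (a sketch; flag names are inferred from the usage above):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Get the FLOPs of a segmentor')
    parser.add_argument('config', help='config file path')
    parser.add_argument('--shape', type=int, nargs='+', default=[2048, 1024],
                        help='input image size')
    return parser.parse_args()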
Example #6
def test_config_build_segmentor():
    """Test that all segmentation models defined in the configs can be
    initialized."""
    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))

    config_fpaths = []
    # one config from each sub-folder
    for sub_folder in os.listdir(config_dpath):
        if isdir(join(config_dpath, sub_folder)):
            config_fpaths.append(
                list(glob.glob(join(config_dpath, sub_folder, '*.py')))[0])
    config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1]
    config_names = [relpath(p, config_dpath) for p in config_fpaths]

    print('Using {} config files'.format(len(config_names)))

    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = Config.fromfile(config_fpath)

        print('Building segmentor, config_fpath = {!r}'.format(config_fpath))

        # Remove pretrained keys to allow for testing in an offline environment
        if 'pretrained' in config_mod.model:
            config_mod.model['pretrained'] = None

        print('building {}'.format(config_fname))
        segmentor = build_segmentor(config_mod.model)
        assert segmentor is not None

        head_config = config_mod.model['decode_head']
        _check_decode_head(head_config, segmentor.decode_head)
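
`_check_decode_head` is not included in this excerpt. A simplified sketch of the consistency check it is assumed to perform (the real test verifies more fields, e.g. in_channels and channels):

def _check_decode_head(decode_head_cfg, decode_head):
    if isinstance(decode_head_cfg, list):
        # cascade segmentors pair a list of configs with an nn.ModuleList
        for sub_cfg, sub_head in zip(decode_head_cfg, decode_head):
            _check_decode_head(sub_cfg, sub_head)
        return
    assert decode_head_cfg['type'] == decode_head.__class__.__name__
    assert decode_head_cfg['num_classes'] == decode_head.num_classes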
Example #7
    def __init__(self,
                 # num_stages,
                 backbone,
                 decode_head,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 with_aux_kd=None,
                 teacher_config=None):
        # self.num_stages = num_stages

        super(KDEncoderDecoder, self).__init__(
            backbone=backbone,
            decode_head=decode_head,
            neck=neck,
            auxiliary_head=auxiliary_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

        # self.with_aux_kd = False
        self.with_aux_kd = with_aux_kd

        if teacher_config:
            T_cfg = Config.fromfile(teacher_config.config_file)
            self.teacher = build_segmentor(T_cfg.model, train_cfg=None, test_cfg=None)
            checkpoint = load_checkpoint(self.teacher, teacher_config.checkpoint)
            self.teacher.eval()
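
Note that `self.teacher.eval()` alone does not stop gradients from flowing into the teacher; a common companion step (an assumption, not shown in this excerpt) is to freeze its parameters right after loading the checkpoint:

# Sketch: detach the teacher from optimization entirely.
for param in self.teacher.parameters():
    param.requires_grad = False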
Example #8
def init_segmentor(config, checkpoint=None, device='cuda:0'):
    """Initialize a segmentor from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str, optional): CPU/CUDA device option. Default 'cuda:0'.
            Use 'cpu' for loading model on CPU.
    Returns:
        nn.Module: The constructed segmentor.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    config.model.pretrained = None
    model = build_segmentor(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        model.CLASSES = checkpoint['meta']['CLASSES']
        model.PALETTE = checkpoint['meta']['PALETTE']
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
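
Typical downstream usage together with mmseg's high-level inference API (the config and checkpoint paths are placeholders):

from mmseg.apis import inference_segmentor, init_segmentor

model = init_segmentor('configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py',
                       'checkpoints/pspnet_r50.pth')  # placeholder checkpoint
result = inference_segmentor(model, 'demo/demo.png')  # one mask per input image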
Example #9
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    torch.backends.cudnn.benchmark = False
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    model = MMDataParallel(model, device_ids=[0])

    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0
    total_iters = 200

    # benchmark with 200 images and take the average
    for i, data in enumerate(data_loader):

        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ {total_iters}], '
                      f'fps: {fps:.2f} img / s')

        if (i + 1) == total_iters:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.2f} img / s')
            break
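
A sketch of the argparse helper this benchmark script assumes (flag names inferred from the usage above):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='MMSeg benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--log-interval', type=int, default=50,
                        help='interval of logging')
    return parser.parse_args()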
Example #10
def predict_rsImage_mmseg(config_file,
                          trained_model,
                          image_path,
                          img_save_dir,
                          batch_size=1,
                          gpuid=0,
                          tile_width=480,
                          tile_height=480,
                          overlay_x=160,
                          overlay_y=160):
    cfg = mmcv.Config.fromfile(config_file)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True
    distributed = False

    # test_mode=False,rsimage='',rsImg_id=0,tile_width=480,tile_height=480,
    #                  overlay_x=160,overlay_y=160

    data_args = {
        'rsImg_predict': True,
        'rsimage': image_path,
        'tile_width': tile_width,
        'tile_height': tile_height,
        'overlay_x': overlay_x,
        'overlay_y': overlay_y
    }
    dataset = build_dataset(cfg.data.test, default_args=data_args)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=batch_size,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, trained_model, map_location='cpu')

    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if 'PALETTE' in checkpoint.get('meta', {}):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE
    # clean gpu memory when starting a new evaluation.
    torch.cuda.empty_cache()

    # no distributed
    model = MMDataParallel(model, device_ids=[gpuid])
    single_gpu_prediction_rsImage(model, data_loader, img_save_dir)
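
A hypothetical call (all paths are placeholders; the dataset registered in the config must understand the `rsImg_predict` arguments passed above):

predict_rsImage_mmseg(config_file='configs/my_rs_config.py',
                      trained_model='work_dirs/latest.pth',
                      image_path='data/scene.tif',
                      img_save_dir='results/scene',
                      batch_size=2,
                      gpuid=0)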
Example #11
def main():

    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = Config.fromfile(args.config)
    cfg.model.pretrained = None

    if args.net_params:
        tag, input_channels, block1, block2, block3, block4, last_channel = args.net_params.split(
            '-')
        input_channels = [int(item) for item in input_channels.split('_')]
        block1 = [int(item) for item in block1.split('_')]
        block2 = [int(item) for item in block2.split('_')]
        block3 = [int(item) for item in block3.split('_')]
        block4 = [int(item) for item in block4.split('_')]
        last_channel = int(last_channel)

        inverted_residual_setting = []
        for item in [block1, block2, block3, block4]:
            for _ in range(item[0]):
                inverted_residual_setting.append([
                    item[1], item[2:-int(len(item) / 2 - 1)],
                    item[-int(len(item) / 2 - 1):]
                ])

        cfg.model.backbone.input_channel = input_channels
        cfg.model.backbone.inverted_residual_setting = inverted_residual_setting
        cfg.model.backbone.last_channel = last_channel

    model = build_segmentor(cfg.model,
                            train_cfg=cfg.train_cfg,
                            test_cfg=cfg.test_cfg).cuda()
    model.eval()

    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
        split_line, input_shape, flops, params))
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
Example #12
def main(args):
    cfg = mmcv.Config.fromfile(
        f'./experiments/config_standfordbackground_{args.version}.py')

    if args.config:
        print(cfg.pretty_text)

    set_random_seed(cfg.seed, deterministic=False)

    # Build the dataset
    datasets = [build_dataset(cfg.data.train)]

    # Build the segmentor
    model = build_segmentor(cfg.model)

    if args.model:
        print(model)

    # Launch training
    if args.train:
        print("Start training...")

        # Create work_dir
        mmcv.mkdir_or_exist(os.path.abspath(cfg.work_dir))

        # Training process
        train_segmentor(model,
                        datasets,
                        cfg,
                        distributed=False,
                        validate=True,
                        meta={
                            "CLASSES": classes,
                            "PALETTE": palette,
                        })

    # evaluation
    if args.evaluation:
        eval_ = evaluate_dataset(checkpoint="{}/iter_{}.pth".format(
            cfg.work_dir, args.evaluation),
                                 device='cuda:{}'.format(cfg.gpu_ids[0]),
                                 config=cfg)
        print(f"Overall accuracy: {eval_[0]}")
        print(f"Accuracies: {eval_[1]}")
        print(f"IoUs: {eval_[2]}")
        print(f"mIoU: {eval_[3]}")
Example #13
def _test_encoder_decoder_forward(cfg_file):
    model = _get_segmentor_cfg(cfg_file)
    model['pretrained'] = None
    model['test_cfg']['mode'] = 'whole'

    from mmseg.models import build_segmentor
    segmentor = build_segmentor(model)
    segmentor.init_weights()

    if isinstance(segmentor.decode_head, nn.ModuleList):
        num_classes = segmentor.decode_head[-1].num_classes
    else:
        num_classes = segmentor.decode_head.num_classes
    # batch_size=2 for BatchNorm
    input_shape = (2, 3, 32, 32)
    mm_inputs = _demo_mm_inputs(input_shape, num_classes=num_classes)

    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_semantic_seg = mm_inputs['gt_semantic_seg']

    # convert to cuda Tensor if applicable
    if torch.cuda.is_available():
        segmentor = segmentor.cuda()
        imgs = imgs.cuda()
        gt_semantic_seg = gt_semantic_seg.cuda()
    else:
        segmentor = _convert_batchnorm(segmentor)

    # Test forward train
    losses = segmentor.forward(imgs,
                               img_metas,
                               gt_semantic_seg=gt_semantic_seg,
                               return_loss=True)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        segmentor.eval()
        # pack into lists
        img_list = [img[None, :] for img in imgs]
        img_meta_list = [[img_meta] for img_meta in img_metas]
        segmentor.forward(img_list, img_meta_list, return_loss=False)
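
The test relies on `_demo_mm_inputs`, which is not included in this excerpt. A minimal sketch, modeled on the fixture used in mmseg's test suite:

import numpy as np
import torch

def _demo_mm_inputs(input_shape=(2, 3, 8, 16), num_classes=10):
    # Create a superset of the inputs needed to run a test or train batch.
    (N, C, H, W) = input_shape
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    segs = rng.randint(0, num_classes, size=(N, 1, H, W)).astype(np.uint8)
    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': 1.0,
        'flip': False,
        'flip_direction': 'horizontal'
    } for _ in range(N)]
    return {
        'imgs': torch.FloatTensor(imgs),
        'img_metas': img_metas,
        'gt_semantic_seg': torch.LongTensor(segs)
    }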
Example #14
    def __init__(self, opt, num_classes=10, final_conv=False):
        super().__init__()
        self.opt = opt
        cfg = mmcv.Config.fromfile(
            os.path.join(self.opt.jg_dir, self.opt.config_segformer))
        cfg.model.pretrained = None
        cfg.model.train_cfg = None
        cfg.model.decode_head.num_classes = num_classes
        self.net = build_segmentor(cfg.model,
                                   train_cfg=None,
                                   test_cfg=cfg.get('test_cfg'))

        configure_encoder_decoder(self.net)
        configure_new_forward_mit(self.net.backbone)

        self.use_final_conv = final_conv

        if self.use_final_conv:
            self.final_conv = MobileResnetDecoder(num_classes, 3, ngf=64)
Example #15
    def __init__(
        self,
        opt=None,
        final_conv=False
    ):  # nb_attn: total number of attention masks; nb_mask_input: number of attention masks applied directly to the input image
        super(SegformerGenerator_attn, self).__init__()
        self.opt = opt
        self.nb_attn = self.opt.nb_mask_attn
        self.nb_mask_input = self.opt.nb_mask_input
        self.use_final_conv = final_conv
        self.tanh = nn.Tanh()

        cfg = mmcv.Config.fromfile(
            os.path.join(self.opt.jg_dir, self.opt.config_segformer))
        cfg.model.pretrained = None
        cfg.model.train_cfg = None
        cfg.model.auxiliary_head = cfg.model.decode_head.copy()
        if self.use_final_conv:
            num_cls = 256
        else:
            num_cls = 3 * (self.nb_attn - self.nb_mask_input)
        cfg.model.decode_head.num_classes = num_cls
        cfg.model.auxiliary_head.num_classes = self.nb_attn
        self.segformer = build_segmentor(cfg.model,
                                         train_cfg=None,
                                         test_cfg=cfg.get('test_cfg'))
        self.segformer.train()
        self.softmax_ = nn.Softmax(dim=1)

        configure_encoder_decoder(self.segformer)
        configure_new_forward_mit(self.segformer.backbone)

        self.use_final_conv = final_conv

        if self.use_final_conv:
            self.final_conv = MobileResnetDecoder(
                num_cls, 3 * (self.nb_attn - self.nb_mask_input), ngf=64)
Example #16
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic: '
                    f'{args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_segmentor(cfg.model,
                            train_cfg=cfg.get('train_cfg'),
                            test_cfg=cfg.get('test_cfg'))

    logger.info(model)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_segmentor(model,
                    datasets,
                    cfg,
                    distributed=distributed,
                    validate=(not args.no_validate),
                    timestamp=timestamp,
                    meta=meta)
Example #17
    if len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (
            1,
            3,
        ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None

    # build the model and load checkpoint
    segmentor = build_segmentor(cfg.model,
                                train_cfg=None,
                                test_cfg=cfg.test_cfg)
    # convert SyncBN to BN
    segmentor = _convert_batchnorm(segmentor)

    if args.checkpoint:
        load_checkpoint(segmentor, args.checkpoint, map_location='cpu')

    # convert the model to an ONNX file
    pytorch2onnx(segmentor,
                 input_shape,
                 opset_version=args.opset_version,
                 show=args.show,
                 output_file=args.output_file,
                 verify=args.verify)
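
`_convert_batchnorm` is also used by the tests elsewhere in this listing but is never shown; a sketch that mirrors the helper shipped with mmseg's deployment tools (it recursively swaps SyncBatchNorm for BatchNorm2d so the model can be traced on a single device):

import torch

def _convert_batchnorm(module):
    module_output = module
    if isinstance(module, torch.nn.SyncBatchNorm):
        module_output = torch.nn.BatchNorm2d(module.num_features, module.eps,
                                             module.momentum, module.affine,
                                             module.track_running_stats)
        if module.affine:
            module_output.weight.data = module.weight.data.clone().detach()
            module_output.bias.data = module.bias.data.clone().detach()
            # keep requires_grad unchanged
            module_output.weight.requires_grad = module.weight.requires_grad
            module_output.bias.requires_grad = module.bias.requires_grad
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
    for name, child in module.named_children():
        module_output.add_module(name, _convert_batchnorm(child))
    del module
    return module_output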
Example #18
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
           or args.show_dir, \
        ('Please specify at least one operation (save, evaluate, format, or '
         'show the results) with the argument "--out", "--eval", '
         '"--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard-coded pipeline index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init the logger before other steps
    logger = None
    if args.eval:
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        log_file = osp.join(cfg.work_dir, f'test_{timestamp}.log')
        logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)
        if logger is not None:
            logger.info(f'Set random seed to {args.seed}, deterministic: '
                        f'{args.deterministic}')
        else:
            print(f'Set random seed to {args.seed}, deterministic: '
                  f'{args.deterministic}')

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.val, dict(test_mode=True))
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = checkpoint['meta']['CLASSES']
    model.PALETTE = checkpoint['meta']['PALETTE']

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, logger, **kwargs)
Example #19
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save, evaluate, format, or '
         'show the results) with the argument "--out", "--eval", '
         '"--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard-coded pipeline index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = checkpoint['meta']['CLASSES']
    model.PALETTE = checkpoint['meta']['PALETTE']

    efficient_test = False
    if args.eval_options is not None:
        efficient_test = args.eval_options.get('efficient_test', False)

    if not distributed:
        # for concatenated (multi-)image input
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test_multi(model, data_loader, args.show,
                                        args.show_dir, args.show_original_dir,
                                        efficient_test)
    else:
        # multi-image input is currently not supported in distributed mode
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect, efficient_test)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
Example #20
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)

cfg.runner.max_iters = 80000

# Let's have a look at the final config used for training
print(f'Config:\n{cfg.pretty_text}')

from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.apis import train_segmentor

# Build the dataset
datasets = [build_dataset(cfg.data.train)]

# Build the segmentor
model = build_segmentor(cfg.model)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES

# Create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
train_segmentor(model,
                datasets,
                cfg,
                distributed=False,
                validate=True,
                meta=dict())
Example #21
def main():
    cfg, config_fn = get_cfg()
    _, config_name, _ = get_file_name_extension(config_fn)

    # dataset_name = "SV3_roads"
    dataset_name = "SN7_buildings"

    args_work_dir = path_join("C:/_koray/korhun/mmsegmentation/data/space", dataset_name + "_" + config_name)

    args_resume_from = path_join(args_work_dir, "latest.pth")
    if not os.path.isfile(args_resume_from):
        args_resume_from = None
    args_launcher = "none"
    args_seed = None
    args_deterministic = False
    args_no_validate = False

    if not os.path.isdir(args_work_dir):
        create_dir(args_work_dir)

    # cfg = Config.fromfile(args_config)

    # if args.options is not None:
    #     cfg.merge_from_dict(args.options)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.work_dir = args_work_dir

    # if args.load_from is not None:
    #     cfg.load_from = args.load_from
    if args_resume_from is not None:
        cfg.resume_from = args_resume_from
    # if args.gpu_ids is not None:
    #     cfg.gpu_ids = args.gpu_ids
    # else:
    #     cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    cfg.gpu_ids = range(0, 1)

    # init distributed env first, since logger depends on the dist info.
    if args_launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args_launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, config_name))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args_seed is not None:
        logger.info(f'Set random seed to {args_seed}, deterministic: '
                    f'{args_deterministic}')
        set_random_seed(args_seed, deterministic=args_deterministic)
    cfg.seed = args_seed
    meta['seed'] = args_seed
    meta['exp_name'] = config_name

    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))

    logger.info(model)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_segmentor(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args_no_validate),
        timestamp=timestamp,
        meta=meta)
Example #22
    if args.shape is None:
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    elif len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (
            1,
            3,
        ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    test_mode = cfg.model.test_cfg.mode

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    segmentor = build_segmentor(
        cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
    # convert SyncBN to BN
    segmentor = _convert_batchnorm(segmentor)

    if args.checkpoint:
        checkpoint = load_checkpoint(
            segmentor, args.checkpoint, map_location='cpu')
        segmentor.CLASSES = checkpoint['meta']['CLASSES']
        segmentor.PALETTE = checkpoint['meta']['PALETTE']

    # read input or create dummy input
    if args.input_img is not None:
        preprocess_shape = (input_shape[2], input_shape[3])
        rescale_shape = None
        if args.rescale_shape is not None:
            rescale_shape = [args.rescale_shape[0], args.rescale_shape[1]]
Example #23
    def __init__(self, args):
        cudnn.enabled = True
        self.args = args
        self.mode_name = 'kd_mmseg'
        device = args.device

        ######## skd
        # self.S_device = 'cuda:0'
        # self.T_device = 'cuda:1'
        # student = Res_pspnet(BasicBlock, [2, 2, 2, 2], num_classes = args.classes_num, deep_base=False)
        # load_S_model(args, student, False)
        # # print(student)
        #
        # # print(student.device)
        # print_model_parm_nums(student, 'student_model')
        # self.student = self.DataParallelModelProcess(student, 2, 'train', device=self.S_device)
        # self.student = student
        # # self.student.cuda()
        # # self.student.to('cuda:0')
        # # self.student.train()

        # teacher = Res_pspnet(Bottleneck, [3, 4, 23, 3], num_classes = args.classes_num)
        # load_T_model(teacher, args.T_ckpt_path)
        # print_model_parm_nums(teacher, 'teacher_model')
        # self.teacher = self.DataParallelModelProcess(teacher, 2, 'eval', device=self.T_device)
        # self.teacher = teacher
        # # self.teacher.to('cuda:1')
        # # self.teacher.eval()


        ##########################  mmseg

        self.S_device = 'cuda:0'
        S_config = 'configs/pspnet/pspnet_r18-d8_512x512_40k_cityscapes_1gpu.py'
        S_cfg = Config.fromfile(S_config)
        # print(S_cfg)
        # S_cfg.model.pretrained = args.student_pretrain_model_imgnet
        self.student = build_segmentor(S_cfg.model, train_cfg=S_cfg.train_cfg, test_cfg=S_cfg.test_cfg)
        # self.student = build_segmentor(S_cfg.model, train_cfg=None, test_cfg=None)

        # checkpoint = args.student_pretrain_model_imgnet
        # print(checkpoint)
        # checkpoint = load_checkpoint(self.student, checkpoint)

        # load_S_model(args, self.student, False)
        # self.student = self.DataParallelModelProcess(self.student, 2, 'train', device=self.S_device)
        self.student.train()
        self.student.to(self.S_device)

        # print(self.student)
        # for name, parameters in self.student.named_parameters():
        #     print(name, ':', parameters.size(), parameters.requires_grad)
        #
        # # print(self.student.parameters())
        #
        # # for parameters in self.student.parameters():
        # #     print(parameters)

        if self.args.pi or self.args.pa or self.args.ho:
            self.T_device = 'cuda:1'
            T_config = 'configs/pspnet/pspnet_r101-d8_512x512_80k_cityscapes_1gpu.py'
            T_cfg = Config.fromfile(T_config)
            # print(T_cfg)
            # self.teacher = build_segmentor(T_cfg.model, train_cfg=T_cfg.train_cfg, test_cfg=T_cfg.test_cfg)
            self.teacher = build_segmentor(T_cfg.model, train_cfg=None, test_cfg=None)
            checkpoint = 'work_dirs/models_zoo/pspnet_r101-d8_512x512_80k_cityscapes.2.2_iter_80000.pth'
            checkpoint = load_checkpoint(self.teacher, checkpoint)
            self.teacher = self.DataParallelModelProcess(self.teacher, 2, 'eval', device=self.T_device)

        ####################################################

        D_model = Discriminator(args.preprocess_GAN_mode, args.classes_num, args.batch_size, args.imsize_for_adv,
                                args.adv_conv_dim)
        load_D_model(args, D_model, False)
        print_model_parm_nums(D_model, 'D_model')
        # self.parallel_D = self.DataParallelModelProcess(D_model, 2, 'train', device)
        self.parallel_D = self.DataParallelModelProcess(D_model, 2, 'train', device='cuda:0')
        self.D_model = D_model

        self.G_solver = optim.SGD(
            [{'params': filter(lambda p: p.requires_grad, self.student.parameters()), 'initial_lr': args.lr_g}],
            lr=args.lr_g, momentum=args.momentum, weight_decay=args.weight_decay)
        # self.G_solver  = optim.SGD(self.student.parameters(),
        #                            lr=args.lr_g, momentum=args.momentum, weight_decay=args.weight_decay)
        self.D_solver = optim.SGD(
            [{'params': filter(lambda p: p.requires_grad, D_model.parameters()), 'initial_lr': args.lr_d}],
            lr=args.lr_d, momentum=args.momentum, weight_decay=args.weight_decay)

        self.best_mean_IU = args.best_mean_IU

        self.criterion = self.DataParallelCriterionProcess(CriterionDSN())  # CriterionCrossEntropy()
        self.criterion_ce = self.DataParallelCriterionProcess(CriterionCE())  # CriterionCrossEntropy()
        self.criterion_pixel_wise = self.DataParallelCriterionProcess(CriterionPixelWise())
        # self.criterion_pair_wise_for_interfeat = [self.DataParallelCriterionProcess(CriterionPairWiseforWholeFeatAfterPool(scale=args.pool_scale[ind], feat_ind=-(ind+1))) for ind in range(len(args.lambda_pa))]
        self.criterion_pair_wise_for_interfeat = self.DataParallelCriterionProcess(
            CriterionPairWiseforWholeFeatAfterPool(scale=args.pool_scale, feat_ind=-5))
        self.criterion_adv = self.DataParallelCriterionProcess(CriterionAdv(args.adv_loss_type))
        if args.adv_loss_type == 'wgan-gp':
            self.criterion_AdditionalGP = self.DataParallelCriterionProcess(
                CriterionAdditionalGP(self.parallel_D, args.lambda_gp))
        self.criterion_adv_for_G = self.DataParallelCriterionProcess(CriterionAdvForG(args.adv_loss_type))

        self.mc_G_loss = 0.0
        self.pi_G_loss = 0.0
        self.pa_G_loss = 0.0
        self.D_loss = 0.0

        self.criterion_AT = self.DataParallelCriterionProcess(AT(p=2))

        cudnn.benchmark = True
        if not os.path.exists(args.snapshot_dir):
            os.makedirs(args.snapshot_dir)

        print('init finish')
Example #24
    def __init__(self, cfgfile):
        super().__init__()
        cfg = Config.fromfile(cfgfile)
        temp = build_segmentor(cfg.model)
        self.backbone = temp.backbone
        self.decode_head = temp.decode_head
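
Hypothetical usage of the wrapper above (the class name `SegWrapper` and the config path are placeholders; mmseg backbones return a tuple of stage feature maps that the decode head consumes):

wrapper = SegWrapper('configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py')
feats = wrapper.backbone(torch.randn(1, 3, 512, 512))  # tuple of stage outputs
logits = wrapper.decode_head(feats)                    # (1, num_classes, H/8, W/8)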
Example #25
def test_calculate_metric(iter_nums):
    if args.net == 'unet':
        # timm-efficientnet performs slightly worse.
        if not args.vis_mode:
            backbone_type = re.sub("^eff", "efficientnet", args.backbone_type)
            net = smp.Unet(backbone_type, classes=args.num_classes, encoder_weights='imagenet')
        else:
            net = VanillaUNet(n_channels=3, num_classes=args.num_classes)
    elif args.net == 'unet-scratch':
        # net = UNet(num_classes=args.num_classes)
        net = VanillaUNet(n_channels=3, num_classes=args.num_classes, 
                          use_polyformer=args.polyformer_mode,
                          num_modes=args.num_modes)
    elif args.net == 'nestedunet':
        net = NestedUNet(num_classes=args.num_classes)
    elif args.net == 'unet3plus':
        net = UNet_3Plus(num_classes=args.num_classes)
    elif args.net == 'pranet':
        net = PraNet(num_classes=args.num_classes - 1)
    elif args.net == 'attunet':
        net = AttU_Net(output_ch=args.num_classes)
    elif args.net == 'attr2unet':
        net = R2AttU_Net(output_ch=args.num_classes)
    elif args.net == 'dunet':
        net = DeformableUNet(n_channels=3, n_classes=args.num_classes)
    elif args.net == 'setr':
        # Install mmcv first: 
        # pip install mmcv-full==1.2.2 -f https://download.openmmlab.com/mmcv/dist/cu{Your CUDA Version}/torch{Your Pytorch Version}/index.html
        # E.g.: pip install mmcv-full==1.2.2 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.7.1/index.html
        from mmcv.utils import Config
        sys.path.append("networks/setr")
        from mmseg.models import build_segmentor
        task2config = { 'refuge': 'SETR_PUP_288x288_10k_refuge_context_bs_4.py', 
                        'polyp':  'SETR_PUP_320x320_10k_polyp_context_bs_4.py' }
        setr_cfg = Config.fromfile("networks/setr/configs/SETR/{}".format(task2config[args.task_name]))
        net = build_segmentor(setr_cfg.model, train_cfg=setr_cfg.train_cfg, test_cfg=setr_cfg.test_cfg)        
        # By default, net() calls forward_train(), which receives extra parameters, and returns losses.
        # net.forward_dummy() receives/returns the traditional input/output.
        # Relevant file: mmseg/models/segmentors/encoder_decoder.py
        net.forward = net.forward_dummy
    elif args.net == 'transunet':
        transunet_config = TransUNet_CONFIGS[args.backbone_type]
        transunet_config.n_classes = args.num_classes
        if args.backbone_type.find('R50') != -1:
            # The "patch" in TransUNet means grid-like patches of the input image.
            # The "patch" in our code means the whole input image after cropping/resizing (part of the augmentation)
            transunet_config.patches.grid = (int(args.patch_size[0] / transunet_config.patches.size[0]), 
                                             int(args.patch_size[1] / transunet_config.patches.size[1]))
        net = TransUNet(transunet_config, img_size=args.patch_size, num_classes=args.num_classes)
        
    elif args.net.startswith('deeplab'):
        use_smp_deeplab = args.net.endswith('smp')
        if use_smp_deeplab:
            backbone_type = re.sub("^eff", "efficientnet", args.backbone_type)
            net = smp.DeepLabV3Plus(backbone_type, classes=args.num_classes, encoder_weights='imagenet')
        else:
            model_name = args.net + "_" + args.backbone_type
            model_map = {
                'deeplabv3_resnet50':       deeplab.deeplabv3_resnet50,
                'deeplabv3plus_resnet50':   deeplab.deeplabv3plus_resnet50,
                'deeplabv3_resnet101':      deeplab.deeplabv3_resnet101,
                'deeplabv3plus_resnet101':  deeplab.deeplabv3plus_resnet101,
                'deeplabv3_mobilenet':      deeplab.deeplabv3_mobilenet,
                'deeplabv3plus_mobilenet':  deeplab.deeplabv3plus_mobilenet
            }
            net = model_map[model_name](num_classes=args.num_classes, output_stride=8)
            
    elif args.net == 'nnunet':
        from nnunet.network_architecture.initialization import InitWeights_He
        from nnunet.network_architecture.generic_UNet import Generic_UNet
        net = Generic_UNet(
                            input_channels=3,
                            base_num_features=32,
                            num_classes=args.num_classes,
                            num_pool=7,
                            num_conv_per_stage=2,
                            feat_map_mul_on_downscale=2,
                            norm_op=nn.InstanceNorm2d,
                            norm_op_kwargs={'eps': 1e-05, 'affine': True},
                            dropout_op_kwargs={'p': 0, 'inplace': True},
                            nonlin_kwargs={'negative_slope': 0.01, 'inplace': True},
                            final_nonlin=(lambda x: x),
                            weightInitializer=InitWeights_He(1e-2),
                            pool_op_kernel_sizes=[[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]],
                            conv_kernel_sizes=([[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]]),
                            upscale_logits=False,
                            convolutional_pooling=True,
                            convolutional_upsampling=True,
                           )
        net.inference_apply_nonlin = (lambda x: F.softmax(x, 1))
                        
    elif args.net == 'segtran':
        get_default(args, 'num_modes',  default_settings, -1,   [args.net, 'num_modes', args.in_fpn_layers])
        set_segtran2d_config(args)
        print(args)
        net = Segtran2d(config)
    else:
        breakpoint()

    net.cuda()
    net.eval()

    if args.robust_ref_cp_path:
        refnet = copy.deepcopy(net)
        print("Reference network created")
        load_model(refnet, args, args.robust_ref_cp_path)
    else:
        refnet = None
        
    # Currently colormap is used only for OCT task.
    colormap = get_seg_colormap(args.num_classes, return_torch=True).cuda()

    # prepred: pre-prediction. postpred: post-prediction.
    task2mask_prepred   = { 'refuge': refuge_map_mask,      'polyp': polyp_map_mask,
                            'oct': partial(index_to_onehot, num_classes=args.num_classes) }
    task2mask_postpred  = { 'refuge': refuge_inv_map_mask,  'polyp': polyp_inv_map_mask,
                            'oct': partial(onehot_inv_map, colormap=colormap) }
    mask_prepred_mapping_func   =   task2mask_prepred[args.task_name]
    mask_postpred_mapping_funcs = [ task2mask_postpred[args.task_name] ]
    if args.do_remove_frag:
        remove_frag = lambda segmap: remove_fragmentary_segs(segmap, 255)
        mask_postpred_mapping_funcs.append(remove_frag)

    if not args.checkpoint_dir:
        if args.vis_mode is not None:
            visualize_model(net, args.vis_mode, args.vis_layers, args.patch_size, db_test)
            return

        if args.eval_robustness:
            eval_robustness(args, net, refnet, testloader, mask_prepred_mapping_func)
            return

    allcls_avg_metric = None
    all_results = np.zeros((args.num_classes, len(iter_nums)))
    
    for iter_idx, iter_num in enumerate(iter_nums):
        if args.checkpoint_dir:
            checkpoint_path = os.path.join(args.checkpoint_dir, 'iter_' + str(iter_num) + '.pth')
            load_model(net, args, checkpoint_path)

            if args.vis_mode is not None:
                visualize_model(net, args.vis_mode, args.vis_layers, args.patch_size, db_test)
                continue

            if args.eval_robustness:
                eval_robustness(args, net, refnet, testloader, mask_prepred_mapping_func)
                continue

        save_results = args.save_results and (not args.test_interp)
        if save_results:
            test_save_paths = []
            test_save_dirs  = []
            test_save_dir_tmpl  = "%s-%s-%s-%d" %(args.net, args.job_name, timestamp, iter_num)
            for suffix in ("-soft", "-%.1f" %args.mask_thres):
                test_save_dir = test_save_dir_tmpl + suffix
                test_save_path = "../prediction/%s" %(test_save_dir)
                if not os.path.exists(test_save_path):
                    os.makedirs(test_save_path)
                test_save_dirs.append(test_save_dir)
                test_save_paths.append(test_save_path)
        else:
            test_save_paths = None
            test_save_dirs  = None

        if args.save_features_img_count > 0:
            args.save_features_file_path = "%s-%s-feat-%s.pth" %(args.net, args.job_name, timestamp)
        else:
            args.save_features_file_path = None
            
        allcls_avg_metric, allcls_metric_count = \
                test_all_cases(net, testloader, task_name=args.task_name,
                               num_classes=args.num_classes,
                               mask_thres=args.mask_thres,
                               model_type=args.net,
                               orig_input_size=args.orig_input_size,
                               patch_size=args.patch_size,
                               stride=(args.orig_input_size[0] // 2, args.orig_input_size[1] // 2),
                               test_save_paths=test_save_paths,
                               out_origsize=args.out_origsize,
                               mask_prepred_mapping_func=mask_prepred_mapping_func,
                               mask_postpred_mapping_funcs=mask_postpred_mapping_funcs,
                               reload_mask=args.reload_mask,
                               test_interp=args.test_interp,
                               save_features_img_count=args.save_features_img_count,
                               save_features_file_path=args.save_features_file_path,
                               verbose=args.verbose_output)

        print("Iter-%d scores on %d images:" %(iter_num, allcls_metric_count[0]))
        dice_sum = 0
        for cls in range(1, args.num_classes):
            dice = allcls_avg_metric[cls-1]
            print('class %d: dice = %.3f' %(cls, dice))
            dice_sum += dice
            all_results[cls, iter_idx] = dice
        avg_dice = dice_sum / (args.num_classes - 1)
        print("Average dice: %.3f" %avg_dice)

        if args.net == 'segtran':
            max_attn, avg_attn, clamp_count, call_count = \
                [ segtran_shared.__dict__[v] for v in ('max_attn', 'avg_attn', 'clamp_count', 'call_count') ]
            print("max_attn={:.2f}, avg_attn={:.2f}, clamp_count={}, call_count={}".format(
                  max_attn, avg_attn, clamp_count, call_count))

        if save_results:
            FNULL = open(os.devnull, 'w')
            for pred_type, test_save_dir, test_save_path in zip(('soft', 'hard'), test_save_dirs, test_save_paths):
                do_tar = subprocess.run(["tar", "cvf", "%s.tar" %test_save_dir, test_save_dir], cwd="../prediction",
                                        stdout=FNULL, stderr=subprocess.STDOUT)
                # print(do_tar)
                print("{} tarball:\n{}.tar".format(pred_type, os.path.abspath(test_save_path)))

    np.set_printoptions(precision=3, suppress=True)
    print(all_results[1:])
    return allcls_avg_metric
Beispiel #26
0
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)

# Let's have a look at the final config used for training
print(f'Config:\n{cfg.pretty_text}')

# Build the dataset
datasets = [build_dataset(cfg.data.train)]

# checkpoint_file = osp.join(data_root, 'model', 'iter_1600.pth')
# model = init_segmentor(cfg, checkpoint_file, device='cuda:0')
# Build the detector

model = build_segmentor(cfg.model,
                        train_cfg=cfg.get('train_cfg'),
                        test_cfg=cfg.get('test_cfg'))

# print('datasets[0]: ', datasets[0].PALETTE)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
model.PALETTE = datasets[0].PALETTE

# Create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
train_segmentor(model,
                datasets,
                cfg,
                distributed=False,
                validate=True,
                meta=dict())
Beispiel #27
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    if args.work_dir is not None:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        json_file = osp.join(args.work_dir, f'fps_{timestamp}.json')
    else:
        # use config filename as default work_dir if cfg.work_dir is None
        work_dir = osp.join('./work_dirs',
                            osp.splitext(osp.basename(args.config))[0])
        mmcv.mkdir_or_exist(osp.abspath(work_dir))
        json_file = osp.join(work_dir, f'fps_{timestamp}.json')

    repeat_times = args.repeat_times
    # set cudnn_benchmark
    torch.backends.cudnn.benchmark = False
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    benchmark_dict = dict(config=args.config, unit='img / s')
    overall_fps_list = []
    for time_index in range(repeat_times):
        print(f'Run {time_index + 1}:')
        # build the dataloader
        # TODO: support multiple images per gpu (only minor changes are needed)
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)

        # build the model and load checkpoint
        cfg.model.train_cfg = None
        model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        if 'checkpoint' in args and osp.exists(args.checkpoint):
            load_checkpoint(model, args.checkpoint, map_location='cpu')

        model = MMDataParallel(model, device_ids=[0])

        model.eval()

        # the first several iterations may be very slow so skip them
        num_warmup = 5
        pure_inf_time = 0
        total_iters = 200

        # benchmark with 200 image and take the average
        for i, data in enumerate(data_loader):

            torch.cuda.synchronize()
            start_time = time.perf_counter()

            with torch.no_grad():
                model(return_loss=False, rescale=True, **data)

            torch.cuda.synchronize()
            elapsed = time.perf_counter() - start_time

            if i >= num_warmup:
                pure_inf_time += elapsed
                if (i + 1) % args.log_interval == 0:
                    fps = (i + 1 - num_warmup) / pure_inf_time
                    print(f'Done image [{i + 1:<3}/ {total_iters}], '
                          f'fps: {fps:.2f} img / s')

            if (i + 1) == total_iters:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Overall fps: {fps:.2f} img / s\n')
                benchmark_dict[f'overall_fps_{time_index + 1}'] = round(fps, 2)
                overall_fps_list.append(fps)
                break
    benchmark_dict['average_fps'] = round(np.mean(overall_fps_list), 2)
    benchmark_dict['fps_variance'] = round(np.var(overall_fps_list), 4)
    print(f'Average fps of {repeat_times} evaluations: '
          f'{benchmark_dict["average_fps"]}')
    print(f'The variance of {repeat_times} evaluations: '
          f'{benchmark_dict["fps_variance"]}')
    mmcv.dump(benchmark_dict, json_file, indent=4)
Beispiel #28
0
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
meta['env_info'] = env_info

# log some basic info
distributed = False
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')

model = build_segmentor(cfg.model,
                        train_cfg=cfg.train_cfg,
                        test_cfg=cfg.test_cfg)
# checkpoint = '/home/zhouzhigong/Documents/mmsegmentation/work_dirs/pspnet_r18-d8_512x512_40k_cityscapes_skd/iter_40000.pth'
# checkpoint = load_checkpoint(model, checkpoint)

model.cuda(0)
model_paral = MMDataParallel(model, device_ids=[0])

train_dataset = build_dataset(cfg.data.train)
train_data_loader = build_dataloader(
    train_dataset,
    cfg.data.samples_per_gpu,  # samples_per_gpu
    cfg.data.workers_per_gpu,  # workers_per_gpu
    len(cfg.gpu_ids),
    dist=False,
    seed=cfg.seed,
Beispiel #29
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    cfg.auto_resume = args.auto_resume

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # gpu_ids is used to calculate iter when resuming checkpoint
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # set multi-process settings
    setup_multi_processes(cfg)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    seed = init_random_seed(args.seed)
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()

    # SyncBN is not support for DP
    if not distributed:
        warnings.warn(
            'SyncBN is only supported with DDP. To be compatible with DP, '
            'we convert SyncBN to BN. Please use dist_train.sh which can '
            'avoid this error.')
        model = revert_sync_batchnorm(model)

    logger.info(model)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # passing checkpoint meta for saving best checkpoint
    meta.update(cfg.checkpoint_config.meta)
    train_segmentor(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
Beispiel #30
0
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
meta['env_info'] = env_info

# log some basic info
distributed = False
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')

model = build_segmentor(cfg.model,
                        train_cfg=cfg.train_cfg,
                        test_cfg=cfg.test_cfg)
# checkpoint = '/home/zhouzhigong/Documents/mmsegmentation/work_dirs/pspnet_r18-d8_512x512_40k_cityscapes_skd/iter_40000.pth'
# checkpoint = load_checkpoint(model, checkpoint)

model.cuda(0)
model_paral = MMDataParallel(model, device_ids=[0])

train_dataset = build_dataset(cfg.data.train)
train_data_loader = build_dataloader(
    train_dataset,
    cfg.data.samples_per_gpu,  # samples_per_gpu
    cfg.data.workers_per_gpu,  # workers_per_gpu
    len(cfg.gpu_ids),
    dist=False,
    seed=cfg.seed,