Example 1
class ConfigurableMixin(object):
    """
    Usage 1: for torch.nn.Module
    >>> import torch.nn as nn
    >>> class Custom(nn.Module, ConfigurableMixin):
    >>>     def __init__(self, config:AttrDict):
    >>>         super(Custom,self).__init__()
    >>>         ConfigurableMixin.__init__(self, config)
    >>>     def forward(self, *input):
    >>>         pass
    >>>     def set_defalut_config(self):
    >>>         self.config.update(dict())
    """

    def __init__(self, config):
        self._cfg = AttrDict()
        self.set_defalut_config()
        self._cfg.update(config)

    def set_defalut_config(self):
        raise NotImplementedError

    @property
    def config(self):
        return self._cfg
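
For reference, a minimal sketch of the default-then-override flow this mixin implements. The AttrDict below is a hypothetical stand-in for the library's attribute-style dict (the real one also handles nested dicts), and Linear3 with its fields is invented for illustration; pasted after the class above, this runs as-is.

class AttrDict(dict):
    # Hypothetical minimal stand-in for the library's AttrDict.
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__


class Linear3(ConfigurableMixin):
    def set_defalut_config(self):
        # Defaults are filled first ...
        self.config.update(dict(in_features=3, out_features=8))


m = Linear3(dict(out_features=16))  # ... then the user config overrides them
assert m.config.in_features == 3    # untouched default survives
assert m.config.out_features == 16  # override applied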
Example 2
class ISAIDSegmmDataLoader(DataLoader):
    def __init__(self, config):
        self.config = AttrDict()
        self.set_defalut()
        self.config.update(config)

        dataset = ISAIDSegmmDataset(self.config.image_dir,
                                    self.config.mask_dir,
                                    self.config.patch_config,
                                    self.config.transforms)

        sampler = distributed.StepDistributedSampler(
            dataset) if self.config.training else SequentialSampler(dataset)

        super(ISAIDSegmmDataLoader,
              self).__init__(dataset,
                             self.config.batch_size,
                             sampler=sampler,
                             num_workers=self.config.num_workers,
                             pin_memory=True)

    def set_defalut(self):
        self.config.update(
            dict(image_dir='',
                 mask_dir='',
                 patch_config=dict(
                     patch_size=896,
                     stride=512,
                 ),
                 transforms=[
                     RemoveColorMap(),
                     segm.RandomHorizontalFlip(0.5),
                     segm.RandomVerticalFlip(0.5),
                     segm.RandomRotate90K((0, 1, 2, 3)),
                     segm.FixedPad((896, 896), 255),
                     segm.ToTensor(True),
                     comm.THMeanStdNormalize((123.675, 116.28, 103.53),
                                             (58.395, 57.12, 57.375))
                 ],
                 batch_size=1,
                 num_workers=0,
                 training=True))
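
A hedged usage sketch, assuming the module defining ISAIDSegmmDataLoader and its dataset/transform dependencies is importable; the paths are placeholders. Because set_defalut() runs before config.update(config), only fields that differ from the defaults above need to be passed.

loader = ISAIDSegmmDataLoader(dict(
    image_dir='/data/isaid/train/images',  # placeholder path
    mask_dir='/data/isaid/train/masks',    # placeholder path
    batch_size=4,
    num_workers=2,
))
for batch in loader:  # batch layout is whatever ISAIDSegmmDataset yields
    pass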
Example 3
def run(local_rank,
        config_path,
        model_dir,
        cpu_mode=False,
        after_construct_launcher_callbacks=None,
        opts=None):
    # 0. config
    cfg = config.import_config(config_path)
    cfg = AttrDict.from_dict(cfg)
    if opts is not None:
        cfg.update_from_list(opts)
    # 1. model
    model = make_model(cfg['model'])
    if cfg['train'].get('sync_bn', False):
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)

    if not cpu_mode:
        if torch.cuda.is_available():
            torch.cuda.set_device(local_rank)
            dist.init_process_group(backend="nccl", init_method="env://")
        model.to(torch.device('cuda'))
        if dist.is_available():
            model = nn.parallel.DistributedDataParallel(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
            )

    # 2. data
    traindata_loader = make_dataloader(cfg['data']['train'])
    testdata_loader = make_dataloader(
        cfg['data']['test']) if 'test' in cfg['data'] else None

    # 3. optimizer
    lr_schedule = make_learningrate(cfg['learning_rate'])
    cfg['optimizer']['params']['lr'] = lr_schedule.base_lr
    optimizer = make_optimizer(cfg['optimizer'], params=model.parameters())
    tl = trainer.Launcher(model_dir=model_dir,
                          model=model,
                          optimizer=optimizer,
                          lr_schedule=lr_schedule)

    if after_construct_launcher_callbacks is not None:
        for f in after_construct_launcher_callbacks:
            f(tl)

    tl.logger.info('sync bn: {}'.format(
        'True' if cfg['train'].get('sync_bn', False) else 'False'))
    tl.logger.info('external parameter: {}'.format(opts))
    tl.train_by_config(traindata_loader,
                       config=merge_dict(cfg['train'], cfg['test']),
                       test_data_loader=testdata_loader)
    return dict(config=cfg, launcher=tl)
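
The env:// init method and the local_rank parameter imply that run() is executed once per GPU by an external launcher. A hedged entry-point sketch, assuming a torch.distributed.launch-style invocation that passes --local_rank and exports MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--model_dir', type=str, required=True)
    args = parser.parse_args()
    run(args.local_rank, args.config_path, args.model_dir)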
Example 4
class CVModule(nn.Module):
    def __init__(self, config):
        super(CVModule, self).__init__()
        self._cfg = AttrDict()
        self.set_defalut_config()
        self._update_config(config)

    def forward(self, *input):
        raise NotImplementedError

    def set_defalut_config(self):
        raise NotImplementedError

    def _update_config(self, new_config):
        self._cfg.update(new_config)

    @property
    def config(self):
        return self._cfg
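
A hedged sketch of a concrete subclass; SimpleHead and its defaults are invented for illustration. Because CVModule.__init__ resolves the config before returning, the merged values are already available when the layers are built.

import torch.nn as nn


class SimpleHead(CVModule):
    def __init__(self, config):
        super(SimpleHead, self).__init__(config)  # fills defaults, applies overrides
        self.classifier = nn.Conv2d(self.config.in_channels,
                                    self.config.num_classes, 1)

    def forward(self, x):
        return self.classifier(x)

    def set_defalut_config(self):
        self.config.update(dict(in_channels=256, num_classes=16))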
Example 5
def run(config_path,
        model_dir,
        cpu_mode=False,
        after_construct_launcher_callbacks=None,
        opts=None):
    # 0. config
    cfg = config.import_config(config_path)
    cfg = AttrDict.from_dict(cfg)
    if opts is not None:
        cfg.update_from_list(opts)
    # 1. model
    model = make_model(cfg['model'])

    if not cpu_mode:
        if torch.cuda.is_available():
            model.to(torch.device('cuda'))
            model = nn.DataParallel(model,
                                    device_ids=list(
                                        range(torch.cuda.device_count())))

    # 2. data
    traindata_loader = make_dataloader(cfg['data']['train'])
    testdata_loader = make_dataloader(
        cfg['data']['test']) if 'test' in cfg['data'] else None

    # 3. optimizer
    lr_schedule = make_learningrate(cfg['learning_rate'])
    cfg['optimizer']['params']['lr'] = lr_schedule.base_lr
    optimizer = make_optimizer(cfg['optimizer'], params=model.parameters())

    tl = trainer.Launcher(model_dir=model_dir,
                          model=model,
                          optimizer=optimizer,
                          lr_schedule=lr_schedule)

    if after_construct_launcher_callbacks is not None:
        for f in after_construct_launcher_callbacks:
            f(tl)

    tl.logger.info('external parameter: {}'.format(opts))
    tl.train_by_config(traindata_loader,
                       config=merge_dict(cfg['train'], cfg['test']),
                       test_data_loader=testdata_loader)
    return dict(config=cfg, launcher=tl)
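
Unlike the distributed variant in Example 3, this version needs no process launcher and can be called directly; a hedged invocation sketch with placeholder paths (opts is omitted because update_from_list's expected format is not shown here):

out = run(config_path='configs/experiment.py',  # placeholder path
          model_dir='./log/experiment')         # placeholder path
launcher = out['launcher']  # run() returns dict(config=cfg, launcher=tl)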
Example 6
class Xview2DataLoader(DataLoader):
    def __init__(self, config):
        self.config = AttrDict()
        self.set_defalut()
        self.config.update(config)
        if isinstance(self.config.image_dir, (tuple, list)):
            dataset_list = []
            for im_dir, label_dir in zip(self.config.image_dir,
                                         self.config.label_dir):
                dataset_list.append(
                    Xview2(im_dir, label_dir, self.config.mode,
                           self.config.include, self.config.transforms))

            dataset = ConcatDataset(dataset_list)

        else:
            dataset = Xview2(self.config.image_dir, self.config.label_dir,
                             self.config.mode, self.config.include,
                             self.config.transforms)

        if self.config.CV.on and self.config.CV.cur_k != -1:
            CV = CrossValSamplerGenerator(dataset, distributed=True, seed=2333)
            sampler_pairs = CV.k_fold(self.config.CV.k_fold)
            train_sampler, val_sampler = sampler_pairs[self.config.CV.cur_k]
            if self.config.training:
                sampler = train_sampler
            else:
                sampler = val_sampler
        else:
            sampler = distributed.StepDistributedSampler(
                dataset) if self.config.training else SequentialSampler(
                    dataset)

        super(Xview2DataLoader,
              self).__init__(dataset,
                             self.config.batch_size,
                             sampler=sampler,
                             num_workers=self.config.num_workers,
                             pin_memory=True)

    def set_defalut(self):
        self.config.update(
            dict(image_dir='',
                 label_dir='',
                 mode='segm',
                 include=('pre', 'post'),
                 CV=dict(
                     on=True,
                     cur_k=0,
                     k_fold=5,
                 ),
                 transforms=[
                     segm.RandomHorizontalFlip(0.5),
                     segm.RandomVerticalFlip(0.5),
                     segm.RandomRotate90K((0, 1, 2, 3)),
                     segm.FixedPad((1024, 1024), 255),
                     segm.ToTensor(True),
                     comm.THMeanStdNormalize((123.675, 116.28, 103.53),
                                             (58.395, 57.12, 57.375))
                 ],
                 batch_size=1,
                 num_workers=0,
                 training=True))
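
A hedged sketch of driving the CV block: the fixed seed (2333) makes the k-fold split deterministic, so two loaders built with the same cur_k but opposite training flags receive the disjoint train/val samplers of that fold. Paths are placeholders.

fold = 0
common = dict(image_dir='/data/xview2/images',  # placeholder path
              label_dir='/data/xview2/labels',  # placeholder path
              CV=dict(on=True, cur_k=fold, k_fold=5))
train_loader = Xview2DataLoader(dict(common, training=True))
val_loader = Xview2DataLoader(dict(common, training=False))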
Example 7
class Xview2PairwiseDataLoader(DataLoader):
    def __init__(self, config):
        self.config = AttrDict()
        self.set_defalut()
        self.config.update(config)
        if isinstance(self.config.image_dir, (tuple, list)):
            dataset_list = []
            for im_dir, label_dir in zip(self.config.image_dir,
                                         self.config.label_dir):
                dataset_list.append(
                    Xview2(im_dir, label_dir, self.config.mode,
                           self.config.include,
                           self.config.transforms).pairwise_mode())

            dataset = ConcatDataset(dataset_list)

        else:
            dataset = Xview2(self.config.image_dir, self.config.label_dir,
                             self.config.mode, self.config.include,
                             self.config.transforms).pairwise_mode()

        if self.config.CV.on and self.config.CV.cur_k != -1:
            CV = CrossValSamplerGenerator(dataset, distributed=True, seed=2333)
            sampler_pairs = CV.k_fold(self.config.CV.k_fold)
            train_sampler, val_sampler = sampler_pairs[self.config.CV.cur_k]
            if self.config.training:
                sampler = train_sampler
            else:
                sampler = val_sampler
        else:
            sampler = distributed.StepDistributedSampler(
                dataset) if self.config.training else SequentialSampler(
                    dataset)

        super(Xview2PairwiseDataLoader,
              self).__init__(dataset,
                             self.config.batch_size,
                             sampler=sampler,
                             num_workers=self.config.num_workers,
                             pin_memory=True)

    def set_defalut(self):
        self.config.update(
            dict(image_dir='',
                 label_dir='',
                 mode='segm',
                 include=('pre', 'post'),
                 CV=dict(
                     on=True,
                     cur_k=0,
                     k_fold=5,
                 ),
                 transforms=Compose([
                     OneOf([
                         HorizontalFlip(True),
                         VerticalFlip(True),
                         RandomRotate90(True)
                     ],
                           p=0.5),
                     RandomDiscreteScale([0.75, 1.25, 1.5], p=0.5),
                     RandomCrop(640, 640, True),
                     Normalize(mean=(0.485, 0.456, 0.406, 0.485, 0.456, 0.406),
                               std=(0.229, 0.224, 0.225, 0.229, 0.224, 0.225),
                               max_pixel_value=255),
                     ToTensorV2(True),
                 ]),
                 batch_size=1,
                 num_workers=0,
                 training=True))
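
The six-value mean/std in these defaults suggests that pairwise_mode() concatenates the pre- and post-disaster images along the channel axis before normalization; a hedged illustration of that assumption with placeholder arrays:

import numpy as np

pre = np.zeros((1024, 1024, 3), dtype=np.uint8)   # placeholder pre-disaster image
post = np.zeros((1024, 1024, 3), dtype=np.uint8)  # placeholder post-disaster image
pair = np.concatenate([pre, post], axis=-1)       # HxWx6, matching the 6-channel stats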
Example 8
def run(local_rank,
        config_path,
        model_dir,
        opt_level='O0',
        cpu_mode=False,
        after_construct_launcher_callbacks=None,
        opts=None):
    # 0. config
    cfg = config.import_config(config_path)
    cfg = AttrDict.from_dict(cfg)
    if opts is not None:
        cfg.update_from_list(opts)
    # 1. model
    model = make_model(cfg['model'])
    if cfg['train'].get('apex_sync_bn', False):
        model = apex.parallel.convert_syncbn_model(model)
    # 2. optimizer
    lr_schedule = make_learningrate(cfg['learning_rate'])
    cfg['optimizer']['params']['lr'] = lr_schedule.base_lr
    optimizer = make_optimizer(cfg['optimizer'], params=model.parameters())

    if not cpu_mode:
        if torch.cuda.is_available():
            torch.cuda.set_device(local_rank)
            dist.init_process_group(backend="nccl", init_method="env://")
        model.to(torch.device('cuda'))
        if dist.is_available():
            # if OPT_LEVELS.index(opt_level) < 2:
            #     keep_batchnorm_fp32 = None
            model, optimizer = amp.initialize(
                model,
                optimizer,
                opt_level=opt_level,
            )
            model = DDP(
                model,
                delay_allreduce=True,
            )
    # 3. data
    traindata_loader = make_dataloader(cfg['data']['train'])
    testdata_loader = make_dataloader(
        cfg['data']['test']) if 'test' in cfg['data'] else None
    tl = trainer.Launcher(model_dir=model_dir,
                          model=model,
                          optimizer=optimizer,
                          lr_schedule=lr_schedule)
    # log dist train info
    tl.logger.info(
        '[NVIDIA/apex] amp optimizer. opt_level = {}'.format(opt_level))
    tl.logger.info('apex sync bn: {}'.format(
        'on' if cfg['train'].get('apex_sync_bn', False) else 'off'))
    tl.logger.info('external parameter: {}'.format(opts))
    tl.override_backward(default_backward.amp_backward)

    if after_construct_launcher_callbacks is not None:
        for f in after_construct_launcher_callbacks:
            f(tl)

    tl.train_by_config(traindata_loader,
                       config=merge_dict(cfg['train'], cfg['test']),
                       test_data_loader=testdata_loader)
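
override_backward swaps the launcher's default backward for an amp-aware one. A hedged sketch of what default_backward.amp_backward presumably does; the hook signature is an assumption, while amp.scale_loss is the documented apex API for loss scaling:

from apex import amp


def amp_backward(loss, optimizer, **kwargs):
    # Scale the loss to avoid fp16 gradient underflow; apex unscales
    # the gradients again before optimizer.step().
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()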