Example #1
    def init_weights(self):
        """Initialize the weights in backbone."""
        logger = get_root_logger()
        if self.init_cfg is None:
            logger.warning(f'No pre-trained weights for '
                           f'{self.__class__.__name__}, '
                           f'training starts from scratch')
            self.apply(self._init_weights)
        else:
            assert 'checkpoint' in self.init_cfg, f'Only support ' \
                                                  f'specifying `Pretrained` ' \
                                                  f'in `init_cfg` of ' \
                                                  f'{self.__class__.__name__}'
            ckpt = _load_checkpoint(self.init_cfg.checkpoint,
                                    logger=logger,
                                    map_location='cpu')
            if 'state_dict' in ckpt:
                state_dict = ckpt['state_dict']
            elif 'model' in ckpt:
                state_dict = ckpt['model']
            else:
                state_dict = ckpt

            missing_keys, unexpected_keys = \
                self.load_state_dict(state_dict, False)
            logger.warning(f'missing_keys: {missing_keys}')
            logger.warning(f'unexpected_keys: {unexpected_keys}')
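A minimal sketch of the config this branch expects, assuming an mmdet-style backbone; only the `init_cfg=dict(type='Pretrained', checkpoint=...)` shape matters here, the class name and URL are placeholders:

# Hypothetical config fragment; `MyBackbone` and the checkpoint URL are
# placeholders illustrating the `init_cfg` shape the method above reads.
backbone = dict(
    type='MyBackbone',
    init_cfg=dict(
        type='Pretrained',
        checkpoint='https://example.com/pretrained_backbone.pth'))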
Example #2
def _non_dist_train(model, dataset, cfg, validate=False, **kwargs):
    logger = get_root_logger(cfg.log_level)
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False,
            repeat_samples=cfg.train_cfg.repeat_samples,
            **kwargs)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = EpochBasedRunner(
        model,
        #batch_processor=batch_processor,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger)
    #runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
    #                logger)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
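A sketch of the config fields this function reads, with placeholder values; in practice `cfg` is an mmcv `Config`, so the attribute access above (`cfg.data.imgs_per_gpu`, `cfg.train_cfg.repeat_samples`, ...) works on the nested dicts:

# Placeholder config covering only the fields `_non_dist_train` touches.
cfg = dict(
    log_level='INFO',
    gpus=1,
    data=dict(imgs_per_gpu=2, workers_per_gpu=2),
    train_cfg=dict(repeat_samples=1),
    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001),
    optimizer_config=dict(grad_clip=None),
    lr_config=dict(policy='step', step=[8, 11]),
    checkpoint_config=dict(interval=1),
    log_config=dict(interval=50, hooks=[dict(type='TextLoggerHook')]),
    work_dir='./work_dirs/example',
    resume_from=None,
    load_from=None,
    workflow=[('train', 1)],
    total_epochs=12)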
Example #3
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """

        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

        if isinstance(pretrained, str):
            self.apply(_init_weights)
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')
Example #4
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
Example #5
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   save_random_weights=False):

    logger = get_root_logger(cfg.log_level)

    # start training
    if distributed:
        _dist_train(
            model,
            dataset,
            cfg,
            validate=validate,
            logger=logger,
            timestamp=timestamp)
    else:
        _non_dist_train(
            model,
            dataset,
            cfg,
            validate=validate,
            logger=logger,
            timestamp=timestamp,
            save_random_weights=save_random_weights)
Example #6
    def __init__(self, dataset, oversample_thr, repeat_mode='ceil'):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.CLASSES = dataset.CLASSES

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []

        def _get_ri(rf):
            if repeat_mode == 'ceil':
                return math.ceil(rf)
            elif repeat_mode == 'round':
                return round(rf)
            elif repeat_mode == 'floor':
                return int(rf)
            else:
                raise NotImplementedError

        for dataset_index, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_index] * _get_ri(repeat_factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(_get_ri(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

        logger = get_root_logger()
        logger.info(
            f"set up RFS dataset, t={oversample_thr}, repeat_mode={repeat_mode}"
        )
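The `_get_repeat_factors` helper is not shown above. A sketch of the LVIS-style computation it typically performs, where each image gets r(I) = max over its categories c of max(1, sqrt(t / f(c))) and f(c) is the fraction of images containing category c; the body below is an assumption written as a standalone function, only the name and call shape mirror the example:

import math
from collections import defaultdict

def _get_repeat_factors(dataset, oversample_thr):
    """Hypothetical per-image repeat factors in the LVIS/RFS style."""
    # f(c): fraction of images that contain each category.
    category_freq = defaultdict(float)
    num_images = len(dataset)
    for idx in range(num_images):
        for cat_id in set(dataset.get_cat_ids(idx)):
            category_freq[cat_id] += 1
    for cat_id in category_freq:
        category_freq[cat_id] /= num_images

    # r(c) = max(1, sqrt(t / f(c))).
    category_repeat = {
        cat_id: max(1.0, math.sqrt(oversample_thr / freq))
        for cat_id, freq in category_freq.items()
    }

    # r(I) = max of r(c) over the categories present in image I.
    repeat_factors = []
    for idx in range(num_images):
        cat_ids = set(dataset.get_cat_ids(idx))
        repeat_factors.append(
            max({category_repeat[c] for c in cat_ids}, default=1.0))
    return repeat_factors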
Example #7
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.features.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

        for m in self.extra.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

        constant_init(self.l2_norm, self.l2_norm.scale)
Example #8
    def __init__(self, dataset, max_iter):
        self.dataset = dataset
        self.max_iter = max_iter
        self.num_classes = len(dataset.cat_ids)
        self.CLASSES = dataset.CLASSES

        logger = get_root_logger()
        logger.info(f'init CAS dataset, num_classes {self.num_classes}')

        indices = []
        flag = []

        cls_data_inds = [[] for _ in range(self.num_classes)]
        for idx in range(len(dataset)):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            for cat_id in cat_ids:
                label = self.dataset.cat2label[cat_id]
                cls_data_inds[label].append(idx)

        g = torch.Generator()
        g.manual_seed(0)
        cls_ind_stream = iter(
            RandomDataStream(list(range(self.num_classes)), g))
        cls_data_streams = [None] * self.num_classes
        for i, data_inds in enumerate(cls_data_inds):
            cls_data_streams[i] = iter(RandomDataStream(data_inds, g))

        for _ in range(max_iter):
            cls_idx = next(cls_ind_stream)
            img_idx = next(cls_data_streams[cls_idx])
            indices.append(int(img_idx))
            flag.append(self.dataset.flag[img_idx])

        self.indices = indices
        self.flag = np.asarray(flag, dtype=np.uint8)
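`RandomDataStream` is not defined in the snippet. A plausible sketch, assuming it is an endless iterator that re-shuffles its items on every pass using the shared `torch.Generator` so the class-aware sampling above stays reproducible; the implementation is an assumption, only the constructor arguments and the `iter()`/`next()` usage match the example:

import torch

class RandomDataStream:
    """Hypothetical endless stream yielding `items` in a freshly shuffled
    order on every pass, driven by a shared torch.Generator."""

    def __init__(self, items, generator):
        assert len(items) > 0, 'RandomDataStream needs at least one item'
        self.items = list(items)
        self.generator = generator

    def __iter__(self):
        while True:
            perm = torch.randperm(len(self.items), generator=self.generator)
            for i in perm.tolist():
                yield self.items[i]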
Example #9
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
            # todo: init weight for gates
            for m in self.modules():
                if isinstance(m, DynamicHRModule):
                    if 'gate_layers' in m.__dict__['_modules'].keys():
                        # pdb.set_trace()
                        m_gate = m.__dict__['_modules']['gate_layers']
                        for m_g in m_gate.modules():
                            # pdb.set_trace()
                            if isinstance(m_g, nn.Conv2d):
                                kaiming_init(m_g, mode='fan_in',
                                             bias=2.2)  # 1.5
                            elif isinstance(m_g, (_BatchNorm, nn.GroupNorm)):
                                constant_init(m_g, 1)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
Example #10
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    logger = get_root_logger(cfg.log_level)

    # start training
    if distributed:
        _dist_train(model,
                    dataset,
                    cfg,
                    validate=validate,
                    logger=logger,
                    timestamp=timestamp,
                    meta=meta)
    else:
        _non_dist_train(model,
                        dataset,
                        cfg,
                        validate=validate,
                        logger=logger,
                        timestamp=timestamp,
                        meta=meta)
Example #11
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)

        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    if (m.in_channels != m.out_channels
                            or m.out_channels != m.groups
                            or m.bias is not None):
                        # Don't want to reinitialize downsample layers; the code
                        # assumes normal conv layers will not have these characteristics.
                        nn.init.kaiming_normal_(m.weight,
                                                mode='fan_out',
                                                nonlinearity='relu')
                    else:
                        print('Not initializing')
                elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if self.zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
Example #12
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        # dcn in Res2Net bottle2neck is in ModuleList
                        for n in m.convs:
                            if hasattr(n, 'conv_offset'):
                                constant_init(n.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        constant_init(m.norm3, 0)
        else:
            raise TypeError('pretrained must be a str or None')
Example #13
    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 save_best=None,
                 rule=None,
                 **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got'
                            f' {type(dataloader)}')
        if not interval > 0:
            raise ValueError(f'interval must be positive, but got {interval}')
        if start is not None and start < 0:
            warnings.warn(
                f'The evaluation start epoch {start} is smaller than 0, '
                f'use 0 instead', UserWarning)
            start = 0
        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        assert isinstance(save_best, str) or save_best is None
        self.save_best = save_best
        self.eval_kwargs = eval_kwargs
        self.initial_epoch_flag = True

        self.logger = get_root_logger()

        if self.save_best is not None:
            self._init_rule(rule, self.save_best)
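`_init_rule` is not shown here. In mmcv-style EvalHooks it typically resolves `rule` to a comparison function and, when `rule` is None, infers it from the metric name; the sketch below follows that convention, is meant to live on the same hook class as the constructor above, and is an assumption rather than the hook's actual code:

    rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
    greater_keys = ['mAP', 'AR', 'acc', 'top', 'accuracy']
    less_keys = ['loss']

    def _init_rule(self, rule, key_indicator):
        """Hypothetical sketch: pick the comparison used by `save_best`."""
        if rule is not None and rule not in self.rule_map:
            raise KeyError(f'rule must be "greater", "less" or None, '
                           f'but got {rule}')
        if rule is None:
            if any(key in key_indicator for key in self.greater_keys):
                rule = 'greater'
            elif any(key in key_indicator for key in self.less_keys):
                rule = 'less'
            else:
                raise ValueError(f'Cannot infer the rule for {key_indicator}, '
                                 f'please specify it explicitly.')
        self.rule = rule
        self.key_indicator = key_indicator
        self.compare_func = self.rule_map[self.rule]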
Example #14
    def init_weights(self):
        # Calling this method will cause a parameter initialization exception
        # super(DetectoRS_ResNet, self).init_weights()

        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif self.pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
Example #15
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         #load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
         revise_keys = [(r'^module\.', '')]
         checkpoint = _load_checkpoint(pretrained,
                                       map_location='cpu',
                                       logger=logger)
         # OrderedDict is a subclass of dict
         if not isinstance(checkpoint, dict):
             raise RuntimeError(
                 f'No state_dict found in checkpoint file {pretrained}')
         # get state_dict from checkpoint
         if 'state_dict' in checkpoint:
             state_dict = checkpoint['state_dict']
         elif 'model' in checkpoint:  # for our model
             state_dict = checkpoint['model']
         else:
             state_dict = checkpoint
         # strip prefix of state_dict
         for p, r in revise_keys:
             state_dict = {
                 re.sub(p, r, k): v
                 for k, v in state_dict.items()
             }
         # load state_dict
         load_state_dict(self, state_dict, strict=False, logger=logger)
Example #16
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
Example #17
File: pvt.py Project: whai362/PVT
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self,
                         pretrained,
                         map_location='cpu',
                         strict=False,
                         logger=logger)
Example #18
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build detector."""
    model = build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))

    logger = get_root_logger()
    logger.info(str(model))

    return model
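A minimal usage sketch for the builder above, assuming an mmdet 1.x/early-2.x style config where `train_cfg` and `test_cfg` live at the top level; the config path is a placeholder:

from mmcv import Config

# Placeholder config path; any mmdet-style config with top-level
# train_cfg/test_cfg fields would work here.
cfg = Config.fromfile('configs/faster_rcnn_r50_fpn_1x.py')
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)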
Example #19
def rep_train_detector(model,
                       dataset,
                       cfg,
                       distributed=False,
                       timestamp=None,
                       meta=None):
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        assert False
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]),
                               device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = RepRunner(model,
                       optimizer=optimizer,
                       work_dir=cfg.work_dir,
                       logger=logger,
                       meta=meta)
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # register hooks; no optimizer_config & momentum_config
    runner.register_training_hooks(cfg.lr_config,
                                   checkpoint_config=cfg.checkpoint_config,
                                   log_config=cfg.log_config)

    # if distributed:
    #     runner.register_hook(DistSamplerSeedHook())
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    # elif cfg.load_from:
    #     runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
Example #20
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 kaiming_init(m)
             elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                 constant_init(m, 1)
Example #21
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         for m in self.dis_net:
             nn.init.normal_(m.weight, 0, 0.01)
             nn.init.constant_(m.bias, 0)
     else:
         raise TypeError('pretrained must be a str or None')
Example #22
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 xavier_init(m, distribution='uniform')
     else:
         raise TypeError('pretrained must be a str or None')
Example #23
 def init_weights(self, pretrained=None):
     if pretrained is not None:
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     else:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 xavier_init(m)
             elif isinstance(m, nn.BatchNorm2d):
                 uniform_init(m)
Example #24
    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained is not None:
            logger = get_root_logger()
            print_log(f'load model from: {pretrained}', logger=logger)
Example #25
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         normal_init(self.rpn_conv, std=0.01)
         normal_init(self.rpn_cls, std=0.01)
         normal_init(self.rpn_reg, std=0.01)
     else:
         raise TypeError('pretrained must be a str or None')
Example #26
 def init_weights(self, pretrained=None):
     if pretrained is None:
         print("[MobileNetV1]Train MobileNet V1 from scratch...")
     else:
         if isinstance(pretrained, str):
             print("[MobileNetV1]Train MobileNet V1 from weights:",
                   pretrained)
             logger = get_root_logger()
             load_checkpoint(self, pretrained, strict=False, logger=logger)
         else:
             raise TypeError('pretrained must be a str or None')
Example #27
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 kaiming_init(m)
             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                 constant_init(m, 1)
     else:
         raise TypeError('pretrained must be a str or None')
Example #28
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     else:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 kaiming_init(m)
             elif isinstance(m, (AttnBatchNorm2d, AttnGroupNorm)):
                 nn.init.normal_(m.weight_, 1., 0.1)
                 nn.init.normal_(m.bias_, 0., 0.1)
             elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                 constant_init(m, 1)
Example #29
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         # nn.init.normal_(self.fc_hallucinator.weight, 0, 0.01)
         # nn.init.constant_(self.fc_hallucinator.bias, 0)
         kaiming_init(self.conv_hallucinator)
         nn.init.normal_(self.fc_selector.weight, 0, 0.001)
         nn.init.constant_(self.fc_selector.bias, 0)
         self.std_roi_head.init_weights(pretrained)
     else:
         raise TypeError('pretrained must be a str or None')
Example #30
 def init_weights(self, pretrained=None):
     if isinstance(pretrained, str):
         logger = get_root_logger()
         load_checkpoint(self, pretrained, strict=False, logger=logger)
     elif pretrained is None:
         for m in self.res_blocks:
             for n in m.conv_block:
                 if isinstance(n, nn.Conv2d):
                     kaiming_init(n)
                 elif isinstance(n, nn.BatchNorm2d):
                     constant_init(n, 1)
     else:
         raise TypeError('pretrained must be a str or None')