def build(cfg, registry, default_args=None):
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)
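Every variant on this page bottoms out in build_from_cfg, which none of the examples show. A minimal sketch of what it does, modeled on mmcv's registry pattern (simplified; the real function has more validation and also accepts a class directly as 'type'):

def build_from_cfg(cfg, registry, default_args=None):
    args = cfg.copy()
    obj_type = args.pop('type')        # e.g. 'ResNet', 'FPN', 'SGD'
    obj_cls = registry.get(obj_type)   # look the name up in the registry
    if obj_cls is None:
        raise KeyError(f'{obj_type} is not registered')
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)   # cfg values take precedence over defaults
    return obj_cls(**args)             # instantiate with the remaining kwargs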
Example #2
def build(cfg, registry, default_args=None):
    if isinstance(cfg, list):  # the cfg.model passed in is a dict, so this branch is not taken
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:  # so build_from_cfg is the function that actually gets called
        return build_from_cfg(cfg, registry, default_args)
Example #3
def build(cfg, registry, default_args=None):
    ##  The body is a single branch: it checks whether the incoming cfg is a list of dicts
    #   or a single dict, and handles the two cases separately.
    #   List of dicts: call build_from_cfg() on each one, which looks the dict's 'type' up in the
    #   registry's _module_dict and instantiates it, then return nn.Sequential(*modules).
    #   Single dict: call build_from_cfg() once with the given registry (DETECTORS, for example).
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)
Example #4
def build(cfg, registry, default_args=None):  # the entry point for building every model component
    '''
    Builds all the modules of the network. The model is split into parts, and each
    part is constructed from its config dict: if the given config is a list, a
    sequential module is produced; otherwise the single dict builds one component.
    '''
    if isinstance(cfg, list):  # a list config builds a small part of the model piece by piece
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]  # the list of built modules
        return nn.Sequential(*modules)  # wrap them into a sequential model
    else:
        return build_from_cfg(cfg, registry, default_args)  # build from the single dict; registry is a Registry instance
Example #5
def build(cfg, registry, default_args=None, parallel=None):
    # variant: a list config may be built as parallel branches (nn.ModuleList)
    # instead of a sequence (nn.Sequential)
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        if parallel:
            return nn.ModuleList(modules)
        else:
            return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)
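A usage sketch for the parallel flag (HEADS and the module configs below are hypothetical):

branch_cfgs = [dict(type='ClsHead', num_classes=10),
               dict(type='RegHead', out_dim=4)]
branches = build(branch_cfgs, HEADS, parallel=True)   # nn.ModuleList: independent branches
stages = build(branch_cfgs, HEADS)                    # nn.Sequential: chained stages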
Example #6
def build(cfg, registry, default_args=None):
    # build is entered repeatedly, once per model part: the prints of cfg and registry
    # alternate, rather than the whole cfg being printed in one go.
    # print(cfg)  # cfg.model -> cfg.model.backbone (ResNeXt) -> .neck (FPN) -> .bbox_head (RetinaHead) -> ...
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        # print('1')
        return nn.Sequential(*modules)
    else:
        '''CORE'''
        # print('2')  # this is the branch taken
        # print(registry)  # detector -> backbone -> neck -> head -> loss -> loss
        return build_from_cfg(cfg, registry, default_args)
Example #7
def build_dataset(cfg):
    if cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(build_dataset(cfg['dataset']), cfg['times'])
    elif isinstance(cfg['ann_file'], (list, tuple)):
        dataset = _concat_dataset(cfg)
    else:
        dataset = build_from_cfg(cfg, DATASETS)

    return dataset
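A hedged illustration of the config shapes this dispatcher tells apart (the dataset type and paths are illustrative):

repeat_cfg = dict(
    type='RepeatDataset',
    times=10,                                    # traverse the inner dataset 10x per epoch
    dataset=dict(type='CocoDataset', ann_file='train.json'))
concat_cfg = dict(type='CocoDataset',
                  ann_file=['part1.json', 'part2.json'])     # routed to _concat_dataset
plain_cfg = dict(type='CocoDataset', ann_file='train.json')  # routed to build_from_cfg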
Example #8
def build_dataset(cfg, default_args=None):
    if cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
                                cfg['times'])
    elif isinstance(cfg['ann_file'], (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        # this is the branch typically taken
        dataset = build_from_cfg(cfg, DATASETS, default_args)
    return dataset
Example #9
def __init__(self, transforms):
    assert isinstance(transforms, collections.abc.Sequence)
    self.transforms = []
    for transform in transforms:
        if isinstance(transform, dict):
            transform = build_from_cfg(transform, PIPELINES)
            self.transforms.append(transform)
        elif callable(transform):
            self.transforms.append(transform)
        else:
            raise TypeError('transform must be callable or a dict')
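Usage sketch for this pipeline composer: the list may mix registry config dicts with plain callables (the class name Compose and the pipeline type names are assumptions based on mmdet's data pipelines):

pipeline = Compose([
    dict(type='LoadImageFromFile'),      # built via build_from_cfg(..., PIPELINES)
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    lambda results: results,             # any callable is accepted unchanged
])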
Example #10
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):                           # cfg is composed of several dataset configs
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif isinstance(cfg['ann_file'], (list, tuple)):              # this is the branch entered here
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset                                                # the datasets from different sources, merged; here they are all COCO-style
Example #11
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif isinstance(cfg['ann_file'], (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset
# The _concat_dataset helper used above, shown with the imports this builder needs:
import copy

from mmdet.utils import build_from_cfg
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .registry import DATASETS


def _concat_dataset(cfg, default_args=None):
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)

    datasets = []
    num_dset = len(ann_files)
    for i in range(num_dset):
        # one config copy per annotation file, each with its own prefixes/paths
        data_cfg = copy.deepcopy(cfg)
        data_cfg['ann_file'] = ann_files[i]
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[i]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[i]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[i]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets)
Example #12
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
                                cfg['times'])
    elif isinstance(cfg.get('ann_file'), (tuple, list)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset
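Note the design choice in this variant: cfg.get('ann_file') returns None instead of raising KeyError when a config has no ann_file key, so such configs simply fall through to build_from_cfg. A tiny illustration (the dataset type is hypothetical):

cfg = dict(type='SyntheticDataset', size=1000)   # no ann_file at all
cfg.get('ann_file')   # None; isinstance(None, (tuple, list)) is False, so no error
cfg['ann_file']       # KeyError; the variants in Examples #7 and #10 would crash here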
Example #13
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg["type"] == "RepeatDataset":
        dataset = RepeatDataset(build_dataset(cfg["dataset"], default_args),
                                cfg["times"])
    elif isinstance(cfg.get("ann_file"), (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset
Example #14
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
                                cfg['times'])
    elif cfg['type'] == 'ClassBalancedDataset':
        dataset = ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
    elif isinstance(cfg['ann_file'], (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset
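A hedged config sketch for the ClassBalancedDataset branch (oversample_thr is the field this code reads; the inner dataset is illustrative):

balanced_cfg = dict(
    type='ClassBalancedDataset',
    oversample_thr=1e-3,     # oversample images whose categories are rarer than this frequency
    dataset=dict(type='CocoDataset', ann_file='train.json'))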
Example #15
def __init__(self, transforms=None):
    # You should load the overlay object here.
    print("%s __init__ called" % self.__class__.__name__)
    assert transforms is None or isinstance(transforms,
                                            collections.abc.Sequence)
    self.transforms = []
    if transforms is not None:
        for transform in transforms:
            if isinstance(transform, dict):
                transform = build_from_cfg(transform, PIPELINES)
                self.transforms.append(transform)
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError('transform must be callable or a dict')
Example #16
def build_dataset(cfg, default_args=None):
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif isinstance(cfg['ann_file'], (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        matches = glob.glob(cfg['ann_file'], recursive=True)
        if not matches:
            raise RuntimeError(f'Failed to find annotation files that match pattern: '
                               f'{cfg["ann_file"]}')
        cfg['ann_file'] = matches
        if len(cfg['ann_file']) == 1:
            cfg['ann_file'] = cfg['ann_file'][0]
            dataset = build_from_cfg(cfg, DATASETS, default_args)
        else:
            dataset = _concat_dataset(cfg, default_args)

    return dataset
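With this variant, ann_file may be a glob pattern: a single match builds one dataset, and several matches are concatenated via _concat_dataset (the paths are illustrative):

cfg = dict(type='CocoDataset', ann_file='annotations/train_*.json')
dataset = build_dataset(cfg)   # expands the pattern first; concatenates if >1 file matches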
Example #17
def build_iou_calculator(cfg, default_args=None):
    """Builder of IoU calculator."""
    return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
Example #18
def build_optimizer(model, optimizer_cfg):
    """Build optimizer from configs.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        optimizer_cfg (dict): The config dict of the optimizer.
            Positional fields are:
                - type: class name of the optimizer.
                - lr: base learning rate.
            Optional fields are:
                - any arguments of the corresponding optimizer type, e.g.,
                  weight_decay, momentum, etc.
                - paramwise_options: a dict with 4 accepted fields
                  (bias_lr_mult, bias_decay_mult, norm_decay_mult,
                  dwconv_decay_mult).
                  `bias_lr_mult` and `bias_decay_mult` will be multiplied to
                  the lr and weight decay respectively for all bias parameters
                  (except for the normalization layers), and
                  `norm_decay_mult` will be multiplied to the weight decay
                  for all weight and bias parameters of normalization layers.
                  `dwconv_decay_mult` will be multiplied to the weight decay
                  for all weight and bias parameters of depthwise conv layers.

    Returns:
        torch.optim.Optimizer: The initialized optimizer.

    Example:
        >>> import torch
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> optimizer = build_optimizer(model, optimizer_cfg)
    """
    if hasattr(model, 'module'):
        model = model.module

    optimizer_cfg = optimizer_cfg.copy()
    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
    # if no paramwise option is specified, just use the global setting
    if paramwise_options is None:
        params = model.parameters()
    else:
        assert isinstance(paramwise_options, dict)
        # get base lr and weight decay
        base_lr = optimizer_cfg['lr']
        base_wd = optimizer_cfg.get('weight_decay', None)
        # weight_decay must be explicitly specified if mult is specified
        if ('bias_decay_mult' in paramwise_options
                or 'norm_decay_mult' in paramwise_options
                or 'dwconv_decay_mult' in paramwise_options):
            assert base_wd is not None
        # get param-wise options
        bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.)
        bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.)
        norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.)
        dwconv_decay_mult = paramwise_options.get('dwconv_decay_mult', 1.)
        named_modules = dict(model.named_modules())
        # set param-wise lr and weight decay
        params = []
        for name, param in model.named_parameters():
            param_group = {'params': [param]}
            if not param.requires_grad:
                # FP16 training needs to copy gradient/weight between master
                # weight copy and model weight, it is convenient to keep all
                # parameters here to align with model.parameters()
                params.append(param_group)
                continue

            # for norm layers, overwrite the weight decay of weight and bias
            # TODO: obtain the norm layer prefixes dynamically
            if re.search(r'(bn|gn)(\d+)?\.(weight|bias)', name):
                if base_wd is not None:
                    param_group['weight_decay'] = base_wd * norm_decay_mult
            # for other layers, overwrite both lr and weight decay of bias
            elif name.endswith('.bias'):
                param_group['lr'] = base_lr * bias_lr_mult
                if base_wd is not None:
                    param_group['weight_decay'] = base_wd * bias_decay_mult

            module_name = name.replace('.weight', '').replace('.bias', '')
            if module_name in named_modules and base_wd is not None:
                module = named_modules[module_name]
                # if this Conv2d is depthwise Conv2d
                if isinstance(module, torch.nn.Conv2d) and \
                        module.in_channels == module.groups:
                    param_group['weight_decay'] = base_wd * dwconv_decay_mult
            # otherwise use the global settings

            params.append(param_group)

    optimizer_cfg['params'] = params

    return build_from_cfg(optimizer_cfg, OPTIMIZERS)
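The docstring above covers the plain case; a sketch that also exercises paramwise_options, using only the fields the docstring documents:

optimizer_cfg = dict(
    type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
    paramwise_options=dict(
        bias_lr_mult=2.0,       # biases train at twice the base lr
        norm_decay_mult=0.0))   # no weight decay on BN/GN parameters
optimizer = build_optimizer(model, optimizer_cfg)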
Example #19
def build_sampler(cfg, **default_args):
    """Builder of box sampler."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
Example #20
def build_bbox_coder(cfg, **default_args):
    """Builder of box coder."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args)
Example #21
def build_assigner(cfg, **default_args):
    """Builder of box assigner."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
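Unlike the builders that take default_args as an explicit dict, these three collect extra keyword arguments into it via **default_args. A usage sketch (MaxIoUAssigner and its threshold names are real mmdet names; passing ignore_iof_thr this way is just an illustration of the forwarding):

cfg = dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4)
assigner = build_assigner(cfg, ignore_iof_thr=-1)   # extra kwargs become default_args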
Example #22
def build_dataset(cfg, default_args=None):
    dataset = build_from_cfg(cfg, DATASETS, default_args)
    return dataset