import copy
import inspect

import torch
from mmcv.utils import Registry, build_from_cfg

# Registry of optimizer classes (populated below from ``torch.optim``) and
# registry of optimizer-constructor classes.
OPTIMIZERS = Registry('optimizer')
OPTIMIZER_BUILDERS = Registry('optimizer builder')


def register_torch_optimizers():
    """Register every optimizer class exposed by ``torch.optim``.

    Returns:
        list[str]: Names of the optimizer classes that were registered.
    """
    registered = []
    for name in dir(torch.optim):
        # Skip dunder attributes such as ``__name__``.
        if name.startswith('__'):
            continue
        candidate = getattr(torch.optim, name)
        is_optimizer_cls = inspect.isclass(candidate) and issubclass(
            candidate, torch.optim.Optimizer)
        if is_optimizer_cls:
            OPTIMIZERS.register_module()(candidate)
            registered.append(name)
    return registered


# Populate OPTIMIZERS at import time so configs can refer to e.g. 'SGD'.
TORCH_OPTIMIZERS = register_torch_optimizers()


def build_optimizer_constructor(cfg):
    """Build an optimizer constructor from its config dict."""
    return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
from mmcv.utils import Registry

# Registries mapping config "type" names to component classes; one registry
# per component family used when building models from config files.
BACKBONES = Registry('backbone')
HEADS = Registry('head')
RECOGNIZERS = Registry('recognizer')
LOSSES = Registry('loss')
LOCALIZERS = Registry('localizer')
SCALAR_SCHEDULERS = Registry('scalar_scheduler')
PARAMS_MANAGERS = Registry('params_manager')
SPATIAL_TEMPORAL_MODULES = Registry('spatial_temporal_module')
NECKS = Registry('necks')
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader

from .samplers import ClassSpecificDistributedSampler, DistributedSampler

# Raise the soft limit on open file descriptors; see the linked PyTorch issue.
# NOTE(review): ``platform`` is used below but its import is not visible in
# this chunk — confirm it is imported earlier in the file.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

# Registries mapping config "type" names to dataset/pipeline/blending classes.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
BLENDINGS = Registry('blending')


def build_dataset(cfg, default_args=None):
    """Build a dataset from config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        default_args (dict | None, optional): Default initialization
            arguments. Default: None.

    Returns:
        Dataset: The constructed dataset.
    """
    # NOTE(review): the function body continues beyond this chunk.
from mmcv.utils import Registry, build_from_cfg

# Registry of IoU/overlap calculator implementations.
IOU_CALCULATORS = Registry('IoU calculator')


def build_iou_calculator(cfg, default_args=None):
    """Instantiate an IoU calculator from its config dict."""
    return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
from abc import ABCMeta, abstractmethod

from mmcv.utils import Registry

# Registry of camera model implementations.
CAMERAS = Registry('camera')


class SingleCameraBase(metaclass=ABCMeta):
    """Base class for single camera model.

    Args:
        param (dict): Camera parameters

    Methods:
        world_to_camera: Project points from world coordinates to camera
            coordinates
        camera_to_world: Project points from camera coordinates to world
            coordinates
        camera_to_pixel: Project points from camera coordinates to pixel
            coordinates
        world_to_pixel: Project points from world coordinates to pixel
            coordinates
    """

    @abstractmethod
    def __init__(self, param):
        """Load camera parameters and check validity."""

    def world_to_camera(self, X):
        """Project points from world coordinates to camera coordinates."""
        # Optional operation: concrete subclasses override it when supported.
        raise NotImplementedError
    # NOTE(review): the class definition continues beyond this chunk.
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg

# Registries mapping config "type" names to component classes.
BACKBONES = Registry('backbone')
CLASSIFIERS = Registry('classifier')
HEADS = Registry('head')
NECKS = Registry('neck')
LOSSES = Registry('loss')


def build(cfg, registry, default_args=None):
    """Build one module from a dict config, or an ``nn.Sequential`` of
    modules from a list of dict configs."""
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    built = [build_from_cfg(item, registry, default_args) for item in cfg]
    return nn.Sequential(*built)


def build_backbone(cfg):
    """Instantiate a backbone from its config."""
    return build(cfg, BACKBONES)


def build_head(cfg):
    """Instantiate a head from its config."""
    return build(cfg, HEADS)


def build_neck(cfg):
    """Instantiate a neck from its config."""
    return build(cfg, NECKS)
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry NODES = Registry('node')
from mmcv.utils import Registry
from mmdet.models.builder import build

# Registries for whole models and for trackers.
MODELS = Registry('model')
TRACKERS = Registry('tracker')


def build_tracker(cfg):
    """Build tracker."""
    return build(cfg, TRACKERS)


def build_model(cfg, train_cfg=None, test_cfg=None):
    """Build model, forwarding train/test configs as default build args."""
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return build(cfg, MODELS, default_args)
import warnings

from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION
from mmcv.utils import Registry

# Child registries that inherit from the upstream mmcv registries via the
# ``parent`` link, so mmcv-registered modules remain resolvable.
MODELS = Registry('models', parent=MMCV_MODELS)
ATTENTION = Registry('attention', parent=MMCV_ATTENTION)

# All component kinds share the single MODELS registry; the distinct names
# only document intent at call sites.
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
SEGMENTORS = MODELS


def build_backbone(cfg):
    """Build backbone."""
    return BACKBONES.build(cfg)


def build_neck(cfg):
    """Build neck."""
    return NECKS.build(cfg)


def build_head(cfg):
    """Build head."""
    return HEADS.build(cfg)
from mmcv.utils import Registry

# Registry mapping config "type" names to optimizer classes.
OPTIMIZERS = Registry('optimizers')
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg, digit_version
from torch.utils.data import DataLoader

# Raise the soft limit on open file descriptors; see the linked PyTorch issue.
# NOTE(review): ``platform`` is used below but its import is not visible in
# this chunk — confirm it is imported earlier in the file.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

# Registries mapping config "type" names to dataset/pipeline/sampler classes.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
SAMPLERS = Registry('sampler')


def build_dataset(cfg, default_args=None):
    """Build a dataset, recursively expanding dataset-wrapper configs."""
    # Imported lazily to avoid a circular import with the wrapper module.
    from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
                                   KFoldDataset, RepeatDataset)
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'ConcatDataset':
        dataset = ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            separate_eval=cfg.get('separate_eval', True))
    elif cfg['type'] == 'RepeatDataset':
        # NOTE(review): statement continues beyond this chunk.
        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.parallel import DataParallel, DistributedDataParallel

from mmcv.utils import Registry

# Registry of module-wrapper classes.
MODULE_WRAPPERS = Registry('module wrapper')

# Register torch's built-in parallel wrappers under their class names.
for _wrapper in (DataParallel, DistributedDataParallel):
    MODULE_WRAPPERS.register_module(module=_wrapper)
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

# Registries for annotation-file loaders and parsers.
LOADERS = Registry('loader')
PARSERS = Registry('parser')


def build_loader(cfg):
    """Instantiate an annotation-file loader from its config dict."""
    return build_from_cfg(cfg, LOADERS)


def build_parser(cfg):
    """Instantiate an annotation-file parser from its config dict."""
    return build_from_cfg(cfg, PARSERS)
from mmcv.utils import Registry

# Registries mapping config "type" names to component classes; one registry
# per component family used when building models from config files.
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
HEADS = Registry('head')
RECOGNIZERS = Registry('recognizer')
LOSSES = Registry('loss')
LOCALIZERS = Registry('localizer')
MATCHERS = Registry('matcher')
from mmcv.utils import Registry, build_from_cfg

# One registry per bbox-related component family.
BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')


def build_assigner(cfg, **default_args):
    """Instantiate a bbox assigner from ``cfg`` plus keyword defaults."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)


def build_sampler(cfg, **default_args):
    """Instantiate a bbox sampler from ``cfg`` plus keyword defaults."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)


def build_bbox_coder(cfg, **default_args):
    """Instantiate a bbox coder from ``cfg`` plus keyword defaults."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args)
import platform

from mmcv.utils import Registry, build_from_cfg
from mmdet.datasets import DATASETS
from mmdet.datasets.builder import _concat_dataset

# Raise the soft limit on open file descriptors; see the linked PyTorch issue.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

# Registry of object samplers.
OBJECTSAMPLERS = Registry('Object sampler')


def build_dataset(cfg, default_args=None):
    """Build a dataset, recursively expanding dataset-wrapper configs."""
    # Imported lazily to avoid circular imports with the wrapper modules.
    from mmdet3d.datasets.dataset_wrappers import CBGSDataset
    from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset,
                                                 ConcatDataset, RepeatDataset)
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'ConcatDataset':
        dataset = ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    # NOTE(review): this branch's body continues beyond this chunk.
    elif cfg['type'] == 'ClassBalancedDataset':
# Copyright (c) OpenMMLab. All rights reserved. import copy import math import warnings import numpy as np import torch import torch.nn as nn from torch import Tensor from mmcv.utils import Registry, build_from_cfg, get_logger, print_log INITIALIZERS = Registry('initializer') def update_init_info(module, init_info): """Update the `_params_init_info` in the module if the value of parameters are changed. Args: module (obj:`nn.Module`): The module of PyTorch with a user-defined attribute `_params_init_info` which records the initialization information. init_info (str): The string that describes the initialization. """ assert hasattr( module, '_params_init_info'), f'Can not find `_params_init_info` in {module}' for name, param in module.named_parameters(): assert param in module._params_init_info, (
from mmcv.utils import Registry

# Registry of custom convolution operator classes.
CUSTOM_CONV_OP = Registry("custom_conv_op")


def build_op(conv_type):
    """Look up a registered custom conv operator class by name.

    Args:
        conv_type (str): Name the conv class was registered under. The class
            is expected to accept ``in_ch``, ``out_ch`` and ``kernel_size``.

    Returns:
        type: The registered class (not an instance).
    """
    # Use the registry's public ``get`` (returns None for unknown names)
    # instead of the original's mixed access through the private
    # ``_module_dict`` and the ``module_dict`` property.
    obj_cls = CUSTOM_CONV_OP.get(conv_type)
    # Keep AssertionError for backward compatibility with existing callers.
    assert obj_cls is not None, "conv_type has not been registered!"
    return obj_cls
import tempfile

import pytest
import torch
from torch import nn

import mmcv
from mmcv.cnn.utils.weight_init import update_init_info
from mmcv.runner import BaseModule, ModuleList, Sequential
from mmcv.utils import Registry, build_from_cfg

# Registries used only by the tests in this file.
COMPONENTS = Registry('component')
FOOMODELS = Registry('model')


@COMPONENTS.register_module()
class FooConv1d(BaseModule):
    # Minimal registrable 1-D conv component used as a test fixture.

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        self.conv1d = nn.Conv1d(4, 1, 4)

    def forward(self, x):
        return self.conv1d(x)


@COMPONENTS.register_module()
class FooConv2d(BaseModule):
    # 2-D counterpart of FooConv1d.
    # NOTE(review): the class definition continues beyond this chunk.

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
import warnings

from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry

from mmdet.models.builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
                                  ROI_EXTRACTORS, SHARED_HEADS)
from mmseg.models.builder import SEGMENTORS

# Child registry that inherits from the upstream mmcv MODELS registry.
MODELS = Registry('models', parent=MMCV_MODELS)

# These component kinds all share the single MODELS registry.
VOXEL_ENCODERS = MODELS
MIDDLE_ENCODERS = MODELS
FUSION_LAYERS = MODELS


def build_backbone(cfg):
    """Build backbone."""
    return BACKBONES.build(cfg)


def build_neck(cfg):
    """Build neck."""
    return NECKS.build(cfg)


def build_roi_extractor(cfg):
    """Build RoI feature extractor."""
    return ROI_EXTRACTORS.build(cfg)


# NOTE(review): the function body continues beyond this chunk.
def build_shared_head(cfg):
# Copyright (c) Open-MMLab. All rights reserved. from mmcv.utils import Registry DAHOOKS = Registry('hook') class DAHook: def before_run(self, runner): pass def after_run(self, runner): pass def before_epoch(self, runner): pass def after_epoch(self, runner): pass def before_iter(self, runner): pass def after_iter(self, runner): pass def before_train_epoch(self, runner): self.before_epoch(runner) def before_val_epoch(self, runner): self.before_epoch(runner)
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.cnn import build_model_from_cfg from mmcv.utils import Registry MODELS = Registry('models', build_func=build_model_from_cfg, parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS HEADS = MODELS LOSSES = MODELS POSENETS = MODELS MESH_MODELS = MODELS def build_backbone(cfg): """Build backbone.""" return BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" return NECKS.build(cfg) def build_head(cfg): """Build head.""" return HEADS.build(cfg)
import warnings

from mmcv.utils import Registry, build_from_cfg
from torch import nn

# Registries mapping config "type" names to component classes.
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
HEADS = Registry('head')
LOSSES = Registry('loss')
SEGMENTORS = Registry('segmentor')


def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules, it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    # NOTE(review): the non-list branch continues beyond this chunk.
from mmcv.utils import Registry

# Registries mapping config "type" names to component classes; one registry
# per component family used when building models from config files.
BACKBONES = Registry('backbone')
HEADS = Registry('head')
LOSSES = Registry('loss')
POSENETS = Registry('posenet')
import warnings

from mmcv.utils import Registry, build_from_cfg
from torch import nn

# Registries mapping config "type" names to component classes.
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')


def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules, it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        # NOTE(review): the function body continues beyond this chunk.
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from mmcv.utils import Registry ARG_PARSERS = Registry('arg_parser') ARG_CONVERTERS = Registry('arg_converter') TRAINERS = Registry('trainer') EVALUATORS = Registry('evaluator') EXPORTERS = Registry('exporter') COMPRESSION = Registry('compression')
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

# Registry of pixel-sampler implementations.
PIXEL_SAMPLERS = Registry('pixel sampler')


def build_pixel_sampler(cfg, **default_args):
    """Instantiate a segmentation-map pixel sampler from ``cfg``."""
    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
from mmcv.utils import Registry

# Registries mapping config "type" names to component classes; one registry
# per component family used when building models from config files.
BACKBONES = Registry('backbone')
HEADS = Registry('head')
RECOGNIZERS = Registry('recognizer')
LOSSES = Registry('loss')
LOCALIZERS = Registry('localizer')
from mmcv.utils import Registry

# Registries mapping config "type" names to dataset and pipeline classes.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
from mmcv.utils import Registry

# Registry of PointNet2 set-abstraction module classes.
SA_MODULES = Registry('point_sa_module')


def build_sa_module(cfg, *args, **kwargs):
    """Build PointNet2 set abstraction (SA) module.

    Args:
        cfg (None or dict): The SA module config, which should contain:

            - type (str): Module type.
            - module args: Args needed to instantiate an SA module.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding module.
        kwargs (keyword arguments): Keyword arguments passed to the
            `__init__` method of the corresponding SA module .

    Returns:
        nn.Module: Created SA module.
    """
    if cfg is None:
        # Fall back to a default module type when no config is given.
        cfg_ = dict(type='PointSAModule')
    else:
        if not isinstance(cfg, dict):
            raise TypeError('cfg must be a dict')
        if 'type' not in cfg:
            raise KeyError('the cfg dict must contain the key "type"')
        # Copy so popping 'type' below does not mutate the caller's dict.
        cfg_ = cfg.copy()
    module_type = cfg_.pop('type')
    # NOTE(review): this branch's body continues beyond this chunk.
    if module_type not in SA_MODULES: