Example #1
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config: {}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # commented-out override: fabric-defect class names of a custom (Chinese) dataset
    # model.CLASSES = ('破洞', '污渍', '三丝', '结头', '花板跳', '百脚', '毛粒', '粗经',
    # '松经', '断经', '吊经', '粗维', '纬缩', '浆斑', '整经结', '星跳', '跳花', '断氨纶', '色差档',
    # '磨痕', '死皱')
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
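
All of these training entry points rely on a parse_args() helper that the listings omit. A minimal sketch of what it plausibly looks like, assuming argparse and only the flags the examples actually read (config, --work_dir, --resume_from, --gpus, --seed, --validate, --autoscale_lr, --launcher):

import argparse

def parse_args():
    # hypothetical reconstruction of the CLI the examples assume
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='dir to save logs and checkpoints')
    parser.add_argument('--resume_from', help='checkpoint file to resume from')
    parser.add_argument('--gpus', type=int, default=1,
                        help='number of gpus to use')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--validate', action='store_true',
                        help='evaluate checkpoints during training')
    parser.add_argument('--autoscale_lr', action='store_true',
                        help='scale lr linearly with the number of gpus')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='job launcher')
    return parser.parse_args()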
Example #2
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None):
    logger = get_root_logger(cfg.log_level)

    # start training
    if distributed:
        _dist_train(
            model,
            dataset,
            cfg,
            validate=validate,
            logger=logger,
            timestamp=timestamp)
    else:
        _non_dist_train(
            model,
            dataset,
            cfg,
            validate=validate,
            logger=logger,
            timestamp=timestamp)
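
The two private helpers differ chiefly in how they wrap the model. A rough sketch of that split, assuming mmcv's parallel wrappers (the real _dist_train/_non_dist_train also build the data loaders, optimizer and runner):

from mmcv.parallel import MMDataParallel, MMDistributedDataParallel

def _wrap_model(model, cfg, distributed):
    # sketch only: distributed training runs one process per GPU,
    # non-distributed training uses a single process over cfg.gpus devices
    if distributed:
        return MMDistributedDataParallel(model.cuda())
    return MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()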
Example #3
def main():
    # parse the command-line arguments
    args = parse_args()
    # load the config file
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    # if no work dir is given on the command line, keep the default (work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x'); otherwise update it
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir

    # if training resumes from a previous checkpoint, update cfg; otherwise keep the default resume_from = None
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # set from the number of GPUs given on the command line
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    # if no launcher is set, distributed is False
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # core calls: build_detector, get_dataset, train_detector

    # heart of the train script: build_detector() instantiates the model from the config data and returns the network object
    model = build_detector(
        # model settings, train settings and test settings from the config file
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg)

    # build the dataset from the train field of cfg.data (itself a dict);
    # the result holds the data and all of the dataset's labels
    train_dataset = get_dataset(cfg.data.train)

    # start training
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #4
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)

        if version is None or version < 2:
            # the key is different in early versions
            # In version < 2, ModulatedDeformConvPack
            # loads previous benchmark models.
            if (prefix + 'conv_offset.weight' not in state_dict
                    and prefix[:-1] + '_offset.weight' in state_dict):
                state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(
                    prefix[:-1] + '_offset.weight')
            if (prefix + 'conv_offset.bias' not in state_dict
                    and prefix[:-1] + '_offset.bias' in state_dict):
                state_dict[prefix +
                           'conv_offset.bias'] = state_dict.pop(prefix[:-1] +
                                                                '_offset.bias')

        if version is not None and version > 1:
            from mmdet.apis import get_root_logger
            logger = get_root_logger()
            logger.info(
                'ModulatedDeformConvPack {} is upgraded to version 2.'.format(
                    prefix.rstrip('.')))

        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
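
The version consulted here comes from PyTorch's state-dict metadata: a module class may declare a _version class attribute, which state_dict() records per prefix and _load_from_state_dict receives back through local_metadata. A minimal sketch of that mechanism:

import torch.nn as nn

class Pack(nn.Module):
    # bump whenever the parameter layout changes; saved into the state
    # dict metadata and handed back as local_metadata['version']
    _version = 2

    def __init__(self):
        super().__init__()
        self.conv_offset = nn.Conv2d(3, 27, 3, padding=1)

sd = Pack().state_dict()
print(sd._metadata['']['version'])  # -> 2 for the root module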
Example #5
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            from mmdet.apis import get_root_logger
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
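
kaiming_init and constant_init are thin mmcv.cnn wrappers over torch.nn.init. A rough plain-PyTorch equivalent of the calls above, with the default arguments assumed:

import torch.nn as nn

def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0):
    # roughly mmcv.cnn.kaiming_init with its default (normal) distribution
    nn.init.kaiming_normal_(module.weight, mode=mode,
                            nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)

def constant_init(module, val, bias=0):
    # e.g. constant_init(m, 1) sets a norm layer's gamma to 1 and beta to 0;
    # constant_init(m, 0) zeroes a residual branch for zero-init-residual
    nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)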
Example #6
def train(args):
    cfg = Config.fromfile(args.config)
    cfg.data.workers_per_gpu = args.workers_per_gpu
    cfg.data.imgs_per_gpu = args.imgs_per_gpu
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    else:
        args.work_dir = cfg.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
    torch.save(model, args.work_dir + '/' + args.model_name + '.pth')
    return
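
Note that torch.save(model, ...) at the end pickles the whole module, which ties the file to the exact class definitions on the import path. Saving the state dict instead is the more portable variant (a sketch using the same CLI arguments):

import os
import torch

# stores only the tensors; restore later with model.load_state_dict(...)
torch.save(model.state_dict(),
           os.path.join(args.work_dir, args.model_name + '.pth'))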
Example #7
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    args.resume_from = "/home/ubuntu/code/fengda/MaskTrackRCNN/pretrained_models/epoch_12.pth"
    # network_data = torch.load(args.resume_from)
    load_checkpoint(model, args.resume_from)
    # model.eval()
    # for param in model.parameters():
    #     param.requires_grad = False
    model.load_flow()
    # model.flow_head.train()
    # for param in model.flow_head.parameters():
    #     param.requires_grad = True

    # get dataset
    train_dataset = get_dataset(cfg.data.train)
    print("len of dataset: {}.".format(len(train_dataset)))

    # train
    train_flownet(model,
                  train_dataset,
                  cfg,
                  distributed=distributed,
                  validate=args.validate,
                  logger=logger)
Example #8
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args

    cfg.work_dir = cfg.work_dir + '_' + time.strftime('Time_%m%d_%H%M%S',
                                                      time.localtime())

    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # log cfg
    logger.info('training config:{}\n'.format(pprint.pformat(cfg._cfg_dict)))

    # log git hash
    logger.info('git hash: {}'.format(get_git_hash()))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          classes=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
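
get_git_hash() here is a local helper of this script rather than an mmdet API. A minimal sketch of what it presumably does, using subprocess:

import subprocess

def get_git_hash():
    # current commit hash, or 'unknown' when not inside a git repo
    try:
        return subprocess.check_output(
            ['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
    except (subprocess.CalledProcessError, OSError):
        return 'unknown'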
Example #9
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    # enable when the input size is fixed for a speed-up; usually left off,
    # and only enabled for fixed-scale networks such as SSD512
    # (whether a given model's input really is fixed is worth testing)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        # create the work dir for the training artifacts; if not given, a directory is derived from the .py config file name
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        # checkpoint file to resume training from
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # ipdb.set_trace(context=35)
    # build the model
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # pass in the training data config
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in checkpoints as meta data
        # note: previously released models did not store this class info and used the
        # default COCO/VOC settings, so detecting with an older checkpoint merely prints a harmless warning
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)

    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES  # the model has no CLASSES attribute by default, but Python lets us attach one on assignment
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #10
def main():
    # parse arguments
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.fp16:
        cfg.fp16 = {'loss_scale': 512.}
    if args.workers is not None:
        cfg.data.workers_per_gpu = args.workers
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8.
    ops.sys_print('Args:\n--', args)
    ops.sys_print('Configs:\n--', cfg)

    # init distributed env, logger and random seeds
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    
    # build model
    model = build_detector(
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg)
    
    # build dataset
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta = {
            'mmdet_version': mmdet.__version__,
            'config': cfg.text,
            'CLASSES': train_dataset.CLASSES}
    model.CLASSES = train_dataset.CLASSES

    # run training
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #11
    def set_configuration(self, cfg_in):
        cfg = self.get_configuration()
        cfg.merge_config(cfg_in)

        self._config_file = str(cfg.get_value("config_file"))
        self._seed_weights = str(cfg.get_value("seed_weights"))
        self._train_directory = str(cfg.get_value("train_directory"))
        self._output_directory = str(cfg.get_value("output_directory"))
        self._gpu_count = int(cfg.get_value("gpu_count"))
        self._integer_labels = strtobool(cfg.get_value("integer_labels"))
        self._launcher = str(cfg.get_value("launcher"))
        self._validate = strtobool(cfg.get_value("validate"))

        self._training_data = []

        self._cfg = Config.fromfile(self._config_file)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory
            self._groundtruth_store = os.path.join(self._train_directory,
                                                   self._tmp_annotation_file)
            if not os.path.exists(self._train_directory):
                os.mkdir(self._train_directory)
        else:
            self._groundtruth_store = self._tmp_annotation_file

        if self._seed_weights is not None:
            self._cfg.resume_from = self._seed_weights

        if self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
        else:
            self._cfg.gpus = torch.cuda.device_count()

        if self._cfg.checkpoint_config is not None:
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            init_dist(self._launcher, **self._cfg.dist_params)

        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != "none":
            self._logger.info('Set random seed to {}'.format(self._random_seed))
            set_random_seed(int(self._random_seed))

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
Example #12
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)

    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # print('cfg.data.train:',cfg.data.train)
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    print('model.CLASSES:', model.CLASSES)
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,  # default: False
        validate=args.validate,
        logger=logger)
Example #13
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg,
                           global_cfg=cfg)
    train_dataset = [build_dataset(c) for c in cfg.data.train] \
                    if type(cfg.data.train) == list else \
                    build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES
            if hasattr(train_dataset, 'CLASSES') else None)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES if hasattr(train_dataset,
                                                     'CLASSES') else None
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger,
                   multitask=type(train_dataset) == list,
                   vis=True)
Example #14
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    
    train_dataset = build_dataset(cfg.data.train)
        
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(), find_unused_parameters=True,
        device_ids=[args.local_rank], output_device=args.local_rank)
    print(model)
    print("Model has {}M parameters.".format(
        sum(x.numel() for x in model.parameters()) / 1e6))
    print("Backbone has {}M parameters.".format(
        sum(x.numel() for x in model.module.backbone.parameters()) / 1e6))
    print("Neck has {}M parameters.".format(
        sum(x.numel() for x in model.module.neck.parameters()) / 1e6))
    print("Head has {}M parameters.".format(
        sum(x.numel() for x in model.module.bbox_head.parameters()) / 1e6))
    
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #15
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.save_dir is not None:
        try:
            os.makedirs(args.save_dir)
        except OSError:
            pass
    cfg.work_dir = args.save_dir
    # import pdb; pdb.set_trace()
    if args.load_from is not None:
        cfg.resume_from = args.load_from
        cfg.load_from = args.load_from
    logger = get_root_logger(cfg.log_level)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    cfg.data.train.pipeline = cfg.train_pipeline
    cfg.data.train.classes_to_load = args.classes
    datasets = [build_dataset(cfg.data.train)]

    cfg.train_cfg.rcnn.assigner.pos_iou_thr = args.fg_iou_thr
    cfg.train_cfg.rcnn.assigner.min_pos_iou = args.fg_iou_thr
    cfg.train_cfg.rcnn.assigner.neg_iou_thr = args.bg_iou_thr
    cfg.data.imgs_per_gpu = 8

    if 'val' in args.data_split:
        cfg.data.val.pipeline = cfg.train_pipeline
        cfg.data.val.classes_to_load = args.classes
        datasets = [build_dataset(cfg.data.val)]

    elif 'test' in args.data_split:
        cfg.data.test.pipeline = cfg.train_pipeline
        cfg.data.test.classes_to_load = args.classes
        datasets = [build_dataset(cfg.data.test)]

    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    extract_feats(model,
                  datasets,
                  cfg,
                  args.save_dir,
                  data_split=args.data_split,
                  logger=logger)
Example #16
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)

    # run one evaluation pass after training completes
    import os.path as osp
    checkpoint = osp.join(cfg.work_dir, 'latest.pth')
    out = osp.join(cfg.work_dir, 'val_cropped_dets.pkl')
    _do_dota_eval(args.config, checkpoint, out)
Example #17
def main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        import torch.distributed as dist
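        # single-process 'gloo' group so code paths that require an
        # initialized torch.distributed still work without a launcher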
        dist.init_process_group('gloo', init_method='file:///tmp/somefile',
                                rank=0, world_size=1)
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #18
def main():
    import os
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # get dataset
    # from mmdet.models.decision_net.utils import modify_cfg
    # video_name = '01c783268c'
    # cfg.data.train = modify_cfg(cfg, video_name)
    train_dataset = get_dataset(cfg.data.train)
    val_dataset = get_dataset(cfg.data.val)
    print("len of dataset: {}.".format(len(train_dataset)))

    # train
    train_detector(model, [train_dataset, val_dataset],
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #19
    def load_network(self):
        from mmcv import Config
        self._cfg = Config.fromfile(self._train_config)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory

        if self._seed_weights is not None:
            self._cfg.load_from = self._seed_weights

        if self._gpu_count is not None and self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
            flux_factor = self._images_per_gpu * self._gpu_count
            self._cfg.optimizer['lr'] = self._cfg.optimizer['lr'] * flux_factor

        if self._cfg.checkpoint_config is not None:
            from mmdet import __version__
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            from mmcv.runner import init_dist
            init_dist(self._launcher, **self._cfg.dist_params)

        from mmdet.apis import get_root_logger
        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != 'none':
            self._logger.info('Set random seed to {}'.format(
                self._random_seed))
            from mmdet.apis import set_random_seed
            if isinstance(self._random_seed, int):
                set_random_seed(int(self._random_seed))

        from mmdet.models import build_detector

        if self._cfg.model['pretrained'] is not None:
            if not os.path.exists(self._cfg.model['pretrained']):
                dirname = os.path.dirname(self._config_file)
                relpath = os.path.join(dirname, self._cfg.model['pretrained'])
                if os.path.exists(relpath):
                    self._cfg.model['pretrained'] = relpath

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
Example #20
def main():
    # ipdb.set_trace()
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark: enable when the input size is fixed for a speed-up
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        # create the work dir for the training artifacts; if not given, a directory is derived from the .py config file name
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        # checkpoint file to resume training from
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # if cfg.checkpoint_config is not None:
    #     # save mmdet version in checkpoints as meta data
    #     cfg.checkpoint_config.meta = dict(
    #         mmdet_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # the model is built exactly as for inference, so no further comment
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # ipdb.set_trace()
    # note that cfg.data.train is what gets passed in
    train_dataset = get_dataset(cfg.data.train)

    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #21
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            from mmdet.apis import get_root_logger
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
Example #22
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)

    if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'train'):
        train_dataset2 = get_dataset(cfg.data2.train)
    else:
        train_dataset2 = None
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger,
                   train_dataset2=train_dataset2)
Example #23
def main():
    args = parse_args()  # 解析命令行参数
    cfg = Config.fromfile(args.config)  # 读取配置文件
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir  # checkpoint save path
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from  # checkpoint resume from path
    cfg.gpus = args.gpus  # gpus numbers

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':  # if launcher == 'none', distributed is False and no distributed training is used
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # get_dataset takes the train config dict and returns a concrete dataset instance
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES  # a tuple containing class names
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #24
def main():
    args = parse_args()
    fb_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = fb_cfg.search_space
    # base = _space['base']
    # depth = _space['depth']
    # space = _space['space']

    model_cfg = mmcv_config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if model_cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        model_cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        model_cfg.resume_from = args.resume_from
    model_cfg.gpus = args.gpus
    if model_cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        model_cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                config=model_cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **model_cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(model_cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = detection(mmcv_config(model_cfg['model_cfg']),
                      mmcv_config(model_cfg['train_cfg']),
                      mmcv_config(model_cfg['test_cfg']), _space,
                      args.theta_txt)
    print(model)
    train_dataset = get_dataset(model_cfg.data.train)
    train_detector(model,
                   train_dataset,
                   model_cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #25
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # first the BACKBONES, NECKS, ROI_EXTRACTORS, HEADS and DETECTORS registries are set up,
    # then each module registers itself: BACKBONES.register_module(SSDVGG), @HEADS.register_module (AnchorHead),
    #     @HEADS.register_module (SSDHead), @DETECTORS.register_module (SingleStageDetector);
    # finally build_detector() amounts to SingleStageDetector(**args)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #26
def main():

    args = parse_args()

    cfg = Config.fromfile(args.config)
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # if args.aux_cls_weight is not None:
    #     cfg.train_cfg.aux.cls_weight = args.aux_cls_weight
    # if args.aux_reg_weight is not None:
    #     cfg.train_cfg.aux.reg_weight = args.aux_reg_weight

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)

    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #27
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set num_classes
    cfg.model.bbox_head['num_classes'] = len(
        load_classes(cfg.data_root + 'ocr.names')) + 1
    print('num_classes: %d' % (cfg.model.bbox_head['num_classes']))
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    train_dataset = obj_from_dict(cfg.data.train, datasets)
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #28
def copy_synthesised_weights(model,
                             filename,
                             dataset_name='voc',
                             split='65_15'):

    logger = get_root_logger('INFO')

    checkpoint = torch.load(filename, map_location='cpu')
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError(
            'No state_dict found in checkpoint file {}'.format(filename))

    if hasattr(model, 'module'):
        own_state = model.module.state_dict()
    else:
        own_state = model.state_dict()

    unseen_class_inds = get_unseen_class_ids(dataset=dataset_name, split=split)
    seen_bg_weights = own_state['bbox_head.fc_cls.weight'][0].data.cpu().numpy(
    ).copy()
    seen_bg_bias = own_state['bbox_head.fc_cls.bias'][0].data.cpu().numpy(
    ).copy()

    own_state['bbox_head.fc_cls.bias'][unseen_class_inds] = state_dict[
        'fc1.bias'][1:]
    own_state['bbox_head.fc_cls.weight'][unseen_class_inds] = state_dict[
        'fc1.weight'][1:]

    alpha1 = 0.35
    alpha2 = 0.65
    own_state['bbox_head.fc_cls.bias'][0] = alpha1 * own_state[
        'bbox_head.fc_cls.bias'][0] + alpha2 * state_dict['fc1.bias'][0]
    own_state['bbox_head.fc_cls.weight'][0] = alpha1 * own_state[
        'bbox_head.fc_cls.weight'][0] + alpha2 * state_dict['fc1.weight'][0]

    logger.info(
        f'{dataset_name} {unseen_class_inds.shape} seenbg: {alpha1} synbg: {alpha2} copied classifier weights from {filename} \n'
    )
    return seen_bg_weights, seen_bg_bias
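
A hedged usage sketch (the checkpoint path is illustrative): after building and loading a detector, the synthesised classifier weights are copied in, and the returned seen-background vectors can be kept to restore the original background classifier later.

seen_bg_w, seen_bg_b = copy_synthesised_weights(
    model, 'work_dirs/zsd/classifier_latest.pth',
    dataset_name='voc', split='65_15')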
Example #29
def main():
    """Start train."""
    args = parse_args()
    if args.config.endswith(".json"):
        load_method = mmcv.load
        mmcv.load = json_to_dict
        cfg = mmcv.Config.fromfile(args.config)
        mmcv.load = load_method
    else:
        cfg = mmcv.Config.fromfile(args.config)
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env, logger and model.
    init_dist('pytorch', **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=True,
        logger=logger)
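
json_to_dict is this script's own helper, temporarily monkey-patched over mmcv.load so that Config.fromfile can consume a JSON config. A minimal sketch of what it presumably does:

import json

def json_to_dict(filename, **kwargs):
    # drop-in stand-in for mmcv.load while a JSON config is being read
    with open(filename) as f:
        return json.load(f)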
Example #30
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            from mmdet.apis import get_root_logger
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.features.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

        for m in self.extra.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

        constant_init(self.l2_norm, self.l2_norm.scale)
Example #31
  def set_configuration( self, cfg_in ):
    cfg = self.get_configuration()
    cfg.merge_config( cfg_in )

    self._config_file = str( cfg.get_value( "config_file" ) )
    self._seed_weights = str( cfg.get_value( "seed_weights" ) )
    self._train_directory = str( cfg.get_value( "train_directory" ) )
    self._output_directory = str( cfg.get_value( "output_directory" ) )
    self._gpu_count = int( cfg.get_value( "gpu_count" ) )
    self._integer_labels = strtobool( cfg.get_value( "integer_labels" ) )
    self._launcher = str( cfg.get_value( "launcher" ) )
    self._validate = strtobool( cfg.get_value( "validate" ) )

    self._training_data = []

    from mmcv import Config
    self._cfg = Config.fromfile( self._config_file )

    if self._cfg.get( 'cudnn_benchmark', False ):
      torch.backends.cudnn.benchmark = True

    if self._train_directory is not None:
      self._cfg.work_dir = self._train_directory
      self._groundtruth_store = os.path.join(
        self._train_directory, self._tmp_annotation_file )
      if not os.path.exists( self._train_directory ):
        os.mkdir( self._train_directory )
    else:
      self._groundtruth_store = self._tmp_annotation_file

    if self._seed_weights is not None:
      self._cfg.resume_from = self._seed_weights

    if self._gpu_count > 0:
      self._cfg.gpus = self._gpu_count
    else:
      self._cfg.gpus = torch.cuda.device_count()

    if self._cfg.checkpoint_config is not None:
      from mmdet import __version__
      self._cfg.checkpoint_config.meta = dict(
        mmdet_version=__version__, config=self._cfg.text )

    if self._launcher == 'none':
      self._distributed = False
    else:
      self._distributed = True
      from mmdet.apis import init_dist
      init_dist( self._launcher, **self._cfg.dist_params )

    from mmdet.apis import get_root_logger
    self._logger = get_root_logger( self._cfg.log_level )
    self._logger.info( 'Distributed training: {}'.format( self._distributed ) )

    if self._random_seed != "none":
      self._logger.info( 'Set random seed to {}'.format( self._random_seed ) )
      from mmdet.apis import set_random_seed
      set_random_seed( int( self._random_seed ) )

    from mmdet.models import build_detector

    self._model = build_detector(
      self._cfg.model, train_cfg=self._cfg.train_cfg, test_cfg=self._cfg.test_cfg )