Example #1
def collect_env():
    """Collect the information of the running environments."""
    env_info = collect_base_env()
    env_info['MMDetection'] = mmdet.__version__
    env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]

    return env_info
Example #2
    def MMdet_train(self, config):
        # training only; whether to validate is controlled by self.valid
        # create work_dir
        mmcv.mkdir_or_exist(osp.abspath(config.work_dir))
        # dump config
        config.dump(osp.join(config.work_dir, osp.basename(self.cfg_path)))
        # init the logger before other steps
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        log_file = osp.join(config.work_dir, f'{timestamp}.log')
        logger = get_root_logger(log_file=log_file, log_level=config.log_level)

        # init the meta dict to record some important information such as
        # environment info and seed, which will be logged
        meta = dict()
        # log env info
        env_info_dict = collect_env()
        env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
        dash_line = '-' * 60 + '\n'
        logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                    dash_line)
        meta['env_info'] = env_info
        meta['config'] = config.pretty_text
        # log some basic info
        logger.info('Distributed training: False')
        logger.info(f'Config:\n{config.pretty_text}')
        # set random seeds
        seed = None
        config.seed = seed
        meta['seed'] = seed
        meta['exp_name'] = osp.basename(self.cfg_path)

        model = build_detector(config.model,
                               train_cfg=config.get('train_cfg'),
                               test_cfg=config.get('test_cfg'))

        datasets = [build_dataset(config.data.train)]
        if config.checkpoint_config is not None:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            config.checkpoint_config.meta = dict(mmdet_version=__version__ +
                                                 get_git_hash()[:7],
                                                 CLASSES=datasets[0].CLASSES)
        # add an attribute for visualization convenience
        model.CLASSES = datasets[0].CLASSES
        train_detector(model,
                       datasets,
                       config,
                       distributed=False,
                       validate=self.valid,
                       timestamp=timestamp,
                       meta=meta)
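Example #2 is a method of a wrapper class that is not shown; the attributes it relies on (self.cfg_path, self.valid) and the class below are assumptions made only for illustration. A minimal sketch of such a host class, assuming the mmdet-style helpers used in the method are importable:

from mmcv import Config


class DetectorTrainer:
    """Hypothetical host class for the MMdet_train method in Example #2."""

    def __init__(self, cfg_path, validate=False):
        self.cfg_path = cfg_path  # path to the mmdet config file
        self.valid = validate     # forwarded as train_detector(validate=...)

    # MMdet_train(self, config) from Example #2 would be defined here.

    def train(self, work_dir='./work_dirs/demo'):
        config = Config.fromfile(self.cfg_path)
        config.work_dir = work_dir  # hypothetical default work_dir
        self.MMdet_train(config)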
Example #3
def collect_env():
    """Collect the information of the running environments."""
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')

    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available

    if cuda_available:
        from torch.utils.cpp_extension import CUDA_HOME
        env_info['CUDA_HOME'] = CUDA_HOME

        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
            try:
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(f'"{nvcc}" -V | tail -n1',
                                               shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc

        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for name, devids in devices.items():
            env_info['GPU ' + ','.join(devids)] = name

    gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
    gcc = gcc.decode('utf-8').strip()
    env_info['GCC'] = gcc

    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()

    env_info['TorchVision'] = torchvision.__version__

    env_info['OpenCV'] = cv2.__version__

    env_info['MMCV'] = mmcv.__version__
    env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
    from mmcv.ops import get_compiler_version, get_compiling_cuda_version
    env_info['MMDetection Compiler'] = get_compiler_version()
    env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
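Example #3 is a complete collect_env() implementation. A short usage sketch, mirroring how the training scripts below turn the returned dict into log output:

if __name__ == '__main__':
    # print one "key: value" line per environment entry
    for name, val in collect_env().items():
        print(f'{name}: {val}')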
Example #4
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed

    model = build_posenet(cfg.model)
    datasets = [build_dataset(cfg.data.train)]

    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))

    if cfg.checkpoint_config is not None:
        # save mmpose version and config file content in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmpose_version=__version__ + get_git_hash(digits=7),
            config=cfg.pretty_text,
        )
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
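None of the examples show parse_args(); below is a minimal sketch of the argument parser these train scripts assume. The flag names follow the open-mmlab tools/train.py convention, but treat this as an illustration rather than the exact upstream signature:

import argparse

from mmcv.utils import DictAction


def parse_args():
    parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='dir to save logs and checkpoints')
    parser.add_argument('--resume-from', help='checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true',
                        help='do not evaluate during training')
    parser.add_argument('--gpus', type=int,
                        help='number of gpus to use (non-distributed)')
    parser.add_argument('--gpu-ids', type=int, nargs='+',
                        help='ids of gpus to use (non-distributed)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='set deterministic options for cudnn')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction,
                        help='override config entries, e.g. key=value')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    return parser.parse_args()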
Example #5
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    # build the model
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    # build the validation set
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__ +
                                          get_git_hash()[:7],
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=(not args.no_validate),
                   timestamp=timestamp,
                   meta=meta)
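The --cfg-options values merged at the top of Example #5 use mmcv's dotted-key syntax. A small self-contained sketch of what cfg.merge_from_dict does (the toy config here is made up for illustration):

from mmcv import Config

# toy config standing in for the file loaded by Config.fromfile()
cfg = Config(dict(optimizer=dict(type='SGD', lr=0.02),
                  data=dict(samples_per_gpu=2)))

# equivalent of `--cfg-options optimizer.lr=0.01 data.samples_per_gpu=4`
cfg.merge_from_dict({'optimizer.lr': 0.01, 'data.samples_per_gpu': 4})

print(cfg.optimizer.lr)          # 0.01
print(cfg.data.samples_per_gpu)  # 4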
Example #6
def collect_env():
    """Collect the information of the running environments."""
    env_info = collect_base_env()
    env_info['MMTracking'] = mmtrack.__version__ + '+' + get_git_hash()[:7]
    return env_info
Example #7
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority:
    # CLI > config file > default (base filename)
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # The flag is used to determine whether it is omnisource training
    cfg.setdefault('omnisource', False)

    # The flag is used to register module's hooks
    cfg.setdefault('module_hooks', [])

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config: {cfg.text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['config_name'] = osp.basename(args.config)
    meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\'))

    model = build_model(cfg.model,
                        train_cfg=cfg.get('train_cfg'),
                        test_cfg=cfg.get('test_cfg'))

    register_module_hooks(model.backbone, cfg.module_hooks)

    if cfg.omnisource:
        # If omnisource flag is set, cfg.data.train should be a list
        assert type(cfg.data.train) is list
        datasets = [build_dataset(dataset) for dataset in cfg.data.train]
    else:
        datasets = [build_dataset(cfg.data.train)]

    if len(cfg.workflow) == 2:
        # For simplicity, omnisource is not compatible with the val workflow;
        # we recommend using `--validate` instead
        assert not cfg.omnisource
        if args.validate:
            warnings.warn('val workflow is duplicated with `--validate`, '
                          'it is recommended to use `--validate`. see '
                          'https://github.com/open-mmlab/mmaction2/pull/123')
        val_dataset = copy.deepcopy(cfg.data.val)
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmaction version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmaction_version=__version__ +
                                          get_git_hash(digits=7),
                                          config=cfg.text)

    train_model(model,
                datasets,
                cfg,
                distributed=distributed,
                validate=args.validate,
                timestamp=timestamp,
                meta=meta)
Example #8
def main():
    args=parse_args()
    # print(args)
    cfg=Config.fromfile(args.config)
    # print(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark',False):
        # print("set cudnn_benchmark")
        torch.backends.cudnn.benchmark=True
    # set work_dir
    if args.work_dir is not None:
        # print("set work_dir")
        cfg.work_dir=args.work_dir
    elif cfg.get('work_dir',None) is None:
        cfg.work_dir=os.path.join('./work_dirs',os.path.splitext(os.path.basename(args.config))[0])
    # set resume_from
    if args.resume_from is not None:
        cfg.resume_from=args.resume_from
    # set gpu
    if args.gpu_ids is not None:
        cfg.gpu_ids=args.gpu_ids
    else:
        cfg.gpu_ids=range(1) if args.gpus is None else range(args.gpus)
    # set distributed env
    if args.launcher=='none':
        distributed=False
    else:
        # single GPU is actually the default here; I think setting gpu_ids
        # directly would be enough and world_size could be skipped
        distributed=True
        # init_dist(args.launcher,**cfg.dist_params)
        _,world_size=get_dist_info()
        #print(world_size)
        cfg.gpu_ids=range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(os.path.abspath(cfg.work_dir))
    # dump config
    cfg.dump(os.path.join(cfg.work_dir,os.path.basename(args.config)))
    # init logger
    timestamp=time.strftime("%Y%m%d_%H%M%S",time.localtime())
    log_file=os.path.join(cfg.work_dir,f'{timestamp}.log')
    logger=get_root_logger(log_file=log_file,log_level=cfg.log_level)
    # init the meta dict with things to be logged
    meta=dict()
    env_info_dict=collect_env()
    env_info='\n'.join([(f'{k}:{v}') for k,v in env_info_dict.items()])
    dash_line='-'*60+'\n'
    logger.info('Environment info:\n'+dash_line+env_info+'\n'+dash_line)
    meta['env_info']=env_info
    meta['config']=cfg.pretty_text
    logger.info(f'Distributed training:{distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic:{args.deterministic}')
        set_random_seed(args.seed,deterministic=args.deterministic)
    cfg.seed=args.seed
    meta['seed']=args.seed
    meta['exp_name']=os.path.basename(args.config)

    model=build_model(cfg.model,train_cfg=cfg.get('train_cfg'),test_cfg=cfg.get('test_cfg'))
    datasets=[build_dataset(cfg.data.train)]

    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta=dict(mmdet_version=__version__+get_git_hash()[:7],CLASSES=datasets[0].CLASSES)
    # model.CLASSES=datasets[0].CLASSES
    print(model)
    train_detector(model,datasets,cfg,distributed=distributed,validate=(not args.no_validate),meta=meta,timestamp=timestamp)
Example #9
def main():
    # parse the command-line arguments to get the config input
    args = parse_args()

    # load the config from the given config file path
    cfg = Config.fromfile(args.config)
    # merge optional config overrides
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark (enable the cuDNN benchmark to speed up training; see https://blog.csdn.net/tuntunmmd/article/details/90229836)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # whether to resume training from a checkpoint (note the difference between resume_from and load_from)
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # configure the GPUs
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump the current training config into the work_dir
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    # build the detector via the registry/builder (passing the model config plus the train and test configs)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    # build the dataset objects
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:  # checkpoint-saving settings
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # start training
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
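Several scripts above gate the validation dataset on len(cfg.workflow) == 2. A sketch of the config fragment behind that check (values are illustrative):

# train-only workflow: the val dataset branch above is skipped
workflow = [('train', 1)]

# train + val workflow: cfg.data.val is deep-copied, given the training
# pipeline, and appended to `datasets`
workflow = [('train', 1), ('val', 1)]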
Example #10
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    seed = init_random_seed(args.seed)
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(cfg.model,
                           train_cfg=cfg.get('train_cfg'),
                           test_cfg=cfg.get('test_cfg'))
    model.init_weights()

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        if cfg.data.train.get('pipeline', None) is None:
            if is_2dlist(cfg.data.train.datasets):
                train_pipeline = cfg.data.train.datasets[0][0].pipeline
            else:
                train_pipeline = cfg.data.train.datasets[0].pipeline
        elif is_2dlist(cfg.data.train.pipeline):
            train_pipeline = cfg.data.train.pipeline[0]
        else:
            train_pipeline = cfg.data.train.pipeline

        if val_dataset['type'] in ['ConcatDataset', 'UniformConcatDataset']:
            for dataset in val_dataset['datasets']:
                dataset.pipeline = train_pipeline
        else:
            val_dataset.pipeline = train_pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmocr version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmocr_version=__version__ +
                                          get_git_hash()[:7],
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=(not args.no_validate),
                   timestamp=timestamp,
                   meta=meta)
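Example #10 draws its seed from init_random_seed and optionally offsets it per rank via args.diff_seed. A sketch of what such a helper can look like when torch.distributed is already initialised; this is an illustration, not the exact upstream implementation:

import numpy as np
import torch
import torch.distributed as dist


def init_random_seed(seed=None, device='cuda'):
    """Return the given seed, or draw one on rank 0 and broadcast it."""
    if seed is not None:
        return seed
    # draw the seed on rank 0 and share it so every process agrees
    rank = dist.get_rank() if dist.is_initialized() else 0
    random_num = torch.tensor(np.random.randint(2**31 - 1) if rank == 0 else 0,
                              dtype=torch.int32, device=device)
    if dist.is_initialized():
        dist.broadcast(random_num, src=0)
    return random_num.item()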
Example #11
def collect_env():
    env_info = collect_basic_env()
    env_info['MMAction2'] = (mmaction.__version__ + '+' +
                             get_git_hash(digits=7))
    return env_info
Example #12
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_directory is determined in this priority: CLI > segment in file > filename
    if args.work_directory is not None:
        # update configs according to CLI args if args.work_directory is not None
        cfg.work_directory = args.work_directory
    elif cfg.get('work_directory', None) is None:
        # use config filename as default work_directory if cfg.work_directory is None
        cfg.work_directory = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_directory
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_directory))
    # dump config
    cfg.dump(osp.join(cfg.work_directory, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_directory, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # ---------- MI-AOD Training and Test Start Here ---------- #

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    X_L, X_U, X_all, all_anns = get_X_L_0(cfg)

    # # load set and model
    # # Please change it to the timestamp directory which you want to load data from.
    # last_timestamp = '/20201013_154728'
    # # Please change it to the cycle which you want to load data from.
    # load_cycle = 0
    # X_L = np.load(cfg.work_directory + last_timestamp +'/X_L_' + str(load_cycle) + '.npy')
    # X_U = np.load(cfg.work_directory + last_timestamp +'/X_U_' + str(load_cycle) + '.npy')
    # cfg.cycles = list(range(load_cycle, 7))

    cfg.work_directory = cfg.work_directory + '/' + timestamp
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_directory))
    np.save(cfg.work_directory + '/X_L_' + '0' + '.npy', X_L)
    np.save(cfg.work_directory + '/X_U_' + '0' + '.npy', X_U)
    initial_step = cfg.lr_config.step
    for cycle in cfg.cycles:
        # set random seeds
        if args.seed is not None:
            logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
            set_random_seed(args.seed, deterministic=args.deterministic)
        cfg.seed = args.seed
        meta['seed'] = args.seed
        # get the config of the labeled dataset
        cfg = create_X_L_file(cfg, X_L, all_anns, cycle)
        # load model
        model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

        # # Please change it to the epoch which you want to load model at.
        # model_file_name = '/latest.pth'
        # model.load_state_dict(torch.load(cfg.work_directory[:16] + last_timestamp + model_file_name)['state_dict'])

        # load dataset
        datasets = [build_dataset(cfg.data.train)]
        if len(cfg.workflow) == 2:
            val_dataset = copy.deepcopy(cfg.data.val)
            val_dataset.pipeline = cfg.data.train.pipeline
            datasets.append(build_dataset(val_dataset))
        if cfg.checkpoint_config is not None and cycle == 0:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            cfg.checkpoint_config.meta = dict(mmdet_version=__version__ + get_git_hash()[:7],
                                              config=cfg.pretty_text, CLASSES=datasets[0].CLASSES)
        model.CLASSES = datasets[0].CLASSES
        for epoch in range(cfg.epoch):
            # The learning rate is reduced and the model is evaluated only in the last 3 epochs.
            if epoch == cfg.epoch - 1:
                cfg.lr_config.step = initial_step
                cfg.evaluation.interval = cfg.epoch_ratio[0]
            else:
                cfg.lr_config.step = [1000]
                cfg.evaluation.interval = 100

            # ---------- Label Set Training ----------

            if epoch == 0:
                cfg = create_X_L_file(cfg, X_L, all_anns, cycle)
                datasets = [build_dataset(cfg.data.train)]
                losstype.update_vars(0)
                cfg.total_epochs = cfg.epoch_ratio[0]
                cfg_bak = cfg.deepcopy()
                time.sleep(2)
                for name, value in model.named_parameters():
                    value.requires_grad = True
                train_detector(model, datasets, cfg,
                               distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
                cfg = cfg_bak

            # ---------- Re-weighting and Minimizing Instance Uncertainty ----------

            cfg_u = create_X_U_file(cfg.deepcopy(), X_U, all_anns, cycle)
            cfg = create_X_L_file(cfg, X_L, all_anns, cycle)
            datasets_u = [build_dataset(cfg_u.data.train)]
            datasets = [build_dataset(cfg.data.train)]
            losstype.update_vars(1)
            cfg_u.total_epochs = cfg_u.epoch_ratio[1]
            cfg.total_epochs = cfg.epoch_ratio[1]
            cfg_u_bak = cfg_u.deepcopy()
            cfg_bak = cfg.deepcopy()
            time.sleep(2)
            for name, value in model.named_parameters():
                if name in cfg.theta_f_1:
                    value.requires_grad = False
                elif name in cfg.theta_f_2:
                    value.requires_grad = False
                else:
                    value.requires_grad = True
            train_detector(model, [datasets, datasets_u], [cfg, cfg_u],
                           distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
            cfg_u = cfg_u_bak
            cfg = cfg_bak

            # ---------- Re-weighting and Maximizing Instance Uncertainty ----------

            cfg_u = create_X_U_file(cfg.deepcopy(), X_U, all_anns, cycle)
            cfg = create_X_L_file(cfg, X_L, all_anns, cycle)
            datasets_u = [build_dataset(cfg_u.data.train)]
            datasets = [build_dataset(cfg.data.train)]
            losstype.update_vars(2)
            cfg_u.total_epochs = cfg_u.epoch_ratio[1]
            cfg.total_epochs = cfg.epoch_ratio[1]
            cfg_u_bak = cfg_u.deepcopy()
            cfg_bak = cfg.deepcopy()
            time.sleep(2)
            for name, value in model.named_parameters():
                if name in cfg.theta_f_1:
                    value.requires_grad = True
                elif name in cfg.theta_f_2:
                    value.requires_grad = True
                else:
                    value.requires_grad = False
            train_detector(model, [datasets, datasets_u], [cfg, cfg_u],
                           distributed=distributed,validate=(not args.no_validate), timestamp=timestamp, meta=meta)
            cfg_u = cfg_u_bak
            cfg = cfg_bak

            # ---------- Label Set Training ----------

            cfg = create_X_L_file(cfg, X_L, all_anns, cycle)
            datasets = [build_dataset(cfg.data.train)]
            losstype.update_vars(0)
            cfg.total_epochs = cfg.epoch_ratio[0]
            cfg_bak = cfg.deepcopy()
            for name, value in model.named_parameters():
                value.requires_grad = True
            time.sleep(2)
            train_detector(model, datasets, cfg,
                           distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
            cfg = cfg_bak

        # ---------- Informative Image Selection ----------

        if cycle != cfg.cycles[-1]:
            # get new labeled data
            dataset_al = build_dataset(cfg.data.test)
            data_loader = build_dataloader(dataset_al, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu,
                                           dist=False, shuffle=False)
            # set random seeds
            if args.seed is not None:
                logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
                set_random_seed(args.seed, deterministic=args.deterministic)
            cfg.seed = args.seed
            meta['seed'] = args.seed
            uncertainty = calculate_uncertainty(cfg, model, data_loader, return_box=False)
            # update labeled set
            X_L, X_U = update_X_L(uncertainty, X_all, X_L, cfg.X_S_size)
            # save set and model
            np.save(cfg.work_directory + '/X_L_' + str(cycle+1) + '.npy', X_L)
            np.save(cfg.work_directory + '/X_U_' + str(cycle+1) + '.npy', X_U)
Example #13
def collect_env():
    env_info = collect_basic_env()
    env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7))
    return env_info
Example #14
def maintrain(args):
    cfg = Config.fromfile(args.config)

    # newly added to set up the dataset here, so the configuration file itself does not need to change
    cfg.dataset_type = 'CocoDataset'
    cfg.data.test.type = 'CocoDataset'
    cfg.data.test.data_root = args.data_root
    cfg.data.test.ann_file = args.data_root + 'annotations_val20new.json'  #'annotations_valallnew.json'
    cfg.data.test.img_prefix = ''

    cfg.data.train.type = 'CocoDataset'
    cfg.data.train.data_root = args.data_root
    cfg.data.train.ann_file = args.data_root + 'annotations_train20new.json'  #'annotations_trainallnew.json'
    cfg.data.train.img_prefix = ''

    cfg.data.val.type = 'CocoDataset'
    cfg.data.val.data_root = args.data_root
    cfg.data.val.ann_file = args.data_root + 'annotations_val20new.json'  #'annotations_valallnew.json'
    cfg.data.val.img_prefix = ''

    #batch size=2, workers=0, eta: 1 day, 5:56:54,  memory: 5684
    cfg.data.samples_per_gpu = 4  #batch size
    cfg.data.workers_per_gpu = 4
    #eta: 1 day, 6:17:04, memory: 10234

    # modify num classes of the model in box head
    cfg.model.roi_head.bbox_head.num_classes = len(args.classes)  # 4

    # import modules from string list.
    if cfg.get('custom_imports', None):  #not used
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark: benchmark mode is good whenever the input sizes of
    # the network do not vary. cuDNN then looks for the optimal set of
    # algorithms for that particular configuration (which takes some time),
    # which usually leads to faster runtime.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.workdir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.workdir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    cfg.load_from = args.checkpoint
    if args.resumefrom is not None:
        cfg.resume_from = args.resumefrom
    if args.gpuids is not None:
        cfg.gpu_ids = args.gpuids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    distributed = False
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:  #not used
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(cfg.model,
                           train_cfg=cfg.get('train_cfg'),
                           test_cfg=cfg.get('test_cfg'))

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__ +
                                          get_git_hash()[:7],
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = args.classes  #datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=(not args.novalidate),
                   timestamp=timestamp,
                   meta=meta)
Example #15
def collect_env():
    """Collect the information of the running environments."""
    env_info = collect_base_env()
    env_info['MMClassification'] = mmcls.__version__ + '+' + get_git_hash()[:7]
    return env_info
Example #16
    def Train(self):
        self.setup()

        # create work_dir
        mmcv.mkdir_or_exist(
            osp.abspath(self.system_dict["local"]["cfg"].work_dir))
        # dump config
        self.system_dict["local"]["cfg"].dump(
            osp.join(self.system_dict["local"]["cfg"].work_dir,
                     osp.basename(self.system_dict["params"]["config"])))
        # init the logger before other steps
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        log_file = osp.join(self.system_dict["local"]["cfg"].work_dir,
                            f'{timestamp}.log')
        logger = get_root_logger(
            log_file=log_file,
            log_level=self.system_dict["local"]["cfg"].log_level)

        # init the meta dict to record some important information such as
        # environment info and seed, which will be logged
        meta = dict()
        # log env info
        env_info_dict = collect_env()
        env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
        dash_line = '-' * 60 + '\n'
        logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                    dash_line)
        meta['env_info'] = env_info

        # log some basic info
        logger.info(
            f'Distributed training: {self.system_dict["local"]["distributed"]}'
        )
        logger.info(f'Config:\n{self.system_dict["local"]["cfg"].pretty_text}')

        # set random seeds
        if self.system_dict["params"]["seed"] is not None:
            logger.info(
                f'Set random seed to {self.system_dict["params"]["seed"]}, '
                f'deterministic: '
                f'{self.system_dict["params"]["deterministic"]}')
            set_random_seed(
                self.system_dict["params"]["seed"],
                deterministic=self.system_dict["params"]["deterministic"])
        self.system_dict["local"]["cfg"].seed = self.system_dict["params"][
            "seed"]
        meta['seed'] = self.system_dict["params"]["seed"]

        model = build_detector(
            self.system_dict["local"]["cfg"].model,
            train_cfg=self.system_dict["local"]["cfg"].train_cfg,
            test_cfg=self.system_dict["local"]["cfg"].test_cfg)

        datasets = [build_dataset(self.system_dict["local"]["cfg"].data.train)]

        if len(self.system_dict["local"]["cfg"].workflow) == 2:
            val_dataset = copy.deepcopy(
                self.system_dict["local"]["cfg"].data.val)
            val_dataset.pipeline = self.system_dict["local"][
                "cfg"].data.train.pipeline
            datasets.append(build_dataset(val_dataset))

        if self.system_dict["local"]["cfg"].checkpoint_config is not None:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            self.system_dict["local"]["cfg"].checkpoint_config.meta = dict(
                mmdet_version=__version__ + get_git_hash()[:7],
                config=self.system_dict["local"]["cfg"].pretty_text,
                CLASSES=datasets[0].CLASSES)

        # add an attribute for visualization convenience
        model.CLASSES = datasets[0].CLASSES
        print("Classes to be trained: {}".format(model.CLASSES))

        train_detector(
            model,
            datasets,
            self.system_dict["local"]["cfg"],
            distributed=self.system_dict["local"]["distributed"],
            validate=(not self.system_dict["params"]["no_validate"]),
            timestamp=timestamp,
            meta=meta)