Example #1
    def __init__(self, args):
        self._init_cfg(args.config_file)
        # set random seeds
        if args.seed is not None:
            set_random_seed(args.seed, deterministic=args.deterministic)

        self._init_data()
        self._init_model(args.checkpoint_file)

        self._temp_dir = tempfile.TemporaryDirectory()

        if args.out:
            self._result_base_path = pathlib.Path(args.out)
            print("Results will be saved in: {}".format(self._result_base_path))
        else:
            self._result_base_path = pathlib.Path(self._temp_dir.name)

        # intermediate pickle results live under <base>/pkl_results
        self._intermediate_res_path = self._result_base_path.joinpath(
            "pkl_results")
        self._intermediate_res_path.mkdir(parents=True, exist_ok=True)

        self._init_metrics()
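
For reference, set_random_seed from mmdet.apis is essentially the following (a sketch of the commonly published implementation; exact details may differ across mmdet versions):

import random

import numpy as np
import torch


def set_random_seed(seed, deterministic=False):
    """Seed Python, NumPy and PyTorch RNGs; optionally force cuDNN determinism."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # reproducible kernels at the cost of speed
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False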
Example #2
def test_paconv_regularization_loss():
    from mmdet3d.models.losses import PAConvRegularizationLoss
    from mmdet3d.ops import PAConv, PAConvCUDA
    from mmdet.apis import set_random_seed

    class ToyModel(nn.Module):
        def __init__(self):
            super(ToyModel, self).__init__()

            self.paconvs = nn.ModuleList()
            self.paconvs.append(PAConv(8, 16, 8))
            self.paconvs.append(PAConv(8, 16, 8, kernel_input='identity'))
            self.paconvs.append(PAConvCUDA(8, 16, 8))

            self.conv1 = nn.Conv1d(3, 8, 1)

    set_random_seed(0, True)
    model = ToyModel()

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        paconv_corr_loss = PAConvRegularizationLoss(reduction='l2')

    paconv_corr_loss = PAConvRegularizationLoss(reduction='mean')
    mean_corr_loss = paconv_corr_loss(model.modules())
    assert mean_corr_loss >= 0
    assert mean_corr_loss.requires_grad

    sum_corr_loss = paconv_corr_loss(model.modules(), reduction_override='sum')
    assert torch.allclose(sum_corr_loss, mean_corr_loss * 3)

    none_corr_loss = paconv_corr_loss(model.modules(),
                                      reduction_override='none')
    assert none_corr_loss.shape[0] == 3
    assert torch.allclose(none_corr_loss.mean(), mean_corr_loss)
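
The reduction_override assertions hold because the toy model contains exactly three PAConv modules, so the summed loss is three times the mean. A minimal illustration with plain tensors (the values are hypothetical):

import torch

per_layer = torch.tensor([0.12, 0.07, 0.21])  # hypothetical 'none'-reduction values
assert per_layer.shape[0] == 3
assert torch.allclose(per_layer.sum(), per_layer.mean() * len(per_layer))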
Example #3
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config: {}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # model.CLASSES = ('破洞', '污渍', '三丝', '结头', '花板跳', '百脚', '毛粒', '粗经',
    # '松经', '断经', '吊经', '粗维', '纬缩', '浆斑', '整经结', '星跳', '跳花', '断氨纶', '色差档',
    # '磨痕', '死皱')
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
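
The autoscale branch implements the linear scaling rule from the cited paper: the config LR is assumed to be tuned for 8 GPUs and is rescaled in proportion to the GPU count. With a hypothetical base LR of 0.02:

base_lr = 0.02  # hypothetical 8-GPU baseline LR from the config
for gpus in (1, 4, 8, 16):
    print(gpus, base_lr * gpus / 8)  # 0.0025, 0.01, 0.02, 0.04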
Example #4
def main():
    # parse command-line arguments
    args = parse_args()
    # load the config file
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    # if no work dir is given on the CLI, keep the default
    # (work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x'); otherwise override it
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir

    # if resuming from a previous checkpoint, update cfg; otherwise keep the
    # default resume_from = None
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # set the GPU count from the CLI
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    # without a launcher, distributed stays False
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # core calls: build_detector, get_dataset, train_detector

    # the heart of training: build_detector() constructs the model from the
    # config data and returns the instantiated network object
    model = build_detector(
        # the model, train and test sections of the config
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg)

    # build the dataset from cfg.data.train (itself a dict); returns a dataset
    # object carrying the samples and all of their labels
    train_dataset = get_dataset(cfg.data.train)

    # start training
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #5
def set_p3_train(cfg,
                 save_file_name,
                 samples_per_gpu=16,
                 workers_per_gpu=4,
                 max_keep_ckpts=1):
    set_wandb(cfg, 'p32', save_file_name)

    set_save_best_score(cfg)

    # all of this is handled in trash/dataset.py
    #     cfg.data.train.classes = classes
    #     cfg.data.train.img_prefix = PREFIX
    #     cfg.data.train.ann_file = PREFIX + 'train.json'
    #     cfg.data.train.pipeline[2]['img_scale'] = (512, 512)

    #     cfg.data.val.classes = classes
    #     cfg.data.val.img_prefix = PREFIX
    #     cfg.data.val.ann_file = PREFIX + 'val.json'
    #     cfg.data.val.pipeline[1]['img_scale'] = (512, 512)

    #     set_num_classes(cfg, num_classes=11)

    cfg.data.samples_per_gpu = samples_per_gpu
    cfg.data.workers_per_gpu = workers_per_gpu

    cfg.seed = 2020
    set_random_seed(2020, True)

    cfg.gpu_ids = [0]
    cfg.work_dir = f'./work_dirs/{save_file_name}'

    cfg.optimizer_config.grad_clip['max_norm'] = 35
    cfg.optimizer_config.grad_clip['norm_type'] = 2
    #  cfg.optimizer_config._delete_=True for trash
    cfg.checkpoint_config = dict(max_keep_ckpts=max_keep_ckpts, interval=1)
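
The grad_clip settings feed mmcv's optimizer hook, which clips gradients by their global L2 norm. Roughly what happens under the hood (a self-contained sketch with a stand-in module, not the hook itself):

import torch
from torch import nn

model = nn.Linear(4, 2)                    # stand-in module
model(torch.randn(8, 4)).sum().backward()
# clip by global L2 norm, matching grad_clip(max_norm=35, norm_type=2) above
total_norm = torch.nn.utils.clip_grad_norm_(
    model.parameters(), max_norm=35, norm_type=2)
print(float(total_norm))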
Example #6
def train(args):
    cfg = Config.fromfile(args.config)
    cfg.data.workers_per_gpu = args.workers_per_gpu
    cfg.data.imgs_per_gpu = args.imgs_per_gpu
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    else:
        args.work_dir = cfg.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
    torch.save(model, args.work_dir + '/' + args.model_name + '.pth')
    return
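
Note that torch.save(model, ...) pickles the entire module, which ties the file to the exact class definitions. A weights-only save is the more portable pattern; a self-contained sketch with a stand-in module and hypothetical paths:

import os

import torch
from torch import nn

model = nn.Linear(4, 2)  # stand-in for the trained detector
work_dir, model_name = './work_dirs/demo', 'final'  # hypothetical values
os.makedirs(work_dir, exist_ok=True)
# weights-only save is portable across code changes; reload via load_state_dict
torch.save(model.state_dict(), os.path.join(work_dir, model_name + '.pth'))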
Example #7
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args

    cfg.work_dir = cfg.work_dir + '_' + time.strftime('Time_%m%d_%H%M%S',
                                                      time.localtime())

    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # log cfg
    logger.info('training config:{}\n'.format(pprint.pformat(cfg._cfg_dict)))

    # log git hash
    logger.info('git hash: {}'.format(get_git_hash()))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          classes=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
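
The strftime suffix gives each run a unique work_dir; for example (the base directory here is hypothetical and the output depends on the current time):

import time

print('work_dirs/faster_rcnn' + '_' + time.strftime('Time_%m%d_%H%M%S',
                                                    time.localtime()))
# e.g. work_dirs/faster_rcnn_Time_0315_142501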
Example #8
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    args.resume_from = "/home/ubuntu/code/fengda/MaskTrackRCNN/pretrained_models/epoch_12.pth"
    # network_data = torch.load(args.resume_from)
    load_checkpoint(model, args.resume_from)
    # model.eval()
    # for param in model.parameters():
    #     param.requires_grad = False
    model.load_flow()
    # model.flow_head.train()
    # for param in model.flow_head.parameters():
    #     param.requires_grad = True

    # get dataset
    train_dataset = get_dataset(cfg.data.train)
    print("len of dataset: {}.".format(len(train_dataset)))

    # train
    train_flownet(model,
                  train_dataset,
                  cfg,
                  distributed=distributed,
                  validate=args.validate,
                  logger=logger)
Example #9
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    # enable when the input image scale is fixed -- it speeds things up, but it
    # is usually off and only enabled for fixed-scale networks such as SSD512
    # TODO: check whether this model's input really is fixed-size
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        # work directory for training artifacts; if not set on the CLI, one is
        # derived automatically from the .py config file name
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        # checkpoint file to resume training from
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # ipdb.set_trace(context=35)
    # build the model
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # pass in the training-data config
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in checkpoints as meta data
        # note: previously released models did not store this class info and
        # used the default COCO/VOC classes, so older checkpoints trigger a
        # harmless warning at detection time
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)

    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES  # the model has no CLASSES attribute beforehand; Python creates it on assignment
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #10
def main():
    # parse arguments
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.fp16:
        cfg.fp16 = {'loss_scale': 512.}
    if args.workers is not None:
        cfg.data.workers_per_gpu = args.workers
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8.
    ops.sys_print('Args:\n--', args)
    ops.sys_print('Configs:\n--', cfg)

    # init distributed env, logger and random seeds
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    
    # build model
    model = build_detector(
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg)
    
    # build dataset
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta = {
            'mmdet_version': mmdet.__version__,
            'config': cfg.text,
            'CLASSES': train_dataset.CLASSES}
    model.CLASSES = train_dataset.CLASSES

    # run training
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
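
cfg.fp16 = {'loss_scale': 512.} turns on static loss scaling for mixed-precision training: the loss is multiplied before backward so small fp16 gradients do not underflow, then gradients are divided back before the optimizer step. The core idea in plain PyTorch (with a stand-in loss value):

import torch

loss_scale = 512.0
loss = torch.tensor(0.003, requires_grad=True)  # stand-in loss value
(loss * loss_scale).backward()                  # grads come out scale x too large
print(loss.grad / loss_scale)                   # unscale before the step -> tensor(1.)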
Example #11
    def set_configuration(self, cfg_in):
        cfg = self.get_configuration()
        cfg.merge_config(cfg_in)

        self._config_file = str(cfg.get_value("config_file"))
        self._seed_weights = str(cfg.get_value("seed_weights"))
        self._train_directory = str(cfg.get_value("train_directory"))
        self._output_directory = str(cfg.get_value("output_directory"))
        self._gpu_count = int(cfg.get_value("gpu_count"))
        self._integer_labels = strtobool(cfg.get_value("integer_labels"))
        self._launcher = str(cfg.get_value("launcher"))
        self._validate = strtobool(cfg.get_value("validate"))

        self._training_data = []

        self._cfg = Config.fromfile(self._config_file)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory
            self._groundtruth_store = os.path.join(self._train_directory,
                                                   self._tmp_annotation_file)
            if not os.path.exists(self._train_directory):
                os.mkdir(self._train_directory)
        else:
            self._groundtruth_store = self._tmp_annotation_file

        if self._seed_weights is not None:
            self._cfg.resume_from = self._seed_weights

        if self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
        else:
            self._cfg.gpus = torch.cuda.device_count()

        if self._cfg.checkpoint_config is not None:
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            init_dist(self._launcher, **self._cfg.dist_params)

        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != "none":
            self._logger.info('Set random seed to {}'.format(self._random_seed))
            set_random_seed(int(self._random_seed))

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
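
Comparing strings with `is` tests object identity, not value, so `!=` is the right operator for the "none" check above. A quick demonstration of the difference:

a = ''.join(['no', 'ne'])  # built at runtime, so not interned with the literal
print(a == 'none')         # True  -- value equality, what the check needs
print(a is 'none')         # False -- identity test (SyntaxWarning on Python 3.8+)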
Example #12
File: train.py  Project: XDong18/bdd-mtl
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg,
                           global_cfg=cfg)
    train_dataset = [build_dataset(c) for c in cfg.data.train] \
                    if type(cfg.data.train) == list else \
                    build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES
            if hasattr(train_dataset, 'CLASSES') else None)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES if hasattr(train_dataset,
                                                     'CLASSES') else None
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger,
                   multitask=type(train_dataset) == list,
                   vis=True)
Example #13
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)

    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # print('cfg.data.train:',cfg.data.train)
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    print('model.CLASSES:', model.CLASSES)
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,  # default: False
        validate=args.validate,
        logger=logger)
Example #14
File: train.py  Project: microsoft/AutoML
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    
    train_dataset = build_dataset(cfg.data.train)
        
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(), find_unused_parameters=True, device_ids=[args.local_rank], output_device=args.local_rank)
    print(model)
    print("Model has {:.2f}M parameters.".format(
        sum(x.numel() for x in model.parameters()) / 1e6))
    print("Backbone has {:.2f}M parameters.".format(
        sum(x.numel() for x in model.module.backbone.parameters()) / 1e6))
    print("Neck has {:.2f}M parameters.".format(
        sum(x.numel() for x in model.module.neck.parameters()) / 1e6))
    print("Head has {:.2f}M parameters.".format(
        sum(x.numel() for x in model.module.bbox_head.parameters()) / 1e6))
    
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
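
The parameter-count prints use the standard numel idiom, divided by 1e6 to report millions; a self-contained version (the helper name is ours):

from torch import nn


def count_m_params(module):
    """Total parameter count in millions (hypothetical helper)."""
    return sum(p.numel() for p in module.parameters()) / 1e6


print(count_m_params(nn.Linear(512, 1000)))  # 0.513: 512*1000 weights + 1000 biases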
Example #15
def set_test(cfg, cfg_name, samples_per_gpu=1, workers_per_gpu=4):

    cfg.data.samples_per_gpu = samples_per_gpu
    cfg.data.workers_per_gpu = workers_per_gpu

    cfg.seed = 2020
    set_random_seed(2020, True)

    cfg.gpu_ids = [0]
    cfg.work_dir = f'./work_dirs/{cfg_name}'
Example #16
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)

    # run one evaluation pass after training finishes
    import os.path as osp
    checkpoint = osp.join(cfg.work_dir, 'latest.pth')
    out = osp.join(cfg.work_dir, 'val_cropped_dets.pkl')
    _do_dota_eval(args.config, checkpoint, out)
Example #17
def main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        import torch.distributed as dist
        dist.init_process_group('gloo', init_method='file:///tmp/somefile', rank=0, world_size=1)
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #18
def main():
    import os
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # get dataset
    # from mmdet.models.decision_net.utils import modify_cfg
    # video_name = '01c783268c'
    # cfg.data.train = modify_cfg(cfg, video_name)
    train_dataset = get_dataset(cfg.data.train)
    val_dataset = get_dataset(cfg.data.val)
    print("len of dataset: {}.".format(len(train_dataset)))

    # train
    train_detector(model, [train_dataset, val_dataset],
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #19
    def load_network(self):
        from mmcv import Config
        self._cfg = Config.fromfile(self._train_config)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory

        if self._seed_weights is not None:
            self._cfg.load_from = self._seed_weights

        if self._gpu_count is not None and self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
            flux_factor = self._images_per_gpu * self._gpu_count
            self._cfg.optimizer['lr'] = self._cfg.optimizer['lr'] * flux_factor

        if self._cfg.checkpoint_config is not None:
            from mmdet import __version__
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            from mmcv.runner import init_dist
            init_dist(self._launcher, **self._cfg.dist_params)

        from mmdet.apis import get_root_logger
        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != 'none':
            self._logger.info('Set random seed to {}'.format(
                self._random_seed))
            from mmdet.apis import set_random_seed
            if isinstance(self._random_seed, int):
                set_random_seed(int(self._random_seed))

        from mmdet.models import build_detector

        if self._cfg.model['pretrained'] is not None:
            if not os.path.exists(self._cfg.model['pretrained']):
                dirname = os.path.dirname(self._config_file)
                relpath = os.path.join(dirname, self._cfg.model['pretrained'])
                if os.path.exists(relpath):
                    self._cfg.model['pretrained'] = relpath

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
Example #20
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg_start_time = time.time()
    cfg = Config.fromfile(args.config)
    cfg_last = time.time() - cfg_start_time
    print('cfg time:', cfg_last)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    dataset_start_time = time.time()
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    dataset_last = time.time() - dataset_start_time
    print('dataset & dataloader time:', dataset_last)

    # build the model and load checkpoint
    model_start_time = time.time()
    model = build_detector(cfg.model, train_cfg=None, test_cfg=None)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    model_time = time.time() - model_start_time
    print('model time:', model_time)

    model = MMDataParallel(model, device_ids=[0])
    single_seg_test(model, data_loader)
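
fuse_module folds each BatchNorm layer into its preceding convolution so inference skips the BN op entirely. The per-channel algebra, as a sketch (not mmdet's actual helper):

import torch


def fuse_conv_bn_params(W, b, gamma, beta, mean, var, eps=1e-5):
    """Fold BN statistics into conv weight W (out, in, kh, kw) and bias b (out,)."""
    scale = gamma / torch.sqrt(var + eps)  # one factor per output channel
    return W * scale.reshape(-1, 1, 1, 1), (b - mean) * scale + beta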
Example #21
def main():
    # ipdb.set_trace()
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark: enable when the input image scale is fixed, for speed
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        # work directory for training artifacts; if not set on the CLI, one is
        # derived automatically from the .py config file name
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        # checkpoint file to resume training from
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # if cfg.checkpoint_config is not None:
    #     # save mmdet version in checkpoints as meta data
    #     cfg.checkpoint_config.meta = dict(
    #         mmdet_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # the model is built exactly as at inference time
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # ipdb.set_trace()
    # note that cfg.data.train is what gets passed in
    train_dataset = get_dataset(cfg.data.train)

    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #22
def set_p3_test(cfg):

    cfg.data.test.classes = classes
    cfg.data.test.img_prefix = PREFIX
    cfg.data.test.ann_file = PREFIX + 'test.json'
    cfg.data.test.pipeline[1]['img_scale'] = (512, 512)

    set_num_classes(cfg, num_classes=11)

    cfg.seed = 2020
    set_random_seed(2020, True)

    cfg.gpu_ids = [0]
Example #23
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)

    if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'train'):
        train_dataset2 = get_dataset(cfg.data2.train)
    else:
        train_dataset2 = None
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger,
                   train_dataset2=train_dataset2)
Example #24
def main():
    args = parse_args()
    fb_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = fb_cfg.search_space
    # base = _space['base']
    # depth = _space['depth']
    # space = _space['space']

    model_cfg = mmcv_config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if model_cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        model_cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        model_cfg.resume_from = args.resume_from
    model_cfg.gpus = args.gpus
    if model_cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        model_cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                config=model_cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **model_cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(model_cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = detection(mmcv_config(model_cfg['model_cfg']),
                      mmcv_config(model_cfg['train_cfg']),
                      mmcv_config(model_cfg['test_cfg']), _space,
                      args.theta_txt)
    print(model)
    train_dataset = get_dataset(model_cfg.data.train)
    train_detector(model,
                   train_dataset,
                   model_cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #25
def main():
    args = parse_args()  # parse command-line arguments
    cfg = Config.fromfile(args.config)  # load the config file
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir  # checkpoint save path
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from  # checkpoint resume from path
    cfg.gpus = args.gpus  # gpus numbers

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':  # launcher == 'none' means no distributed training
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # cfg.data.train is a config dict; get_dataset returns the dataset instance
    train_dataset = get_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES  # a tuple containing class names
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #26
def set_train(cfg, cfg_name, samples_per_gpu=16, workers_per_gpu=4):

    set_wandb(cfg, 'p32', cfg_name)

    set_save_best_score(cfg)

    cfg.data.samples_per_gpu = samples_per_gpu
    cfg.data.workers_per_gpu = workers_per_gpu

    cfg.seed = 2020
    set_random_seed(2020, True)

    cfg.gpu_ids = [0]
    cfg.work_dir = f'./work_dirs/{cfg_name}'
Example #27
def main():
    args = parse_args()
    epoch = int(os.path.basename(args.checkpoint)[6:-4])
    print(f'Epoch [{epoch}]')

    cfg_start_time = time.time()
    cfg = Config.fromfile(args.config)
    cfg_last = time.time() - cfg_start_time
    print('cfg time:', cfg_last)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.model.pretrained = None
    # cfg.data.test.test_mode = True

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    samples_per_gpu = 1
    dataset_start_time = time.time()
    # dataset = build_dataset(cfg.data.test)
    dataset = build_dataset(cfg.data.get(args.split))
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=False,
                                   shuffle=False)
    dataset_last = time.time() - dataset_start_time
    print('dataset & dataloader time:', dataset_last)

    # build the model and load checkpoint
    model_start_time = time.time()
    model = build_detector(cfg.model, train_cfg=None, test_cfg=None)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    model_time = time.time() - model_start_time
    print('model time:', model_time)

    model = MMDataParallel(model, device_ids=[0])
    seg_test_with_loss(model, data_loader)
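
The epoch number is recovered by slicing the checkpoint filename, which assumes the epoch_<N>.pth naming scheme used for the checkpoints:

import os

name = os.path.basename('/path/to/epoch_12.pth')
print(int(name[6:-4]))  # 12 -- len('epoch_') == 6, len('.pth') == 4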
Example #28
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    # the BACKBONES, NECKS, ROI_EXTRACTORS, HEADS and DETECTORS registries are
    # populated first, e.g. BACKBONES.register_module(SSDVGG),
    # @HEADS.register_module on AnchorHead and SSDHead, and
    # @DETECTORS.register_module on SingleStageDetector;
    # build_detector() then amounts to SingleStageDetector(**args)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #29
def main():

    args = parse_args()

    cfg = Config.fromfile(args.config)
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # if args.aux_cls_weight is not None:
    #     cfg.train_cfg.aux.cls_weight = args.aux_cls_weight
    # if args.aux_reg_weight is not None:
    #     cfg.train_cfg.aux.reg_weight = args.aux_reg_weight

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    train_dataset = get_dataset(cfg.data.train)

    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #30
def test_pointnet2_msg():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    set_random_seed(0, True)
    pn2_msg_cfg = _get_segmentor_cfg(
        'pointnet2/pointnet2_msg_16x2_scannet-3d-20class.py')
    pn2_msg_cfg.test_cfg.num_points = 32
    self = build_segmentor(pn2_msg_cfg).cuda()
    points = [torch.rand(1024, 6).float().cuda() for _ in range(2)]
    img_metas = [dict(), dict()]
    gt_masks = [torch.randint(0, 20, (1024, )).long().cuda() for _ in range(2)]

    # test forward_train
    losses = self.forward_train(points, img_metas, gt_masks)
    assert losses['decode.loss_sem_seg'].item() >= 0

    # test loss with ignore_index
    ignore_masks = [torch.ones_like(gt_masks[0]) * 20 for _ in range(2)]
    losses = self.forward_train(points, img_metas, ignore_masks)
    assert losses['decode.loss_sem_seg'].item() == 0

    # test simple_test
    self.eval()
    with torch.no_grad():
        scene_points = [
            torch.randn(500, 6).float().cuda() * 3.0,
            torch.randn(200, 6).float().cuda() * 2.5
        ]
        results = self.simple_test(scene_points, img_metas)
        assert results[0]['semantic_mask'].shape == torch.Size([500])
        assert results[1]['semantic_mask'].shape == torch.Size([200])

    # test aug_test
    with torch.no_grad():
        scene_points = [
            torch.randn(2, 500, 6).float().cuda() * 3.0,
            torch.randn(2, 200, 6).float().cuda() * 2.5
        ]
        img_metas = [[dict(), dict()], [dict(), dict()]]
        results = self.aug_test(scene_points, img_metas)
        assert results[0]['semantic_mask'].shape == torch.Size([500])
        assert results[1]['semantic_mask'].shape == torch.Size([200])
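
The all-ignored loss is zero because label 20 acts as the ignore index in the 20-class setup; the same behavior with plain F.cross_entropy (using reduction='sum' to keep the fully-ignored case well defined):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 20)                       # 4 points, 20 classes
labels = torch.full((4, ), 20, dtype=torch.long)  # every label is the ignore index
print(F.cross_entropy(logits, labels, ignore_index=20, reduction='sum'))  # tensor(0.)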
Example #31
  def set_configuration( self, cfg_in ):
    cfg = self.get_configuration()
    cfg.merge_config( cfg_in )

    self._config_file = str( cfg.get_value( "config_file" ) )
    self._seed_weights = str( cfg.get_value( "seed_weights" ) )
    self._train_directory = str( cfg.get_value( "train_directory" ) )
    self._output_directory = str( cfg.get_value( "output_directory" ) )
    self._gpu_count = int( cfg.get_value( "gpu_count" ) )
    self._integer_labels = strtobool( cfg.get_value( "integer_labels" ) )
    self._launcher = str( cfg.get_value( "launcher" ) )
    self._validate = strtobool( cfg.get_value( "validate" ) )

    self._training_data = []

    from mmcv import Config
    self._cfg = Config.fromfile( self._config_file )

    if self._cfg.get( 'cudnn_benchmark', False ):
      torch.backends.cudnn.benchmark = True

    if self._train_directory is not None:
      self._cfg.work_dir = self._train_directory
      self._groundtruth_store = os.path.join(
        self._train_directory, self._tmp_annotation_file )
      if not os.path.exists( self._train_directory ):
        os.mkdir( self._train_directory )
    else:
      self._groundtruth_store = self._tmp_annotation_file

    if self._seed_weights is not None:
      self._cfg.resume_from = self._seed_weights

    if self._gpu_count > 0:
      self._cfg.gpus = self._gpu_count
    else:
      self._cfg.gpus = torch.cuda.device_count()

    if self._cfg.checkpoint_config is not None:
      from mmdet import __version__
      self._cfg.checkpoint_config.meta = dict(
        mmdet_version=__version__, config=self._cfg.text )

    if self._launcher == 'none':
      self._distributed = False
    else:
      self._distributed = True
      from mmdet.apis import init_dist
      init_dist( self._launcher, **self._cfg.dist_params )

    from mmdet.apis import get_root_logger
    self._logger = get_root_logger( self._cfg.log_level )
    self._logger.info( 'Distributed training: {}'.format( self._distributed ) )

    if self._random_seed != "none":
      self._logger.info( 'Set random seed to {}'.format( self._random_seed ) )
      from mmdet.apis import set_random_seed
      set_random_seed( int( self._random_seed ) )

    from mmdet.models import build_detector

    self._model = build_detector(
      self._cfg.model, train_cfg=self._cfg.train_cfg, test_cfg=self._cfg.test_cfg )