Code Example #1
def inference_pytorch(args, cfg, distributed, data_loader):
    """Get predictions by pytorch models."""
    # remove redundant pretrain steps for testing
    turn_off_pretrained(cfg.model)

    # build the model and load checkpoint
    model = build_model(cfg.model,
                        train_cfg=None,
                        test_cfg=cfg.get('test_cfg'))

    if len(cfg.module_hooks) > 0:
        register_module_hooks(model, cfg.module_hooks)

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    return outputs
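
A hedged usage sketch of inference_pytorch follows. The function only reads the checkpoint, fuse_conv_bn, tmpdir and gpu_collect attributes of args, so a plain Namespace is enough to drive it; the config and checkpoint paths below are illustrative assumptions, not fixed repository values.

from argparse import Namespace

from mmcv import Config

from mmaction.datasets import build_dataloader, build_dataset

# Illustrative paths -- swap in a real config/checkpoint pair.
cfg = Config.fromfile('configs/recognition/tsn/example_config.py')
cfg.setdefault('module_hooks', [])
args = Namespace(checkpoint='work_dirs/latest.pth',
                 fuse_conv_bn=False,
                 tmpdir=None,
                 gpu_collect=False)

dataset = build_dataset(cfg.data.test, dict(test_mode=True))
data_loader = build_dataloader(dataset,
                               videos_per_gpu=1,
                               workers_per_gpu=1,
                               dist=False,
                               shuffle=False)
outputs = inference_pytorch(args, cfg, distributed=False,
                            data_loader=data_loader)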
Code Example #2
def test_register_module_hooks():
    _module_hooks = [
        dict(type='GPUNormalize',
             hooked_module='backbone',
             hook_pos='forward_pre',
             input_format='NCHW',
             mean=[123.675, 116.28, 103.53],
             std=[58.395, 57.12, 57.375])
    ]

    repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_fpath = osp.join(repo_dpath, 'configs/_base_/models/tsm_r50.py')
    config = mmcv.Config.fromfile(config_fpath)
    config.model['backbone']['pretrained'] = None

    # case 1
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward_pre'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_pre_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 2
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 3
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = 'cls_head'
    module_hooks[0]['hook_pos'] = 'backward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.cls_head._backward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 4
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = '_other_pos'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)

    # case 5
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = '_other_module'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)
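
The asserts above only check the name of the registered callable. For context, here is a minimal sketch of what a GPUNormalize-style normalize_hook closure could look like; this is an assumption about the implementation, not the repository's actual code. A forward_pre hook receives the module and its input tuple, and returning a tuple replaces that input.

import torch


def make_normalize_hook(mean, std):
    # Reshape to (1, C, 1, 1) so the statistics broadcast over NCHW input.
    mean = torch.tensor(mean).view(1, -1, 1, 1)
    std = torch.tensor(std).view(1, -1, 1, 1)

    def normalize_hook(module, inputs):
        # Normalize on whatever device the input tensor already lives on.
        x = inputs[0]
        x = (x - mean.to(x.device)) / std.to(x.device)
        return (x, ) + tuple(inputs[1:])

    return normalize_hook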
Code Example #3
def test_register_module_hooks():
    _module_hooks = [
        dict(type='GPUNormalize',
             hook_pos='forward_pre',
             input_format='NCHW',
             mean=[123.675, 116.28, 103.53],
             std=[58.395, 57.12, 57.375])
    ]

    # case 1
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward_pre'
    resnet = models.resnet50()
    handles = register_module_hooks(resnet, module_hooks)
    assert resnet._forward_pre_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 2
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward'
    resnet = models.resnet50()
    handles = register_module_hooks(resnet, module_hooks)
    assert resnet._forward_hooks[handles[0].id].__name__ == 'normalize_hook'

    # case 3
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'backward'
    resnet = models.resnet50()
    handles = register_module_hooks(resnet, module_hooks)
    assert resnet._backward_hooks[handles[0].id].__name__ == 'normalize_hook'

    # case 4
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = '_other_pos'
    resnet = models.resnet50()
    with pytest.raises(ValueError):
        handles = register_module_hooks(resnet, module_hooks)
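
register_module_hooks returns standard torch.utils.hooks.RemovableHandle objects (the tests above index their .id attribute), so a registered hook can also be detached again. An illustrative extra case, not part of the original test:

# Illustrative: a registered hook can be removed through its handle.
module_hooks = [
    dict(type='GPUNormalize',
         hook_pos='forward_pre',
         input_format='NCHW',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375])
]
resnet = models.resnet50()
handles = register_module_hooks(resnet, module_hooks)
handles[0].remove()
assert handles[0].id not in resnet._forward_pre_hooks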
Code Example #4
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    if args.out:
        # Overwrite output_config from args.out
        output_config = Config._merge_a_into_b(dict(out=args.out),
                                               output_config)

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    if args.eval:
        # Overwrite eval_config from args.eval
        eval_config = Config._merge_a_into_b(dict(metrics=args.eval),
                                             eval_config)
    if args.eval_options:
        # Add options from args.eval_options
        eval_config = Config._merge_a_into_b(args.eval_options, eval_config)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    dataset_type = cfg.data.test.type
    if output_config.get('out', None):
        if 'output_format' in output_config:
            # ugly workaround to make recognition and localization the same
            warnings.warn(
                'Skip checking `output_format` in localization task.')
        else:
            out = output_config['out']
            # make sure the dirname of the output path exists
            mmcv.mkdir_or_exist(osp.dirname(out))
            _, suffix = osp.splitext(out)
            if dataset_type == 'AVADataset':
                assert suffix[1:] == 'csv', ('For AVADataset, the format of '
                                             'the output file should be csv')
            else:
                assert suffix[1:] in file_handlers, (
                    'The format of the output '
                    'file should be json, pickle or yaml')

    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.average_clips is not None:
        # average_clips can be set during testing; it overrides the
        # original setting in the config
        if cfg.model.get('test_cfg') is None and cfg.get('test_cfg') is None:
            cfg.model.setdefault('test_cfg',
                                 dict(average_clips=args.average_clips))
        else:
            if cfg.model.get('test_cfg') is not None:
                cfg.model.test_cfg.average_clips = args.average_clips
            else:
                cfg.test_cfg.average_clips = args.average_clips

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # This config entry is used to register module hooks
    cfg.setdefault('module_hooks', [])

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    dataloader_setting = dict(
        videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
        workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
        dist=distributed,
        shuffle=False)
    dataloader_setting = dict(dataloader_setting,
                              **cfg.data.get('test_dataloader', {}))
    data_loader = build_dataloader(dataset, **dataloader_setting)

    # remove redundant pretrain steps for testing
    turn_off_pretrained(cfg.model)

    # build the model and load checkpoint
    model = build_model(cfg.model,
                        train_cfg=None,
                        test_cfg=cfg.get('test_cfg'))

    if len(cfg.module_hooks) > 0:
        register_module_hooks(model, cfg.module_hooks)

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if output_config.get('out', None):
            out = output_config['out']
            print(f'\nwriting results to {out}')
            if out.endswith('json'):
                result_dict = {}
                for result in outputs:
                    video_name = result['video_name']
                    result_dict[video_name] = result['proposal_list']
                output_dict = {
                    'version': 'VERSION 1.3',
                    'results': result_dict,
                    'external_data': {}
                }
                mmcv.dump(output_dict, out)
            else:
                dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
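
main() above assembles output_config and eval_config by merging the --out and --eval CLI flags into entries read from the config file, so both can also be set there directly. A hedged config fragment; the file name is illustrative and the metric names are common mmaction2 recognition metrics:

# Hypothetical fragment of a test config consumed by main() above.
output_config = dict(out='results.json')
eval_config = dict(metrics=['top_k_accuracy', 'mean_class_accuracy'])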
Code Example #5
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority:
    # CLI > config file > default (base filename)
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # The flag determines whether omnisource training is used
    cfg.setdefault('omnisource', False)

    # This config entry is used to register module hooks
    cfg.setdefault('module_hooks', [])

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config: {cfg.text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['config_name'] = osp.basename(args.config)
    meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\'))

    model = build_model(cfg.model,
                        train_cfg=cfg.get('train_cfg'),
                        test_cfg=cfg.get('test_cfg'))

    register_module_hooks(model.backbone, cfg.module_hooks)

    if cfg.omnisource:
        # If omnisource flag is set, cfg.data.train should be a list
        assert type(cfg.data.train) is list
        datasets = [build_dataset(dataset) for dataset in cfg.data.train]
    else:
        datasets = [build_dataset(cfg.data.train)]

    if len(cfg.workflow) == 2:
        # For simplicity, omnisource is not compatible with the val
        # workflow; we recommend using `--validate`
        assert not cfg.omnisource
        if args.validate:
            warnings.warn('val workflow is duplicated with `--validate`, '
                          'it is recommended to use `--validate`. see '
                          'https://github.com/open-mmlab/mmaction2/pull/123')
        val_dataset = copy.deepcopy(cfg.data.val)
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmaction version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmaction_version=__version__ + get_git_hash(digits=7),
            config=cfg.text)

    train_model(model,
                datasets,
                cfg,
                distributed=distributed,
                validate=args.validate,
                timestamp=timestamp,
                meta=meta)
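
Since this entry point calls cfg.setdefault('module_hooks', []) and then register_module_hooks(model.backbone, cfg.module_hooks), the hooks are switched on purely from the config file. A hedged fragment, reusing the GPUNormalize values from the tests above for illustration:

# Hypothetical config entry; hooked_module is unnecessary here because
# the hooks are registered directly on model.backbone.
module_hooks = [
    dict(type='GPUNormalize',
         hook_pos='forward_pre',
         input_format='NCHW',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375])
]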