Example 1
def test_get_logger_rank0():
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.DEBUG

    # the file name cannot be used to open the file a second time on
    # Windows, so `delete` should be set to `False` and we need to remove it
    # manually; more details can be found at
    # https://github.com/open-mmlab/mmcv/pull/1077
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
        assert isinstance(logger, logging.Logger)
        assert len(logger.handlers) == 2
        assert isinstance(logger.handlers[0], logging.StreamHandler)
        assert isinstance(logger.handlers[1], logging.FileHandler)
        logger_pkg3 = get_logger('rank0.pkg3')
        assert id(logger_pkg3) == id(logger)
        # flushing and closing all handlers in order to remove `f.name`
        logging.shutdown()

    os.remove(f.name)

    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    assert logger_pkg3.handlers == logger_pkg3.handlers
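
These test excerpts omit their imports and the fixture that fakes the distributed rank. A minimal sketch of the setup they presumably rely on is shown below; the patch targets are an assumption based on how mmcv's get_logger queries torch.distributed, not part of the excerpts.

import logging
import os
import re
import tempfile
from unittest.mock import patch

from mmcv.utils import get_logger, print_log

# hypothetical rank-0 fixture: get_logger falls back to rank 0 when
# torch.distributed is not initialized, so tests may patch these calls
# to simulate a specific rank
@patch('torch.distributed.is_available', lambda: True)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.get_rank', lambda: 0)
def test_get_logger_rank0():
    ...  # body as in the excerpt above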
Example 2
def test_get_logger_rank1():
    logger = get_logger('rank1.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    with tempfile.NamedTemporaryFile() as f:
        logger = get_logger('rank1.pkg2', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.INFO
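
The single-handler assertion is the point of this rank-1 variant: in mmcv's get_logger only rank 0 attaches a FileHandler, so other ranks keep just the StreamHandler even when log_file is given. A simplified sketch of that branch (not a verbatim copy of the implementation):

# simplified sketch of the handler setup inside get_logger
handlers = [logging.StreamHandler()]
if rank == 0 and log_file is not None:
    # only the rank-0 process writes the log file
    handlers.append(logging.FileHandler(log_file, 'w'))
for handler in handlers:
    handler.setLevel(log_level)
    logger.addHandler(handler)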
Example 3
def test_get_logger_rank0():
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.DEBUG

    with tempfile.NamedTemporaryFile() as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 2
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert isinstance(logger.handlers[1], logging.FileHandler)

    logger_pkg3 = get_logger('rank0.pkg3')
    assert id(logger_pkg3) == id(logger)

    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    assert logger_pkg3.handlers == logger_pkg3.handlers
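
The final assertion exercises hierarchical names: once 'rank0.pkg3' has been initialized, a child such as 'rank0.pkg3.subpkg' is returned without extra configuration and its records reach the parent's handlers through normal logging propagation. A simplified sketch of that early return in get_logger (an approximation, not the exact source):

# simplified sketch: child loggers of an already initialized logger are
# returned as-is instead of being configured again
logger = logging.getLogger(name)
for initialized_name in logger_initialized:
    if name.startswith(initialized_name):
        return logger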
Example 4
def test_print_log_logger(caplog):
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')

    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')

    with tempfile.NamedTemporaryFile() as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
        with open(f.name, 'r') as fin:
            log_text = fin.read()
            regex_time = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
            match = re.fullmatch(regex_time + r' - abc - INFO - welcome\n',
                                 log_text)
            assert match is not None
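
For context, a short usage sketch of how print_log dispatches on its logger argument (based on mmcv's documented behaviour; illustration only):

print_log('welcome')                             # no logger: falls back to print()
print_log('welcome', logger='silent')            # swallowed, nothing is emitted
print_log('welcome', logger='mmcv')              # a name: resolved via get_logger
print_log('welcome', logger=get_logger('abc'))   # an explicit logging.Logger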
Example 5
def test_print_log_logger(caplog):
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')

    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')

    # the file name cannot be used to open the file a second time on
    # Windows, so `delete` should be set to `False` and we need to remove it
    # manually; more details can be found at
    # https://github.com/open-mmlab/mmcv/pull/1077
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
        with open(f.name, 'r') as fin:
            log_text = fin.read()
            regex_time = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
            match = re.fullmatch(regex_time + r' - abc - INFO - welcome\n',
                                 log_text)
            assert match is not None
        # flushing and closing all handlers in order to remove `f.name`
        logging.shutdown()

    os.remove(f.name)
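
The delete=False / logging.shutdown() / os.remove() sequence is the Windows-safe cleanup from the PR linked above. A slightly more defensive variant (a sketch; the test above keeps the simpler form) puts the removal in a finally block so the file is deleted even when an assertion fails:

f = tempfile.NamedTemporaryFile(delete=False)
try:
    logger = get_logger('abc', log_file=f.name)
    print_log('welcome', logger=logger)
    # ... assertions as above ...
finally:
    # close all handlers before removing the file, as in the test above
    logging.shutdown()
    os.remove(f.name)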
Example 6
def main():
    args = parse_args()
    # touch the output json if it does not exist
    with open(args.json_out, 'a+'):
        pass
    # init distributed env first, since logger depends on the dist
    # info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, backend='nccl')
    rank, world_size = get_dist_info()

    logger = get_logger('root')

    # read info of checkpoints and config
    result_dict = dict()
    for model_family_dir in os.listdir(args.model_dir):
        for model in os.listdir(os.path.join(args.model_dir,
                                             model_family_dir)):
            # cpt: rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth
            # cfg: rpn_r50_fpn_1x_coco.py
            cfg = model.split('.')[0][:-18] + '.py'
            cfg_path = os.path.join('configs', model_family_dir, cfg)
            assert os.path.isfile(
                cfg_path), f'{cfg_path} is not valid config path'
            cpt_path = os.path.join(args.model_dir, model_family_dir, model)
            result_dict[cfg_path] = cpt_path
            assert cfg_path in modelzoo_dict, f'please fill the ' \
                                              f'performance of cfg: {cfg_path}'
    cfg = check_finish(result_dict, args.json_out)
    cpt = result_dict[cfg]
    try:
        cfg_name = cfg
        logger.info(f'evaluate {cfg}')
        record = dict(cfg=cfg, cpt=cpt)
        cfg = Config.fromfile(cfg)
        # cfg.data.test.ann_file = 'data/val_0_10.json'
        # set cudnn_benchmark
        if cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True
        cfg.model.pretrained = None
        if cfg.model.get('neck'):
            if isinstance(cfg.model.neck, list):
                for neck_cfg in cfg.model.neck:
                    if neck_cfg.get('rfp_backbone'):
                        if neck_cfg.rfp_backbone.get('pretrained'):
                            neck_cfg.rfp_backbone.pretrained = None
            elif cfg.model.neck.get('rfp_backbone'):
                if cfg.model.neck.rfp_backbone.get('pretrained'):
                    cfg.model.neck.rfp_backbone.pretrained = None

        # in case the test dataset is concatenated
        if isinstance(cfg.data.test, dict):
            cfg.data.test.test_mode = True
        elif isinstance(cfg.data.test, list):
            for ds_cfg in cfg.data.test:
                ds_cfg.test_mode = True

        # build the dataloader
        samples_per_gpu = 2  # hack: test with 2 images per gpu
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' with 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            samples_per_gpu=samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        # build the model and load checkpoint
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)

        checkpoint = load_checkpoint(model, cpt, map_location='cpu')
        # old versions did not save class info in checkpoints,
        # this workaround is for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, 'tmp')
        if rank == 0:
            ref_mAP_dict = modelzoo_dict[cfg_name]
            metrics = list(ref_mAP_dict.keys())
            metrics = [
                m if m != 'AR@1000' else 'proposal_fast' for m in metrics
            ]
            eval_results = dataset.evaluate(outputs, metrics)
            print(eval_results)
            for metric in metrics:
                if metric == 'proposal_fast':
                    ref_metric = modelzoo_dict[cfg_name]['AR@1000']
                    eval_metric = eval_results['AR@1000']
                else:
                    ref_metric = modelzoo_dict[cfg_name][metric]
                    eval_metric = eval_results[f'{metric}_mAP']
                if abs(ref_metric - eval_metric) > 0.003:
                    record['is_normal'] = False
            dump_dict(record, args.json_out)
            check_finish(result_dict, args.json_out)
    except Exception as e:
        logger.error(f'rank: {rank} test fail with error: {e}')
        record['terminate'] = True
        dump_dict(record, args.json_out)
        check_finish(result_dict, args.json_out)
        # hack: raise an error here to prevent the distributed job from hanging
        subprocess.call('xxx')
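
The helpers modelzoo_dict, dump_dict and check_finish are defined elsewhere in the script. The filename slicing near the top maps a checkpoint to its config; a worked illustration of that line, using the values from the comment in the code:

# '_20200218-5525fa2e' (underscore, date, dash, short hash) is 18 characters,
# so stripping the extension and the last 18 characters of the stem
# recovers the config name
model = 'rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth'
stem = model.split('.')[0]   # 'rpn_r50_fpn_1x_coco_20200218-5525fa2e'
cfg = stem[:-18] + '.py'     # 'rpn_r50_fpn_1x_coco.py'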
Example 7
def main():
    args = parse_args()

    assert args.eval or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (eval/show the '
         'results) with the argument "--eval"'
         ', "--show" or "--show-dir"')

    cfg = Config.fromfile(args.config)

    if cfg.get('USE_MMDET', False):
        from mmdet.apis import multi_gpu_test, single_gpu_test
        from mmdet.datasets import build_dataloader
        from mmdet.models import build_detector as build_model
        if 'detector' in cfg.model:
            cfg.model = cfg.model.detector
    elif cfg.get('USE_MMCLS', False):
        from mmtrack.apis import multi_gpu_test, single_gpu_test
        from mmtrack.datasets import build_dataloader
        from mmtrack.models import build_reid as build_model
        if 'reid' in cfg.model:
            cfg.model = cfg.model.reid
    else:
        from mmtrack.apis import multi_gpu_test, single_gpu_test
        from mmtrack.datasets import build_dataloader
        from mmtrack.models import build_model
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    logger = get_logger('SOTParamsSearcher', log_file=args.log)

    # build the model and load checkpoint
    if cfg.get('test_cfg', False):
        model = build_model(cfg.model,
                            train_cfg=cfg.train_cfg,
                            test_cfg=cfg.test_cfg)
    else:
        model = build_model(cfg.model)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    if args.checkpoint is not None:
        checkpoint = load_checkpoint(model,
                                     args.checkpoint,
                                     map_location='cpu')
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
    if not hasattr(model, 'CLASSES'):
        model.CLASSES = dataset.CLASSES

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)

    if args.checkpoint is not None and 'meta' in checkpoint \
            and 'hook_msgs' in checkpoint['meta'] \
            and 'best_score' in checkpoint['meta']['hook_msgs']:
        best_score = checkpoint['meta']['hook_msgs']['best_score']
    else:
        best_score = 0

    best_result = dict(success=best_score, norm_precision=0., precision=0.)
    best_params = dict(penalty_k=cfg.model.test_cfg.rpn.penalty_k,
                       lr=cfg.model.test_cfg.rpn.lr,
                       win_influ=cfg.model.test_cfg.rpn.window_influence)
    print_log(f'init best score as: {best_score}', logger)
    print_log(f'init best params as: {best_params}', logger)

    num_cases = len(args.penalty_k_range) * len(args.lr_range) * len(
        args.win_influ_range)
    case_count = 0

    for penalty_k in args.penalty_k_range:
        for lr in args.lr_range:
            for win_influ in args.win_influ_range:
                case_count += 1
                cfg.model.test_cfg.rpn.penalty_k = penalty_k
                cfg.model.test_cfg.rpn.lr = lr
                cfg.model.test_cfg.rpn.window_influence = win_influ
                print_log(f'-----------[{case_count}/{num_cases}]-----------',
                          logger)
                print_log(
                    f'penalty_k={penalty_k} lr={lr} win_influence={win_influ}',
                    logger)

                if not distributed:
                    outputs = single_gpu_test(
                        model,
                        data_loader,
                        args.show,
                        args.show_dir,
                        show_score_thr=args.show_score_thr)
                else:
                    outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                             args.gpu_collect)

                rank, _ = get_dist_info()
                if rank == 0:
                    kwargs = args.eval_options if args.eval_options else {}
                    if args.eval:
                        eval_kwargs = cfg.get('evaluation', {}).copy()
                        # hard-coded way to remove EvalHook args
                        eval_hook_args = [
                            'interval', 'tmpdir', 'start', 'gpu_collect',
                            'save_best', 'rule', 'by_epoch'
                        ]
                        for key in eval_hook_args:
                            eval_kwargs.pop(key, None)
                        eval_kwargs.update(dict(metric=args.eval, **kwargs))
                        eval_results = dataset.evaluate(outputs, **eval_kwargs)
                        # print(eval_results)
                        print_log(f'evaluation results: {eval_results}',
                                  logger)
                        print_log('------------------------------------------',
                                  logger)
                        if eval_results['success'] > best_result['success']:
                            best_result = eval_results
                            best_params['penalty_k'] = penalty_k
                            best_params['lr'] = lr
                            best_params['win_influ'] = win_influ

                        print_log(
                            'The current best evaluation results: '
                            f'{best_result}', logger)
                        print_log(f'The current best params: {best_params}',
                                  logger)

    print_log(
        'After parameter searching, the best evaluation results: '
        f'{best_result}', logger)
    print_log(f'After parameter searching, the best params: {best_params}',
              logger)
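
The triple loop over penalty_k, lr and window influence is a plain grid search. It could equivalently be flattened with itertools.product, as the next example does for its tracker parameters (a sketch using the same variable names as above):

from itertools import product

for case_count, (penalty_k, lr, win_influ) in enumerate(
        product(args.penalty_k_range, args.lr_range, args.win_influ_range),
        start=1):
    cfg.model.test_cfg.rpn.penalty_k = penalty_k
    cfg.model.test_cfg.rpn.lr = lr
    cfg.model.test_cfg.rpn.window_influence = win_influ
    # ... evaluation as above ...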
Example 8
def main():

    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if cfg.get('USE_MMDET', False):
        from mmdet.apis import multi_gpu_test, single_gpu_test
        from mmdet.models import build_detector as build_model
        from mmdet.datasets import build_dataloader
    else:
        from mmtrack.apis import multi_gpu_test, single_gpu_test
        from mmtrack.models import build_model
        from mmtrack.datasets import build_dataloader
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # cfg.model.pretrains = None
    if hasattr(cfg.model, 'detector'):
        cfg.model.detector.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    logger = get_logger('ParamsSearcher', log_file=args.log)
    # get all cases
    search_params = get_search_params(cfg.model.tracker, logger=logger)
    combinations = list(product(*search_params.values()))
    search_cfgs = []
    for c in combinations:
        search_cfg = dotty(cfg.model.tracker.copy())
        for i, k in enumerate(search_params.keys()):
            search_cfg[k] = c[i]
        search_cfgs.append(dict(search_cfg))
    print_log(f'Totally {len(search_cfgs)} cases.', logger)
    # init with the first one
    cfg.model.tracker = search_cfgs[0].copy()

    # build the model and load checkpoint
    if cfg.get('test_cfg', False):
        model = build_model(cfg.model,
                            train_cfg=cfg.train_cfg,
                            test_cfg=cfg.test_cfg)
    else:
        model = build_model(cfg.model)
    # We need to call `init_weights()` to load pretrained weights in the MOT task.
    model.init_weights()
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    if args.checkpoint is not None:
        checkpoint = load_checkpoint(model,
                                     args.checkpoint,
                                     map_location='cpu')
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
    if not hasattr(model, 'CLASSES'):
        model.CLASSES = dataset.CLASSES

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)

    print_log(f'Record {cfg.search_metrics}.', logger)
    for i, search_cfg in enumerate(search_cfgs):
        if not distributed:
            model.module.tracker = build_tracker(search_cfg)
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir)
        else:
            model.module.tracker = build_tracker(search_cfg)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
        rank, _ = get_dist_info()
        if rank == 0:
            if args.out:
                print(f'\nwriting results to {args.out}')
                mmcv.dump(outputs, args.out)
            kwargs = {} if args.eval_options is None else args.eval_options
            if args.format_only:
                dataset.format_results(outputs, **kwargs)
            if args.eval:
                eval_kwargs = cfg.get('evaluation', {}).copy()
                # hard-coded way to remove EvalHook args
                for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                    eval_kwargs.pop(key, None)
                eval_kwargs.update(dict(metric=args.eval, **kwargs))
                results = dataset.evaluate(outputs, **eval_kwargs)
                _records = []
                for k in cfg.search_metrics:
                    if isinstance(results[k], float):
                        _records.append(f'{(results[k]):.3f}')
                    else:
                        _records.append(f'{(results[k])}')
                print_log(f'{combinations[i]}: {_records}', logger)
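
The dotty wrapper is what lets each flattened search key be written back into the nested tracker config with a single dotted string before converting back to a plain dict, as the script does with dict(search_cfg). A small sketch (the keys are placeholders, not actual mmtrack tracker fields; dotty is assumed to come from the dotty_dict package):

from dotty_dict import dotty

tracker_cfg = dotty({'score_thr': 0.5, 'nms': {'iou_thr': 0.6}})
tracker_cfg['nms.iou_thr'] = 0.7   # nested assignment through a dotted key
nested = dict(tracker_cfg)         # back to a regular nested dict, as above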