Example #1
def main(args):
    assert args.model.endswith('.xml')

    # load config
    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.option
    eval_config = merge_configs(eval_config, args.options)

    assert eval_config, 'Please specify an eval operation with the argument "--eval"'

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    assert dataset.num_datasets == 1
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False
    )

    # build class mapping between model.classes and dataset.classes
    assert cfg.get('model_classes') is not None
    model_classes = dict(enumerate(cfg.model_classes))
    class_map = build_class_map(dataset.class_maps[0], model_classes)

    # load model
    ie_core = load_ie_core()
    model = ActionRecognizer(args.model, ie_core, class_map)

    # collect results
    outputs = collect_results(model, data_loader)

    # get metrics
    if eval_config:
        eval_res = dataset.evaluate(outputs, **eval_config)

        print('\nFinal metrics:')
        for name, val in eval_res.items():
            print(f'{name}: {val:.04f}')
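
Note: the `merge_configs` helper used above (and in the later examples) is not shown. A minimal sketch, assuming it performs a shallow merge in which non-empty values from the second dict override the first, could look like this; the actual implementation in the source codebase may differ.

def merge_configs(cfg1, cfg2):
    # Shallow merge: values from cfg2 override cfg1, but None values
    # (e.g. CLI arguments that were not passed) are ignored.
    cfg1 = {} if cfg1 is None else dict(cfg1)
    cfg2 = {} if cfg2 is None else cfg2
    for key, value in cfg2.items():
        if value is not None:
            cfg1[key] = value
    return cfg1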
Example #2
def main():
    # parse arguments
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # init logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()

    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config: {cfg.text}')

    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        logger.info('NNCF config: {}'.format(cfg.nncf_config))
        meta.update(get_nncf_metadata())

    # set random seeds
    cfg.seed = args.seed
    meta['seed'] = args.seed
    if cfg.get('seed'):
        logger.info(f'Set random seed to {cfg.seed}, deterministic: {args.deterministic}')
        set_random_seed(cfg.seed, deterministic=args.deterministic)

    # build datasets
    datasets = [build_dataset(cfg.data, 'train', dict(logger=logger))]
    logger.info(f'Train datasets:\n{str(datasets[0])}')

    if len(cfg.workflow) == 2:
        if not args.no_validate:
            warnings.warn('val workflow is duplicated with `--validate`, '
                          'it is recommended to use `--validate`. see '
                          'https://github.com/open-mmlab/mmaction2/pull/123')
        datasets.append(build_dataset(copy.deepcopy(cfg.data), 'val', dict(logger=logger)))
        logger.info(f'Val datasets:\n{str(datasets[-1])}')

    # filter dataset labels
    if cfg.get('classes'):
        datasets = [dataset.filter(cfg.classes) for dataset in datasets]

    # build model
    model = build_model(
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg,
        class_sizes=datasets[0].class_sizes,
        class_maps=datasets[0].class_maps
    )

    # define ignore layers
    ignore_prefixes = []
    if hasattr(cfg, 'reset_layer_prefixes') and isinstance(cfg.reset_layer_prefixes, (list, tuple)):
        ignore_prefixes += cfg.reset_layer_prefixes
    ignore_suffixes = ['num_batches_tracked']
    if hasattr(cfg, 'reset_layer_suffixes') and isinstance(cfg.reset_layer_suffixes, (list, tuple)):
        ignore_suffixes += cfg.reset_layer_suffixes

    # train model
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta,
        ignore_prefixes=tuple(ignore_prefixes),
        ignore_suffixes=tuple(ignore_suffixes)
    )
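
The `ignore_prefixes` / `ignore_suffixes` tuples are consumed inside `train_model`, which is not shown here. A plausible sketch of how such prefixes and suffixes could be used to drop matching parameters from a pretrained state dict before loading (the function name and exact behavior are assumptions, not the actual training code):

def filter_state_dict(state_dict, ignore_prefixes=(), ignore_suffixes=()):
    # Drop parameters whose names match an ignored prefix or suffix so that
    # they are re-initialized instead of being loaded from the checkpoint.
    return {
        name: tensor
        for name, tensor in state_dict.items()
        if not name.startswith(tuple(ignore_prefixes))
        and not name.endswith(tuple(ignore_suffixes))
    }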
Example #3
def main():
    parser = ArgumentParser()
    parser.add_argument('--config',
                        type=str,
                        required=True,
                        help='Test config file path')
    parser.add_argument('--checkpoint',
                        type=str,
                        required=True,
                        help='Checkpoint file')
    parser.add_argument('--data_dir',
                        type=str,
                        required=True,
                        help='The dir with dataset')
    parser.add_argument('--out_dir',
                        type=str,
                        required=True,
                        help='Output directory')
    parser.add_argument('--dataset',
                        type=str,
                        required=True,
                        help='Dataset name')
    parser.add_argument('--gpus',
                        default=1,
                        type=int,
                        help='GPU number used for annotating')
    parser.add_argument('--proc_per_gpu',
                        default=2,
                        type=int,
                        help='Number of processes per GPU')
    parser.add_argument('--mode',
                        choices=['train', 'val', 'test'],
                        default='train')
    args = parser.parse_args()

    assert exists(args.config)
    assert exists(args.checkpoint)
    assert exists(args.data_dir)

    cfg = Config.fromfile(args.config)
    cfg = update_config(cfg, args, trg_name=args.dataset)
    cfg = propagate_root_dir(cfg, args.data_dir)

    dataset = build_dataset(cfg.data, args.mode, dict(test_mode=True))
    data_pipeline = Compose(dataset.pipeline.transforms[1:])
    print('{} dataset:\n'.format(args.mode) + str(dataset))

    tasks = prepare_tasks(dataset, cfg.input_clip_length)
    print('Prepared tasks: {}'.format(sum([len(v) for v in tasks.values()])))

    if not exists(args.out_dir):
        makedirs(args.out_dir)

    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=False)

    batch_size = 4 * cfg.data.videos_per_gpu
    if args.gpus == 1:
        model = MMDataParallel(model, device_ids=[0])
        model.eval()

        process_tasks(tasks, dataset, model, args.out_dir, batch_size,
                      cfg.input_clip_length, data_pipeline)
    else:
        raise NotImplementedError
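
`Compose(dataset.pipeline.transforms[1:])` rebuilds the test pipeline without its first transform, which is typically the clip/frame loading step, so that frames already held in memory can be pushed through the remaining transforms. A toy illustration of the idea with stand-in transforms (not the actual mmaction `Compose`):

class Compose:
    # Minimal stand-in: chains callables over a results dict.
    def __init__(self, transforms):
        self.transforms = list(transforms)

    def __call__(self, results):
        for transform in self.transforms:
            results = transform(results)
        return results


def load_frames(results):  # stand-in for the disk-loading transform
    results['imgs'] = ['frame_0', 'frame_1']
    return results


def normalize(results):  # stand-in for resize/normalize/format steps
    results['imgs'] = [f'norm({img})' for img in results['imgs']]
    return results


full_pipeline = Compose([load_frames, normalize])
# Skip the loader to process frames that are already in memory:
partial_pipeline = Compose(full_pipeline.transforms[1:])
print(partial_pipeline({'imgs': ['in_memory_frame']}))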
Example #4
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.option
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # get rank
    rank, _ = get_dist_info()

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    if rank == 0:
        print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(dataset,
                                   videos_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_model(cfg.model,
                        train_cfg=None,
                        test_cfg=cfg.test_cfg,
                        class_sizes=dataset.class_sizes,
                        class_maps=dataset.class_maps)

    # nncf model wrapper
    if is_checkpoint_nncf(args.checkpoint) and not cfg.get('nncf_config'):
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v in nncf_part.items():
            cfg[k] = v

    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError(
                'Trying to test with NNCF compression a model snapshot that was NOT trained with NNCF'
            )
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        # load model weights
        load_checkpoint(model,
                        args.checkpoint,
                        map_location='cpu',
                        force_matching=True)
        if args.fuse_conv_bn:
            model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)

            print('\nFinal metrics:')
            for name, val in eval_res.items():
                if 'invalid_info' in name:
                    continue

                if isinstance(val, float):
                    print(f'{name}: {val:.04f}')
                elif isinstance(val, str):
                    print(f'{name}:\n{val}')
                else:
                    print(f'{name}: {val}')

            invalid_info = {
                name: val
                for name, val in eval_res.items() if 'invalid_info' in name
            }
            if len(invalid_info) > 0:
                assert args.out_invalid is not None and args.out_invalid != ''
                if os.path.exists(args.out_invalid):
                    shutil.rmtree(args.out_invalid)
                os.makedirs(args.out_invalid)

                for name, invalid_record in invalid_info.items():
                    out_invalid_dir = os.path.join(args.out_invalid, name)

                    item_gen = zip(invalid_record['ids'],
                                   invalid_record['conf'],
                                   invalid_record['pred'])
                    for invalid_idx, pred_conf, pred_label in item_gen:
                        record_info = dataset.get_info(invalid_idx)
                        gt_label = record_info['label']

                        if 'filename' in record_info:
                            src_data_path = record_info['filename']

                            # os.path.splitext is robust to multi-dot basenames
                            in_record_name, record_extension = os.path.splitext(
                                os.path.basename(src_data_path))
                            out_record_name = f'{in_record_name}_gt{gt_label}_pred{pred_label}_conf{pred_conf:.3f}'
                            trg_data_path = os.path.join(
                                out_invalid_dir,
                                f'{out_record_name}{record_extension}')

                            shutil.copyfile(src_data_path, trg_data_path)
                        else:
                            src_data_path = record_info['frame_dir']

                            in_record_name = os.path.basename(src_data_path)
                            out_record_name = f'{in_record_name}_gt{gt_label}_pred{pred_label}_conf{pred_conf:.3f}'
                            trg_data_path = os.path.join(
                                out_invalid_dir, out_record_name)
                            os.makedirs(trg_data_path)

                            start_frame_id = record_info[
                                'clip_start'] + dataset.start_index
                            end_frame_id = record_info[
                                'clip_end'] + dataset.start_index
                            for frame_id in range(start_frame_id,
                                                  end_frame_id):
                                img_name = f'{frame_id:05}.jpg'
                                shutil.copyfile(
                                    os.path.join(src_data_path, img_name),
                                    os.path.join(trg_data_path, img_name))
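
`parse_args` is not included for this test script. Based only on the attributes the code reads, a hypothetical parser might look roughly like the following; the real argument names, types and defaults may differ.

import argparse


def parse_args():
    # Hypothetical reconstruction inferred from the attributes accessed above.
    parser = argparse.ArgumentParser(description='Test an action recognition model')
    parser.add_argument('--config', type=str, required=True, help='test config file path')
    parser.add_argument('--checkpoint', type=str, required=True, help='checkpoint file')
    parser.add_argument('--data_dir', type=str, required=True, help='root dir of the dataset')
    parser.add_argument('--out', type=str, default=None, help='file to dump results to')
    parser.add_argument('--eval', type=str, nargs='+', default=None, help='evaluation metrics')
    parser.add_argument('--options', nargs='+', default=None, help='extra evaluation options')
    parser.add_argument('--out_invalid', type=str, default=None,
                        help='dir to dump misclassified samples to')
    parser.add_argument('--fuse_conv_bn', action='store_true', help='fuse conv and bn layers')
    parser.add_argument('--tmpdir', type=str, default=None,
                        help='tmp dir for multi-gpu result collection')
    parser.add_argument('--gpu_collect', action='store_true', help='collect results on gpu')
    parser.add_argument('--update_config', nargs='+', default=None, help='config overrides')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none')
    return parser.parse_args()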
Example #5
def main():
    parser = ArgumentParser()
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('--checkpoint', '-w', type=str, required=True)
    parser.add_argument('--dataset_name', '-n', type=str, required=True)
    parser.add_argument('--data_dir', '-d', type=str, required=True)
    parser.add_argument('--predictions', '-p', type=str, required=True)
    parser.add_argument('--movements', '-m', type=str, required=True)
    parser.add_argument('--keypoints', '-k', type=str, required=True)
    parser.add_argument('--out_annotation', '-o', type=str, required=True)
    args = parser.parse_args()

    assert exists(args.config)
    assert exists(args.checkpoint)
    assert exists(args.data_dir)
    assert exists(args.predictions)
    assert exists(args.movements)
    assert exists(args.keypoints)
    assert args.dataset_name is not None and args.dataset_name != ''
    assert args.out_annotation is not None and args.out_annotation != ''

    cfg = Config.fromfile(args.config)
    cfg = update_config(cfg, args, trg_name=args.dataset_name)
    cfg = propagate_root_dir(cfg, args.data_dir)

    dataset = build_dataset(cfg.data, 'train', dict(test_mode=True))
    data_pipeline = Compose(dataset.pipeline.transforms[1:])
    print('train dataset:\n' + str(dataset))

    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=False)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    annotation_path = join(args.data_dir, cfg.data.train.sources[0],
                           cfg.data.train.ann_file)
    records = load_annotation(annotation_path)
    predictions = load_distributed_data(args.predictions,
                                        parse_predictions_file, 'txt')
    movements = load_distributed_data(args.movements, parse_movements_file,
                                      'txt')
    hand_kpts = load_distributed_data(args.keypoints, parse_kpts_file, 'json')
    print('Loaded records: {}'.format(len(records)))

    invalid_stat = dict()
    all_candidates = []

    ignore_candidates = get_ignore_candidates(records, IGNORE_LABELS)
    all_candidates += ignore_candidates

    static_candidates, static_invalids = get_regular_candidates(
        records,
        predictions,
        movements,
        hand_kpts,
        cfg.data.output.length,
        False,
        STATIC_LABELS,
        NEGATIVE_LABEL,
        NO_MOTION_LABEL,
        min_score=0.9,
        min_length=4,
        max_distance=1)
    all_candidates += static_candidates
    invalid_stat = update_stat(invalid_stat, static_invalids)
    print('Static candidates: {}'.format(len(static_candidates)))

    if len(invalid_stat) > 0:
        print('Ignored records after static analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '),
                                       len(ignore_values)))

    dynamic_candidates, dynamic_invalids = get_regular_candidates(
        records,
        predictions,
        movements,
        hand_kpts,
        cfg.data.output.length,
        True,
        DYNAMIC_LABELS,
        NEGATIVE_LABEL,
        NO_MOTION_LABEL,
        min_score=0.9,
        min_length=4,
        max_distance=1)
    all_candidates += dynamic_candidates
    invalid_stat = update_stat(invalid_stat, dynamic_invalids)
    print('Dynamic candidates: {}'.format(len(dynamic_candidates)))

    if len(invalid_stat) > 0:
        print('Ignored records after dynamic analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '),
                                       len(ignore_values)))

    fixed_records, fix_stat = find_best_match(all_candidates, model, dataset,
                                              NEGATIVE_LABEL)
    invalid_stat = update_stat(invalid_stat, fix_stat)
    print('Final records: {}'.format(len(fixed_records)))

    if len(invalid_stat) > 0:
        print('Final ignored records:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '),
                                       len(ignore_values)))
            for ignored_record in ignore_values:
                print('      - {}'.format(ignored_record.path))

    dump_records(fixed_records, args.out_annotation)
    print('Fixed annotation has been stored at: {}'.format(
        args.out_annotation))
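
`update_stat` is one of the helpers not shown in this example. A minimal sketch, assuming it accumulates per-label lists of ignored records across the analysis stages:

def update_stat(total_stat, new_stat):
    # Merge per-label record lists without mutating the inputs; sketch only.
    merged = {label: list(records) for label, records in total_stat.items()}
    for label, records in new_stat.items():
        merged.setdefault(label, []).extend(records)
    return merged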
Example #6
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.option
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # get rank
    rank, _ = get_dist_info()

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    if rank == 0:
        print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False
    )

    # build the model and load checkpoint
    model = build_model(
        cfg.model,
        train_cfg=None,
        test_cfg=cfg.test_cfg,
        class_sizes=dataset.class_sizes,
        class_maps=dataset.class_maps
    )
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    # load model weights
    load_checkpoint(model, args.checkpoint, map_location='cpu', force_matching=True)

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)

    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)

            print('\nFinal metrics:')
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
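
For reference, `single_gpu_test` in mm*-style codebases is essentially an inference loop over the dataloader. A simplified sketch of the idea (the real implementation also reports progress and handles framework-specific details):

import torch


def simple_single_gpu_test(model, data_loader):
    # Run the wrapped model in test mode and gather per-sample results.
    model.eval()
    results = []
    with torch.no_grad():
        for data in data_loader:
            result = model(return_loss=False, **data)
            results.extend(result)
    return results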