def main():
    args = parse_args()
    assert args.show or args.show_dir, ('Please specify at least one '
                                        'operation (show the results / save '
                                        'the results) with the argument '
                                        '"--show" or "--show-dir".')

    cfg = Config.fromfile(args.config)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    distributed = False

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    model = MMDataParallel(model, device_ids=[0])
    test(model, data_loader, args.show, args.show_dir)
def generate_sample_dataloader(cfg, curr_dir, img_prefix='', ann_file=''):
    must_keys = ['img_norm_cfg', 'ori_filename', 'img_shape', 'ori_shape']
    test_pipeline = cfg.data.test.pipeline
    for key in must_keys:
        if test_pipeline[1].type == 'MultiRotateAugOCR':
            collect_pipeline = test_pipeline[1]['transforms'][-1]
        else:
            collect_pipeline = test_pipeline[-1]
        if 'meta_keys' not in collect_pipeline:
            continue
        collect_pipeline['meta_keys'].append(key)

    img_prefix = osp.join(curr_dir, img_prefix)
    ann_file = osp.join(curr_dir, ann_file)
    test = copy.deepcopy(cfg.data.test.datasets[0])
    test.img_prefix = img_prefix
    test.ann_file = ann_file
    cfg.data.workers_per_gpu = 0
    cfg.data.test.datasets = [test]
    dataset = build_dataset(cfg.data.test)

    loader_cfg = {
        **dict((k, cfg.data[k])
               for k in ['workers_per_gpu', 'samples_per_gpu']
               if k in cfg.data)
    }
    test_loader_cfg = {
        **loader_cfg,
        **dict(shuffle=False, drop_last=False),
        **cfg.data.get('test_dataloader', {})
    }

    data_loader = build_dataloader(dataset, **test_loader_cfg)

    return data_loader
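# A minimal smoke-test sketch for the helper above, assuming `osp`, `Config`
# and `generate_sample_dataloader` are available in this module. The config
# file name, image prefix and annotation file are hypothetical placeholders,
# not files shipped with this snippet.
def test_generate_sample_dataloader_smoke():
    curr_dir = osp.dirname(osp.abspath(__file__))
    cfg = Config.fromfile(
        osp.join(curr_dir, 'sample_recog_config.py'))  # hypothetical config
    data_loader = generate_sample_dataloader(
        cfg, curr_dir, img_prefix='imgs', ann_file='label.txt')
    # The loader should yield at least one batch for a non-empty toy dataset.
    assert len(data_loader) > 0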
def main():
    args = parse_args()

    # The following text-style escape strings are from the colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in the future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    if args.device == 'cpu':
        args.device = None
    cfg = Config.fromfile(args.model_config)

    # build the model
    if args.model_type == 'det':
        if args.backend == 'TensorRT':
            model = TensorRTDetector(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeDetector(args.model_file, cfg, 0)
    else:
        if args.backend == 'TensorRT':
            model = TensorRTRecognizer(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeRecognizer(args.model_file, cfg, 0)

    # build the dataloader
    samples_per_gpu = 1
    cfg = disable_text_recog_aug_test(cfg)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {}
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
def gene_sdmgr_model_dataloader(cfg, dirname, curr_dir, empty_img=False):
    json_obj = {
        'file_name': '1.jpg',
        'height': 348,
        'width': 348,
        'annotations': [{
            'box': [114.0, 19.0, 230.0, 19.0, 230.0, 1.0, 114.0, 1.0],
            'text': 'CHOEUN',
            'label': 1
        }]
    }
    ann_file = osp.join(dirname, 'test.txt')
    list_to_file(ann_file, [json.dumps(json_obj, ensure_ascii=False)])

    if not empty_img:
        img = np.ones((348, 348, 3), dtype=np.uint8)
        img_file = osp.join(dirname, '1.jpg')
        mmcv.imwrite(img, img_file)

    test = copy.deepcopy(cfg.data.test)
    test.ann_file = ann_file
    test.img_prefix = dirname
    test.dict_file = osp.join(curr_dir, 'data/kie_toy_dataset/dict.txt')
    cfg.data.workers_per_gpu = 1
    cfg.data.test = test
    cfg.model.class_list = osp.join(curr_dir,
                                    'data/kie_toy_dataset/class_list.txt')

    dataset = build_dataset(cfg.data.test)

    loader_cfg = {
        **dict((k, cfg.data[k])
               for k in ['workers_per_gpu', 'samples_per_gpu']
               if k in cfg.data)
    }
    test_loader_cfg = {
        **loader_cfg,
        **dict(shuffle=False, drop_last=False),
        **cfg.data.get('test_dataloader', {})
    }

    data_loader = build_dataloader(dataset, **test_loader_cfg)
    model = build_model(cfg)

    return model, data_loader
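# A minimal smoke-test sketch for the SDMGR helper above. The config file
# name is a hypothetical placeholder; `tmp_path` is the standard pytest
# fixture providing a temporary directory, and `osp`/`Config` are assumed to
# be imported as in the surrounding module.
def test_gene_sdmgr_model_dataloader_smoke(tmp_path):
    curr_dir = osp.dirname(osp.abspath(__file__))
    cfg = Config.fromfile(
        osp.join(curr_dir, 'sdmgr_toy_config.py'))  # hypothetical config
    model, data_loader = gene_sdmgr_model_dataloader(
        cfg, str(tmp_path), curr_dir)
    # One annotation line was written, so at least one batch is expected.
    assert model is not None and len(data_loader) >= 1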
def main():
    args = parse_args()

    if args.device == 'cpu':
        args.device = None
    cfg = Config.fromfile(args.model_config)

    # build the model
    if args.model_type == 'det':
        if args.backend == 'TensorRT':
            model = TensorRTDetector(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeDetector(args.model_file, cfg, 0)
    else:
        if args.backend == 'TensorRT':
            model = TensorRTRecognizer(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeRecognizer(args.model_file, cfg, 0)

    # build the dataloader
    samples_per_gpu = 1
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {}
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
def main():
    args = parse_args()

    assert (
        args.out or args.eval or args.format_only or args.show
        or args.show_dir), (
            'Please specify at least one operation (save/eval/format/show the '
            'results / save the results) with the argument "--out", "--eval"'
            ', "--format-only", "--show" or "--show-dir".')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified.')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if cfg.model.get('pretrained'):
        cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        samples_per_gpu = (cfg.data.get('test_dataloader', {})).get(
            'samples_per_gpu', cfg.data.get('samples_per_gpu', 1))
        if samples_per_gpu > 1:
            # Support batch_size > 1 in test for text recognition by
            # disabling MultiRotateAugOCR since it is useless in most cases
            cfg = disable_text_recog_aug_test(cfg)
            if cfg.data.test.get('pipeline', None) is not None:
                # Replace 'ImageToTensor' with 'DefaultFormatBundle'
                cfg.data.test.pipeline = replace_ImageToTensor(
                    cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    # step 1: give default values and override (if exist) from cfg.data
    loader_cfg = {
        **dict(seed=cfg.get('seed'), drop_last=False, dist=distributed),
        **({} if torch.__version__ != 'parrots' else dict(
            prefetch_num=2,
            pin_memory=False,
        )),
        **dict((k, cfg.data[k]) for k in [
            'workers_per_gpu',
            'seed',
            'prefetch_num',
            'pin_memory',
            'persistent_workers',
        ] if k in cfg.data)
    }
    test_loader_cfg = {
        **loader_cfg,
        **dict(shuffle=False, drop_last=False),
        **cfg.data.get('test_dataloader', {}),
        **dict(samples_per_gpu=samples_per_gpu)
    }

    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    model = revert_sync_batchnorm(model)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        is_kie = cfg.model.type in ['SDMGR']
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, is_kie, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
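# Hypothetical command-line usage of the script above. The flag names are
# inferred from the `args` attributes accessed in `main()` and follow the
# common OpenMMLab test-script convention; paths and the metric name are
# placeholders.
#
#   # single-GPU evaluation
#   python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --eval ${METRIC}
#
#   # multi-GPU evaluation with the PyTorch distributed launcher
#   python -m torch.distributed.launch --nproc_per_node=${GPU_NUM} \
#       tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --eval ${METRIC} \
#       --launcher pytorch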