def _do_evaluate(self, runner):
    """perform evaluation and save ckpt."""
    # Synchronization of BatchNorm's buffer (running_mean
    # and running_var) is not supported in the DDP of pytorch,
    # which may cause the inconsistent performance of models in
    # different ranks, so we broadcast BatchNorm's buffers
    # of rank 0 to other ranks to avoid this.
    if self.broadcast_bn_buffer:
        model = runner.model
        for name, module in model.named_modules():
            if isinstance(module, _BatchNorm) and module.track_running_stats:
                dist.broadcast(module.running_var, 0)
                dist.broadcast(module.running_mean, 0)

    if not self._should_evaluate(runner):
        return

    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')

    from mmdet.apis import multi_gpu_test
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect,
        not_encode_mask=self.not_encode_mask)
    if runner.rank == 0:
        print('\n')
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        if self.save_best:
            self._save_ckpt(runner, key_score)
def after_train_epoch(self, runner):
    if not self.evaluation_flag(runner):
        return

    from mmdet.apis import multi_gpu_test
    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        key_score = self.evaluate(runner, results)
        if self.save_best:
            best_score = runner.meta['hook_msgs'].get(
                'best_score', self.init_value_map[self.rule])
            if self.compare_func(key_score, best_score):
                best_score = key_score
                runner.meta['hook_msgs']['best_score'] = best_score
                last_ckpt = runner.meta['hook_msgs']['last_ckpt']
                runner.meta['hook_msgs']['best_ckpt'] = last_ckpt
                mmcv.symlink(
                    last_ckpt,
                    osp.join(runner.work_dir,
                             f'best_{self.key_indicator}.pth'))
                self.logger.info(
                    f'Now best checkpoint is {last_ckpt}. '
                    f'Best {self.key_indicator} is {best_score:0.4f}')
def before_run(self, runner):
    from mmdet.apis import multi_gpu_test
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=osp.join(runner.work_dir, '.eval_hook'),
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        self.evaluate(runner, results)
def after_train_epoch(self, runner):
    if not self.every_n_epochs(runner, self.interval):
        return
    from mmdet.apis import multi_gpu_test
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=osp.join(runner.work_dir, '.eval_hook'),
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        self.evaluate(runner, results)
def after_train_epoch(self, runner):
    if not self.evaluation_flag(runner):
        return
    from mmdet.apis import multi_gpu_test
    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        self.evaluate(runner, results)
def after_train_epoch(self, runner):
    # Synchronization of BatchNorm's buffer (running_mean
    # and running_var) is not supported in the DDP of pytorch,
    # which may cause the inconsistent performance of models in
    # different ranks, so we broadcast BatchNorm's buffers
    # of rank 0 to other ranks to avoid this.
    if self.broadcast_bn_buffer:
        model = runner.model
        for name, module in model.named_modules():
            if isinstance(module, _BatchNorm) and module.track_running_stats:
                dist.broadcast(module.running_var, 0)
                dist.broadcast(module.running_mean, 0)

    if not self.evaluation_flag(runner):
        return

    from mmdet.apis import multi_gpu_test
    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        key_score = self.evaluate(runner, results)
        if self.save_best:
            best_score = runner.meta['hook_msgs'].get(
                'best_score', self.init_value_map[self.rule])
            if self.compare_func(key_score, best_score):
                best_score = key_score
                runner.meta['hook_msgs']['best_score'] = best_score
                last_ckpt = runner.meta['hook_msgs']['last_ckpt']
                runner.meta['hook_msgs']['best_ckpt'] = last_ckpt
                mmcv.symlink(
                    last_ckpt,
                    osp.join(runner.work_dir,
                             f'best_{self.key_indicator}.pth'))
                self.logger.info(
                    f'Now best checkpoint is {last_ckpt}. '
                    f'Best {self.key_indicator} is {best_score:0.4f}')
def after_train_iter(self, runner):
    if self.by_epoch or not self.every_n_iters(runner, self.interval):
        return
    if self.broadcast_bn_buffer:
        self._broadcast_bn_buffer(runner)

    from mmdet.apis import multi_gpu_test
    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect)
    if runner.rank == 0:
        print('\n')
        key_score = self.evaluate(runner, results)
        if self.save_best:
            self.save_best_checkpoint(runner, key_score)
def _do_evaluate(self, runner):
    """perform evaluation and save ckpt."""
    # Synchronization of BatchNorm's buffer (running_mean
    # and running_var) is not supported in the DDP of pytorch,
    # which may cause the inconsistent performance of models in
    # different ranks, so we broadcast BatchNorm's buffers
    # of rank 0 to other ranks to avoid this.
    if self.broadcast_bn_buffer:
        model = runner.model
        for name, module in model.named_modules():
            if isinstance(module, _BatchNorm) and module.track_running_stats:
                dist.broadcast(module.running_var, 0)
                dist.broadcast(module.running_mean, 0)

    if not self._should_evaluate(runner):
        return

    tmpdir = self.tmpdir
    if tmpdir is None:
        tmpdir = osp.join(runner.work_dir, '.eval_hook')

    from mmdet.apis import multi_gpu_test

    # Changed results to self.results so that MMDetWandbHook can access
    # the evaluation results and log them to wandb.
    results = multi_gpu_test(
        runner.model,
        self.dataloader,
        tmpdir=tmpdir,
        gpu_collect=self.gpu_collect)
    self.latest_results = results
    if runner.rank == 0:
        print('\n')
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # the key_score may be `None` so it needs to skip
        # the action to save the best checkpoint
        if self.save_best and key_score:
            self._save_ckpt(runner, key_score)
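# For context: a `_do_evaluate` hook like the ones above is normally driven by
# the `evaluation` field of an MMDetection config rather than constructed by
# hand. A minimal sketch, assuming MMDetection 2.x conventions; the interval
# and metric values below are illustrative placeholders, not taken from the
# snippets above.
evaluation = dict(
    interval=1,         # run evaluation every epoch (or every N iters)
    metric='bbox',      # dataset-specific metric name
    save_best='auto',   # track and keep the best checkpoint
    gpu_collect=False)  # collect distributed results via a tmpdir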
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)

    if cfg.get('USE_MMDET', False):
        from mmdet.apis import multi_gpu_test, single_gpu_test
        from mmdet.datasets import build_dataloader
        from mmdet.models import build_detector as build_model
        if 'detector' in cfg.model:
            cfg.model = cfg.model.detector
    elif cfg.get('TRAIN_REID', False):
        from mmdet.apis import multi_gpu_test, single_gpu_test
        from mmdet.datasets import build_dataloader
        from mmtrack.models import build_reid as build_model
        if 'reid' in cfg.model:
            cfg.model = cfg.model.reid
    else:
        from mmtrack.apis import multi_gpu_test, single_gpu_test
        from mmtrack.datasets import build_dataloader
        from mmtrack.models import build_model

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # cfg.model.pretrains = None
    if hasattr(cfg.model, 'detector'):
        cfg.model.detector.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    if cfg.get('test_cfg', False):
        model = build_model(
            cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    else:
        model = build_model(cfg.model)
    # We need to call `init_weights()` to load pretrained weights in the MOT
    # task.
    model.init_weights()
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    if args.checkpoint is not None:
        checkpoint = load_checkpoint(model, args.checkpoint,
                                     map_location='cpu')
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
    if not hasattr(model, 'CLASSES'):
        model.CLASSES = dataset.CLASSES

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(
            model,
            data_loader,
            args.show,
            args.show_dir,
            show_score_thr=args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            eval_hook_args = [
                'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                'rule', 'by_epoch'
            ]
            for key in eval_hook_args:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
def main():
    args = parse_args()

    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "--show-dir"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)

            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model, args.checkpoint,
                                         map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        # note: os.path has no makedirs; use os.makedirs
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation '
                                      'is supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)

    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # Count the total number of BN channels that are pruning candidates.
    model.cuda()
    total = 0
    for m in model.backbone.modules():
        if isinstance(m, nn.BatchNorm2d):
            total += m.weight.data.shape[0]

    # Determine the global pruning threshold.
    bn = torch.zeros(total)
    index = 0
    for m in model.backbone.modules():
        if isinstance(m, nn.BatchNorm2d):
            size = m.weight.data.shape[0]
            bn[index:(index + size)] = m.weight.data.abs().clone()
            index += size

    # Sort the BN scale factors by magnitude.
    y, i = torch.sort(bn)
    thre_index = int(total * args.percent)
    # The global pruning threshold.
    thre = y[thre_index].cuda()

    # ------------------------------ pre-pruning ------------------------------ #
    pruned = 0
    cfg_ori = []
    # NOTE: `cfg` is re-bound here from the mmcv Config object to the
    # per-layer channel list used to build the pruned backbone below.
    cfg = []
    cfg_mask = []
    model_backbone = list(model.backbone.modules())
    for layer_id, m in enumerate(model_backbone):
        if isinstance(m, nn.BatchNorm2d):
            weight_copy = m.weight.data.abs().clone()
            if isinstance(model_backbone[layer_id + 1], channel_selection):
                mask = torch.ones(weight_copy.shape[0]).cuda()
            else:
                # mask of channels to keep
                mask = weight_copy.gt(thre).float().cuda()
            pruned = pruned + mask.shape[0] - torch.sum(mask)
            # m.weight.data.mul_(mask)
            # m.bias.data.mul_(mask)
            cfg.append(int(torch.sum(mask)))
            cfg_ori.append(mask.shape[0])
            cfg_mask.append(mask.clone())
            print(
                'layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'
                .format(layer_id, mask.shape[0], int(torch.sum(mask))))
    pruned_ratio = pruned / total
    print('pruning ratio:')
    print(pruned_ratio)

    print('Pre-processing Successful!')
    print('cfg:')
    print(cfg)

    # ---------------------------- actual pruning ----------------------------- #
    # The last layer of each stage is not pruned.
    newmodel = copy.deepcopy(model)
    newmodel.backbone = PResNet(
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        cfg=cfg)
    newmodel.cuda()
    # print(newmodel.backbone)

    num_parameters = sum(
        [param.nelement() for param in newmodel.parameters()])
    savepath = os.path.join(args.save_path, "prune.txt")
    # with open(savepath, "w") as fp:
    #     fp.write("Configuration: \n" + str(cfg) + "\n")
    #     fp.write("Number of parameters: \n" + str(num_parameters) + "\n")
    #     fp.write("Test accuracy: \n" + str(acc))

    old_modules = list(model.backbone.modules())
    new_modules = list(newmodel.backbone.modules())
    layer_id_in_cfg = 0
    start_mask = torch.ones(3)
    end_mask = cfg_mask[layer_id_in_cfg]
    conv_count = 0
    # downsample_conv_list = [17, 48, 88, 299]
    downsample_conv_list = [17, 49, 90, 302]

    for layer_id, m0 in enumerate(old_modules):
        # m0 = old_modules[layer_id]
        m1 = new_modules[layer_id]
        if isinstance(m0, nn.BatchNorm2d):
            idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
            if idx1.size == 1:
                idx1 = np.resize(idx1, (1, ))
            if isinstance(old_modules[layer_id + 1], channel_selection):
                # If the next layer is the channel selection layer, then the
                # current batchnorm 2d layer won't be pruned.
                m1.weight.data = m0.weight.data.clone()
                m1.bias.data = m0.bias.data.clone()
                m1.running_mean = m0.running_mean.clone()
                m1.running_var = m0.running_var.clone()

                # We need to set the channel selection layer.
                m2 = new_modules[layer_id + 1]
                m2.indexes.data.zero_()
                m2.indexes.data[idx1.tolist()] = 1.0

                layer_id_in_cfg += 1
                start_mask = end_mask.clone()
                if layer_id_in_cfg < len(cfg_mask):
                    end_mask = cfg_mask[layer_id_in_cfg]
            else:
                m1.weight.data = m0.weight.data[idx1.tolist()].clone()
                m1.bias.data = m0.bias.data[idx1.tolist()].clone()
                m1.running_mean = m0.running_mean[idx1.tolist()].clone()
                m1.running_var = m0.running_var[idx1.tolist()].clone()
                layer_id_in_cfg += 1
                start_mask = end_mask.clone()
                if layer_id_in_cfg < len(cfg_mask):  # do not change in Final FC
                    end_mask = cfg_mask[layer_id_in_cfg]
        elif isinstance(m0, nn.Conv2d):
            if conv_count == 0:
                m1.weight.data = m0.weight.data.clone()
                conv_count += 1
                continue
            if layer_id in downsample_conv_list:
                # We need to consider the case where there are downsampling
                # convolutions. For these convolutions, we just copy the
                # weights.
                m1.weight.data = m0.weight.data.clone()
                continue
            if isinstance(old_modules[layer_id + 1], nn.BatchNorm2d):
                # This covers the convolutions in the residual block.
                # The convolutions are either after the channel selection
                # layer or after the batch normalization layer.
                idx0 = np.squeeze(
                    np.argwhere(np.asarray(start_mask.cpu().numpy())))
                idx1 = np.squeeze(
                    np.argwhere(np.asarray(end_mask.cpu().numpy())))
                print('In shape: {:d}, Out shape {:d}.'.format(
                    idx0.size, idx1.size))
                if idx0.size == 1:
                    idx0 = np.resize(idx0, (1, ))
                if idx1.size == 1:
                    idx1 = np.resize(idx1, (1, ))
                w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()

                # If the current convolution is not the last convolution in
                # the residual block, then we can change the number of output
                # channels. Currently we use `conv_count` to detect whether it
                # is such convolution.
                # if conv_count % 3 != 0:
                w1 = w1[idx1.tolist(), :, :, :].clone()
                m1.weight.data = w1.clone()
                conv_count += 1
                continue
        elif isinstance(m0, nn.Linear):
            idx0 = np.squeeze(
                np.argwhere(np.asarray(start_mask.cpu().numpy())))
            if idx0.size == 1:
                idx0 = np.resize(idx0, (1, ))
            m1.weight.data = m0.weight.data[:, idx0].clone()
            m1.bias.data = m0.bias.data.clone()

    # torch.save({'cfg': cfg, 'state_dict': newmodel.state_dict()},
    #            os.path.join(args.save_path, 'pruned.pth.tar'))
    # torch.save(newmodel.state_dict(),
    #            os.path.join(args.save_path, 'pruned.pth'))
    print(newmodel)
    torch.save(newmodel, os.path.join(args.save_path, 'pruned.pth'))
    # print(newmodel)

    if not distributed:
        newmodel = MMDataParallel(newmodel, device_ids=[0])
        # NOTE: the original assigned the test results back to `newmodel`,
        # which would leave `outputs` undefined below; assign to `outputs`.
        outputs = single_gpu_test(newmodel, data_loader, args.show)
    else:
        newmodel = MMDistributedDataParallel(
            newmodel.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(newmodel, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
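# A minimal, self-contained sketch of the global BN-gamma thresholding idea
# used in the pruning script above (network-slimming style). The toy model and
# the 30% prune ratio are assumptions for illustration only.
import torch
import torch.nn as nn

toy = nn.Sequential(
    nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU(),
    nn.Conv2d(16, 32, 3), nn.BatchNorm2d(32), nn.ReLU())

# Gather all BN scale factors (gamma) into a single vector.
gammas = torch.cat([
    m.weight.data.abs().flatten()
    for m in toy.modules() if isinstance(m, nn.BatchNorm2d)
])

# Global threshold: prune the smallest 30% of channels across all BN layers.
percent = 0.3
threshold = torch.sort(gammas).values[int(gammas.numel() * percent)]

for m in toy.modules():
    if isinstance(m, nn.BatchNorm2d):
        keep = m.weight.data.abs().gt(threshold)
        print(f'keep {int(keep.sum())}/{keep.numel()} channels')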
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    # read the config file
    cfg = Config.fromfile(args.config)
    # if --cfg-options is passed on the command line, merge it into the config
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        # runs when cfg.data.test is a dict; sets test_mode on a config of
        # the same form as coco_detection.py
        cfg.data.test.test_mode = True
    elif isinstance(cfg.data.test, list):
        # runs when cfg.data.test is a list
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)

    ########################################################################
    # Convert the CCTV video into frames, save them to a folder, and point
    # cfg.data.test at that folder.
    video = mmcv.VideoReader(
        "/home/minjae/mjseong/mmdetection/data/test.avi")
    print(len(video))  # get the total frame number
    print(video.width, video.height, video.resolution, video.fps)
    video.cvt2frames(
        "/home/minjae/mjseong/mmdetection/data/KRRI_Video_cvt2frames",
        start=0,
        max_num=10000)
    cfg.data.test.img_prefix = "data_root" + \
        "/home/minjae/mjseong/mmdetection/data/KRRI_Video_cvt2frames/"
    # cfg.data.test.ann_file =
    ########################################################################

    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))

    ########################################################################
    # Convert the box-annotated test result images back into a video.
    mmcv.frames2video(
        "/home/minjae/mjseong/mmdetection/data/KRRI_Video_cvt2frames",
        "/home/minjae/mjseong/mmdetection/data/test.avi")
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.update_config:
        cfg.merge_from_dict(args.update_config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    # nncf model wrapper
    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError(
                'Trying to make testing with NNCF compression a model '
                'snapshot that was NOT trained with NNCF')
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
        checkpoint = torch.load(args.checkpoint, map_location=None)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(model, args.checkpoint,
                                     map_location='cpu')
        if args.fuse_conv_bn:
            # TODO: FIXME: should it be inside this 'else' branch???
            from tools.fuse_conv_bn import fuse_module
            model = fuse_module(model)

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if torch.cuda.is_available():
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir, args.show_score_thr)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
    else:
        model = MMDataCPU(model)
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = cfg.get('evaluation', {})
        kwargs.pop('interval', None)
        kwargs.pop('gpu_collect', None)
        kwargs.update({} if args.options is None else args.options)
        kwargs['metric'] = args.eval
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, **kwargs)
def main():
    args = parse_args()
    print('#' * 100)
    print(args)
    print('#' * 100)

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    ######################################################################
    print(cfg.pretty_text)
    ######################################################################

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    ############################################################
    if args.eval_options \
            and 'load_results' in args.eval_options.keys() \
            and args.eval_options['load_results']:
        import pickle as pkl
        with open(str(args.out), 'rb') as f:
            outputs = pkl.load(f)
    else:
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir, args.show_score_thr)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
    ############################################################

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                eval_kwargs.pop(key, None)
            ######################################
            # from mmdet.utils import collect_env, get_root_logger
            # import os.path as osp
            # import time
            # timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
            # log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
            # logger = get_root_logger(log_file=log_file,
            #                          log_level=cfg.log_level)
            # eval_kwargs['logger'] = True
            # much of the information below comes from kwargs,
            # i.e. from eval_options
            work_dir = os.path.split(str(args.out))[0]
            if args.eval_options \
                    and 'eval_results_path' in args.eval_options.keys():
                eval_results_path = os.path.split(
                    args.eval_options['eval_results_path'])[1]
                eval_results_path = os.path.join(work_dir, eval_results_path)
            else:
                eval_results_path = os.path.join(work_dir, 'eval_results.txt')
            print('#' * 80, '\n',
                  'EVALUATE RESULTS PATH: %s\n' % eval_results_path,
                  '#' * 80, '\n')
            kwargs = {}
            # eval_kwargs['classwise'] = True
            # eval_kwargs['proposal_nums'] = (100, 300, 1000)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            s = str(dataset.evaluate(outputs, **eval_kwargs))
            with open(eval_results_path, 'wt+') as f:
                f.write(str(s))
            print(s)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
def main():
    args = parse_args()

    # assert args.out or args.eval or args.format_only or args.show \
    #     or args.show_dir, \
    #     ('Please specify at least one operation (save/eval/format/show the '
    #      'results / save the results) with the argument "--out", "--eval"'
    #      ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # test data_loader
    # for i, item in enumerate(data_loader):
    #     print(item)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    checkpoint_folder = args.checkpoint_folder
    check_list = os.listdir(checkpoint_folder)
    check_list = [i for i in check_list if i.split('.')[1] == 'pth']
    pickle_dir = 'pickle_dir/' + checkpoint_folder
    if not os.path.exists(pickle_dir):
        os.makedirs(pickle_dir)

    for checkpt in check_list:
        checkpt_name = checkpt.split('.')[0]
        checkpt_full_path = os.path.join(checkpoint_folder, checkpt)
        checkpoint = load_checkpoint(model, checkpt_full_path,
                                     map_location='cpu')
        if args.fuse_conv_bn:
            model = fuse_module(model)
        # old versions did not save class info in checkpoints, this workaround
        # is for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir, args.show_score_thr)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)

        rank, _ = get_dist_info()
        if rank == 0:
            if args.checkpoint_folder:
                print(f'\nwriting results to {checkpt}')
                pickle_name = pickle_dir + '/' + checkpt_name + '.pkl'
                mmcv.dump(outputs, pickle_name)
            kwargs = {} if args.options is None else args.options
            if args.format_only:
                dataset.format_results(outputs, **kwargs)
            if args.eval:
                # dataset.evaluate(outputs, args.eval, **kwargs)
                polyp_evaluate(outputs)
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    # dataset.load_query()
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)

        ############## evaluate fairmot results #################
        # fairmot_file = '/raid/yy1/data/MOT/MOT17/images/results/MOT17_val_jde_half_dla34_det/det_results.pkl'
        # with open(fairmot_file, 'rb') as fid:
        #     outputs = pickle.load(fid)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))

            # Re-evaluate after dropping detections below a score threshold.
            # Note this assumes each per-image result array is sorted by
            # descending score, since the inner loop breaks at the first
            # low-scoring detection.
            for thresh in [0.1, 0.2, 0.3, 0.4, 0.5]:
                num_dets = 0
                tmp_outputs = list()
                for to in outputs:
                    for j in range(to[0].shape[0]):
                        if to[0][j, 4] < thresh:
                            break
                    tmp_outputs.append([to[0][:j]])
                    num_dets += j
                print('thresh {:f}, num of dets {:d}'.format(
                    thresh, num_dets))
                print(dataset.evaluate(tmp_outputs, **eval_kwargs))
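# A small, self-contained illustration of the score-threshold filtering used
# above. The toy arrays and the 0.3 threshold are assumptions; unlike the loop
# above, the boolean mask below does not require the detections to be sorted
# by score.
import numpy as np

toy_outputs = [
    [np.array([[0, 0, 10, 10, 0.9], [1, 1, 8, 8, 0.4], [2, 2, 5, 5, 0.1]])],
    [np.array([[3, 3, 9, 9, 0.7], [4, 4, 6, 6, 0.2]])],
]
thresh = 0.3
filtered = [[dets[0][dets[0][:, 4] >= thresh]] for dets in toy_outputs]
print([f[0].shape[0] for f in filtered])  # -> [2, 1]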
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    # if args.eval and args.format_only:
    #     raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir with timestamp tag
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    cfg.work_dir = osp.dirname(args.checkpoint)
    if args.out is None:
        args.out = osp.join(
            cfg.work_dir,
            '{}_{}'.format(timestamp,
                           osp.basename(args.checkpoint).replace('pth',
                                                                 'pkl')))
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    log_file = osp.join(cfg.work_dir, '{}_test.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log some basic info
    logger.info('Command-line argument:\n{}'.format(' '.join(sys.argv[1:])))
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if args.use_cache and osp.exists(args.out):
        print('\nreuse results from {}'.format(args.out))
        outputs = mmcv.load(args.out)
    else:
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            save_dir = osp.join(cfg.work_dir,
                                osp.basename(args.out).split('.')[0] + '_out')
            print('\nsaving results to {}'.format(save_dir))
            kwargs['data_loader'] = data_loader
            kwargs['save_dir'] = save_dir
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            kwargs['logger'] = logger
            dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.ann_file is not None:
        cfg.data.test.ann_file = args.ann_file
    if args.img_prefix is not None:
        cfg.data.test.img_prefix = args.img_prefix

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    models = []
    print(f"checkpoints: {args.checkpoint}")
    for checkpoint in args.checkpoint:
        model = build_detector(cfg.model, train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, checkpoint, map_location='cpu')
        models.append(model)
    model = EnsembleHTC(models)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
def main(): args = parse_args() # touch the output json if not exist with open(args.json_out, 'a+'): pass # init distributed env first, since logger depends on the dist # info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, backend='nccl') rank, world_size = get_dist_info() logger = get_logger('root') # read info of checkpoints and config result_dict = dict() for model_family_dir in os.listdir(args.model_dir): for model in os.listdir(os.path.join(args.model_dir, model_family_dir)): # cpt: rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth # cfg: rpn_r50_fpn_1x_coco.py cfg = model.split('.')[0][:-18] + '.py' cfg_path = os.path.join('configs', model_family_dir, cfg) assert os.path.isfile( cfg_path), f'{cfg_path} is not valid config path' cpt_path = os.path.join(args.model_dir, model_family_dir, model) result_dict[cfg_path] = cpt_path assert cfg_path in modelzoo_dict, f'please fill the ' \ f'performance of cfg: {cfg_path}' cfg = check_finish(result_dict, args.json_out) cpt = result_dict[cfg] try: cfg_name = cfg logger.info(f'evaluate {cfg}') record = dict(cfg=cfg, cpt=cpt) cfg = Config.fromfile(cfg) # cfg.data.test.ann_file = 'data/val_0_10.json' # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None if cfg.model.get('neck'): if isinstance(cfg.model.neck, list): for neck_cfg in cfg.model.neck: if neck_cfg.get('rfp_backbone'): if neck_cfg.rfp_backbone.get('pretrained'): neck_cfg.rfp_backbone.pretrained = None elif cfg.model.neck.get('rfp_backbone'): if cfg.model.neck.rfp_backbone.get('pretrained'): cfg.model.neck.rfp_backbone.pretrained = None # in case the test dataset is concatenated if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True # build the dataloader samples_per_gpu = 2 # hack test with 2 image per gpu if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor( cfg.data.test.pipeline) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader( dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, cpt, map_location='cpu') # old versions did not save class info in checkpoints, # this walkaround is for backward compatibility if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, 'tmp') if rank == 0: ref_mAP_dict = modelzoo_dict[cfg_name] metrics = list(ref_mAP_dict.keys()) metrics = [ m if m != 'AR@1000' else 'proposal_fast' for m in metrics ] eval_results = dataset.evaluate(outputs, metrics) print(eval_results) for metric in metrics: if metric == 'proposal_fast': ref_metric = modelzoo_dict[cfg_name]['AR@1000'] eval_metric = eval_results['AR@1000'] else: ref_metric = modelzoo_dict[cfg_name][metric] eval_metric = eval_results[f'{metric}_mAP'] if abs(ref_metric - eval_metric) > 
0.003: record['is_normal'] = False dump_dict(record, args.json_out) check_finish(result_dict, args.json_out) except Exception as e: logger.error(f'rank: {rank} test failed with error: {e}') record['terminate'] = True dump_dict(record, args.json_out) check_finish(result_dict, args.json_out) # hack: call a non-existent command to raise an error and prevent the process from hanging subprocess.call('xxx')
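# The regression check above flags a checkpoint as abnormal when an evaluated
# metric drifts more than 0.003 from its reference value in modelzoo_dict. A
# minimal standalone sketch of that comparison; the example entries and numbers
# below are illustrative, while the key naming (plain 'AR@1000' vs
# f'{metric}_mAP', with AR@1000 evaluated via 'proposal_fast') follows the script.
modelzoo_dict_example = {
    'configs/rpn/rpn_r50_fpn_1x_coco.py': {'AR@1000': 0.581},
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py': {'bbox': 0.365},
}


def check_against_reference(cfg_name, eval_results, reference, tolerance=0.003):
    """Return True if every reference metric is reproduced within tolerance."""
    for metric, ref_value in reference[cfg_name].items():
        key = metric if metric == 'AR@1000' else f'{metric}_mAP'
        if abs(ref_value - eval_results[key]) > tolerance:
            return False
    return True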
def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. if cfg.get('custom_imports', None): from mmcv.utils import import_modules_from_strings import_modules_from_strings(**cfg['custom_imports']) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None if cfg.model.get('neck'): if isinstance(cfg.model.neck, list): for neck_cfg in cfg.model.neck: if neck_cfg.get('rfp_backbone'): if neck_cfg.rfp_backbone.get('pretrained'): neck_cfg.rfp_backbone.pretrained = None elif cfg.model.neck.get('rfp_backbone'): if cfg.model.neck.rfp_backbone.get('pretrained'): cfg.model.neck.rfp_backbone.pretrained = None # in case the test dataset is concatenated if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_conv_bn(model) # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=[1]) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
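# The fuse_conv_bn(model) step above folds BatchNorm into the preceding
# convolution so the BN op disappears at inference time. A conceptual sketch of
# that folding using the standard conv+BN algebra; this is not mmcv's actual
# fuse_conv_bn implementation.
import torch
import torch.nn as nn


@torch.no_grad()
def fuse_conv_bn_pair(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Return a conv whose weights absorb the BN statistics and affine params."""
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding,
                      dilation=conv.dilation, groups=conv.groups, bias=True)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)  # per output channel
    fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))
    conv_bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.copy_((conv_bias - bn.running_mean) * scale + bn.bias)
    return fused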
def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # set multi-process settings setup_multi_processes(cfg) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True if 'pretrained' in cfg.model: cfg.model.pretrained = None elif 'init_cfg' in cfg.model.backbone: cfg.model.backbone.init_cfg = None if cfg.model.get('neck'): if isinstance(cfg.model.neck, list): for neck_cfg in cfg.model.neck: if neck_cfg.get('rfp_backbone'): if neck_cfg.rfp_backbone.get('pretrained'): neck_cfg.rfp_backbone.pretrained = None elif cfg.model.neck.get('rfp_backbone'): if cfg.model.neck.rfp_backbone.get('pretrained'): cfg.model.neck.rfp_backbone.pretrained = None # in case the test dataset is concatenated samples_per_gpu = 1 if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor( cfg.data.test.pipeline) elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True samples_per_gpu = max( [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) if samples_per_gpu > 1: for ds_cfg in cfg.data.test: ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids[0:1] warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' 'Because we only support single GPU mode in ' 'non-distributed testing. Use the first GPU ' 'in `gpu_ids` now.') else: cfg.gpu_ids = [args.gpu_id] # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) rank, _ = get_dist_info() # allows not to create if args.work_dir is not None and rank == 0: mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) json_file = osp.join(args.work_dir, f'eval_{timestamp}.json') # build the dataloader dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_conv_bn(model) # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=cfg.gpu_ids) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect) rank, _ = get_dist_info() if rank == 0: if args.out: print(f'\nwriting results to {args.out}') mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in [ 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'dynamic_intervals' ]: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) metric = dataset.evaluate(outputs, **eval_kwargs) print(metric) metric_dict = dict(config=args.config, metric=metric) if args.work_dir is not None and rank == 0: mmcv.dump(metric_dict, json_file)
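# multi_gpu_test above gathers per-rank results either over GPU (gpu_collect)
# or through a shared tmpdir on disk. A hedged sketch of the tmpdir-based
# collection pattern; collect_results_cpu here is an illustrative re-statement
# of the idea, not the exact mmdet/mmcv implementation.
import os.path as osp
import mmcv
import torch.distributed as dist


def collect_results_cpu(part_results, dataset_size, tmpdir):
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    mmcv.mkdir_or_exist(tmpdir)
    # every rank dumps its own shard to the shared directory
    mmcv.dump(part_results, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    if rank != 0:
        return None
    # rank 0 loads all shards and re-interleaves them back into dataset order
    part_list = [
        mmcv.load(osp.join(tmpdir, f'part_{i}.pkl')) for i in range(world_size)
    ]
    ordered = []
    for group in zip(*part_list):   # samples were assigned to ranks round-robin
        ordered.extend(group)
    return ordered[:dataset_size]   # drop the padding added to even out ranks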
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            precision = dataset.evaluate(outputs, args.eval,
                                         **kwargs)['precision']
            for i, cl in enumerate(dataset.CLASSES):
                rec = np.arange(0.0, 1.01, 0.01)
                pre_5 = precision[0, :, i]
                pre_7 = precision[4, :, i]
                pre_9 = precision[8, :, i]
                plt.clf()
                plt.plot(rec, pre_5, 'm-', label='IoU=0.5')
                plt.plot(rec, pre_7, 'b-', label='IoU=0.7')
                plt.plot(rec, pre_9, 'c-', label='IoU=0.9')
                plt.xlim(0, 1.0)
                plt.ylim(0, 1.01)
                plt.title(f"precision-recall curve of {cl}")
                plt.grid(True)
                plt.xlabel("recall")
                plt.ylabel("precision")
                plt.legend(loc="lower left")
                name = os.path.join(os.path.dirname(args.out),
                                    f'pr_curve_{cl}.png')
                plt.savefig(name)
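# The PR-curve plotting above assumes dataset.evaluate returns a COCO-style
# precision array indexed as precision[iou_thr_idx, recall_idx, class_idx],
# with IoU thresholds on the 0.50:0.05:0.95 grid, which is why indices 0, 4
# and 8 correspond to IoU 0.5, 0.7 and 0.9:
import numpy as np

iou_thrs = np.arange(0.5, 1.0, 0.05)   # [0.50, 0.55, ..., 0.95]
assert np.isclose(iou_thrs[0], 0.5)    # precision[0, :, i] -> IoU=0.5
assert np.isclose(iou_thrs[4], 0.7)    # precision[4, :, i] -> IoU=0.7
assert np.isclose(iou_thrs[8], 0.9)    # precision[8, :, i] -> IoU=0.9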
def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show, \ ('Please specify at least one operation (save/eval/format/show the ' 'results) with the argument "--out", "--eval", "--format_only" ' 'or "--show"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') #if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): # raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # set random seeds if args.seed is not None: set_random_seed(args.seed, deterministic=args.deterministic) # build the dataloader samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_module(model) # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect) #pkl_saved_path = '/home/radmin/jk/code/seg/SelectiveSeg/work_dirs/20210322_pano_all_preds/results/output_pkl/results.pkl' #outputs = mmcv.load(pkl_saved_path) rank, _ = get_dist_info() if rank == 0: if args.out and args.need_pkl_res: pkl_res_path = os.path.join(args.out, 'output_pkl') mmcv.mkdir_or_exist(pkl_res_path) pkl_res_path = os.path.join(pkl_res_path, 'results.pkl') print(f'\nwriting results to {pkl_res_path}') mmcv.dump(outputs, pkl_res_path) if args.need_bin_res: import torch import numpy as np bin_res_path = os.path.join(args.out, 'pred_label') mmcv.mkdir_or_exist(bin_res_path) box_res_path = os.path.join(args.out, 'box_label') mmcv.mkdir_or_exist(box_res_path) for i, res in enumerate(outputs): sem_preds = res['sem_preds'] if 'ins_preds' not in res: ins_preds = torch.zeros_like(sem_preds) else: ins_preds = res['ins_preds'] preds = torch.stack([sem_preds, ins_preds], dim=-1) preds = preds.cpu().numpy() preds = np.array(preds, dtype=np.uint8) file_name = str(i).zfill(6) postfix = '.bin' preds.tofile(bin_res_path + '/' + file_name + postfix) if 'pts_bbox' in res: bbox_preds = res['pts_bbox'] bbox_3d = bbox_preds['boxes_3d'].tensor dim = bbox_3d.size(1) if dim == 3: pseudo_size = bbox_3d.new_ones((bbox_3d.size(0), 3)) pseudo_theta = bbox_3d.new_zeros((bbox_3d.size(0), 1)) bbox_3d = torch.cat( [bbox_3d, pseudo_size, pseudo_theta], dim=1) else: # check here bbox_3d = torch.cat([ bbox_preds['boxes_3d'].gravity_center, 
bbox_preds['boxes_3d'].tensor[:, 3:] ], dim=1) temp_label = bbox_preds['labels_3d'].view(-1, 1) temp_score = bbox_preds['scores_3d'].view(-1, 1) temp_id = torch.zeros_like(temp_score) file_bbox_3d = torch.cat( [temp_label, bbox_3d, temp_score, temp_id], dim=1) file_bbox_3d = file_bbox_3d.numpy() postfix = '.txt' np.savetxt(box_res_path + '/' + file_name + postfix, file_bbox_3d, fmt='%.3f') if 'seg' in args.eval: mmcv.mkdir_or_exist(args.out) dataset.evaluate_seg(outputs, result_names=['sem_preds'], out_dir=args.out) elif 'pano' in args.eval: import numpy as np from selective_seg.util_tools.evaluate_panoptic import init_eval, printResults, eval_one_scan #log_file = osp.join(args.out, f'{timestamp}.log') log_file = os.path.join(args.out, 'pano_res.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) gt_label_path = '/data/nuscenes_opendata/ordered_seg_label/val/seg_ins_mask' mmcv.mkdir_or_exist(args.out) min_points = 15 # 15 for nuscenes, 50 for semantickitti official param evaluator = init_eval(dataset_type=cfg['dataset_type'], min_points=min_points) for i in range(len(outputs)): gt_file_path = os.path.join(gt_label_path, str(i).zfill(6) + '.bin') gt_labels = np.fromfile(gt_file_path, dtype=np.uint8).reshape(-1, 2) gt_sem_labels = gt_labels[:, 0] gt_ins_labels = gt_labels[:, 1] pred_sem_labels = outputs[i]['sem_preds'].cpu().numpy() pred_ins_labels = outputs[i]['ins_preds'].cpu().numpy() eval_one_scan(evaluator, gt_sem_labels, gt_ins_labels, pred_sem_labels, pred_ins_labels) eval_results = printResults(evaluator, logger=logger) '''
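# The loop above packs semantic and instance predictions for each frame into a
# single uint8 .bin file of shape (num_points, 2). Reading one file back is the
# mirror of how the ground-truth labels are loaded above; the path below is a
# placeholder.
import numpy as np

preds = np.fromfile('pred_label/000000.bin', dtype=np.uint8).reshape(-1, 2)
sem_preds, ins_preds = preds[:, 0], preds[:, 1]
# note: uint8 packing caps label ids at 255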
def main( config=None, checkpoint=None, img_prefix=None, ann_file=None, flip=False, out=None, format_only=False, eval=None, show=False, show_dir=None, log_file=None, fold=None, show_score_thr=0.3, gpu_collect=False, tmpdir=None, options=None, launcher="none", local_rank=0, score_thr=None, iou_thr=None, ) -> Tuple[list, WheatDataset]: logger = get_logger("inference", log_file=log_file, log_mode="a") assert out or eval or format_only or show or show_dir, ( "Please specify at least one operation (save/eval/format/show the " 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if eval and format_only: raise ValueError("--eval and --format_only cannot be both specified") if out is not None and not out.endswith((".pkl", ".pickle")): raise ValueError("The output file must be a pkl file.") cfg = Config.fromfile(config) if fold is not None: cfg.data.test.ann_file = cfg.data.test.ann_file.format(fold=fold) if img_prefix is not None: cfg.data.test.img_prefix = img_prefix if ann_file is not None: cfg.data.test.ann_file = ann_file if flip: print(cfg.data.test.pipeline[-1]) print(f"flip: {cfg.data.test.pipeline[-1].flip}") cfg.data.test.pipeline[-1].flip_direction = ["horizontal", "vertical"] cfg.data.test.pipeline[-1].flip = flip if score_thr is not None: if "rcnn" in cfg.test_cfg: cfg.test_cfg.rcnn.score_thr = score_thr else: cfg.test_cfg.score_thr = score_thr if iou_thr is not None: if "rcnn" in cfg.test_cfg: cfg.test_cfg.rcnn.nms.iou_thr = iou_thr else: cfg.test_cfg.nms.iou_thr = iou_thr # set cudnn_benchmark if cfg.get("cudnn_benchmark", False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if launcher == "none": distributed = False else: distributed = True init_dist(launcher, **cfg.dist_params) # build the dataloader # TODO: support multiple images per gpu (only minor changes are needed) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) fp16_cfg = cfg.get("fp16", None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, checkpoint, map_location="cpu") # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility if "CLASSES" in checkpoint["meta"]: model.CLASSES = checkpoint["meta"]["CLASSES"] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader, show, show_dir, show_score_thr) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, tmpdir, gpu_collect) rank, _ = get_dist_info() if rank == 0: if out: print(f"\nwriting results to {out}") mmcv.dump(outputs, out) kwargs = {} if options is None else options if format_only: dataset.format_results(outputs, **kwargs) if eval: print_log(f"config: {config}", logger) print_log(f"score_thr: {score_thr}", logger) print_log(f"iou_thr: {iou_thr}", logger) dataset.evaluate(outputs, logger=logger, **kwargs) return outputs, dataset
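# The function above is designed to be imported and called from Python rather
# than run as a CLI script. A hypothetical invocation for one cross-validation
# fold with flip TTA; every path and value below is a placeholder.
outputs, dataset = main(
    config='configs/wheat/faster_rcnn_r50_fpn.py',
    checkpoint='work_dirs/fold0/epoch_12.pth',
    fold=0,                              # substituted into cfg.data.test.ann_file
    flip=True,                           # horizontal + vertical flip on the test-time aug step
    score_thr=0.05,
    iou_thr=0.5,
    out='work_dirs/fold0/results.pkl',
    eval=True,                           # run dataset.evaluate and log the metrics
    log_file='work_dirs/fold0/inference.log',
)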
def main(): args = parse_args() assert ( args.out or args.eval or args.format_only or args.show or args.show_dir), ( 'Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir".') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified.') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. if cfg.get('custom_imports', None): from mmcv.utils import import_modules_from_strings import_modules_from_strings(**cfg['custom_imports']) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True if cfg.model.get('pretrained'): cfg.model.pretrained = None if cfg.model.get('neck'): if isinstance(cfg.model.neck, list): for neck_cfg in cfg.model.neck: if neck_cfg.get('rfp_backbone'): if neck_cfg.rfp_backbone.get('pretrained'): neck_cfg.rfp_backbone.pretrained = None elif cfg.model.neck.get('rfp_backbone'): if cfg.model.neck.rfp_backbone.get('pretrained'): cfg.model.neck.rfp_backbone.pretrained = None # in case the test dataset is concatenated samples_per_gpu = 1 if isinstance(cfg.data.test, dict): samples_per_gpu = (cfg.data.get('test_dataloader', {})).get( 'samples_per_gpu', cfg.data.get('samples_per_gpu', 1)) if samples_per_gpu > 1: # Support batch_size > 1 in test for text recognition # by disable MultiRotateAugOCR since it is useless for most case cfg = disable_text_recog_aug_test(cfg) if cfg.data.test.get('pipeline', None) is not None: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor( cfg.data.test.pipeline) elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True samples_per_gpu = max( [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) if samples_per_gpu > 1: for ds_cfg in cfg.data.test: ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader dataset = build_dataset(cfg.data.test, dict(test_mode=True)) # step 1: give default values and override (if exist) from cfg.data loader_cfg = { **dict(seed=cfg.get('seed'), drop_last=False, dist=distributed), **({} if torch.__version__ != 'parrots' else dict( prefetch_num=2, pin_memory=False, )), **dict((k, cfg.data[k]) for k in [ 'workers_per_gpu', 'seed', 'prefetch_num', 'pin_memory', 'persistent_workers', ] if k in cfg.data) } test_loader_cfg = { **loader_cfg, **dict(shuffle=False, drop_last=False), **cfg.data.get('test_dataloader', {}), **dict(samples_per_gpu=samples_per_gpu) } data_loader = build_dataloader(dataset, **test_loader_cfg) # build the model and load checkpoint cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) model = revert_sync_batchnorm(model) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_conv_bn(model) if not distributed: model = MMDataParallel(model, device_ids=[0]) is_kie = cfg.model.type in ['SDMGR'] outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, is_kie, args.show_score_thr) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect) rank, _ = get_dist_info() if rank == 0: if args.out: print(f'\nwriting results to {args.out}') mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in [ 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule' ]: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) print(dataset.evaluate(outputs, **eval_kwargs))
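# test_loader_cfg above is assembled by unpacking dicts in sequence, so later
# entries win: cfg.data.test_dataloader overrides the generic cfg.data keys,
# and the samples_per_gpu computed earlier overrides both. A tiny standalone
# illustration of that precedence (values are made up):
defaults = dict(shuffle=False, workers_per_gpu=2, samples_per_gpu=1)
from_test_dataloader = dict(samples_per_gpu=4, persistent_workers=True)
explicit = dict(samples_per_gpu=8)

merged = {**defaults, **from_test_dataloader, **explicit}
assert merged == dict(shuffle=False, workers_per_gpu=2,
                      samples_per_gpu=8, persistent_workers=True)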
def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) if cfg.get('USE_MMDET', False): from mmdet.apis import multi_gpu_test, single_gpu_test from mmdet.models import build_detector as build_model from mmdet.datasets import build_dataloader else: from mmtrack.apis import multi_gpu_test, single_gpu_test from mmtrack.models import build_model from mmtrack.datasets import build_dataloader if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # cfg.model.pretrains = None if hasattr(cfg.model, 'detector'): cfg.model.detector.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) logger = get_logger('ParamsSearcher', log_file=args.log) # get all cases search_params = get_search_params(cfg.model.tracker, logger=logger) combinations = [p for p in product(*search_params.values())] search_cfgs = [] for c in combinations: search_cfg = dotty(cfg.model.tracker.copy()) for i, k in enumerate(search_params.keys()): search_cfg[k] = c[i] search_cfgs.append(dict(search_cfg)) print_log(f'Totally {len(search_cfgs)} cases.', logger) # init with the first one cfg.model.tracker = search_cfgs[0].copy() # build the model and load checkpoint if cfg.get('test_cfg', False): model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) else: model = build_model(cfg.model) # We need call `init_weights()` to load pretained weights in MOT task. 
model.init_weights() fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) if args.checkpoint is not None: checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] if not hasattr(model, 'CLASSES'): model.CLASSES = dataset.CLASSES if args.fuse_conv_bn: model = fuse_conv_bn(model) if not distributed: model = MMDataParallel(model, device_ids=[0]) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) print_log(f'Record {cfg.search_metrics}.', logger) for i, search_cfg in enumerate(search_cfgs): if not distributed: model.module.tracker = build_tracker(search_cfg) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) else: model.module.tracker = build_tracker(search_cfg) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect) rank, _ = get_dist_info() if rank == 0: if args.out: print(f'\nwriting results to {args.out}') mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in ['interval', 'tmpdir', 'start', 'gpu_collect']: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) results = dataset.evaluate(outputs, **eval_kwargs) _records = [] for k in cfg.search_metrics: if isinstance(results[k], float): _records.append(f'{(results[k]):.3f}') else: _records.append(f'{(results[k])}') print_log(f'{combinations[i]}: {_records}', logger)
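# The parameter search above expands search_params (dotted tracker-config keys
# mapped to candidate values) into the Cartesian product of all cases and
# writes each combination back into a copy of the tracker config. A minimal
# sketch of that expansion using a plain nested-key setter instead of the dotty
# wrapper; search_params and the base config below are illustrative.
import copy
from itertools import product


def set_by_dotted_key(cfg, dotted_key, value):
    """Set cfg['a']['b'] = value for dotted_key 'a.b'."""
    keys = dotted_key.split('.')
    node = cfg
    for k in keys[:-1]:
        node = node[k]
    node[keys[-1]] = value


search_params = {
    'match_iou_thr': [0.5, 0.7],
    'motion.center_distance': [10, 20, 30],
}
base_tracker_cfg = {'match_iou_thr': 0.5, 'motion': {'center_distance': 10}}

search_cfgs = []
for combo in product(*search_params.values()):
    case = copy.deepcopy(base_tracker_cfg)
    for key, value in zip(search_params.keys(), combo):
        set_by_dotted_key(case, key, value)
    search_cfgs.append(case)

print(f'Totally {len(search_cfgs)} cases.')   # 2 * 3 = 6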
def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show, \ ('Please specify at least one operation (save/eval/format/show the ' 'results) with the argument "--out", "--eval", "--format_only" ' 'or "--show"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = mmcv.Config.fromfile(args.config) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader # TODO: support multiple images per gpu (only minor changes are needed) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_module(model) # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader, args.show) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect) rank, _ = get_dist_info() if rank == 0: if args.out: print('\nwriting results to {}'.format(args.out)) mmcv.dump(outputs, args.out) kwargs = {} if args.options is None else args.options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: dataset.evaluate(outputs, args.eval, **kwargs)
def main(): args = parse_args() assert args.eval or args.show \ or args.show_dir, \ ('Please specify at least one operation (eval/show the ' 'results) with the argument "--eval"' ', "--show" or "--show-dir"') cfg = Config.fromfile(args.config) if cfg.get('USE_MMDET', False): from mmdet.apis import multi_gpu_test, single_gpu_test from mmdet.datasets import build_dataloader from mmdet.models import build_detector as build_model if 'detector' in cfg.model: cfg.model = cfg.model.detector elif cfg.get('USE_MMCLS', False): from mmtrack.apis import multi_gpu_test, single_gpu_test from mmtrack.datasets import build_dataloader from mmtrack.models import build_reid as build_model if 'reid' in cfg.model: cfg.model = cfg.model.reid else: from mmtrack.apis import multi_gpu_test, single_gpu_test from mmtrack.datasets import build_dataloader from mmtrack.models import build_model if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) logger = get_logger('SOTParamsSearcher', log_file=args.log) # build the model and load checkpoint if cfg.get('test_cfg', False): model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) else: model = build_model(cfg.model) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) if args.checkpoint is not None: checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] if not hasattr(model, 'CLASSES'): model.CLASSES = dataset.CLASSES if args.fuse_conv_bn: model = fuse_conv_bn(model) if not distributed: model = MMDataParallel(model, device_ids=[0]) else: model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) if 'meta' in checkpoint and 'hook_msgs' in checkpoint[ 'meta'] and 'best_score' in checkpoint['meta']['hook_msgs']: best_score = checkpoint['meta']['hook_msgs']['best_score'] else: best_score = 0 best_result = dict(success=best_score, norm_precision=0., precision=0.) 
    best_params = dict(penalty_k=cfg.model.test_cfg.rpn.penalty_k,
                       lr=cfg.model.test_cfg.rpn.lr,
                       win_influ=cfg.model.test_cfg.rpn.window_influence)
    print_log(f'init best score as: {best_score}', logger)
    print_log(f'init best params as: {best_params}', logger)

    num_cases = len(args.penalty_k_range) * len(args.lr_range) * len(
        args.win_influ_range)
    case_count = 0
    for penalty_k in args.penalty_k_range:
        for lr in args.lr_range:
            for win_influ in args.win_influ_range:
                case_count += 1
                cfg.model.test_cfg.rpn.penalty_k = penalty_k
                cfg.model.test_cfg.rpn.lr = lr
                cfg.model.test_cfg.rpn.window_influence = win_influ
                print_log(f'-----------[{case_count}/{num_cases}]-----------',
                          logger)
                print_log(
                    f'penalty_k={penalty_k} lr={lr} win_influence={win_influ}',
                    logger)
                if not distributed:
                    outputs = single_gpu_test(
                        model,
                        data_loader,
                        args.show,
                        args.show_dir,
                        show_score_thr=args.show_score_thr)
                else:
                    outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                             args.gpu_collect)
                rank, _ = get_dist_info()
                if rank == 0:
                    kwargs = args.eval_options if args.eval_options else {}
                    if args.eval:
                        eval_kwargs = cfg.get('evaluation', {}).copy()
                        # hard-code way to remove EvalHook args
                        eval_hook_args = [
                            'interval', 'tmpdir', 'start', 'gpu_collect',
                            'save_best', 'rule', 'by_epoch'
                        ]
                        for key in eval_hook_args:
                            eval_kwargs.pop(key, None)
                        eval_kwargs.update(dict(metric=args.eval, **kwargs))
                        eval_results = dataset.evaluate(outputs, **eval_kwargs)
                        # print(eval_results)
                        print_log(f'evaluation results: {eval_results}',
                                  logger)
                        print_log('------------------------------------------',
                                  logger)
                        if eval_results['success'] > best_result['success']:
                            best_result = eval_results
                            best_params['penalty_k'] = penalty_k
                            best_params['lr'] = lr
                            best_params['win_influ'] = win_influ
                        print_log(
                            f'The current best evaluation results: '
                            f'{best_result}', logger)
                        print_log(f'The current best params: {best_params}',
                                  logger)

    print_log(
        f'After parameter searching, the best evaluation results: '
        f'{best_result}', logger)
    print_log(f'After parameter searching, the best params: {best_params}',
              logger)
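# The three nested loops above walk the full penalty_k x lr x window_influence
# grid. The same enumeration can be written with itertools.product, which also
# makes the case counter trivial; this is only a sketch reusing the same
# argument names, with the per-case test/eval body elided.
from itertools import product

cases = list(product(args.penalty_k_range, args.lr_range, args.win_influ_range))
for case_count, (penalty_k, lr, win_influ) in enumerate(cases, start=1):
    cfg.model.test_cfg.rpn.penalty_k = penalty_k
    cfg.model.test_cfg.rpn.lr = lr
    cfg.model.test_cfg.rpn.window_influence = win_influ
    print_log(f'-----------[{case_count}/{len(cases)}]-----------', logger)
    # ... run single_gpu_test / multi_gpu_test and evaluate as in the loop above ...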