def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   efficient_test=False):
    results = single_gpu_test(model, data_loader)
    return results
def after_train_iter(self, runner):
    """After train iteration hook."""
    if not self.every_n_iters(runner, self.interval):
        return
    from mmseg.apis import single_gpu_test
    runner.log_buffer.clear()
    results = single_gpu_test(runner.model, self.dataloader, show=False)
    self.evaluate(runner, results)
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   pre_eval=False):
    # Pre eval is set by default when training.
    results = single_gpu_test(model, data_loader, pre_eval=True)
    return results
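# Minimal sketch (not taken from any snippet in this file) of how the
# per-image pre-eval results returned with ``pre_eval=True`` are normally
# consumed: they are handed straight to ``dataset.evaluate``, which reduces
# them to metrics. ``model``, ``data_loader`` and ``dataset`` are assumed to
# be built the same way as in the full test scripts further down.
from mmseg.apis import single_gpu_test

results = single_gpu_test(model, data_loader, pre_eval=True)
# returns a dict such as {'mIoU': ..., 'mAcc': ..., 'aAcc': ...}
metrics = dataset.evaluate(results, metric='mIoU')
print(metrics)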
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    distributed = False

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # load onnx config and meta
    cfg.model.train_cfg = None
    model = ONNXRuntimeSegmentor(args.model, cfg=cfg, device_id=0)
    model.CLASSES = dataset.CLASSES
    model.PALETTE = dataset.PALETTE

    efficient_test = False
    if args.eval_options is not None:
        efficient_test = args.eval_options.get('efficient_test', False)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                              efficient_test, args.opacity)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def _do_evaluate(self, runner):
    """perform evaluation and save ckpt."""
    if not self._should_evaluate(runner):
        return

    from mmseg.apis import single_gpu_test
    results = single_gpu_test(runner.model, self.dataloader, show=False)
    runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
    key_score = self.evaluate(runner, results)
    if self.save_best:
        self._save_ckpt(runner, key_score)
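# Minimal sketch of the config-side counterpart of the hook above: in a
# typical MMSegmentation config, the ``evaluation`` dict controls how often
# ``_do_evaluate`` fires and whether the ``_save_ckpt`` branch is taken.
# The exact values here are illustrative, not taken from any snippet in this
# file.
evaluation = dict(interval=4000, metric='mIoU', pre_eval=True, save_best='mIoU')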
def test_single_gpu():
    test_dataset = ExampleDataset()
    data_loader = DataLoader(
        test_dataset,
        batch_size=1,
        sampler=None,
        num_workers=0,
        shuffle=False,
    )
    model = ExampleModel()

    # Test efficient test compatibility (will be deprecated)
    results = single_gpu_test(model, data_loader, efficient_test=True)
    assert len(results) == 1
    pred = np.load(results[0])
    assert isinstance(pred, np.ndarray)
    assert pred.shape == (1, )
    assert pred[0] == 1
    shutil.rmtree('.efficient_test')

    # Test pre_eval
    test_dataset.pre_eval = MagicMock(return_value=['success'])
    results = single_gpu_test(model, data_loader, pre_eval=True)
    assert results == ['success']

    # Test format_only
    test_dataset.format_results = MagicMock(return_value=['success'])
    results = single_gpu_test(model, data_loader, format_only=True)
    assert results == ['success']

    # efficient_test, pre_eval and format_only are mutually exclusive
    with pytest.raises(AssertionError):
        single_gpu_test(
            model,
            data_loader,
            efficient_test=True,
            format_only=True,
            pre_eval=True)
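# The test above relies on ``ExampleDataset`` and ``ExampleModel`` fixtures
# defined elsewhere in the test module. A rough sketch of what such fixtures
# need to provide, inferred only from the assertions (a single item, and a
# prediction array of shape (1,) with value 1); this is an assumption, not
# the real fixture code.
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset


class ExampleDataset(Dataset):

    def __getitem__(self, idx):
        # single_gpu_test unpacks each batch as keyword arguments to the model
        return dict(img=torch.tensor([1]), img_metas=[])

    def __len__(self):
        return 1


class ExampleModel(nn.Module):

    def forward(self, img, img_metas, return_loss=False, **kwargs):
        # one prediction per image, matching pred.shape == (1,) and pred[0] == 1
        return [np.array([1])]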
def after_train_iter(self, runner):
    """After train iteration hook.

    Override default ``single_gpu_test``.
    """
    if self.by_epoch or not self.every_n_iters(runner, self.interval):
        return
    from mmseg.apis import single_gpu_test
    runner.log_buffer.clear()
    results = single_gpu_test(
        runner.model,
        self.dataloader,
        show=False,
        efficient_test=self.efficient_test)
    self.evaluate(runner, results)
def after_train_iter(self, runner):
    """After train iteration hook."""

    def choice_iters(self, runner):
        return (runner.iter + 1) % 100 == 0 and (runner.iter + 1) >= 37000
        # return (runner.iter + 1) % 100 == 0 and (runner.iter + 1) >= 77000

    if not (self.every_n_iters(runner, self.interval)
            or choice_iters(self, runner)):
        return
    # if not self.every_n_iters(runner, self.interval):
    #     return
    from mmseg.apis import single_gpu_test
    runner.log_buffer.clear()
    results = single_gpu_test(runner.model, self.dataloader, show=False)
    self.evaluate(runner, results)
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init the logger before other steps
    logger = None
    if args.eval:
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        log_file = osp.join(cfg.work_dir, f'test_{timestamp}.log')
        logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)
        if logger is not None:
            logger.info(f'Set random seed to {args.seed}, deterministic: '
                        f'{args.deterministic}')
        else:
            print(f'Set random seed to {args.seed}, deterministic: '
                  f'{args.deterministic}')

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.val, dict(test_mode=True))
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = checkpoint['meta']['CLASSES']
    model.PALETTE = checkpoint['meta']['PALETTE']

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, logger, **kwargs)
G_solver.zero_grad()
G_loss = loss_seg + loss_aux
G_loss.backward()
G_solver.step()

# print(loss_seg.item(), loss_aux.item())
logger.info(
    'step:{:5d} G_lr:{:.6f} G_loss:{:.5f} dec:{:.5f} aux:{:.5f}'.format(
        i_iter + 1, G_solver.param_groups[-1]['lr'], G_loss.item(),
        loss_seg.item(), loss_aux.item()))

# val
if (i_iter + 1) % cfg.evaluation.interval == 0 or \
        ((i_iter + 1) > cfg.total_iters and (i_iter + 1) % 100 == 0):
    outputs = single_gpu_test(
        model_paral, val_data_loader, show=False, out_dir=None)
    eval_results = val_dataset.evaluate(outputs, metric='mIoU', logger=None)
    miou = eval_results['mIoU']
    # {'mIoU': 0.4836061652681801, 'mAcc': 0.5740488995020039, 'aAcc': 0.9015018912774634}
    logger.info(
        'Iter(val) [{:d}] mIoU: {:.4f}, mAcc: {:.4f}, aAcc: {:.4f}'.
        format(i_iter + 1, eval_results['mIoU'], eval_results['mAcc'],
               eval_results['aAcc']))
    if miou > max_miou:
        filename = 'iter_{:d}_max_{:.4f}.pth'.format(
            i_iter + 1, miou)  # iter_20000.pth
        filepath = os.path.join(cfg.work_dir, filename)
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = checkpoint['meta']['CLASSES']
    model.PALETTE = checkpoint['meta']['PALETTE']

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        # if args.format_only:
        kwargs = {
            'imgfile_prefix':
            "{}/results".format(os.path.dirname(args.checkpoint))
        }
        print('\nsave to ',
              "{}/results".format(os.path.dirname(args.checkpoint)))
        dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    distributed = False

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # load onnx config and meta
    cfg.model.train_cfg = None
    if args.backend == 'onnxruntime':
        model = ONNXRuntimeSegmentor(args.model, cfg=cfg, device_id=0)
    elif args.backend == 'tensorrt':
        model = TensorRTSegmentor(args.model, cfg=cfg, device_id=0)
    model.CLASSES = dataset.CLASSES
    model.PALETTE = dataset.PALETTE

    # clean gpu memory when starting a new evaluation.
    torch.cuda.empty_cache()
    eval_kwargs = {} if args.eval_options is None else args.eval_options

    # Deprecated
    efficient_test = eval_kwargs.get('efficient_test', False)
    if efficient_test:
        warnings.warn(
            '``efficient_test=True`` does not have effect in tools/test.py, '
            'the evaluation and format results are CPU memory efficient by '
            'default')

    eval_on_format_results = (
        args.eval is not None and 'cityscapes' in args.eval)
    if eval_on_format_results:
        assert len(args.eval) == 1, 'eval on format results is not ' \
                                    'applicable for metrics other than ' \
                                    'cityscapes'
    if args.format_only or eval_on_format_results:
        if 'imgfile_prefix' in eval_kwargs:
            tmpdir = eval_kwargs['imgfile_prefix']
        else:
            tmpdir = '.format_cityscapes'
            eval_kwargs.setdefault('imgfile_prefix', tmpdir)
        mmcv.mkdir_or_exist(tmpdir)
    else:
        tmpdir = None

    model = MMDataParallel(model, device_ids=[0])
    results = single_gpu_test(
        model,
        data_loader,
        args.show,
        args.show_dir,
        False,
        args.opacity,
        pre_eval=args.eval is not None and not eval_on_format_results,
        format_only=args.format_only or eval_on_format_results,
        format_args=eval_kwargs)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            warnings.warn(
                'The behavior of ``args.out`` has been changed since MMSeg '
                'v0.16, the pickled outputs could be seg map as type of '
                'np.array, pre-eval results or file paths for '
                '``dataset.format_results()``.')
            print(f'\nwriting results to {args.out}')
            mmcv.dump(results, args.out)
        if args.eval:
            dataset.evaluate(results, args.eval, **eval_kwargs)
        if tmpdir is not None and eval_on_format_results:
            # remove tmp dir when cityscapes evaluation
            shutil.rmtree(tmpdir)
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if 'PALETTE' in checkpoint.get('meta', {}):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE

    # clean gpu memory when starting a new evaluation.
    torch.cuda.empty_cache()
    eval_kwargs = {} if args.eval_options is None else args.eval_options

    # Deprecated
    efficient_test = eval_kwargs.get('efficient_test', False)
    if efficient_test:
        warnings.warn(
            '``efficient_test=True`` does not have effect in tools/test.py, '
            'the evaluation and format results are CPU memory efficient by '
            'default')

    eval_on_format_results = (
        args.eval is not None and 'cityscapes' in args.eval)
    if eval_on_format_results:
        assert len(args.eval) == 1, 'eval on format results is not ' \
                                    'applicable for metrics other than ' \
                                    'cityscapes'
    if args.format_only or eval_on_format_results:
        if 'imgfile_prefix' in eval_kwargs:
            tmpdir = eval_kwargs['imgfile_prefix']
        else:
            tmpdir = '.format_cityscapes'
            eval_kwargs.setdefault('imgfile_prefix', tmpdir)
        mmcv.mkdir_or_exist(tmpdir)
    else:
        tmpdir = None

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        results = single_gpu_test(
            model,
            data_loader,
            args.show,
            args.show_dir,
            False,
            args.opacity,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        results = multi_gpu_test(
            model,
            data_loader,
            args.tmpdir,
            args.gpu_collect,
            False,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            warnings.warn(
                'The behavior of ``args.out`` has been changed since MMSeg '
                'v0.16, the pickled outputs could be seg map as type of '
                'np.array, pre-eval results or file paths for '
                '``dataset.format_results()``.')
            print(f'\nwriting results to {args.out}')
            mmcv.dump(results, args.out)
        if args.eval:
            eval_kwargs.update(metric=args.eval)
            metric = dataset.evaluate(results, **eval_kwargs)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file, indent=4)
        if tmpdir is not None and eval_on_format_results:
            # remove tmp dir when cityscapes evaluation
            shutil.rmtree(tmpdir)
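# Results pickled via ``--out`` can later be reloaded for offline analysis.
# As the warning in the scripts above notes, the pickle contents depend on
# the mode: raw seg maps, pre-eval results, or file paths produced by
# ``dataset.format_results``. The path below is illustrative only.
import mmcv

results = mmcv.load('results.pkl')
print(type(results), len(results))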
# model = MMDataParallel(model, device_ids=[0])
# # outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
# outputs = single_gpu_test(model, data_loader, show=False, out_dir=None)
# else:
#     model = MMDistributedDataParallel(
#         model.cuda(),
#         device_ids=[torch.cuda.current_device()],
#         broadcast_buffers=False)
#     outputs = multi_gpu_test(model, data_loader, args.tmpdir,
#                              args.gpu_collect)
# dataset.evaluate(outputs, metric='mIoU', logger=None)

model = MMDataParallel(model1.student, device_ids=[0])
print('======================')
outputs = single_gpu_test(model, data_loader, show=False, out_dir=None)
dataset.evaluate(outputs, metric='mIoU', logger=None)

# rank, _ = get_dist_info()
# if rank == 0:
#     if args.out:
#         print(f'\nwriting results to {args.out}')
#         mmcv.dump(outputs, args.out)
#     kwargs = {} if args.eval_options is None else args.eval_options
#     if args.format_only:
#         dataset.format_results(outputs, **kwargs)
#     if args.eval:
#         dataset.evaluate(outputs, args.eval, **kwargs)
#         dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if args.gpu_id is not None:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        cfg.gpu_ids = [args.gpu_id]
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to '
                          f'{cfg.gpu_ids[0:1]} to avoid potential error in '
                          'non-distribute testing time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(args.work_dir,
                                 f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(args.work_dir,
                                 f'eval_single_scale_{timestamp}.json')
    elif rank == 0:
        work_dir = osp.join('./work_dirs',
                            osp.splitext(osp.basename(args.config))[0])
        mmcv.mkdir_or_exist(osp.abspath(work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(work_dir,
                                 f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(work_dir,
                                 f'eval_single_scale_{timestamp}.json')

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # The default loader config
    loader_cfg = dict(
        # cfg.gpus will be ignored if distributed
        num_gpus=len(cfg.gpu_ids),
        dist=distributed,
        shuffle=False)
    # The overall dataloader settings
    loader_cfg.update({
        k: v
        for k, v in cfg.data.items() if k not in [
            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
            'test_dataloader'
        ]
    })
    test_loader_cfg = {
        **loader_cfg,
        'samples_per_gpu': 1,
        'shuffle': False,  # Not shuffle by default
        **cfg.data.get('test_dataloader', {})
    }
    # build the dataloader
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if 'PALETTE' in checkpoint.get('meta', {}):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE

    # clean gpu memory when starting a new evaluation.
    torch.cuda.empty_cache()
    eval_kwargs = {} if args.eval_options is None else args.eval_options

    # Deprecated
    efficient_test = eval_kwargs.get('efficient_test', False)
    if efficient_test:
        warnings.warn(
            '``efficient_test=True`` does not have effect in tools/test.py, '
            'the evaluation and format results are CPU memory efficient by '
            'default')

    eval_on_format_results = (
        args.eval is not None and 'cityscapes' in args.eval)
    if eval_on_format_results:
        assert len(args.eval) == 1, 'eval on format results is not ' \
                                    'applicable for metrics other than ' \
                                    'cityscapes'
    if args.format_only or eval_on_format_results:
        if 'imgfile_prefix' in eval_kwargs:
            tmpdir = eval_kwargs['imgfile_prefix']
        else:
            tmpdir = '.format_cityscapes'
            eval_kwargs.setdefault('imgfile_prefix', tmpdir)
        mmcv.mkdir_or_exist(tmpdir)
    else:
        tmpdir = None

    if not distributed:
        warnings.warn(
            'SyncBN is only supported with DDP. To be compatible with DP, '
            'we convert SyncBN to BN. Please use dist_train.sh which can '
            'avoid this error.')
        if not torch.cuda.is_available():
            assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \
                'Please use MMCV >= 1.4.4 for CPU training!'
        model = revert_sync_batchnorm(model)
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        results = single_gpu_test(
            model,
            data_loader,
            args.show,
            args.show_dir,
            False,
            args.opacity,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        results = multi_gpu_test(
            model,
            data_loader,
            args.tmpdir,
            args.gpu_collect,
            False,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            warnings.warn(
                'The behavior of ``args.out`` has been changed since MMSeg '
                'v0.16, the pickled outputs could be seg map as type of '
                'np.array, pre-eval results or file paths for '
                '``dataset.format_results()``.')
            print(f'\nwriting results to {args.out}')
            mmcv.dump(results, args.out)
        if args.eval:
            eval_kwargs.update(metric=args.eval)
            metric = dataset.evaluate(results, **eval_kwargs)
            metric_dict = dict(config=args.config, metric=metric)
            mmcv.dump(metric_dict, json_file, indent=4)
        if tmpdir is not None and eval_on_format_results:
            # remove tmp dir when cityscapes evaluation
            shutil.rmtree(tmpdir)