def main():
    """Benchmark pure inference speed (img/s) of a detector on the test set.

    Builds the test dataloader and model from the command-line config and
    checkpoint, runs single-GPU inference one sample at a time, and reports
    fps excluding the first ``num_warmup`` iterations.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with 2000 images and take the average
    for i, data in enumerate(data_loader):
        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')

        if (i + 1) == 2000:
            # BUGFIX: `elapsed` was already accumulated in the block above
            # (i >= num_warmup); the original added it a second time here,
            # which slightly deflated the reported overall fps.
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break
def main():
    """Run a detector checkpoint on the image paths given in ``args.input``.

    Unlike the dataloader-based test scripts, this entry point preprocesses
    each input file with ``processing_one_image`` and feeds the resulting
    batch list straight to ``single_gpu_test``.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        # BUGFIX: the test dataset is not built in this script (that block
        # was commented out), so `dataset` was an undefined name here and
        # this branch raised NameError. Build the dataset on demand purely
        # to recover the class names.
        dataset = build_dataset(cfg.data.test)
        model.CLASSES = dataset.CLASSES

    # preprocess every input file into a model-ready sample
    imgs = []
    for file_path in args.input:
        imgs.append(processing_one_image(file_path))

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, imgs, args.show, args.show_dir,
                              args.show_score_thr)
def main():
    """Format previously-saved detection results for submission/evaluation.

    This entry point does not run inference: it loads the pickled result
    file ``args.out`` and calls ``dataset.format_results`` on rank 0.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # DetectoRS-style configs keep an extra pretrained path on the RFP
    # backbone; clear it as well so nothing is loaded at test time.
    if cfg.model.get('neck'):
        if cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    # NOTE(review): init_dist is called unconditionally here (no
    # launcher == 'none' guard as in the sibling scripts) -- presumably this
    # entry point is always launched through a distributed launcher; confirm.
    init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # Explicit check instead of `assert`: asserts are stripped under `-O`,
    # and a missing result file should always be a hard, descriptive error.
    if not os.path.exists(args.out):
        raise FileNotFoundError(f'result file not found: {args.out}')
    outputs = mmcv.load(args.out)

    rank, _ = get_dist_info()
    if rank == 0:
        kwargs = {} if args.options is None else args.options
        dataset.format_results(outputs, **kwargs)
def main():
    """Timed single-GPU segmentation test.

    Prints how long the config load, dataset/dataloader construction and
    model setup each take, then hands the wrapped model to
    ``single_seg_test``.
    """
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    # -- config --
    t_cfg = time.time()
    cfg = Config.fromfile(args.config)
    print('cfg time:', time.time() - t_cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # -- dataset & dataloader --
    batch_size = cfg.data.test.pop('samples_per_gpu', 1)
    t_data = time.time()
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=batch_size,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    print('dataset & dataloader time:', time.time() - t_data)

    # -- model & checkpoint --
    t_model = time.time()
    model = build_detector(cfg.model, train_cfg=None, test_cfg=None)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old checkpoints may lack class info; fall back to the dataset's classes
    model.CLASSES = (checkpoint['meta']['CLASSES']
                     if 'CLASSES' in checkpoint['meta']
                     else dataset.CLASSES)
    print('model time:', time.time() - t_model)

    model = MMDataParallel(model, device_ids=[0])
    single_seg_test(model, data_loader)
def setup(self, config_file, checkpoint_file, fuse_conv):
    """Build a detector, its test dataloader and dataset from config files.

    Args:
        config_file: path to the mmcv config file.
        checkpoint_file: path to the model checkpoint to load.
        fuse_conv: if truthy, fuse conv+BN layers after loading weights.

    Returns:
        tuple: ``(model, data_loader, dataset)`` with the model wrapped in
        ``MMDataParallel`` and switched to eval mode.
    """
    cfg = Config.fromfile(config_file)
    # honor the config's cudnn_benchmark switch
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # test-set loader, one sample per GPU
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # detector construction + checkpoint restore
    detector = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    if cfg.get('fp16', None) is not None:
        wrap_fp16_model(detector)
    load_checkpoint(detector, checkpoint_file, map_location='cpu')
    if fuse_conv:
        detector = fuse_module(detector)
    self._fuse_conv = fuse_conv

    detector = MMDataParallel(detector, device_ids=[0])
    detector.eval()
    return detector, loader, dataset
def main():
    """Standard single-/multi-GPU test entry point.

    Runs inference over the test set (optionally distributed), then on rank 0
    dumps, formats and/or evaluates the results according to the CLI flags.
    """
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint.
    # CONSISTENCY FIX: was `train_cfg=cfg.train_cfg`; every other test entry
    # point in this file passes train_cfg=None at test time, and the training
    # config is not needed for inference.
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def main():
    """Test entry point with optional NNCF-compressed model support.

    If the config carries an ``nncf_config`` section, the model is wrapped
    for NNCF and the raw checkpoint is torch-loaded; otherwise the ordinary
    fp16/checkpoint path is used. Falls back to CPU execution when CUDA is
    unavailable.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # optional ad-hoc config overrides from the command line
    if args.update_config:
        cfg.merge_from_dict(args.update_config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    # nncf model wrapper: this branch loads the checkpoint via torch.load
    # after wrapping, instead of mmcv's load_checkpoint below.
    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError(
                'Trying to make testing with NNCF compression a model snapshot that was NOT trained with NNCF'
            )
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
        checkpoint = torch.load(args.checkpoint, map_location=None)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(model, args.checkpoint,
                                     map_location='cpu')
        if args.fuse_conv_bn:
            # TODO: FIXME: should it be inside this 'else' branch???
            from tools.fuse_conv_bn import fuse_module
            model = fuse_module(model)

    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if torch.cuda.is_available():
        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show,
                                      args.show_dir, args.show_score_thr)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False)
            outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                     args.gpu_collect)
    else:
        # CPU fallback when no GPU is present
        model = MMDataCPU(model)
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        # evaluation kwargs: start from cfg['evaluation'], strip the
        # runner-only keys, then layer CLI options and the metric on top
        kwargs = cfg.get('evaluation', {})
        kwargs.pop('interval', None)
        kwargs.pop('gpu_collect', None)
        kwargs.update({} if args.options is None else args.options)
        kwargs['metric'] = args.eval
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, **kwargs)
def main():
    """Timed single-GPU test with optional dump/format/eval of the results.

    Prints how long config loading, dataloader construction and model setup
    take, runs ``single_gpu_test``, then saves/formats/evaluates according
    to the CLI flags (evaluation gets ``jsonfile_prefix=args.json``).
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    # -- config (timed) --
    t0 = time.time()
    cfg = Config.fromfile(args.config)
    print('cfg time:', time.time() - t0)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # -- dataset & dataloader (timed) --
    batch_size = cfg.data.test.pop('samples_per_gpu', 1)
    t1 = time.time()
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=batch_size,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    print('dataset & dataloader time:', time.time() - t1)

    # -- model & checkpoint (timed) --
    t2 = time.time()
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    if cfg.get('fp16', None) is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old checkpoints may lack class info; fall back to the dataset's classes
    model.CLASSES = (checkpoint['meta']['CLASSES']
                     if 'CLASSES' in checkpoint['meta']
                     else dataset.CLASSES)
    print('model time:', time.time() - t2)

    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)

    if args.out:
        print(f'\nwriting results to {args.out}')
        mmcv.dump(outputs, args.out)
    kwargs = args.options if args.options is not None else {}
    if args.format_only:
        dataset.format_results(outputs, **kwargs)
    if args.eval:
        dataset.evaluate(outputs, args.eval, jsonfile_prefix=args.json,
                         **kwargs)
def main():
    """Test entry point (older mmdet API: ``imgs_per_gpu``) that also
    collects per-image metas from ``multi_gpu_test`` and writes a detection
    file for dataset-specific evaluation.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show, (
        "Please specify at least one operation (save/eval/format/show the "
        'results) with the argument "--out", "--eval", "--format_only" '
        'or "--show"'
    )

    if args.eval and args.format_only:
        raise ValueError("--eval and --format_only cannot be both specified")

    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a pkl file.")

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get("fp16", None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu")
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if "CLASSES" in checkpoint["meta"]:
        model.CLASSES = checkpoint["meta"]["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES

    # NOTE(review): `metas` is only bound in the distributed branch below,
    # but it is used unconditionally at rank 0 (mmcv.dump / format_results).
    # Running this script with --launcher none therefore raises NameError at
    # the dump step -- confirm whether single_gpu_test here is expected to
    # return metas as well, or whether this script is distributed-only.
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
        )
        outputs, metas = multi_gpu_test(
            model, data_loader, args.tmpdir, args.gpu_collect
        )

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print("\nwriting results to {}".format(args.out))
            mmcv.dump(outputs, args.out)
            print("\nwriting results to {}".format(args.meta))
            mmcv.dump(metas, args.meta)
        dataset.format_results(outputs, metas, args.det_file)
        dataset.evaluate(args.label_dir, args.det_file)
def main():
    """Segmentation/panoptic test entry point with custom result writers.

    Runs (optionally distributed) inference, then at rank 0 optionally:
    dumps a results.pkl, writes per-scan ``.bin`` semantic/instance label
    files and ``.txt`` box files, and evaluates either semantic ('seg') or
    panoptic ('pano') metrics depending on ``args.eval``.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    # here args.out is a directory, not a pkl file, so the usual suffix
    # check is disabled
    #if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #    raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    # debugging shortcut: load previously dumped results instead of running
    # inference
    #pkl_saved_path = '/home/radmin/jk/code/seg/SelectiveSeg/work_dirs/20210322_pano_all_preds/results/output_pkl/results.pkl'
    #outputs = mmcv.load(pkl_saved_path)

    rank, _ = get_dist_info()
    if rank == 0:
        # optional raw pickle dump under <out>/output_pkl/results.pkl
        if args.out and args.need_pkl_res:
            pkl_res_path = os.path.join(args.out, 'output_pkl')
            mmcv.mkdir_or_exist(pkl_res_path)
            pkl_res_path = os.path.join(pkl_res_path, 'results.pkl')
            print(f'\nwriting results to {pkl_res_path}')
            mmcv.dump(outputs, pkl_res_path)

        # optional per-scan binary label + box text files
        if args.need_bin_res:
            import torch
            import numpy as np

            bin_res_path = os.path.join(args.out, 'pred_label')
            mmcv.mkdir_or_exist(bin_res_path)
            box_res_path = os.path.join(args.out, 'box_label')
            mmcv.mkdir_or_exist(box_res_path)

            for i, res in enumerate(outputs):
                # pack semantic + instance predictions as a (N, 2) uint8 file
                sem_preds = res['sem_preds']
                if 'ins_preds' not in res:
                    ins_preds = torch.zeros_like(sem_preds)
                else:
                    ins_preds = res['ins_preds']
                preds = torch.stack([sem_preds, ins_preds], dim=-1)
                preds = preds.cpu().numpy()
                preds = np.array(preds, dtype=np.uint8)
                file_name = str(i).zfill(6)
                postfix = '.bin'
                preds.tofile(bin_res_path + '/' + file_name + postfix)

                if 'pts_bbox' in res:
                    bbox_preds = res['pts_bbox']
                    bbox_3d = bbox_preds['boxes_3d'].tensor
                    dim = bbox_3d.size(1)
                    if dim == 3:
                        # center-only boxes: pad with unit size and zero yaw
                        pseudo_size = bbox_3d.new_ones((bbox_3d.size(0), 3))
                        pseudo_theta = bbox_3d.new_zeros((bbox_3d.size(0), 1))
                        bbox_3d = torch.cat(
                            [bbox_3d, pseudo_size, pseudo_theta], dim=1)
                    else:
                        # check here
                        bbox_3d = torch.cat([
                            bbox_preds['boxes_3d'].gravity_center,
                            bbox_preds['boxes_3d'].tensor[:, 3:]
                        ], dim=1)
                    # one row per box: label, box params, score, id(=0)
                    temp_label = bbox_preds['labels_3d'].view(-1, 1)
                    temp_score = bbox_preds['scores_3d'].view(-1, 1)
                    temp_id = torch.zeros_like(temp_score)
                    file_bbox_3d = torch.cat(
                        [temp_label, bbox_3d, temp_score, temp_id], dim=1)
                    file_bbox_3d = file_bbox_3d.numpy()
                    postfix = '.txt'
                    np.savetxt(box_res_path + '/' + file_name + postfix,
                               file_bbox_3d, fmt='%.3f')

        if 'seg' in args.eval:
            mmcv.mkdir_or_exist(args.out)
            dataset.evaluate_seg(outputs, result_names=['sem_preds'],
                                 out_dir=args.out)
        elif 'pano' in args.eval:
            import numpy as np
            from selective_seg.util_tools.evaluate_panoptic import init_eval, printResults, eval_one_scan

            #log_file = osp.join(args.out, f'{timestamp}.log')
            log_file = os.path.join(args.out, 'pano_res.log')
            logger = get_root_logger(log_file=log_file,
                                     log_level=cfg.log_level)

            # NOTE(review): hard-coded ground-truth path -- should come from
            # the config or CLI; verify before reuse.
            gt_label_path = '/data/nuscenes_opendata/ordered_seg_label/val/seg_ins_mask'
            mmcv.mkdir_or_exist(args.out)

            min_points = 15  # 15 for nuscenes, 50 for semantickitti official param
            evaluator = init_eval(dataset_type=cfg['dataset_type'],
                                  min_points=min_points)

            for i in range(len(outputs)):
                gt_file_path = os.path.join(gt_label_path,
                                            str(i).zfill(6) + '.bin')
                gt_labels = np.fromfile(gt_file_path,
                                        dtype=np.uint8).reshape(-1, 2)
                gt_sem_labels = gt_labels[:, 0]
                gt_ins_labels = gt_labels[:, 1]
                pred_sem_labels = outputs[i]['sem_preds'].cpu().numpy()
                pred_ins_labels = outputs[i]['ins_preds'].cpu().numpy()
                eval_one_scan(evaluator, gt_sem_labels, gt_ins_labels,
                              pred_sem_labels, pred_ins_labels)

            eval_results = printResults(evaluator, logger=logger)
# NOTE(review): the triple-quote below appears in the original source with no
# visible matching delimiter in this chunk -- it looks like the opener/closer
# of a commented-out region that was truncated; confirm against the full file.
'''
def main():
    """DeepSpeed-based test entry point.

    Builds the detector, restores weights through a DeepSpeed engine
    checkpoint (tag "ds"), then runs single- or multi-GPU inference and
    dumps/formats/evaluates the results at rank 0.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show, (
        "Please specify at least one operation (save/eval/format/show the "
        'results) with the argument "--out", "--eval", "--format_only" '
        'or "--show"')

    if args.eval and args.format_only:
        raise ValueError("--eval and --format_only cannot be both specified")
    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a pkl file.")

    cfg = Config.fromfile(args.config)
    # honor the config's cudnn_benchmark switch
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # distributed env must be set up before anything that reads dist info
    distributed = args.launcher != "none"
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # test-set loader
    batch_size = cfg.data.test.pop("samples_per_gpu", 1)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=batch_size,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )

    # detector + DeepSpeed engine; weights come from the engine checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg"))
    parameters = (p for p in model.parameters() if p.requires_grad)
    model_engine, _, _, _ = initialize(
        args=args,
        model=model,
        model_parameters=parameters,
    )
    _, client_state = model_engine.load_checkpoint(args.checkpoint, "ds")
    model = model_engine.module

    if cfg.get("fp16", None) is not None:
        wrap_fp16_model(model)
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    # class names: prefer the DeepSpeed client state, else the dataset
    if "CLASSES" in client_state:
        model.CLASSES = client_state["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES

    if distributed:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
        )
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    else:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f"\nwriting results to {args.out}")
            mmcv.dump(outputs, args.out)
        kwargs = args.options if args.options is not None else {}
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)