def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    if args.out:
        # Overwrite output_config from args.out
        output_config = Config._merge_a_into_b(
            dict(out=args.out), output_config)

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    if args.eval:
        # Overwrite eval_config from args.eval
        eval_config = Config._merge_a_into_b(
            dict(metrics=args.eval), eval_config)
    if args.eval_options:
        # Add options from args.eval_options
        eval_config = Config._merge_a_into_b(args.eval_options, eval_config)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    dataset_type = cfg.data.test.type
    if output_config.get('out', None):
        out = output_config['out']
        # make sure the dirname of the output path exists
        mmcv.mkdir_or_exist(osp.dirname(out))
        _, suffix = osp.splitext(out)
        if dataset_type == 'AVADataset':
            assert suffix[1:] == 'csv', ('For AVADataset, the format of the '
                                         'output file should be csv')
        else:
            assert suffix[1:] in file_handlers, (
                'The format of the output '
                'file should be json, pickle or yaml')

    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.data.test.test_mode = True

    if cfg.model.get('test_cfg') is None and cfg.get('test_cfg') is None:
        cfg.model.setdefault('test_cfg',
                             dict(average_clips=args.average_clips))
    else:
        # You can set average_clips during testing; it will override the
        # original setting
        if args.average_clips is not None:
            if cfg.model.get('test_cfg') is not None:
                cfg.model.test_cfg.average_clips = args.average_clips
            else:
                cfg.test_cfg.average_clips = args.average_clips

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # The flag is used to register module's hooks
    cfg.setdefault('module_hooks', [])

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    dataloader_setting = dict(
        videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
        workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
        dist=distributed,
        shuffle=False)
    dataloader_setting = dict(dataloader_setting,
                              **cfg.data.get('test_dataloader', {}))
    data_loader = build_dataloader(dataset, **dataloader_setting)

    # build the model and load checkpoint
    model = build_model(
        cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))

    if len(cfg.module_hooks) > 0:
        register_module_hooks(model, cfg.module_hooks)

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if output_config.get('out', None):
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)
        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
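# NOTE (assumption): every variant in this file calls `parse_args()`, but the
# parser itself is not shown and the usual mmcv/mmaction imports are omitted
# from these excerpts. The sketch below is a minimal, hypothetical parser
# reconstructed from the attributes accessed on `args` in the first variant;
# the real scripts may use different flag names, defaults, or help texts.
# Later variants additionally reference `args.options`, `args.update_config`,
# `args.data_dir`, and `args.out_invalid`, which would need extra arguments.
import argparse

from mmcv import DictAction


def parse_args():
    parser = argparse.ArgumentParser(description='Test an action recognizer')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', default=None, help='output result file')
    parser.add_argument(
        '--eval', nargs='+', default=None,
        help='evaluation metrics, e.g. "top_k_accuracy"')
    parser.add_argument(
        '--eval-options', nargs='+', action=DictAction, default={},
        help='custom options passed to dataset.evaluate()')
    parser.add_argument(
        '--cfg-options', nargs='+', action=DictAction, default={},
        help='override some settings in the used config')
    parser.add_argument(
        '--average-clips', default=None, choices=['score', 'prob', None],
        help='average type when averaging test clips')
    parser.add_argument(
        '--fuse-conv-bn', action='store_true',
        help='whether to fuse conv and bn layers before testing')
    parser.add_argument(
        '--gpu-collect', action='store_true',
        help='whether to use gpu to collect results in distributed testing')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--launcher', default='none',
        choices=['none', 'pytorch', 'slurm', 'mpi'], help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    return parser.parse_args()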
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.options
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.data.test.test_mode = True

    if cfg.test_cfg is None:
        cfg.test_cfg = dict(average_clips=args.average_clips)
    else:
        # You can set average_clips during testing; it will override the
        # original setting
        if args.average_clips is not None:
            cfg.test_cfg.average_clips = args.average_clips

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    dataloader_setting = dict(
        videos_per_gpu=cfg.data.get('videos_per_gpu', 2),
        workers_per_gpu=cfg.data.get('workers_per_gpu', 0),
        dist=distributed,
        shuffle=False)
    dataloader_setting = dict(dataloader_setting,
                              **cfg.data.get('test_dataloader', {}))
    data_loader = build_dataloader(dataset, **dataloader_setting)

    # build the model and load checkpoint
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)
        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
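# NOTE (assumption): `merge_configs()` is used by several variants here but is
# not defined in these excerpts. A plausible minimal implementation,
# consistent with how it is called (override values that are None or empty
# are ignored, so the later `assert output_config or eval_config` still fires
# when neither --out nor --eval was given), could look like this; the real
# helper may differ.
def merge_configs(cfg1, cfg2):
    """Merge cfg2 into cfg1, skipping keys whose value is falsy."""
    cfg1 = {} if cfg1 is None else cfg1.copy()
    cfg2 = {} if cfg2 is None else cfg2
    for k, v in cfg2.items():
        if v:
            cfg1[k] = v
    return cfg1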
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.options
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # get rank
    rank, _ = get_dist_info()

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    if rank == 0:
        print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_model(
        cfg.model,
        train_cfg=None,
        test_cfg=cfg.test_cfg,
        class_sizes=dataset.class_sizes,
        class_maps=dataset.class_maps)

    # nncf model wrapper
    if is_checkpoint_nncf(args.checkpoint) and not cfg.get('nncf_config'):
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v in nncf_part.items():
            cfg[k] = v

    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError(
                'Trying to test with NNCF compression a model snapshot '
                'that was NOT trained with NNCF')
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        # load model weights
        load_checkpoint(model, args.checkpoint, map_location='cpu',
                        force_matching=True)
        if args.fuse_conv_bn:
            model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)

            print('\nFinal metrics:')
            for name, val in eval_res.items():
                if 'invalid_info' in name:
                    continue

                if isinstance(val, float):
                    print(f'{name}: {val:.04f}')
                elif isinstance(val, str):
                    print(f'{name}:\n{val}')
                else:
                    print(f'{name}: {val}')

            invalid_info = {
                name: val
                for name, val in eval_res.items() if 'invalid_info' in name
            }
            if len(invalid_info) > 0:
                assert args.out_invalid is not None and args.out_invalid != ''
                if os.path.exists(args.out_invalid):
                    shutil.rmtree(args.out_invalid)
                if not os.path.exists(args.out_invalid):
                    os.makedirs(args.out_invalid)

                for name, invalid_record in invalid_info.items():
                    out_invalid_dir = os.path.join(args.out_invalid, name)

                    item_gen = zip(invalid_record['ids'],
                                   invalid_record['conf'],
                                   invalid_record['pred'])
                    for invalid_idx, pred_conf, pred_label in item_gen:
                        record_info = dataset.get_info(invalid_idx)
                        gt_label = record_info['label']

                        if 'filename' in record_info:
                            src_data_path = record_info['filename']

                            in_record_name, record_extension = os.path.basename(
                                src_data_path).split('.')
                            out_record_name = (
                                f'{in_record_name}_gt{gt_label}'
                                f'_pred{pred_label}_conf{pred_conf:.3f}')

                            trg_data_path = os.path.join(
                                out_invalid_dir,
                                f'{out_record_name}.{record_extension}')

                            shutil.copyfile(src_data_path, trg_data_path)
                        else:
                            src_data_path = record_info['frame_dir']

                            in_record_name = os.path.basename(src_data_path)
                            out_record_name = (
                                f'{in_record_name}_gt{gt_label}'
                                f'_pred{pred_label}_conf{pred_conf:.3f}')

                            trg_data_path = os.path.join(
                                out_invalid_dir, out_record_name)
                            os.makedirs(trg_data_path)

                            start_frame_id = (record_info['clip_start'] +
                                              dataset.start_index)
                            end_frame_id = (record_info['clip_end'] +
                                            dataset.start_index)
                            for frame_id in range(start_frame_id, end_frame_id):
                                img_name = f'{frame_id:05}.jpg'

                                shutil.copyfile(
                                    os.path.join(src_data_path, img_name),
                                    os.path.join(trg_data_path, img_name))
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.options
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.data.test.test_mode = True

    if cfg.test_cfg is None:
        cfg.test_cfg = dict(average_clips=args.average_clips)
    else:
        cfg.test_cfg.average_clips = args.average_clips

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # map labels from the txt annotation file into a dataframe
    df = pd.read_csv('/data2/phap/datasets/dataset3_test.txt', header=None)
    df.columns = ['full_name']
    df['file_name'] = df['full_name'].apply(lambda x: x.rsplit(' ')[0])
    df['true_label'] = df['full_name'].apply(lambda x: x.rsplit(' ')[-1])

    # build the model and load checkpoint
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    # convert softmax scores to predicted class indices (argmax)
    pred_arr = []
    for i in outputs:
        pred = np.argmax(i)
        pred_arr.append(pred)

    # add model outputs to the dataframe
    df['pred_label_orig'] = outputs
    df['pred_label'] = pred_arr

    # save csv file
    df.to_csv('dataset3_test_pred_w_rwf_model.csv')
    print('\nSuccess, csv file saved')

    rank, _ = get_dist_info()
    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)
        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.options
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # get rank
    rank, _ = get_dist_info()

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    if rank == 0:
        print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False
    )

    # build the model and load checkpoint
    model = build_model(
        cfg.model,
        train_cfg=None,
        test_cfg=cfg.test_cfg,
        class_sizes=dataset.class_sizes,
        class_maps=dataset.class_maps
    )

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    # load model weights
    load_checkpoint(model, args.checkpoint, map_location='cpu',
                    force_matching=True)

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)

            print('\nFinal metrics:')
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')