def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    # Load the per-video clip scores dumped by the RGB and flow test runs.
    rgb_results = args.rgb
    flow_results = args.flow
    outputs_rgb = mmcv.load(rgb_results)
    outputs_flow = mmcv.load(flow_results)
    # Fuse the two streams by summing the clip scores element-wise.
    outputs = [
        outputs_rgb[i] + outputs_flow[i] for i in range(len(outputs_rgb))
    ]

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

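# A minimal, self-contained sketch (synthetic numpy arrays, hypothetical
# shapes) of what the fusion above assumes: each entry of outputs_rgb /
# outputs_flow is a (num_clips, num_classes) score array for one video, so
# element-wise addition fuses the streams clip by clip before averaging.
import numpy as np

num_clips, num_classes = 10, 51  # e.g. HMDB51 has 51 classes
rgb_scores = np.random.randn(num_clips, num_classes)
flow_scores = np.random.randn(num_clips, num_classes)

fused = rgb_scores + flow_scores  # what outputs[i] holds after fusion
video_score = fused.mean(axis=0)  # the per-video result used for accuracy
print(video_score.shape)          # (51,)
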
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    # Dump per-video scores, labels and video names for later analysis.
    import datetime
    currentDT = datetime.datetime.now()
    with open('data/nturgbd/nturgbd_val_split_generalization_rawframes.txt'
              ) as f:
        video_names = [l.strip().split(' ')[0] for l in f.readlines()]
    with open(
            osp.join(args.checkpoint + '.result_%s.pkl' %
                     currentDT.strftime("%Y-%m-%d_%H:%M:%S")), 'wb') as f:
        pickle.dump([results, gt_labels, video_names], f)

    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

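# Hedged sketch of reading back the timestamped result dump written above. It
# assumes the pickle layout [results, gt_labels, video_names] from the code
# above; the file name below is a placeholder, not a real artifact.
import pickle

import numpy as np

with open('checkpoint.pth.result_2020-01-01_00:00:00.pkl', 'rb') as f:
    results, gt_labels, video_names = pickle.load(f)

for name, scores, gt in zip(video_names, results, gt_labels):
    print('{}: pred={}, gt={}'.format(name, int(np.argmax(scores)), gt))
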
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError(
            "By default, we use distributed testing, "
            "so the launcher should be pytorch")
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = MMDistributedDataParallel(model.cuda())
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".
                  format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]

        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

def main():
    global args
    args = parse_args()

    rgb_cfg = mmcv.Config.fromfile(args.rgb_config)
    flow_cfg = mmcv.Config.fromfile(args.flow_config)

    if args.multi:
        # Save the fused accuracy for every dumped checkpoint.
        rgb_infos = []
        rgb_files = glob.glob(os.path.join(rgb_cfg.work_dir, 'test_*.pkl'))
        idx_file = {
            int(os.path.basename(x).split('_')[1].split('.')[0]): x
            for x in rgb_files
        }
        sort_idx = sorted(idx_file.keys())
        for idx in sort_idx:
            file = idx_file[idx]
            rgb_infos.append(pickle.load(open(file, 'rb')))

        flow_infos = []
        flow_files = glob.glob(os.path.join(flow_cfg.work_dir, 'test_*.pkl'))
        idx_file = {
            int(os.path.basename(x).split('_')[1].split('.')[0]): x
            for x in flow_files
        }
        sort_idx = sorted(idx_file.keys())
        for idx in sort_idx:
            file = idx_file[idx]
            flow_infos.append(pickle.load(open(file, 'rb')))

        # Fuse rgb + flow with a fixed ratio of 1.5.
        ratio = 1.5
        out_info = []
        for rgb_info, flow_info in zip(rgb_infos, flow_infos):
            gt_labels = []
            out_pred = []
            for k in rgb_info.keys():
                gt_labels.append(rgb_info[k][1])
                out_pred.append(rgb_info[k][0][0] + ratio * flow_info[k][0][0])
            mean_acc = mean_class_accuracy(out_pred, gt_labels)
            out_info.append(mean_acc)

        with open(os.path.join(flow_cfg.work_dir, 'two_stream.pkl'),
                  'wb') as f:
            pickle.dump(out_info, f)
    else:
        rgb_info = pickle.load(
            open(os.path.join(rgb_cfg.work_dir, 'test.pkl'), 'rb'))
        flow_info = pickle.load(
            open(os.path.join(flow_cfg.work_dir, 'test.pkl'), 'rb'))

        # Check that the two result sets cover the same videos.
        for k in rgb_info.keys():
            assert k in flow_info

        ratio = float(args.ratio)
        gt_labels = []
        out_pred = []
        for k in rgb_info.keys():
            gt_labels.append(rgb_info[k][1])
            if args.way == 'avg':
                # Weighted sum of the raw scores; applying a softmax to each
                # stream before fusing is a possible variant:
                # out_pred.append(softmax(rgb_info[k][0], dim=0) +
                #                 ratio * softmax(flow_info[k][0], dim=0))
                out_pred.append(rgb_info[k][0] + ratio * flow_info[k][0])
            elif args.way == 'max':
                # Take whichever stream is more confident.
                rgb = softmax(rgb_info[k][0], dim=0)
                flow = softmax(flow_info[k][0], dim=0)
                score = rgb if rgb.max() > flow.max() else flow
                out_pred.append(score)
            else:
                print('unknown fusion way:', args.way)
                raise NotImplementedError

        top1, top5 = top_k_accuracy(out_pred, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(out_pred, gt_labels)
        print("Mean Class Accuracy = {:.04f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.04f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.04f}".format(top5 * 100))

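# A small illustration (hypothetical scores, not repo data) of why the 'avg'
# fusion above is sensitive to score scale: raw scores from the two streams
# can live on different ranges, so applying a softmax to each stream before
# the weighted sum (the commented-out variant) puts both on [0, 1] and can
# change the fused decision.
import numpy as np

def np_softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

rgb = np.array([5.0, 0.0, 0.0])    # RGB is confident about class 0
flow = np.array([0.0, 8.0, 7.9])   # flow is torn between classes 1 and 2
ratio = 1.5

raw_fused = rgb + ratio * flow                           # picks class 1
calibrated = np_softmax(rgb) + ratio * np_softmax(flow)  # picks class 0
print(raw_fused.argmax(), calibrated.argmax())           # 1 0
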
import pickle
import sys

from mmaction.core.evaluation.accuracy import (top_k_accuracy,
                                               mean_class_accuracy)

with open(sys.argv[1], 'rb') as f:
    results, gt_labels = pickle.load(f)

top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
mean_acc = mean_class_accuracy(results, gt_labels)
print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

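# For reference, a minimal numpy sketch of what the two metrics compute. This
# approximates the mmaction helpers used throughout these scripts; it is not
# the library code itself.
import numpy as np

def top_k_accuracy_sketch(scores, labels, k=(1, 5)):
    scores = np.stack(scores)  # (num_videos, num_classes)
    labels = np.asarray(labels)
    res = []
    for kk in k:
        topk = np.argsort(scores, axis=1)[:, -kk:]  # indices of the kk best
        hit = np.any(topk == labels[:, None], axis=1)
        res.append(hit.mean())
    return res

def mean_class_accuracy_sketch(scores, labels):
    pred = np.argmax(np.stack(scores), axis=1)
    labels = np.asarray(labels)
    # Per-class accuracy, averaged uniformly over the classes present.
    accs = [np.mean(pred[labels == c] == c) for c in np.unique(labels)]
    return float(np.mean(accs))
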
def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # must use fcn testing
    cfg.model.update({'fcn_testing': True})
    cfg.model['cls_head'].update({'fcn_testing': True})

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.testfile != '':
        cfg.data.test.ann_file = args.testfile

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    assert distributed, "We only support distributed testing"

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".
                  format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]

        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

def main():
    # Pin testing to the second GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[1])

        # Fully connected layer weights as a numpy array, used for CAM.
        params = list(model.parameters())
        weight_softmax = np.squeeze(params[-2].data.cpu().numpy())

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs, inputs = single_test(model, data_loader)
    else:
        # Note: this branch returns only outputs; the CAM generation below
        # assumes the single-GPU path, which also returns the inputs.
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    num_videos = len(outputs)
    class_name = 'YoYo'
    os.mkdir('data/CAM_imgs/' + class_name)
    for k in range(0, num_videos):
        os.mkdir('data/CAM_imgs/' + class_name + '/CAMs_{:02d}'.format(k))
        # change the dir of results.pkl to tools/
        idx = get_top_5_index("tools/results.pkl", k)
        conv_feat = pickle.load(
            open("tools/hook_features/feat_{:02d}.pkl".format(k), 'rb'),
            encoding='utf-8')
        conv_feat = conv_feat.cpu().numpy()
        # Generate class activation mapping for the top-1 prediction.
        CAMs = returnCAM(conv_feat, weight_softmax, [idx[0]])
        single_input = inputs[k].numpy()
        writeCAMs(class_name, CAMs, single_input, k)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))

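# returnCAM and writeCAMs are not defined in this snippet. Below is a hedged
# sketch of returnCAM based on the standard CAM formulation (Zhou et al.,
# CVPR 2016): project the final conv feature map through the classifier
# weights of the chosen class. The (nc, h, w) feature layout and the output
# size are assumptions, not the repo's actual implementation.
import cv2
import numpy as np

def returnCAM_sketch(feature_conv, weight_softmax, class_idx,
                     size_upsample=(256, 256)):
    # feature_conv is assumed to be (nc, h, w).
    nc, h, w = feature_conv.shape
    flat = feature_conv.reshape(nc, h * w)
    cams = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(flat).reshape(h, w)  # weighted sum of maps
        cam = cam - cam.min()                              # normalize to [0, 255]
        cam = np.uint8(255 * cam / cam.max())
        cams.append(cv2.resize(cam, size_upsample))
    return cams
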
def main():
    args = parse_args()

    assert args.out, ('Please specify the output path for results')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    if cfg.model.get('necks', None) is not None:
        cfg.model.necks.aux_head_config = None

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    if args.fcn_testing:
        cfg.model['cls_head'].update({'fcn_testing': True})
        cfg.model.update({'fcn_testing': True})
    if args.flip:
        cfg.model.update({'flip': True})

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.ignore_cache and args.out is not None:
        if not distributed:
            if args.gpus == 1:
                model = build_recognizer(
                    cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
                load_checkpoint(
                    model, args.checkpoint, strict=True, map_location='cpu')
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(recognizers, model_args.pop('type'))
                outputs = parallel_test(
                    model_type,
                    model_args,
                    args.checkpoint,
                    dataset,
                    _data_func,
                    range(args.gpus),
                    workers_per_gpu=args.proc_per_gpu)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                dist=distributed,
                shuffle=False)
            model = build_recognizer(
                cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
            load_checkpoint(
                model, args.checkpoint, strict=True, map_location='cpu')
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        # Reuse cached results if they exist.
        try:
            if distributed:
                rank, _ = get_dist_info()
                if rank == 0:
                    outputs = mmcv.load(args.out)
            else:
                outputs = mmcv.load(args.out)
        except OSError:
            raise FileNotFoundError(args.out)

    rank, _ = get_dist_info()
    if args.out:
        if rank == 0:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)

            gt_labels = []
            for i in range(len(dataset)):
                ann = dataset.get_ann_info(i)
                gt_labels.append(ann['label'])

            # Flatten per-clip scores into one result per sample.
            results = []
            for res in outputs:
                res_list = [res[i] for i in range(res.shape[0])]
                results += res_list
            results = results[:len(gt_labels)]
            print('results_length', len(results))

            top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
            mean_acc = mean_class_accuracy(results, gt_labels)
            non_mean_acc = non_mean_class_accuracy(results, gt_labels)

            if args.log:
                with open(args.log, 'w') as f:
                    f.write(f'Testing ckpt from {args.checkpoint}\n')
                    f.write(f'Testing config from {args.config}\n')
                    f.write("Mean Class Accuracy = {:.04f}\n".format(
                        mean_acc * 100))
                    f.write("Top-1 Accuracy = {:.04f}\n".format(top1 * 100))
                    f.write("Top-5 Accuracy = {:.04f}\n".format(top5 * 100))
            else:
                print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
                print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
                print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
                print("Non mean Class Accuracy", non_mean_acc)
                print('saving non_mean acc')

def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # Resolve the checkpoint path relative to the work dir.
    if args.checkpoint is None:
        args.checkpoint = os.path.join(cfg.work_dir, 'latest.pth')
    else:
        args.checkpoint = os.path.join(
            cfg.work_dir, 'epoch_%d.pth' % int(args.checkpoint))

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError(
            "By default, we use distributed testing, "
            "so the launcher should be pytorch")
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)
    outputs = multi_test(model, data_loader, cfg.work_dir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        # Save per-sample predictions so the two-stream fusion script can
        # consume them later.
        data_path = []
        gt_labels = []
        pre = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])
            data_path.append(ann['path'])
            pre.append(outputs[i].mean(axis=0))
        save_data = {
            path: [p, g]
            for path, p, g in zip(data_path, pre, gt_labels)
        }
        with open(os.path.join(cfg.work_dir, 'test.pkl'), 'wb') as f:
            pickle.dump(save_data, f)

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".
                  format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]

        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        with open(os.path.join(cfg.work_dir, 'test_result.txt'), 'w') as f:
            f.write('model is :' + args.checkpoint + '\n')
            f.write("Mean Class Accuracy = {:.04f}\n".format(mean_acc * 100))
            f.write("Top-1 Accuracy = {:.04f}\n".format(top1 * 100))
            f.write("Top-5 Accuracy = {:.04f}\n".format(top5 * 100))
        print("Mean Class Accuracy = {:.04f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.04f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.04f}".format(top5 * 100))

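# Hedged sketch of consuming the test.pkl dict written above: it maps each
# sample path to [mean clip score, ground-truth label], which is the layout
# the two-stream fusion script earlier in this section reads back. The
# work_dir below is a placeholder.
import os
import pickle

import numpy as np

with open(os.path.join('work_dir', 'test.pkl'), 'rb') as f:
    save_data = pickle.load(f)

for path, (pred, gt) in save_data.items():
    print(path, int(np.argmax(pred)), gt)
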