def test(model, dataset, test_data, use_gpu):
    """Score every test video and (optionally) dump results to HDF5.

    For each key in ``test_data``, runs ``model`` over the video's frame
    features, converts the per-position probabilities into a binary machine
    summary via ``vsum_tool.generate_summary``, and — when
    ``config.SAVE_RESULTS`` is set — writes ``<key>/score`` and
    ``<key>/machine_summary`` into ``result_test.h5`` under
    ``config.SAVE_DIR``.

    Args:
        model: trained scoring network; called as ``model(seq)`` on a
            ``(1, num_picks, feat_dim)`` float tensor — TODO confirm shape
            against the model definition.
        dataset: open h5py-style mapping of video keys to feature groups.
        test_data: iterable of dataset keys to evaluate.
        use_gpu: if True, move the input tensor to CUDA before the forward
            pass.

    Returns:
        None. Results are written to disk only.
    """
    print("===> Test")
    with torch.no_grad():
        model.eval()
        if config.SAVE_RESULTS:
            h5_res = h5py.File(
                os.path.join(config.SAVE_DIR, 'result_test.h5'), 'w')
        for key in test_data:
            seq = dataset[key]['features'][...]
            seq = torch.from_numpy(seq).unsqueeze(0)
            if use_gpu:
                # BUG FIX: the original called `seq.cuda()` without rebinding.
                # Tensor.cuda() returns a copy and does NOT move the tensor
                # in place, so the model previously received a CPU tensor even
                # with use_gpu=True. Rebind, matching evaluate().
                seq = seq.cuda()
            probs = model(seq)
            probs = probs.data.cpu().squeeze().numpy()
            cps = dataset[key]['change_points'][...]
            num_frames = dataset[key]['n_frames'][...]
            nfps = dataset[key]['n_frame_per_seg'][...].tolist()
            positions = dataset[key]['picks'][...]
            machine_summary = vsum_tool.generate_summary(
                probs, cps, num_frames, nfps, positions)
            if config.SAVE_RESULTS:
                h5_res.create_dataset(key + '/score', data=probs)
                h5_res.create_dataset(key + '/machine_summary',
                                      data=machine_summary)
        if config.SAVE_RESULTS:
            h5_res.close()
def evaluate(model, dataset, test_keys, use_gpu):
    """Evaluate the model on ``test_keys`` and return the mean F-score.

    For each key: runs the model on the video's features, builds a machine
    summary, computes an F-score with ``vsum_tool.evaluate_summary`` using
    metric 'avg' for TVSum-style datasets and 'max' otherwise, and optionally
    logs a per-video table (``config.VERBOSE``) and writes scores/summaries/
    ground-truth/F-score into ``result.h5`` (``config.SAVE_RESULTS``).

    Args:
        model: trained scoring network, called as ``model(seq)``.
        dataset: open h5py-style mapping of video keys to feature groups.
        test_keys: iterable of dataset keys to evaluate.
        use_gpu: if True, run the forward pass on CUDA.

    Returns:
        float: mean F-score over ``test_keys``.
    """
    print("===> Evaluation")
    with torch.no_grad():
        model.eval()
        fms = []
        # TVSum protocol averages per-user F-scores; SumMe takes the max.
        eval_metric = 'avg' if config.METRIC == 'tvsum' else 'max'
        if config.VERBOSE:
            table = [["No.", "Video", "F-Score"]]
        if config.SAVE_RESULTS:
            h5_res = h5py.File(
                os.path.join(config.SAVE_DIR, 'result.h5'), 'w')
        for key_idx, key in enumerate(test_keys):
            seq = dataset[key]['features'][...]
            seq = torch.from_numpy(seq).unsqueeze(0)
            if use_gpu:
                seq = seq.cuda()
            probs = model(seq)
            probs = probs.data.cpu().squeeze().numpy()
            cps = dataset[key]['change_points'][...]
            num_frames = dataset[key]['n_frames'][()]
            nfps = dataset[key]['n_frame_per_seg'][...].tolist()
            positions = dataset[key]['picks'][...]
            machine_summary = vsum_tool.generate_summary(
                probs, cps, num_frames, nfps, positions)
            # FIXME(review): the machine summary is compared against ITSELF,
            # so every F-score is trivially perfect and the returned mean is
            # meaningless. The real ground truth was
            # dataset[key]['user_summary'][...]; it appears to have been
            # disabled deliberately (presumably this dataset lacks
            # user_summary) — restore it once annotations are available.
            user_summary = machine_summary
            fm, _, _ = vsum_tool.evaluate_summary(
                machine_summary, user_summary, eval_metric)
            fms.append(fm)
            if config.VERBOSE:
                table.append([key_idx + 1, key, "{:.1%}".format(fm)])
            if config.SAVE_RESULTS:
                h5_res.create_dataset(key + '/score', data=probs)
                h5_res.create_dataset(key + '/machine_summary',
                                      data=machine_summary)
                h5_res.create_dataset(key + '/gtscore',
                                      data=dataset[key]['gtscore'][...])
                h5_res.create_dataset(key + '/fm', data=fm)
        if config.VERBOSE:
            print(tabulate(table))
        if config.SAVE_RESULTS:
            h5_res.close()
    mean_fm = np.mean(fms)
    print("Average F-Score {:.1%}".format(mean_fm))
    return mean_fm
def evaluate(model, dataset, test_keys, use_gpu):
    """Generate machine summaries for ``test_keys`` and save them to HDF5.

    NOTE(review): this redefines ``evaluate`` and therefore shadows the
    earlier definition in this file at import time — rename one of the two
    (e.g. ``export_results``) to keep both callable.

    Despite the name, this variant computes no F-scores: for each key it runs
    the model, builds a machine summary, and writes ``score``,
    ``machine_summary``, ``video_name`` and ``fps`` per video into
    ``result.h5`` under ``args.save_dir`` (created if missing).

    Args:
        model: trained scoring network, called as ``model(seq)``.
        dataset: open h5py-style mapping of video keys to feature groups.
        test_keys: iterable of dataset keys to process.
        use_gpu: if True, run the forward pass on CUDA.

    Returns:
        None. Results are written to disk only.
    """
    with torch.no_grad():
        model.eval()
        if not os.path.isdir(args.save_dir):
            os.mkdir(args.save_dir)
        h5_res = h5py.File(os.path.join(args.save_dir, 'result.h5'), 'w')
        # Removed dead code from the original: `fms` and `table` were built
        # but never used, and a loop summed `nfps` into a variable that
        # shadowed the `sum` builtin and was then discarded.
        for key in test_keys:
            seq = dataset[key]['features'][...]
            seq = torch.from_numpy(seq).unsqueeze(0)
            if use_gpu:
                seq = seq.cuda()
            probs = model(seq)
            probs = probs.data.cpu().squeeze().numpy()
            cps = dataset[key]['change_points'][...]
            num_frames = dataset[key]['n_frames'][()]
            nfps = dataset[key]['n_frame_per_seg'][...].tolist()
            positions = dataset[key]['picks'][...]
            video_name = dataset[key]['video_name'][()]
            fps = dataset[key]['fps'][()]
            machine_summary = vsum_tool.generate_summary(
                probs, cps, num_frames, nfps, positions)
            h5_res.create_dataset(key + '/score', data=probs)
            h5_res.create_dataset(key + '/machine_summary',
                                  data=machine_summary)
            h5_res.create_dataset(key + '/video_name', data=video_name)
            h5_res.create_dataset(key + '/fps', data=fps)
        # Close AFTER the loop: closing inside it would make every iteration
        # past the first write to a closed file and raise.
        h5_res.close()