import numpy as np
from itertools import combinations

import utils


def dataset_eer(patches_pl, session, descs_op, dataset, batch_size):
    '''
    Computes the Equal Error Rate (EER) of the descriptors that
    descs_op produces for the patches in dataset.

    Args:
        patches_pl: patch input tf placeholder for descs_op.
        session: tf session with descs_op variables loaded.
        descs_op: tf op for describing patches in patches_pl.
        dataset: dataset for which descriptors will be computed.
        batch_size: size of batch to describe patches from dataset.

    Returns:
        the computed EER.
    '''
    # extract descriptors for the entire dataset
    descs, labels = _dataset_descriptors(patches_pl, session, descs_op,
                                         dataset, batch_size)

    # score every pairwise comparison with the negative squared
    # euclidean distance, so that higher scores mean more similar
    examples = zip(descs, labels)
    pos = []
    neg = []
    for (desc1, label1), (desc2, label2) in combinations(examples, 2):
        dist = -np.sum((desc1 - desc2)**2)
        if label1 == label2:
            pos.append(dist)
        else:
            neg.append(dist)

    # compute eer
    eer = utils.eer(pos, neg)

    return eer
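# The utils.eer helper is used throughout but its source is not shown here.
# Below is a minimal sketch of what such a function might look like, assuming
# it takes genuine-pair scores (pos) and impostor-pair scores (neg) where
# higher means more similar; an illustration, not utils' actual implementation.
def eer_sketch(pos, neg):
    pos, neg = np.asarray(pos), np.asarray(neg)

    # sweep every observed score as an acceptance threshold and keep the
    # point where the larger of the two error rates is smallest, i.e.
    # where FAR and FRR (approximately) cross
    best = 1.0
    for thr in np.concatenate([pos, neg]):
        frr = np.mean(pos < thr)   # genuine pairs rejected
        far = np.mean(neg >= thr)  # impostor pairs accepted
        best = min(best, max(far, frr))

    return best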
def validation_eer(dataset, compute_descriptors):
    '''
    Computes the validation Equal Error Rate (EER) in dataset using
    descriptors computed with compute_descriptors, matching them with
    SIFT's original criterion with a distance ratio check threshold
    of 0.7, following PolyU-HRF's recognition protocol.

    Args:
        dataset: dataset for which EER should be computed.
        compute_descriptors: function that receives an image and keypoint
            detections and computes descriptors at these locations.

    Returns:
        computed EER.
    '''
    # describe patches with detections and get
    # subject and register ids from labels
    all_descs = []
    all_pts = []
    subject_ids = set()
    register_ids = set()
    id2index_dict = {}
    index = 0
    for img, pts, label in dataset:
        # add image detections to all detections
        all_pts.append(pts)

        # add patch descriptors to all descriptors
        descs = compute_descriptors(img, pts)
        all_descs.append(descs)

        # add ids to all ids
        subject_ids.add(label[0])
        register_ids.add(label[2])

        # make 'id2index' correspondence
        id2index_dict[tuple(label)] = index
        index += 1

    # convert dict into function
    id2index = lambda x: id2index_dict[tuple(x)]

    # convert sets into lists
    subject_ids = list(subject_ids)
    register_ids = list(register_ids)

    # match and compute eer
    pos, neg = polyu_match(all_descs, all_pts, subject_ids, register_ids,
                           id2index, matching.basic, thr=0.7)
    eer = utils.eer(pos, neg)

    return eer
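# matching.basic is referenced above as SIFT's original matching criterion.
# A minimal sketch of that criterion, assuming basic() scores a comparison by
# counting ratio-check matches; the real signature may differ, and pts1/pts2
# are accepted but unused here only to mirror how detections are passed along.
def basic_sketch(descs1, descs2, pts1=None, pts2=None, thr=0.7):
    score = 0
    for d in descs1:
        # euclidean distances from d to every descriptor in descs2
        dists = np.sqrt(np.sum((descs2 - d)**2, axis=1))

        # keep the match only if the nearest neighbor is sufficiently
        # closer than the second nearest (requires len(descs2) >= 2)
        nearest, second = np.partition(dists, 1)[:2]
        if nearest < thr * second:
            score += 1

    return score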
import numpy as np
import matplotlib.pyplot as plt

import utils


def random_roc():
    # random, fully overlapping scores should give
    # an eer of ~0.5 and a straight roc trade-off line
    pos = np.random.random(1000)
    neg = np.random.random(1000)

    # print eer
    print(utils.eer(pos, neg))

    # plot curve
    fars, frrs = utils.roc(pos, neg)
    plt.plot(fars, frrs)
    plt.show()
def separable_roc():
    # well-separated score distributions should give
    # a low eer and a convex roc curve
    pos = np.random.normal(1, 0.5, 1000)
    neg = np.random.normal(0, 0.5, 1000)

    # print eer
    print(utils.eer(pos, neg))

    # plot curve
    fars, frrs = utils.roc(pos, neg)
    plt.plot(fars, frrs)
    plt.show()
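# Both smoke tests rely on utils.roc returning FAR and FRR arrays over a
# threshold sweep. A minimal sketch under that assumed signature:
def roc_sketch(pos, neg):
    pos, neg = np.asarray(pos), np.asarray(neg)
    thresholds = np.sort(np.concatenate([pos, neg]))

    # false acceptance and false rejection rates at each threshold
    fars = np.array([np.mean(neg >= t) for t in thresholds])
    frrs = np.array([np.mean(pos < t) for t in thresholds])

    return fars, frrs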
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from tabulate import tabulate

# args, eer and test_input_load are assumed to be
# defined at module level, as in the original file


def test(model, DB, criterion):
    n_files = len(DB)
    n_frames, n_correct = 0, 0
    mean_cost, mean_accuracy, mean_AUC, mean_EER = 0, 0, 0, 0
    temp_AUC = 0

    # resolve the target device once instead of once per file
    device = torch.device('cuda:' + args.gpu_id)

    # evaluation mode, no gradients needed
    model.eval()
    with torch.no_grad():
        for i in range(n_files):
            feat_path = DB['filename'][i]
            label_path = DB['label_path'][i]
            inputs, targets = test_input_load(feat_path, label_path)
            if args.cuda:
                inputs, targets = inputs.to(device), targets.to(device)

            linear_out, sigmoid_out, _ = model(x=inputs)
            linear_out, sigmoid_out = linear_out.squeeze(0), sigmoid_out.squeeze(0)

            # zero out NaN outputs (x != x holds only for NaN entries)
            linear_out[linear_out != linear_out] = 0
            sigmoid_out[sigmoid_out != sigmoid_out] = 0

            temp_cost = criterion(linear_out, targets.float()).item()

            # frame-level accuracy at a 0.5 decision threshold
            pred = sigmoid_out >= 0.5
            n_correct += (pred.long() == targets.long()).sum().item()
            n_frames += len(targets)

            np_targets = targets.data.cpu().numpy()
            np_sigmoid_out = np.nan_to_num(sigmoid_out.data.cpu().numpy())

            ROC_AUC = roc_auc_score(np_targets, np_sigmoid_out)
            _, _, temp_eer = eer(np_targets.flatten(), np_sigmoid_out.flatten())

            temp_AUC += ROC_AUC
            mean_cost += temp_cost / n_files
            mean_EER += temp_eer / n_files

    mean_accuracy = 100. * n_correct / n_frames
    mean_AUC = temp_AUC / n_files

    print(tabulate(
        [['Averaged cost', mean_cost],
         ['Averaged AUC (%)', mean_AUC * 100],
         ['Averaged ACC (%)', mean_accuracy],
         ['Averaged EER (%)', mean_EER * 100]],
        tablefmt='rst'))

    return mean_accuracy, mean_AUC, mean_EER, mean_cost, temp_AUC, n_files
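# test() unpacks three values from eer(), suggesting that helper returns the
# FAR curve, the FRR curve and the EER itself. A minimal sketch under that
# assumption, not the project's actual implementation:
def eer_three_value_sketch(targets, scores):
    thresholds = np.sort(np.unique(scores))
    fars = np.array([np.mean(scores[targets == 0] >= t) for t in thresholds])
    frrs = np.array([np.mean(scores[targets == 1] < t) for t in thresholds])

    # eer is read off where the two error rates cross
    idx = np.argmin(np.abs(fars - frrs))
    return fars, frrs, (fars[idx] + frrs[idx]) / 2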
import os
import sys

import tensorflow as tf


def main():
    # parse descriptor flag and build the corresponding descriptor function
    compute_descriptors = None
    if FLAGS.descriptors == 'sift':
        compute_descriptors = utils.sift_descriptors
    elif FLAGS.descriptors == 'dp':
        if FLAGS.patch_size is None:
            raise TypeError('Patch size is required when using dp descriptor')

        compute_descriptors = lambda img, pts: utils.dp_descriptors(
            img, pts, FLAGS.patch_size)
    else:
        if FLAGS.model_dir_path is None:
            raise TypeError(
                'Trained model path is required when using trained descriptor')
        if FLAGS.patch_size is None:
            raise TypeError(
                'Patch size is required when using trained descriptor')

        # create net graph and restore saved model
        from models import description

        img_pl, _ = utils.placeholder_inputs()
        net = description.Net(img_pl, training=False)
        sess = tf.Session()
        print('Restoring model in {}...'.format(FLAGS.model_dir_path))
        utils.restore_model(sess, FLAGS.model_dir_path)
        print('Done')

        compute_descriptors = lambda img, pts: utils.trained_descriptors(
            img, pts, FLAGS.patch_size, sess, img_pl, net.descriptors)

    # parse matching mode
    if FLAGS.mode == 'basic':
        match = matching.basic
    else:
        match = matching.spatial

    # resolve image/detection paths and id sets for the requested fold
    if FLAGS.fold == 'DBI-train':
        imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
        pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Training')

        subject_ids = [
            6, 9, 11, 13, 16, 18, 34, 41, 42, 47, 62, 67, 118, 186, 187, 188,
            196, 198, 202, 207, 223, 225, 226, 228, 242, 271, 272, 278, 287,
            293, 297, 307, 311, 321, 323
        ]
        register_ids = [1, 2, 3]
        session_ids = [1, 2]
    else:
        if FLAGS.fold == 'DBI-test':
            imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
            pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Test')
        else:
            imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
            pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBII')

        # DBI-test and DBII share subject ids 1-100, 105-120,
        # 125-144 and 157-168
        subject_ids = (list(range(1, 101)) + list(range(105, 121)) +
                       list(range(125, 145)) + list(range(157, 169)))
        register_ids = [1, 2, 3, 4, 5]
        session_ids = [1, 2]

    # load images and detections, compute descriptors
    # and make index correspondences
    print('Loading images and detections, and computing descriptors...')
    all_descs, all_pts, id2index = load_dataset(imgs_dir_path, pts_dir_path,
                                                subject_ids, session_ids,
                                                register_ids,
                                                compute_descriptors)
    print('Done')

    print('Matching...')
    pos, neg = polyu_match(all_descs, all_pts, subject_ids, register_ids,
                           id2index, match, thr=FLAGS.thr)
    print('Done')

    # print equal error rate
    print('EER = {}'.format(utils.eer(pos, neg)))

    # save results to file
    if FLAGS.results_path is not None:
        print('Saving results to file {}...'.format(FLAGS.results_path))

        # create directory tree, if non-existing
        dirname = os.path.abspath(os.path.dirname(FLAGS.results_path))
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # save comparison scores, labeled 1 for same-subject
        # (genuine) pairs and 0 for different-subject (impostor) ones
        with open(FLAGS.results_path, 'w') as f:
            for score in pos:
                print(1, score, file=f)
            for score in neg:
                print(0, score, file=f)

        # save invoking command string
        with open(FLAGS.results_path + '.cmd', 'w') as f:
            print(*sys.argv, file=f)

        print('Done')
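# Hypothetical invocation of the matching script above (the script name is a
# placeholder; the flags are the ones main() reads from FLAGS):
#
#   python match.py \
#       --polyu_dir_path /path/to/polyu-hrf \
#       --pts_dir_path /path/to/detections \
#       --descriptors sift \
#       --mode basic \
#       --fold DBI-test \
#       --thr 0.7 \
#       --results_path results/dbi-test.txt
#
# The saved results file then holds one "label score" pair per line, with
# label 1 for genuine comparisons and 0 for impostor ones.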
import sys

import utils

if __name__ == '__main__':
    # read each comparisons file and print its eer
    for path in sys.argv[1:]:
        if path.endswith('.txt'):
            pos = []
            neg = []
            with open(path, 'r') as f:
                for line in f:
                    t, score = line.split()
                    score = float(score)
                    if int(t) == 1:
                        pos.append(score)
                    else:
                        neg.append(score)

            print(path, utils.eer(pos, neg))
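# Hypothetical usage of the script above, pointing it at one or more
# comparison files written by main() (script and file names are placeholders):
#
#   python compute_eer.py results/dbi-test.txt results/dbii.txt
#
# Each .txt argument is parsed back into genuine and impostor score lists
# and its EER is printed next to its path.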