def evaluate(sess, embeddings, labels, actual_issame, batch_size, seed, nrof_folds, log_dir, step, summary_writer):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')
    embedding_size = embeddings.get_shape()[1]
    nrof_images = len(actual_issame) * 2
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    for i in range(nrof_batches):
        t = time.time()
        emb, lab = sess.run([embeddings, labels])
        emb_array[lab] = emb
        print('Batch %d in %.3f seconds' % (i, time.time() - t))
    _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, seed, actual_issame, nrof_folds=nrof_folds)
    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame,
             batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric,
             subtract_mean, use_flipped_images, use_fixed_image_standardization):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array,
                          labels_placeholder: labels_array,
                          control_placeholder: control_array})

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non-flipped versions of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds,
                                                     distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    stat['lfw_accuracy'][epoch - 1] = np.mean(accuracy)
    stat['lfw_valrate'][epoch - 1] = val
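# Hedged aside: a minimal, self-contained sketch of how the control vector above
# encodes per-image preprocessing. In the reference facenet implementation the
# control values are small integer bit flags; the two flag values below mirror
# that scheme but are restated here as an assumption, purely for illustration.
import numpy as np

FIXED_STANDARDIZATION = 8   # assumed flag values in facenet's bit-flag style
FLIP = 16

nrof_images = 6             # toy case: 3 originals, each repeated once for its flip
labels_array = np.expand_dims(np.arange(nrof_images), 1)
control_array = np.zeros_like(labels_array, np.int32)
control_array += FIXED_STANDARDIZATION          # standardize every image
control_array += (labels_array % 2) * FLIP      # flip every second (odd-indexed) copy
print(control_array.ravel())                    # -> [ 8 24  8 24  8 24]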
def lfw_test(nbatch):
    print('testing lfw..')
    embeddings_list = []
    for i in range(len(lfw_data_list)):
        lfw_data = lfw_data_list[i]
        embeddings = None
        ba = 0
        while ba < lfw_data.shape[0]:
            bb = min(ba + args.batch_size, lfw_data.shape[0])
            _data = nd.slice_axis(lfw_data, axis=0, begin=ba, end=bb)
            _label = nd.ones((bb - ba,))
            db = mx.io.DataBatch(data=(_data,), label=(_label,))
            model.forward(db, is_train=False)
            net_out = model.get_outputs()
            _embeddings = net_out[0].asnumpy()
            if embeddings is None:
                embeddings = np.zeros((lfw_data.shape[0], _embeddings.shape[1]))
            embeddings[ba:bb, :] = _embeddings
            ba = bb
        embeddings_list.append(embeddings)

    acc_list = []
    embeddings = embeddings_list[0]
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    embeddings = np.concatenate(embeddings_list, axis=1)
    embeddings = sklearn.preprocessing.normalize(embeddings)
    print(embeddings.shape)
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy-Flip: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    pca = PCA(n_components=128)
    embeddings = pca.fit_transform(embeddings)
    embeddings = sklearn.preprocessing.normalize(embeddings)
    print(embeddings.shape)
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
    acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy-PCA: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    return max(acc_list)
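# Hedged aside: a minimal sketch of the PCA + row-wise L2 normalization step used
# above, on random vectors instead of real embeddings. Shapes are toy values; the
# code above uses n_components=128 on (n_images, concat_dim) arrays.
import numpy as np
import sklearn.preprocessing
from sklearn.decomposition import PCA

embeddings = np.random.RandomState(0).randn(20, 512)  # 20 toy images, 512-D features
pca = PCA(n_components=8)                             # 8 fits the toy sample count
reduced = pca.fit_transform(embeddings)
reduced = sklearn.preprocessing.normalize(reduced)    # each row scaled to unit L2 norm
print(reduced.shape, np.linalg.norm(reduced[0]))      # -> (20, 8) 1.0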
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame,
                                                                 nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
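# Hedged aside: the EER root-finding used above, isolated on synthetic (fpr, tpr)
# points rather than real LFW output. The EER is the operating point where the
# false-accept rate equals the false-reject rate (1 - TPR), i.e. the root of
# f(x) = 1 - x - interp_tpr(x) on [0, 1]; brentq brackets that root.
import numpy as np
from scipy import interpolate
from scipy.optimize import brentq

fpr = np.array([0.0, 0.1, 0.3, 1.0])   # synthetic ROC points
tpr = np.array([0.0, 0.7, 0.9, 1.0])
interp_tpr = interpolate.interp1d(fpr, tpr)
eer = brentq(lambda x: 1. - x - interp_tpr(x), 0., 1.)  # f(0) > 0, f(1) < 0, so a root exists
print('EER: %1.3f' % eer)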
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame,
             batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images,
             use_fixed_image_standardization):
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array,
                          labels_placeholder: labels_array,
                          control_placeholder: control_array})

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non-flipped versions of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds,
                                                         distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds,
             log_dir, step, summary_writer, best_accuracy, saver_save, model_dir, subdir):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.arange(0, len(image_paths)), 1)
    image_paths_array = np.expand_dims(np.array(image_paths), 1)
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})

    embedding_size = embeddings.get_shape()[1]
    nrof_images = len(actual_issame) * 2
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for _ in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab] = emb

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)
    # Checkpoint the model whenever the LFW accuracy improves
    if np.mean(accuracy) > best_accuracy:
        save_variables_and_metagraph(sess, saver_save, summary_writer, model_dir, subdir, step)
        best_accuracy = np.mean(accuracy)
    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    return best_accuracy
def evaluate(sess, image_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder,
             batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op,
             actual_issame, batch_size, seed, nrof_folds, log_dir, step, summary_writer, embedding_size):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images: ', end='')
    nrof_images = len(actual_issame) * 2
    assert len(image_paths) == nrof_images
    labels_array = np.reshape(np.arange(nrof_images), (-1, 3))
    image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
    emb_array = np.zeros((nrof_images, embedding_size))
    nrof_batches = int(np.ceil(nrof_images / batch_size))
    label_check_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        # Use a smaller batch for the final, possibly partial, batch; a separate
        # name avoids clobbering batch_size, which the index arithmetic relies on
        cur_batch_size = min(nrof_images - i * batch_size, batch_size)
        emb, lab = sess.run([embeddings, labels_batch],
                            feed_dict={batch_size_placeholder: cur_batch_size,
                                       learning_rate_placeholder: 0.0,
                                       phase_train_placeholder: False})
        emb_array[lab, :] = emb
        label_check_array[lab] = 1
    print('%.3f' % (time.time() - start_time))
    assert np.all(label_check_array == 1)
    _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, seed, actual_issame, nrof_folds=nrof_folds)
    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
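# Hedged aside: the (-1, 3) reshape above packs paths and labels into rows of
# three, matching an enqueue op that expects triplet-shaped inputs; -1 lets
# numpy infer the row count from the total size.
import numpy as np
print(np.reshape(np.arange(12), (-1, 3)))
# [[ 0  1  2]
#  [ 3  4  5]
#  [ 6  7  8]
#  [ 9 10 11]]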
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            facenet.load_model(args.model)

            # TODO: replace near-zero parameters with 0
            # trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            # print("trainable_vars", len(trainable_vars))
            # zero_threshold = 1e-2
            # n_var_elements = 0
            # n_non_zero = 0
            # n_non_zero_after = 0
            # assign_ops = []
            # for var in trainable_vars:
            #     matrix = var.eval(sess)
            #     # if var.name.endswith("weights:0"):
            #     #     print(matrix)
            #     n_var_elements += np.size(matrix)
            #     n_non_zero += np.count_nonzero(matrix)
            #     # matrix[np.abs(matrix) <= zero_threshold] = 0
            #     # n_non_zero_after += np.count_nonzero(matrix)
            #     # assign_ops.append(var.assign(matrix))
            # print("non_zero: ", n_non_zero, n_var_elements, n_non_zero / n_var_elements)
            # print("non_zero after: ", n_non_zero_after, n_var_elements, n_non_zero_after / n_var_elements)
            # sess.run(assign_ops)

            # Get input and output tensors
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            start_time = time.time()
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
            duration = time.time() - start_time
            print('Forward pass duration: %.3f seconds' % duration)

            tpr, fpr, recall, precision, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Precision: %1.3f+-%1.3f' % (np.mean(precision), np.std(precision)))
            print('Recall: %1.3f+-%1.3f' % (np.mean(recall), np.std(recall)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
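# Hedged aside: the ceil-based batching arithmetic shared by these scripts, in
# isolation with toy numbers; the last batch is allowed to be smaller.
import math
nrof_images, batch_size = 13, 5
nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))   # -> 3
for i in range(nrof_batches):
    start, end = i * batch_size, min((i + 1) * batch_size, nrof_images)
    print(start, end)   # -> (0, 5) (5, 10) (10, 13)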
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame,
             batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images,
             use_fixed_image_standardization):
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array,
                          labels_placeholder: labels_array,
                          control_placeholder: control_array})

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non-flipped versions of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds,
                                                         distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
def test(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, args.model)
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.test_list_dir))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.test_data_dir), pairs, args.test_list_dir)

            image_size = args.image_size
            print('image size', image_size)
            images_placeholder = tf.placeholder(tf.float32,
                                                shape=(None, args.image_height, args.image_width, 3),
                                                name='image')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            # Network definition
            prelogits = network.infer(images_placeholder, args.embedding_size)
            if args.fc_bn:
                print('do batch norm after network')
                prelogits = slim.batch_norm(prelogits, is_training=phase_train_placeholder,
                                            epsilon=1e-5, scale=True, scope='softmax_bn')
            # embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
            embeddings = tf.identity(prelogits)
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on testing images')
            batch_size = args.test_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                print('handling {}/{}'.format(start_index, nrof_images))
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = utils.load_data(paths_batch, False, False, args.image_height, args.image_width,
                                         False, (args.image_height, args.image_width))
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                feats = sess.run(embeddings, feed_dict=feed_dict)
                # Not sure whether L2 normalization should always be applied here; it depends on the model.
                feats = utils.l2_normalize(feats)
                emb_array[start_index:end_index, :] = feats

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, 0.001,
                                                                 nrof_folds=args.test_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)  # fill_value="extrapolate"
            print('Equal Error Rate (EER): %1.3f' % eer)

            tpr1, fpr1, accuracy1, val1, val_std1, far1 = lfw.evaluate(emb_array, actual_issame, 0.0001,
                                                                       nrof_folds=args.test_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy1), np.std(accuracy1)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val1, val_std1, far1))
            auc = metrics.auc(fpr1, tpr1)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr1, tpr1)(x), 0., 1.)  # fill_value="extrapolate"
            print('Equal Error Rate (EER): %1.3f' % eer)
def test_cosface():
    with open('feature.txt', 'w') as writer:
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.join(data_dir, 'data/pairs.txt'))
        # pairs = pairs[0:100]
        # pair_len = len(pairs)
        # pairs = np.concatenate((pairs[0:400], pairs[pair_len-401:pair_len-1]))
        # Get the paths for the corresponding images
        paths, actual_issame = lfw.get_paths(os.path.join(data_dir, 'dataset/lfw-112x96'), pairs, 'jpg')
        embedding_size = cosface_wrapper.embedding_size

        # Run forward pass to calculate embeddings
        print('Running forward pass on LFW images')
        batch_size = 200
        nrof_images = len(paths)
        nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
        emb_array = np.zeros((nrof_images, embedding_size))
        for i in range(nrof_batches):
            start_index = i * batch_size
            print('handling {}/{}'.format(start_index, nrof_images))
            end_index = min((i + 1) * batch_size, nrof_images)
            paths_batch = paths[start_index:end_index]
            # images = utils.load_data(paths_batch, False, False, cosface_wrapper.image_height,
            #                          cosface_wrapper.image_width, False,
            #                          (cosface_wrapper.image_height, cosface_wrapper.image_width))
            images = np.zeros((len(paths_batch), cosface_wrapper.image_height, cosface_wrapper.image_width, 3))
            for j in range(len(paths_batch)):  # 'j' avoids shadowing the batch index 'i'
                img = cv2.imread(paths_batch[j])
                images[j, :, :, :] = cosface.data_preprocess(img)
            start_time = time.time()
            feats = cosface.infer(images)
            cost_time = time.time() - start_time
            print('cost time:{}, speed:{} /s'.format(cost_time, batch_size * 1.0 / cost_time))
            emb_array[start_index:end_index, :] = feats
            for write_i in range(len(paths_batch)):
                writer.write('{}\n'.format(json.dumps({'path': paths_batch[write_i],
                                                       'feat': feats[write_i].tolist()})))

        tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=10)
        print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
        print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
        auc = metrics.auc(fpr, tpr)
        print('Area Under Curve (AUC): %1.3f' % auc)
        eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
        print('Equal Error Rate (EER): %1.3f' % eer)
def custom_facenet_evaluation(args):
    tf.reset_default_graph()
    # Read the directory containing images
    pairs = read_pairs(args.insightface_pair)
    image_list, issame_list = get_paths_with_pairs(args.facenet_dataset_dir, pairs)

    # Evaluate custom dataset with facenet pre-trained model
    print("Getting embeddings with facenet pre-trained model")
    with tf.Graph().as_default():
        # Getting batched images by TF dataset
        tf_dataset = facenet.tf_gen_dataset(image_list=image_list, label_list=None,
                                            nrof_preprocess_threads=args.nrof_preprocess_threads,
                                            image_size=args.facenet_image_size, method='cache_slices',
                                            BATCH_SIZE=args.batch_size, repeat_count=1,
                                            to_float32=True, shuffle=False)
        tf_dataset_iterator = tf_dataset.make_initializable_iterator()
        tf_dataset_next_element = tf_dataset_iterator.get_next()

        with tf.Session() as sess:
            sess.run(tf_dataset_iterator.initializer)
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
            image_batch = tf.placeholder(name='img_inputs',
                                         shape=[None, args.facenet_image_size, args.facenet_image_size, 3],
                                         dtype=tf.float32)
            label_batch = tf.placeholder(name='img_labels', shape=[None, ], dtype=tf.int32)

            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch,
                         'phase_train': phase_train_placeholder}
            facenet.load_model(args.facenet_model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            batch_size = args.batch_size
            input_placeholder = image_batch

            print('getting embeddings..')
            total_time = 0
            batch_number = 0
            embeddings_array = None
            embeddings_array_flip = None
            while True:
                try:
                    images = sess.run(tf_dataset_next_element)
                    data_tmp = images.copy()  # fix issues #4
                    for i in range(data_tmp.shape[0]):
                        data_tmp[i, ...] -= 127.5
                        data_tmp[i, ...] *= 0.0078125
                        data_tmp[i, ...] = cv2.cvtColor(data_tmp[i, ...], cv2.COLOR_RGB2BGR)

                    # Getting flipped (left-right) batched images by TF dataset
                    data_tmp_flip = images.copy()  # fix issues #4
                    for i in range(data_tmp_flip.shape[0]):
                        data_tmp_flip[i, ...] = np.fliplr(data_tmp_flip[i, ...])
                        data_tmp_flip[i, ...] -= 127.5
                        data_tmp_flip[i, ...] *= 0.0078125
                        data_tmp_flip[i, ...] = cv2.cvtColor(data_tmp_flip[i, ...], cv2.COLOR_RGB2BGR)

                    start_time = time.time()
                    mr_feed_dict = {input_placeholder: data_tmp, phase_train_placeholder: False}
                    mr_feed_dict_flip = {input_placeholder: data_tmp_flip, phase_train_placeholder: False}
                    _embeddings = sess.run(embeddings, mr_feed_dict)
                    _embeddings_flip = sess.run(embeddings, mr_feed_dict_flip)
                    if embeddings_array is None:
                        embeddings_array = np.zeros((len(image_list), _embeddings.shape[1]))
                        embeddings_array_flip = np.zeros((len(image_list), _embeddings_flip.shape[1]))
                    try:
                        embeddings_array[batch_number * batch_size:
                                         min((batch_number + 1) * batch_size, len(image_list)), ...] = _embeddings
                        embeddings_array_flip[batch_number * batch_size:
                                              min((batch_number + 1) * batch_size, len(image_list)), ...] = _embeddings_flip
                        # print('try: ', batch_number * batch_size, min((batch_number + 1) * batch_size, len(image_list)))
                    except ValueError:
                        print('batch_number*batch_size value is %d min((batch_number+1)*batch_size, len(image_list)) %d,'
                              ' batch_size %d, data.shape[0] %d' %
                              (batch_number * batch_size,
                               min((batch_number + 1) * batch_size, len(image_list)),
                               batch_size, images.shape[0]))
                        print('except: ', batch_number * batch_size,
                              min((batch_number + 1) * batch_size, images.shape[0]))

                    duration = time.time() - start_time
                    batch_number += 1
                    total_time += duration
                except tf.errors.OutOfRangeError:
                    print('tf.errors.OutOfRangeError, Reinitialize tf_dataset_iterator')
                    sess.run(tf_dataset_iterator.initializer)
                    break

            print(f"total_time: {total_time}")

            # Average L2 norm over all (original and flipped) embeddings
            _xnorm = 0.0
            _xnorm_cnt = 0
            for embed in [embeddings_array, embeddings_array_flip]:
                for i in range(embed.shape[0]):
                    _norm = np.linalg.norm(embed[i])
                    _xnorm += _norm
                    _xnorm_cnt += 1
            _xnorm /= _xnorm_cnt

            # Fuse original and flipped embeddings by summing, then re-normalize
            final_embeddings_output = embeddings_array + embeddings_array_flip
            final_embeddings_output = sklearn.preprocessing.normalize(final_embeddings_output)
            print(final_embeddings_output.shape)

            tpr, fpr, accuracy, val, val_std, far = verification.evaluate(final_embeddings_output,
                                                                          issame_list, nrof_folds=10)
            acc2, std2 = np.mean(accuracy), np.std(accuracy)
            auc = metrics.auc(fpr, tpr)
            print('XNorm: %f' % _xnorm)
            print('Accuracy-Flip: %1.5f+-%1.5f' % (acc2, std2))
            print('TPR: ', np.mean(tpr), 'FPR: ', np.mean(fpr))
            print('Area Under Curve (AUC): %1.3f' % auc)

            tpr_lfw, fpr_lfw, accuracy_lfw, val_lfw, val_std_lfw, far_lfw = lfw.evaluate(
                final_embeddings_output, issame_list, nrof_folds=10, distance_metric=0, subtract_mean=False)
            print('accuracy_lfw: %2.5f+-%2.5f' % (np.mean(accuracy_lfw), np.std(accuracy_lfw)))
            print(f"val_lfw: {val_lfw}, val_std_lfw: {val_std_lfw}, far_lfw: {far_lfw}")
            print('val_lfw rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val_lfw, val_std_lfw, far_lfw))
            auc_lfw = metrics.auc(fpr_lfw, tpr_lfw)
            print('TPR_LFW:', np.mean(tpr_lfw), 'FPR_LFW: ', np.mean(fpr_lfw))
            print('Area Under Curve LFW (AUC): %1.3f' % auc_lfw)
            return acc2, std2, _xnorm, [embeddings_array, embeddings_array_flip]
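# Hedged aside: the flip fusion used above sums the original and flipped
# embeddings element-wise and re-normalizes, instead of concatenating them.
# Toy 2-D vectors stand in for the real (n_images, embedding_size) arrays.
import numpy as np
import sklearn.preprocessing

emb = np.array([[3.0, 4.0]])          # embedding of the original image
emb_flip = np.array([[4.0, 3.0]])     # embedding of the horizontally flipped image
fused = sklearn.preprocessing.normalize(emb + emb_flip)  # unit-norm average direction
print(fused, np.linalg.norm(fused))   # -> [[0.7071 0.7071]] 1.0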
def main(args):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            print(len(pairs))  # 6000 pairs in total, read in as e.g. [['Abel_Pacheco', '1', '4']]
            # Get the paths for the corresponding images and whether each pair matches
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            # print(len(actual_issame))  # len(actual_issame) = 6000
            # args.lfw_file_ext gives the image format, png by default
            # actual_issame is the ground-truth flag: True if the pair is the same person, False otherwise

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]  # embedding dimensionality, 128
            print(embedding_size)

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size  # default=100
            nrof_images = len(paths)  # 6000 comparisons, so len(paths) = 12000 images
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))  # total batches; ceil rounds up, giving 120
            emb_array = np.zeros((nrof_images, embedding_size))  # preallocate a 12000x128 matrix
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)  # load images, with optional crop and flip
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)  # store the embeddings
                print('Done: %d' % (i + 1))

            # 10-fold cross-validation accuracy estimate: split the data into 10 folds,
            # train on 9 and test on 1 in turn, and average the 10 results
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame,
                                                                 nrof_folds=args.lfw_nrof_folds)  # nrof_folds = 10
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            image_size = image_size.value

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            print('nrof_batches: {}'.format(nrof_batches))
            all_time = 0
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images}
                start = time()
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
                end = time()
                all_time += (end - start)
                print('index: {} time: {}'.format(i, end - start))
            print('all_time:', all_time)
            msgpack_numpy.dump((paths, emb_array, actual_issame), open('lfw_feature.p', 'wb'))
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, args.seed, actual_issame,
                                                                 nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
nrof_images = len(paths)
nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
    start_index = i * batch_size
    end_index = min((i + 1) * batch_size, nrof_images)
    paths_batch = paths[start_index:end_index]
    images = facenet.load_data(paths_batch, False, False, image_size)
    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
    emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame,
                                                     nrof_folds=args.lfw_nrof_folds)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
auc = metrics.auc(fpr, tpr)
print('Area Under Curve (AUC): %1.3f' % auc)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print('Equal Error Rate (EER): %1.3f' % eer)


def parse_arguments(argv):
    parser = argparse.ArgumentParser()
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            image_size = image_size.value

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            print('nrof_batches: {}'.format(nrof_batches))
            all_time = 0
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images}
                start = time()
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
                end = time()
                all_time += (end - start)
                print('index: {} time: {}'.format(i, end - start))
            print('all_time:', all_time)
            msgpack_numpy.dump((paths, emb_array, actual_issame), open('lfw_feature.p', 'wb'))
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, args.seed, actual_issame,
                                                                 nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame,
             batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric,
             subtract_mean, use_flipped_images, use_fixed_image_standardization):
    print('Running forward pass on LFW images')
    start_time = time.time()
    num_embeddings = len(actual_issame) * 2
    num_flips = 2 if use_flipped_images else 1
    num_images = num_embeddings * num_flips
    labels_array = np.expand_dims(np.arange(num_images), 1)
    image_path_array = np.expand_dims(np.repeat(np.array(image_paths), num_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {image_paths_placeholder: image_path_array,
                          labels_placeholder: labels_array,
                          control_placeholder: control_array})

    assert num_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    num_batches = num_images // batch_size
    embedding_size = int(embeddings.get_shape()[1])
    embedding_array = np.zeros((num_images, embedding_size))
    pred_label_array = np.zeros((num_images,))
    for i in range(num_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        _embedding, _pred_label = sess.run([embeddings, labels], feed_dict=feed_dict)
        pred_label_array[_pred_label] = _pred_label
        embedding_array[_pred_label, :] = _embedding
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((num_embeddings, embedding_size * num_flips))
    if use_flipped_images:
        embeddings[:, :embedding_size] = embedding_array[0::2, :]
        embeddings[:, embedding_size:] = embedding_array[1::2, :]
    else:
        embeddings = embedding_array

    assert np.array_equal(pred_label_array, np.arange(num_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds,
                                                     distance_metric=distance_metric, subtract_mean=subtract_mean)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    stat['lfw_accuracy'][epoch - 1] = np.mean(accuracy)
    stat['lfw_valrate'][epoch - 1] = val
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame,
             batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images,
             use_fixed_image_standardization):
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair: total number of original images
    nrof_flips = 2 if use_flipped_images else 1  # whether flipped images are used
    nrof_images = nrof_embeddings * nrof_flips  # images fed to the model, including one flipped copy per original
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)  # [0, 1, ..., 5999] as a column vector
    # Column vector of image paths; np.repeat puts each flipped copy next to its original.
    # Note repeat behaves differently from concatenate: repeat keeps repeated elements adjacent,
    # concatenate appends whole blocks, and this determines how the output embeddings are stitched together.
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    # Per-image preprocessing control vector, a column vector with the same shape as labels_array
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        # Fixed standardization around the mean 127.5 into (0, 1), rather than TF's per-image (x - mean) / std
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image: labels_array % 2 is [0, 1, 0, 1, ...], so every other image gets flipped
        control_array += (labels_array % 2) * facenet.FLIP
    # Enqueue (image path, image label, preprocessing mode) for each image
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array,
                          labels_placeholder: labels_array,
                          control_placeholder: control_array})

    embedding_size = int(embeddings.get_shape()[1])  # embedding dimensionality
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size  # total batches = number of samples // samples per batch
    emb_array = np.zeros((nrof_images, embedding_size))  # embeddings for all samples
    lab_array = np.zeros((nrof_images,))  # labels for all samples
    # Run inference in mini-batches
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:  # show progress every 10 batches
            print('.', end='')
            sys.stdout.flush()
    print('')
    # The final feature vector concatenates the embeddings of each original image and its flip
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non-flipped versions of the images.
        # Because np.repeat(image_paths, 2) was used, each original sits next to its flip
        # ([a, a, b, b]), not [a, b, ..., a, b], so stride-2 slices separate them.
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        # embeddings is twice as wide as emb_array; emb_array[1::2] takes every second row starting at 1
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds,
                                                         distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate (TAR): %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)  # from sklearn import metrics; AUC from the (fpr, tpr) ROC points
    print('Area Under Curve (AUC): %1.3f' % auc)
    # brentq is scipy.optimize's hybrid root-finding method: it starts with one method and
    # switches to a faster one when possible. Here it finds where the line from (0, 1) to (1, 0)
    # intersects the ROC curve; the x coordinate of that intersection is the EER.
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    # The EER is the point where FPR = FNR; since FNR = 1 - TPR, drawing the
    # (0, 1)-(1, 0) diagonal and finding its intersection with the ROC gives it
    print('Equal Error Rate (EER): %1.3f' % eer)

    import matplotlib.pyplot as plt
    plt.plot(fpr, tpr, '-')
    plt.title('ROC')
    plt.show()
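# Hedged aside: why the slicing above uses emb_array[0::2] and emb_array[1::2].
# np.repeat places each original path next to its flipped copy ([a, a, b, b]),
# whereas np.concatenate would append the flips as a separate block, so the
# stride-2 slices only de-interleave correctly for the np.repeat layout.
import numpy as np

paths = np.array(['a', 'b', 'c'])
print(np.repeat(paths, 2))                  # -> ['a' 'a' 'b' 'b' 'c' 'c']
print(np.concatenate([paths, paths]))       # -> ['a' 'b' 'c' 'a' 'b' 'c']

emb = np.arange(12).reshape(6, 2)           # toy embeddings for 3 repeated images
originals, flips = emb[0::2], emb[1::2]     # rows 0, 2, 4 vs rows 1, 3, 5
combined = np.concatenate([originals, flips], axis=1)  # (3, 4): [original | flip]
print(combined.shape)                       # -> (3, 4)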
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, embeddings, labels, image_paths, actual_issame, batch_size,
             nrof_folds, log_dir, step, summary_writer):
    '''Run the LFW forward pass and log accuracy and validation rate to the
    TensorBoard summary and to lfw_result.txt in log_dir.'''
    start_time = time.time()
    print('Running forward pass on LFW images')
    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.arange(0, len(image_paths)), 1)
    image_paths_array = np.expand_dims(np.array(image_paths), 1)
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})

    embedding_size = embeddings.get_shape()[1]
    nrof_images = len(actual_issame) * 2
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for _ in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab] = emb

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)

    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            # images_placeholder = tf.get_default_graph().get_tensor_by_name("image_batch:0")
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            # keep_probability_placeholder = tf.get_default_graph().get_tensor_by_name('keep_probability:0')
            # weight_decay_placeholder = tf.get_default_graph().get_tensor_by_name('weight_decay:0')
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                # feed_dict = {phase_train_placeholder: False, images_placeholder: images, keep_probability_placeholder: 1.0, weight_decay_placeholder: 0.0}
                feed_dict = {phase_train_placeholder: False, images_placeholder: images}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            print('Evaluate_model: %s' % args.evaluate_mode)
            if args.evaluate_mode == 'Euclidian':
                tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame,
                                                                     nrof_folds=args.lfw_nrof_folds)
            if args.evaluate_mode == 'similarity':
                # pca = PCA(whiten=True)
                pca = PCA(n_components=128)
                pca.fit(emb_array)
                emb_array_pca = pca.transform(emb_array)
                tpr, fpr, accuracy, val, val_std, far = lfw.evaluate_cosine(emb_array_pca, actual_issame,
                                                                            nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            facenet.plot_roc(fpr, tpr, 'NN4')
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            # facenet.load_model(args.model)
            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            print('image size', image_size)
            images_placeholder = tf.placeholder(tf.float32,
                                                shape=(None, args.image_height, args.image_width, 3),
                                                name='image')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            # Network definition
            # with slim.arg_scope(resnet_v1.resnet_arg_scope(False)):
            if args.network_type == 'resnet50':
                with slim.arg_scope(resnet_v2.resnet_arg_scope(False)):
                    prelogits, end_points = resnet_v2.resnet_v2_50(images_placeholder,
                                                                   is_training=phase_train_placeholder,
                                                                   num_classes=256, output_stride=16)
                    # prelogits, end_points = resnet_v2.resnet_v2_50(images_placeholder, is_training=phase_train_placeholder, num_classes=256, output_stride=8)
                    # prelogits = slim.batch_norm(prelogits, is_training=phase_train_placeholder, epsilon=1e-5, scale=True, scope='softmax_bn')
                    prelogits = tf.squeeze(prelogits, [1, 2], name='SpatialSqueeze')
            elif args.network_type == 'sphere_network':
                prelogits = network.infer(images_placeholder)
                if args.fc_bn:
                    prelogits = slim.batch_norm(prelogits, is_training=phase_train_placeholder,
                                                epsilon=1e-5, scale=True, scope='softmax_bn')
            # embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
            embeddings = tf.identity(prelogits)

            # saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
            saver.restore(sess, args.model)
            if args.save_model:
                saver.save(sess, './tmp_saved_model', global_step=1)
                return 0

            embedding_size = embeddings.get_shape()[1]
            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            if args.do_flip:
                embedding_size *= 2  # original and flipped embeddings are concatenated
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                print('handling {}/{}'.format(start_index, nrof_images))
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = utils.load_data(paths_batch, False, True, args.image_height, args.image_width,
                                         True, (args.image_height, args.image_width))
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                feats = sess.run(embeddings, feed_dict=feed_dict)
                if args.do_flip:
                    images_flip = utils.load_data(paths_batch, False, True, args.image_height,
                                                  args.image_width, True,
                                                  (args.image_height, args.image_width))
                    feed_dict = {images_placeholder: images_flip, phase_train_placeholder: False}
                    feats_flip = sess.run(embeddings, feed_dict=feed_dict)
                    feats = np.concatenate((feats, feats_flip), axis=1)
                    # feats = (feats + feats_flip) / 2
                feats = utils.l2_normalize(feats)
                emb_array[start_index:end_index, :] = feats

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame,
                                                                 nrof_folds=args.lfw_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
             batch_size_placeholder, embeddings, labels, image_paths, actual_issame, batch_size,
             nrof_folds, log_dir, step, summary_writer, evaluate_mode):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.arange(0, len(image_paths)), 1)
    image_paths_array = np.expand_dims(np.array(image_paths), 1)
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})

    embedding_size = embeddings.get_shape()[1]
    nrof_images = len(actual_issame) * 2
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for _ in range(nrof_batches):
        feed_dict = {phase_train_placeholder: False, batch_size_placeholder: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab] = emb

    assert np.array_equal(lab_array, np.arange(nrof_images)) == True, \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    if evaluate_mode == 'Euclidian':
        _, _, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = lfw.evaluate(
            emb_array, actual_issame, nrof_folds=nrof_folds)
    if evaluate_mode == 'similarity':
        pca = PCA(n_components=128)
        pca.fit(emb_array)
        emb_array_pca = pca.transform(emb_array)
        _, _, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = lfw.evaluate_cosine(
            emb_array_pca, actual_issame, nrof_folds=nrof_folds)

    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    acc = np.mean(accuracy)
    return acc, val, far
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            np.random.shuffle(pairs)
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                print("Test batch:%d/%d\n" % (i, nrof_batches))
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    phase_train_placeholder: False,
                    images_placeholder: images
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            print('Evaluate_model: %s' % args.evaluate_mode)
            if args.evaluate_mode == 'Euclidian':
                tpr, fpr, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = lfw.evaluate(
                    emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            if args.evaluate_mode == 'similarity':
                pca = PCA(n_components=128)
                pca.fit(emb_array)
                emb_array_pca = pca.transform(emb_array)
                tpr, fpr, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = lfw.evaluate_cosine(
                    emb_array_pca, actual_issame, nrof_folds=args.lfw_nrof_folds)

            # edit by mzh 12012017: select the false positive/negative images
            nrof_test_paths = nrof_batches * batch_size
            nrof_test_tp_pairs = sum(actual_issame[0:int(nrof_test_paths / 2)])
            nrof_test_tn_pairs = len(actual_issame[0:int(nrof_test_paths / 2)]) - nrof_test_tp_pairs
            # paths_pairs shape: [2, number of pairs]; each column is a pair of image paths
            paths_pairs = [paths[0:nrof_test_paths:2], paths[1:nrof_test_paths:2]]
            paths_pairs_array = np.array(paths_pairs)
            fp_images_paths = paths_pairs_array[:, fp_idx]
            fn_images_paths = paths_pairs_array[:, fn_idx]
            _, nrof_fp_pairs = fp_images_paths.shape
            _, nrof_fn_pairs = fn_images_paths.shape

            print('Accuracy: %1.3f+-%1.3f @ Best threshold %f\n' %
                  (np.mean(accuracy), np.std(accuracy), best_threshold))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f @val_threshold %f\n' %
                  (val, val_std, far, val_threshold))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f\n' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f\n' % eer)

            subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
            log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
            print('log_dir: %s\n' % log_dir)
            if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
                os.makedirs(log_dir)

            with open(os.path.join(log_dir, 'validation_on_dataset.txt'), 'at') as f:
                print('Saving the evaluation results...\n')
                f.write('arguments: %s\n--------------------\n' % ' '.join(sys.argv))
                f.write('Accuracy: %1.3f+-%1.3f\n' % (np.mean(accuracy), np.std(accuracy)))
                f.write('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f\n' % (val, val_std, far))
                f.write('Best threshold: %2.5f \n' % best_threshold)
                f.write('Validation threshold: %2.5f \n' % val_threshold)
                f.write('Area Under Curve (AUC): %1.3f\n' % auc)
                f.write('Equal Error Rate (EER): %1.3f\n' % eer)
                print('Saving the false positive pairs images...\n')
                f.write('False positive pairs: %d / %d ---------------------------------------------\n' %
                        (nrof_fp_pairs, nrof_test_tp_pairs))
                for i in range(nrof_fp_pairs):
                    f.write('%d %s\n' % (i, fp_images_paths[:, i]))
                print('Saving the false negative pairs images...\n')
                f.write('False negative pairs: %d / %d ---------------------------------------------\n' %
                        (nrof_fn_pairs, nrof_test_tn_pairs))
                for i in range(nrof_fn_pairs):
                    f.write('%d %s\n' % (i, fn_images_paths[:, i]))

            # edit by mzh 12012017: write the false positive/negative images to file
            false_images_list = os.path.join(log_dir, 'validation_on_dataset.txt')
            save_dir = log_dir
            save_false_images.save_false_images(false_images_list, save_dir)

            with open(os.path.join(log_dir, 'validation_on_dataset.txt'), 'at') as f:
                print('Saving the tpr, fpr of ROC...\n')
                f.write('ROC: tpr, fpr --------------------------------------------------------------\n')
                for tp, fp in zip(tpr, fpr):
                    f.write('tpr/fpr: %f/%f\n' % (tp, fp))
            facenet.plot_roc(fpr, tpr, 'ROC')
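# --- Sketch (assumption): how false-positive/false-negative pair indices can
# be derived from pair distances, a threshold, and the ground truth, mirroring
# the fp_idx/fn_idx bookkeeping above. ---
import numpy as np

def fp_fn_indices(dist, issame, threshold):
    """Predicted "same" <=> dist < threshold.
    FP: predicted same but actually different; FN: predicted different but same."""
    dist = np.asarray(dist)
    issame = np.asarray(issame, dtype=bool)
    predicted = dist < threshold
    fp_idx = np.where(np.logical_and(predicted, ~issame))[0]
    fn_idx = np.where(np.logical_and(~predicted, issame))[0]
    return fp_idx, fn_idx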
        # (fragment) Accumulate weighted per-model features, either by
        # concatenation or by summation
        weight = weights[ii]
        XX *= weight
        if F is None:
            F = XX
        else:
            if concat:
                F = np.concatenate((F, XX), axis=1)
            else:
                F += XX
        ii += 1
    print(F.shape)
    npca = 0
    if concat and pca:
        npca = 180
        for npca in xrange(512, 513, 1):
            _, _, accuracy, val, val_std, far = lfw.evaluate(
                F, issame_list, nrof_folds=10, pca=npca)
            print('[%d]Accuracy: %1.5f+-%1.5f' %
                  (npca, np.mean(accuracy), np.std(accuracy)))
    else:
        F = sklearn.preprocessing.normalize(F)
        _, _, accuracy, val, val_std, far = lfw.evaluate(
            F, issame_list, nrof_folds=10, pca=npca)
        print('[%d]Accuracy: %1.5f+-%1.5f' %
              (0, np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
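# --- Sketch (assumption): fusing embeddings from several models, either by
# feature concatenation or by weighted summation, as in the fragment above. ---
import numpy as np
import sklearn.preprocessing

def fuse(features, weights, concat=True):
    """features: list of (N, D_i) arrays, one per model; weights: list of floats."""
    F = None
    for X, w in zip(features, weights):
        X = X * w
        if F is None:
            F = X
        elif concat:
            F = np.concatenate((F, X), axis=1)   # (N, sum(D_i))
        else:
            F = F + X                            # requires equal D_i
    return sklearn.preprocessing.normalize(F)    # row-wise L2 normalization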
def custom_insightface_evaluation(args):
    tf.reset_default_graph()
    # Read the directory containing images
    pairs = read_pairs(args.insightface_pair)
    image_list, issame_list = get_paths_with_pairs(
        args.insightface_dataset_dir, pairs)

    # Evaluate custom dataset with facenet pre-trained model
    print("Getting embeddings with facenet pre-trained model")
    # Getting batched images by TF dataset.
    # Note: the original passed args.insightface_dataset_dir as image_size,
    # which looks like a bug; the image size argument is used here instead.
    tf_dataset = facenet.tf_gen_dataset(
        image_list=image_list,
        label_list=None,
        nrof_preprocess_threads=args.nrof_preprocess_threads,
        image_size=args.insightface_image_size,
        method='cache_slices',
        BATCH_SIZE=args.batch_size,
        repeat_count=1,
        to_float32=True,
        shuffle=False)
    tf_dataset_iterator = tf_dataset.make_initializable_iterator()
    tf_dataset_next_element = tf_dataset_iterator.get_next()

    images = tf.placeholder(
        name='img_inputs',
        shape=[None, args.insightface_image_size, args.insightface_image_size, 3],
        dtype=tf.float32)
    labels = tf.placeholder(name='img_labels', shape=[None, ], dtype=tf.int64)
    dropout_rate = tf.placeholder(name='dropout_rate', dtype=tf.float32)

    w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
    net = L_Resnet_E_IR_fix_issue9.get_resnet(
        images, args.net_depth, type='ir', w_init=w_init_method,
        trainable=False, keep_rate=dropout_rate)
    embeddings = net.outputs
    # Get the arcface loss logits (not needed for evaluation itself)
    logit = arcface_loss(embedding=net.outputs, labels=labels,
                         w_init=w_init_method, out_num=args.num_output)

    sess = tf.Session()
    saver = tf.train.Saver()
    feed_dict = {}
    feed_dict_flip = {}
    path = args.ckpt_file + args.ckpt_index_list[0]
    saver.restore(sess, path)
    print('ckpt file %s restored!' % args.ckpt_index_list[0])
    feed_dict.update(tl.utils.dict_to_one(net.all_drop))
    feed_dict_flip.update(tl.utils.dict_to_one(net.all_drop))
    feed_dict[dropout_rate] = 1.0
    feed_dict_flip[dropout_rate] = 1.0

    batch_size = args.batch_size
    input_placeholder = images
    sess.run(tf_dataset_iterator.initializer)
    print('getting embeddings..')

    total_time = 0
    batch_number = 0
    embeddings_array = None
    embeddings_array_flip = None
    while True:
        try:
            images = sess.run(tf_dataset_next_element)
            # Standardize to roughly [-1, 1] and convert RGB to BGR (fix issue #4)
            data_tmp = images.copy()
            for i in range(data_tmp.shape[0]):
                data_tmp[i, ...] -= 127.5
                data_tmp[i, ...] *= 0.0078125
                data_tmp[i, ...] = cv2.cvtColor(data_tmp[i, ...], cv2.COLOR_RGB2BGR)
            # Same preprocessing for the left-right flipped batch
            data_tmp_flip = images.copy()
            for i in range(data_tmp_flip.shape[0]):
                data_tmp_flip[i, ...] = np.fliplr(data_tmp_flip[i, ...])
                data_tmp_flip[i, ...] -= 127.5
                data_tmp_flip[i, ...] *= 0.0078125
                data_tmp_flip[i, ...] = cv2.cvtColor(data_tmp_flip[i, ...], cv2.COLOR_RGB2BGR)

            start_time = time.time()
            feed_dict[input_placeholder] = data_tmp
            _embeddings = sess.run(embeddings, feed_dict)
            feed_dict_flip[input_placeholder] = data_tmp_flip
            _embeddings_flip = sess.run(embeddings, feed_dict_flip)

            if embeddings_array is None:
                embeddings_array = np.zeros((len(image_list), _embeddings.shape[1]))
                embeddings_array_flip = np.zeros((len(image_list), _embeddings_flip.shape[1]))
            try:
                start = batch_number * batch_size
                end = min((batch_number + 1) * batch_size, len(image_list))
                embeddings_array[start:end, ...] = _embeddings
                embeddings_array_flip[start:end, ...] = _embeddings_flip
            except ValueError:
                print('batch_number*batch_size value is %d min((batch_number+1)*batch_size, len(image_list)) %d,'
                      ' batch_size %d, data.shape[0] %d' %
                      (batch_number * batch_size,
                       min((batch_number + 1) * batch_size, len(image_list)),
                       batch_size, images.shape[0]))

            duration = time.time() - start_time
            batch_number += 1
            total_time += duration
        except tf.errors.OutOfRangeError:
            print('tf.errors.OutOfRangeError, reinitialize tf_dataset_iterator')
            sess.run(tf_dataset_iterator.initializer)
            break

    print(f"total_time: {total_time}")

    # Average embedding norm over the original and flipped embeddings
    _xnorm = 0.0
    _xnorm_cnt = 0
    for embed in [embeddings_array, embeddings_array_flip]:
        for i in range(embed.shape[0]):
            _xnorm += np.linalg.norm(embed[i])
            _xnorm_cnt += 1
    _xnorm /= _xnorm_cnt

    # Fuse the original and flipped embeddings by summation, then L2-normalize
    final_embeddings_output = embeddings_array + embeddings_array_flip
    final_embeddings_output = sklearn.preprocessing.normalize(final_embeddings_output)
    print(final_embeddings_output.shape)

    tpr, fpr, accuracy, val, val_std, far = verification.evaluate(
        final_embeddings_output, issame_list, nrof_folds=10)
    acc2, std2 = np.mean(accuracy), np.std(accuracy)
    auc = metrics.auc(fpr, tpr)
    print('XNorm: %f' % _xnorm)
    print('Accuracy-Flip: %1.5f+-%1.5f' % (acc2, std2))
    print('TPR: ', np.mean(tpr), 'FPR: ', np.mean(fpr))
    print('Area Under Curve (AUC): %1.3f' % auc)

    tpr_lfw, fpr_lfw, accuracy_lfw, val_lfw, val_std_lfw, far_lfw = lfw.evaluate(
        final_embeddings_output, issame_list, nrof_folds=10,
        distance_metric=0, subtract_mean=False)
    print('accuracy_lfw: %2.5f+-%2.5f' % (np.mean(accuracy_lfw), np.std(accuracy_lfw)))
    print(f"val_lfw: {val_lfw}, val_std_lfw: {val_std_lfw}, far_lfw: {far_lfw}")
    print('val_lfw rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val_lfw, val_std_lfw, far_lfw))
    auc_lfw = metrics.auc(fpr_lfw, tpr_lfw)
    print('TPR_LFW:', np.mean(tpr_lfw), 'FPR_LFW: ', np.mean(fpr_lfw))
    print('Area Under Curve LFW (AUC): %1.3f' % auc_lfw)

    sess.close()
    return acc2, std2, _xnorm, [embeddings_array, embeddings_array_flip]
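# --- Sketch (assumption): the InsightFace-style preprocessing used above,
# shown on a single image: scale pixels to roughly [-1, 1] with
# (x - 127.5) * 0.0078125 (i.e. 1/128), optionally flip, and swap RGB->BGR. ---
import numpy as np

def preprocess(img_rgb_float32, flip=False):
    """img_rgb_float32: (H, W, 3) float32 array with values in [0, 255]."""
    img = np.fliplr(img_rgb_float32) if flip else img_rgb_float32.copy()
    img = (img - 127.5) * 0.0078125   # standardize with 1/128 scaling
    return img[..., ::-1]             # RGB -> BGR channel swap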
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Reading the image size from the graph doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)
                print('Processing step {} of {} ...'.format(i + 1, nrof_batches))

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            time_elapsed_log = open('validateLfw_timeElapsed_Log.txt', 'w')
            start_total_time = time.time()

            # Read the file containing the pairs used for testing
            print('Reading pairs from LFW')
            start_current_time = time.time()
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            end_current_time = time.time()
            time_elapsed_log.write('Read pairs from LFW took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            # Get the paths for the corresponding images
            print('Getting the paths for the corresponding images')
            start_current_time = time.time()
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            end_current_time = time.time()
            time_elapsed_log.write('Get the paths for the corresponding images took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            # Load the model
            print('Loading model')
            start_current_time = time.time()
            facenet.load_model(args.model)
            end_current_time = time.time()
            time_elapsed_log.write('Load model took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            # Get input and output tensors
            print('Getting input and output tensors')
            print('  images_placeholder')
            start_current_time = time.time()
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            end_current_time = time.time()
            time_elapsed_log.write('Get images placeholder took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            print('  embeddings')
            start_current_time = time.time()
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            end_current_time = time.time()
            time_elapsed_log.write('Get embeddings took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            print('  phase_train_placeholder')
            start_current_time = time.time()
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            end_current_time = time.time()
            time_elapsed_log.write('Get phase train placeholder took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            print('Getting embedding_size')
            # Reading the image size from the graph doesn't work for frozen graphs
            start_current_time = time.time()
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]
            end_current_time = time.time()
            time_elapsed_log.write('Get embedding size took ' +
                                   str(end_current_time - start_current_time)[0:5] +
                                   ' seconds\n')

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            start_forward_pass_time = time.time()
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                print('Batch ' + str(i + 1) + '/' + str(nrof_batches))
                start_batch_time = time.time()
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]

                print('  Loading data')
                start_batch_loadData_time = time.time()
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                end_batch_loadData_time = time.time()
                time_elapsed_log.write('  Load data from batch ' + str(i + 1) + '/' +
                                       str(nrof_batches) + ' took ' +
                                       str(end_batch_loadData_time -
                                           start_batch_loadData_time)[0:5] +
                                       ' seconds\n')

                print('  Running embeddings')
                start_batch_runEmbeddings_time = time.time()
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)
                end_batch_runEmbeddings_time = time.time()
                time_elapsed_log.write('  Run embeddings from batch ' + str(i + 1) + '/' +
                                       str(nrof_batches) + ' took ' +
                                       str(end_batch_runEmbeddings_time -
                                           start_batch_runEmbeddings_time)[0:5] +
                                       ' seconds\n')

                end_batch_time = time.time()
                time_elapsed_log.write('  Batch ' + str(i + 1) + '/' + str(nrof_batches) +
                                       ' took ' + str(end_batch_time - start_batch_time)[0:5] +
                                       ' seconds\n')
            end_forward_pass_time = time.time()
            time_elapsed_log.write('Run forward pass on LFW images took ' +
                                   str(end_forward_pass_time - start_forward_pass_time)[0:5] +
                                   ' seconds\n')

            start_evaluation_time = time.time()
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            end_evaluation_time = time.time()
            time_elapsed_log.write('Evaluation took ' +
                                   str(end_evaluation_time - start_evaluation_time)[0:5] +
                                   ' seconds\n')

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)

            end_total_time = time.time()
            time_elapsed_log.write('Total time was ' +
                                   str(end_total_time - start_total_time)[0:5] +
                                   ' seconds\n')
            time_elapsed_log.close()
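# --- Sketch (assumption): the repeated start/end timing and log writes above
# could be factored into a small context manager like this. ---
import time
from contextlib import contextmanager

@contextmanager
def timed(log_file, label):
    """Write '<label> took X.XXX seconds' to log_file when the block exits."""
    start = time.time()
    yield
    log_file.write('%s took %.3f seconds\n' % (label, time.time() - start))

# Usage:
# with open('validateLfw_timeElapsed_Log.txt', 'w') as log:
#     with timed(log, 'Read pairs from LFW'):
#         pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))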
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images.
            # Two consecutive entries in paths form one pair of images;
            # each element of actual_issame is the ground truth for whether
            # the two images in that pair show the same person.
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            print([op.name for op in tf.get_default_graph().get_operations()])

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                # Load the images
                images = facenet.load_data(paths_batch, False, False, image_size)
                # Feed them into the network
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                # Update emb_array with the embeddings of this batch
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # tpr: true positive rate (fraction of "same" pairs classified as same)
            # fpr: false positive rate (fraction of "different" pairs classified as same)
            # accuracy: (tp + tn) / number of pairs
            # val: validation rate, the true accept rate at the threshold where
            #      the false accept rate equals the target FAR
            # val_std: standard deviation of val across folds
            # far: the false accept rate at that threshold
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            # print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            # print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            # print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
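# --- Sketch (assumption): the quantities documented in the comments above,
# computed on a toy set of pair classifications. ---
import numpy as np

def rates(predicted_same, actual_same):
    predicted_same = np.asarray(predicted_same, dtype=bool)
    actual_same = np.asarray(actual_same, dtype=bool)
    tp = np.sum(predicted_same & actual_same)
    fp = np.sum(predicted_same & ~actual_same)
    tn = np.sum(~predicted_same & ~actual_same)
    fn = np.sum(~predicted_same & actual_same)
    tpr = tp / float(tp + fn)                    # true positive rate
    fpr = fp / float(fp + tn)                    # false positive rate
    acc = (tp + tn) / float(len(actual_same))    # overall accuracy
    return tpr, fpr, acc

# rates([True, True, False, False], [True, False, False, True]) -> (0.5, 0.5, 0.5)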
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Reading the image size from the graph doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # Per-pair Euclidean distance and cosine similarity
            distance, cos = getDistances(emb_array)
            data_frame = pd.DataFrame(
                data={
                    'person1': paths[0::2],
                    'person2': paths[1::2],
                    'true': actual_issame,
                    'distance': distance,
                    'cos': cos
                })
            # os.path.join instead of a hard-coded Windows path separator
            data_frame.to_csv(os.path.join(args.lfw_dir, 'results_data_mini.csv'))

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            plotVerifyExp(os.path.split(os.path.realpath(__file__))[0], 'roc')

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
            plotExp(distance, cos, actual_issame)
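# --- Sketch (hypothetical): getDistances is not defined in these sources; a
# plausible implementation returns the Euclidean distance and cosine
# similarity for each consecutive pair of embeddings. ---
import numpy as np

def getDistances(emb_array):
    a, b = emb_array[0::2], emb_array[1::2]
    distance = np.linalg.norm(a - b, axis=1)   # Euclidean distance per pair
    cos = np.sum(a * b, axis=1) / (
        np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1))  # cosine per pair
    return distance, cos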
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Get the paths for the corresponding images
            paths, actual_issame, ids = lfw.get_paths(args.lfw_dir, args.labels_file)
            logging.info('len of paths:%d' % len(paths))
            logging.info('paths[0]:%s' % paths[0])

            # Load the model
            logging.info('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            logging.info('Metagraph file: %s' % meta_file)
            logging.info('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            logging.info('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            logging.info('batch size:%d, nrof_images len:%d, nrof_batches len:%d' %
                         (batch_size, nrof_images, nrof_batches))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)
            logging.info('forward process complete.')
            logging.info('emb_array len:%d' % len(emb_array))

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            logging.info('num of accuracy:%d' % len(accuracy))
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder, control_placeholder,
             embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds,
             log_dir, step, summary_writer, stat, epoch, distance_metric,
             subtract_mean, use_flipped_images, use_fixed_image_standardization):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels.
    # nrof_pairs * nrof_images_per_pair: total number of images, two per pair
    nrof_embeddings = len(actual_issame) * 2
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    # When flipping is used, the flipped copy shares the original image's path,
    # so image_paths_array is [image1, image1, image2, image2, ...]
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {
        image_paths_placeholder: image_paths_array,
        labels_placeholder: labels_array,
        control_placeholder: control_array
    })

    embedding_size = int(embeddings.get_shape()[1])  # e.g. 128
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')

    # When flipped images are used, each final embedding is the concatenation
    # of the vectors for the original and the flipped image
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)), \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'

    # Extract features with the trained model, then score them with lfw.evaluate.
    # distance_metric 0: Euclidean distance, 1: cosine similarity distance.
    # With flipping the embeddings are 256-dim; each pair contributes two
    # images, so the first dimension of embeddings must be even.
    _, _, accuracy, val, val_std, far = lfw.evaluate(
        embeddings, actual_issame, nrof_folds=nrof_folds,
        distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    stat['lfw_accuracy'][epoch - 1] = np.mean(accuracy)
    stat['lfw_valrate'][epoch - 1] = val
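# --- Sketch (assumption): the control array above is a per-image bitmask that
# the facenet input pipeline interprets; adding flags selects preprocessing.
# The real flag values live in facenet.py; the constants below are
# placeholders for illustration only. ---
import numpy as np

FIXED_STANDARDIZATION = 8   # placeholder value, not facenet's actual constant
FLIP = 16                   # placeholder value, not facenet's actual constant

nrof_images = 6
labels = np.expand_dims(np.arange(nrof_images), 1)
control = np.zeros_like(labels, np.int32)
control += FIXED_STANDARDIZATION        # apply fixed standardization to all
control += (labels % 2) * FLIP          # flip every second image
print(control.ravel())                  # [ 8 24  8 24  8 24]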
def main(args):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, allow_soft_placement=True)) as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            print('%s pairs' % len(paths))

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            # Reading sizes from the graph doesn't work for frozen (.pb) graphs
            if '.pb' in args.model_dir:
                image_size = args.image_size
                embedding_size = args.embedding_size
            else:
                image_size = images_placeholder.get_shape()[1]
                embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            print("%s batches" % nrof_batches)
            emb_array = np.zeros((nrof_images, embedding_size))
            emb_array_flip = np.zeros((nrof_images, embedding_size))
            t_total = 0
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images}
                images_flip = facenet.load_data(paths_batch, False, True, image_size)
                feed_dict_flip = {images_placeholder: images_flip}
                t_start = time.time()
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)
                if args.mirrorface:
                    emb_array_flip[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict_flip)
                t = 1000 * (time.time() - t_start) / batch_size
                t_total += t
            print("---inference speed = %s milliseconds---" %
                  (t_total / (nrof_batches - 1)))

            if args.mirrorface:
                # Fuse original and mirrored embeddings: sum, then L2-normalize
                emb_sum = np.add(emb_array, emb_array_flip)
                emb_norm = np.linalg.norm(emb_sum, ord=2, axis=1)
                for i in xrange(len(emb_norm)):
                    emb_array[i] = np.divide(emb_sum[i], emb_norm[i])

            tpr, fpr, accuracy, val, val_std, far, f1 = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds,
                distance_metric=args.distance_metric)
            print('Accuracy: %1.4f+-%1.4f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            print('F1: %2.5f' % f1)
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
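# --- Sketch (assumption): the "mirror face" fusion done above, vectorized:
# add the embeddings of the original and mirrored images and renormalize each
# row to unit norm. ---
import numpy as np

def fuse_mirror(emb, emb_flip):
    emb_sum = emb + emb_flip
    norms = np.linalg.norm(emb_sum, ord=2, axis=1, keepdims=True)
    return emb_sum / norms   # row-wise L2 normalization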
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder, control_placeholder,
             embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds,
             distance_metric, subtract_mean, use_flipped_images,
             use_fixed_image_standardization, csv_file_path, pruned_ratio, epoch_no,
             MB_of_model_through_inference, MB_of_shared_variables,
             MB_of_task_specific_variables, MB_of_whole_masks,
             MB_of_task_specific_masks, MB_of_task_specific_batch_norm_variables,
             MB_of_task_specific_biases):
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')
    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {
        image_paths_placeholder: image_paths_array,
        labels_placeholder: labels_array,
        control_placeholder: control_array
    })

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')

    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)), \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'

    start_eval_time = time.time()
    # This variant of lfw.evaluate returns only (tpr, fpr, accuracy)
    tpr, fpr, accuracy = lfw.evaluate(
        embeddings, actual_issame, nrof_folds=nrof_folds,
        distance_metric=distance_metric, subtract_mean=subtract_mean)
    evaluate_time = time.time() - start_eval_time

    acc_mean = np.mean(accuracy)
    acc_std = np.std(accuracy)
    print('Accuracy: %2.5f+-%2.5f' % (acc_mean, acc_std))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
    print('Eval time: {}'.format(evaluate_time))

    if csv_file_path:
        with open(csv_file_path, 'a') as f:
            writer = csv.writer(f)
            writer.writerow([
                '{:.5f}'.format(pruned_ratio),
                '{:.5f}'.format(acc_mean),
                '{:.5f}'.format(acc_std),
                epoch_no,
                '{:.3f}'.format(MB_of_model_through_inference),
                '{:.3f}'.format(MB_of_shared_variables),
                '{:.3f}'.format(MB_of_task_specific_variables),
                '{:.3f}'.format(MB_of_whole_masks),
                '{:.3f}'.format(MB_of_task_specific_masks),
                '{:.3f}'.format(MB_of_task_specific_batch_norm_variables),
                '{:.3f}'.format(MB_of_task_specific_biases)
            ])
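# --- Sketch (assumption): a header row matching the columns appended above;
# the sources do not show where or whether a header is written, so both the
# column names and the write_header helper are illustrative only. ---
import csv

CSV_COLUMNS = [
    'pruned_ratio', 'acc_mean', 'acc_std', 'epoch_no',
    'MB_of_model_through_inference', 'MB_of_shared_variables',
    'MB_of_task_specific_variables', 'MB_of_whole_masks',
    'MB_of_task_specific_masks', 'MB_of_task_specific_batch_norm_variables',
    'MB_of_task_specific_biases']

def write_header(csv_file_path):
    with open(csv_file_path, 'w') as f:
        csv.writer(f).writerow(CSV_COLUMNS)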
def evaluate(sess, image_paths, embeddings, labels_batch, image_paths_placeholder,
             labels_placeholder, batch_size_placeholder, learning_rate_placeholder,
             phase_train_placeholder, enqueue_op, actual_issame, batch_size,
             nrof_folds, log_dir, step, summary_writer, embedding_size):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images: ', end='')
    nrof_images = len(actual_issame) * 2
    assert (len(image_paths) == nrof_images)
    labels_array = np.reshape(np.arange(nrof_images), (-1, 3))
    image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))
    sess.run(enqueue_op, {
        image_paths_placeholder: image_paths_array,
        labels_placeholder: labels_array
    })
    emb_array = np.zeros((nrof_images, embedding_size))
    nrof_batches = int(np.ceil(nrof_images / batch_size))
    label_check_array = np.zeros((nrof_images,))
    for i in xrange(nrof_batches):
        # Note: this reassignment only sizes the last batch correctly when
        # batch_size divides nrof_images evenly (see the sketch below)
        batch_size = min(nrof_images - i * batch_size, batch_size)
        emb, lab = sess.run(
            [embeddings, labels_batch],
            feed_dict={
                batch_size_placeholder: batch_size,
                learning_rate_placeholder: 0.0,
                phase_train_placeholder: False
            })
        emb_array[lab, :] = emb
        label_check_array[lab] = 1
    print('Elapsed: %.3f' % (time.time() - start_time))
    assert (np.all(label_check_array == 1))

    _, _, accuracy, val, val_std, far = lfw.evaluate(
        emb_array, actual_issame, nrof_folds=nrof_folds)

    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
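# --- Sketch (assumption): computing each batch's bounds from the loop index
# avoids mutating batch_size inside the loop, which the function above only
# gets away with when batch_size divides nrof_images. ---
def batch_bounds(nrof_images, batch_size):
    for i in range(0, nrof_images, batch_size):
        yield i, min(i + batch_size, nrof_images)

# list(batch_bounds(10, 4)) -> [(0, 4), (4, 8), (8, 10)]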
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder, control_placeholder,
             embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds,
             log_dir, step, summary_writer, stat, epoch, distance_metric,
             subtract_mean, use_flipped_images, use_fixed_image_standardization):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(enqueue_op, {
        image_paths_placeholder: image_paths_array,
        labels_placeholder: labels_array,
        control_placeholder: control_array
    })

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')

    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)), \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'

    _, _, accuracy, val, val_std, far = lfw.evaluate(
        embeddings, actual_issame, nrof_folds=nrof_folds,
        distance_metric=distance_metric, subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    stat['lfw_accuracy'][epoch - 1] = np.mean(accuracy)
    stat['lfw_valrate'][epoch - 1] = val

    # Emit the mean accuracy as a Kubeflow Pipelines metric
    metrics = {
        'metrics': [{
            'name': 'accuracy-score',
            'numberValue': np.mean(accuracy),
            'format': "PERCENTAGE",
        }]
    }
    with open('/mlpipeline-metrics.json', 'w') as f:
        json.dump(metrics, f)
def evaluate(sess, image_paths, embeddings, labels_batch, image_paths_placeholder,
             labels_placeholder, data_augmentations_placeholder, batch_size_placeholder,
             learning_rate_placeholder, phase_train_placeholder, enqueue_op,
             actual_issame, batch_size, nrof_folds, log_dir, prefix, epoch,
             summary_writer, embedding_size):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    logger.info('Running forward pass on LFW images: ')
    nrof_images = len(actual_issame) * 2
    assert (len(image_paths) == nrof_images)
    labels_array = np.reshape(np.arange(nrof_images), (-1, 3))
    image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))
    data_augmentations_array = np.zeros_like(labels_array)
    sess.run(enqueue_op,
             feed_dict={
                 image_paths_placeholder: image_paths_array,
                 labels_placeholder: labels_array,
                 data_augmentations_placeholder: data_augmentations_array
             })
    emb_array = np.zeros((nrof_images, embedding_size))
    nrof_batches = int(np.ceil(nrof_images / batch_size))
    label_check_array = np.zeros((nrof_images,))
    for i in xrange(nrof_batches):
        batch_size = min(nrof_images - i * batch_size, batch_size)
        emb, lab = sess.run(
            [embeddings, labels_batch],
            feed_dict={
                batch_size_placeholder: batch_size,
                learning_rate_placeholder: 0.0,
                phase_train_placeholder: False
            })
        emb_array[lab, :] = emb
        label_check_array[lab] = 1
    logger.debug('%.3f' % (time.time() - start_time))
    assert (np.all(label_check_array == 1))

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        emb_array, actual_issame, nrof_folds=nrof_folds)

    logger.debug('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    logger.debug('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    logger.debug('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    logger.debug('Equal Error Rate (EER): %1.3f' % eer)
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, epoch)
    with open(os.path.join(log_dir, prefix + '_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (epoch, np.mean(accuracy), val))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder, embeddings,
             labels, image_paths, actual_issame, batch_size, nrof_folds,
             log_dir, step, summary_writer, loss, regularization_losses):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.arange(0, len(image_paths)), 1)
    image_paths_array = np.expand_dims(np.array(image_paths), 1)
    sess.run(enqueue_op, {
        image_paths_placeholder: image_paths_array,
        labels_placeholder: labels_array
    })

    embedding_size = embeddings.get_shape()[1]
    nrof_images = len(actual_issame) * 2
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for _ in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab, reg_loss = sess.run(
            [embeddings, labels, regularization_losses], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab] = emb

    assert np.array_equal(lab_array, np.arange(nrof_images)), \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'

    _, _, accuracy, val, val_std, far = lfw.evaluate(
        emb_array, actual_issame, nrof_folds=nrof_folds)

    regloss = np.sum(reg_loss)
    # Compute the spread across folds before collapsing accuracy to its mean
    acc_std = np.std(accuracy)
    accuracy = np.mean(accuracy)
    print('Evaluate: RegLoss %2.3f' % regloss)
    print('Accuracy: %1.3f+-%1.3f' % (accuracy, acc_std))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time

    # Add validation loss and accuracy to summary
    summary = tf.Summary()  # pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=accuracy)
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary.value.add(tag='lfw/center_loss', simple_value=regloss)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, accuracy, val))
    return accuracy
def main(args):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False)) as sess:
            # Read the file containing the pairs used for testing; without a
            # pair file, cross-check every image against every other one
            if args.lfw_pairs is None:
                paths, actual_issame = crossFire(
                    getImages(os.path.expanduser(args.lfw_dir)))
                print("Crossfire check:", len(actual_issame), "pairs to be checked.")
            else:
                pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
                # Get the paths for the corresponding images
                paths, actual_issame = lfw.get_paths(
                    os.path.expanduser(args.lfw_dir), pairs)
                print("Pair file check:", len(actual_issame), "pairs to be checked.")

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Reading the image size from the graph doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                print('Compute batch:', i, '/', nrof_batches, end='\r')
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data2(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)
            print()

            print('--- Evaluate ---')
            tpr, fpr, accuracy, bestThreshold, minThreshold, maxThreshold, val, val_std, far, thresholdAtVal = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)
            print()
            print('--- Verify using Euclidean distance ---')
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Best threshold: %1.3f' % bestThreshold)
            print('Minimal threshold of hit rate @ 100%%: %1.3f' % minThreshold)
            print('Maximum threshold of false alarm rate @ 0%%: %1.3f' % maxThreshold)
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            print('Threshold @ %1.3f' % thresholdAtVal)
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)

            tpr, fpr, accuracy, bestThreshold, minThreshold, maxThreshold, val, val_std, far, thresholdAtVal = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds,
                distance_metric=1)
            print()
            print("--- Verify using cosine similarity ---")
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Best threshold: %1.3f' % bestThreshold)
            print('Minimal threshold of hit rate @ 100%%: %1.3f' % minThreshold)
            print('Maximum threshold of false alarm rate @ 0%%: %1.3f' % maxThreshold)
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            print('Threshold @ %1.3f' % thresholdAtVal)
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
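# --- Sketch (hypothetical): crossFire and getImages are not defined in these
# sources; a plausible reading is that every image is paired with every other
# image, labelling a pair "same" when both come from the same directory. ---
import itertools
import os

def getImages(root):
    return [os.path.join(d, f) for d, _, files in os.walk(root) for f in files]

def crossFire(images):
    paths, actual_issame = [], []
    for a, b in itertools.combinations(images, 2):
        paths.extend([a, b])
        actual_issame.append(os.path.dirname(a) == os.path.dirname(b))
    return paths, actual_issame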