def evaluate(sess, enque_op, face_pl, nose_pl, lefteye_pl, rightmouth_pl,
             labels_pl, phase_train_pl, batch_size_pl, embeddings, labels,
             image_list, actual_issame, batch_size, num_folds, log_dir, step,
             summary_writer):
    """Evaluate the four-patch (face/nose/lefteye/rightmouth) model on LFW.

    Enqueues one epoch of patch image paths, runs the network to collect
    embeddings, scores them with ``test_utils.evaluate``, prints the results,
    and logs accuracy / validation-rate summaries to TensorBoard and to
    ``<log_dir>/lfw_result.txt``.

    Args:
        sess: Active TensorFlow session.
        enque_op: Op that enqueues (paths, labels) into the input pipeline.
        face_pl, nose_pl, lefteye_pl, rightmouth_pl: Path placeholders.
        labels_pl: Label placeholder.
        phase_train_pl: Boolean placeholder; fed False for evaluation.
        batch_size_pl: Batch-size placeholder.
        embeddings: Embedding output tensor, shape (batch, dim).
        labels: Dequeued-label tensor paired with `embeddings`.
        image_list: Face image paths; patch paths are derived from these.
        actual_issame: Ground-truth same/different flags (one per pair).
        batch_size: Evaluation batch size; must divide len(actual_issame)*2.
        num_folds: Cross-validation folds for test_utils.evaluate.
        log_dir: Directory receiving 'lfw_result.txt'.
        step: Global step recorded with the summaries.
        summary_writer: tf.summary.FileWriter for TensorBoard output.
    """
    print("evaluating on lfw...")
    start_time = time.time()

    # Derive the patch paths by substituting the patch name into the face
    # filename (assumes 'face' occurs in each path — TODO confirm against the
    # aligned-dataset naming scheme).
    face_array = np.array(image_list)
    nose_array = np.array([ss.replace('face', 'nose') for ss in face_array])
    lefteye_array = np.array(
        [ss.replace('face', 'lefteye') for ss in face_array])
    rightmouth_array = np.array(
        [ss.replace('face', 'rightmouth') for ss in face_array])
    # Labels are positions in image_list; they are used below to restore
    # enqueue order after the (possibly shuffled) pipeline.
    labels_array = np.expand_dims(np.arange(0, len(image_list)), 1)
    face_array = np.expand_dims(face_array, 1)
    nose_array = np.expand_dims(nose_array, 1)
    lefteye_array = np.expand_dims(lefteye_array, 1)
    rightmouth_array = np.expand_dims(rightmouth_array, 1)
    sess.run(
        enque_op, {
            face_pl: face_array,
            nose_pl: nose_array,
            lefteye_pl: lefteye_array,
            rightmouth_pl: rightmouth_array,
            labels_pl: labels_array
        })

    embeddings_dim = embeddings.get_shape()[1]
    num_images = len(actual_issame) * 2
    # BUGFIX: message now describes the actual condition (divisibility by the
    # batch size, not evenness).
    assert num_images % batch_size == 0, \
        'Number of LFW images must be divisible by the batch size.'
    num_batches = num_images // batch_size
    emb_array = np.zeros((num_images, embeddings_dim))
    lab_array = np.zeros((num_images, ))
    for _ in range(num_batches):  # index itself is unused
        feed_dict = {phase_train_pl: False, batch_size_pl: batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        # Scatter by dequeued label so rows end up in enqueue order.
        lab_array[lab] = lab
        emb_array[lab] = emb
    # BUGFIX: dropped the redundant '== True' comparison on np.array_equal
    # (matches the style of the other evaluate() in this file).
    assert np.array_equal(
        lab_array, np.arange(num_images)), 'Wrong labels used for evaluation'

    _, _, acc, val, val_std, far = test_utils.evaluate(emb_array,
                                                       actual_issame,
                                                       num_folds=num_folds)
    print('acc: %1.3f+-%1.3f' % (np.mean(acc), np.std(acc)))
    # BUGFIX: '+=' was a typo for '+-' in the printed validation-rate line.
    print('vr : %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time

    # Summary
    summary = tf.Summary()
    summary.value.add(tag='lfw/acc', simple_value=np.mean(acc))
    summary.value.add(tag='lfw/vr', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as fp:
        fp.write('%d\t%.5f\t%.5f\n' % (step, np.mean(acc), val))
def main(args):
    """Extract LFW embeddings with inception_resnet_v1 weights from an .npy
    file and report accuracy, VR@FAR, AUC, and EER.

    Args:
        args: Namespace with at least `pretrained_model`, a path to an .npy
            file mapping variable names to values (loaded as a 0-d object
            array, hence the `weights[()]` access below).
    """
    if not os.path.exists(args.pretrained_model):
        print('invalid pretrained model path')
        return
    weights = np.load(args.pretrained_model)
    pairs = test_utils.read_pairs('/exports_data/czj/data/lfw/files/pairs.txt')
    imglist, labels = test_utils.get_paths(
        '/exports_data/czj/data/lfw/lfw_aligned/', pairs, '_face_.jpg')
    total_images = len(imglist)
    # ---- build graph ---- #
    # BUGFIX: renamed the placeholder variable from 'input', which shadowed
    # the Python builtin; the graph tensor name 'image_batch' is unchanged.
    images_pl = tf.placeholder(tf.float32,
                               shape=[None, 160, 160, 3],
                               name='image_batch')
    prelogits, _ = inception_resnet_v1.inference(images_pl, 1,
                                                 phase_train=False)
    embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10)
    # ---- extract ---- #
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False,
                                            allow_soft_placement=True))
    with sess.as_default():
        # Restore every trainable variable from the .npy weight dictionary.
        beg_time = time.time()
        to_assign = [
            v.assign(weights[()][v.name][0])
            for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        ]
        sess.run(to_assign)
        print('restore parameters: %.2fsec' % (time.time() - beg_time))
        beg_time = time.time()
        images = load_data(imglist)
        print('load images: %.2fsec' % (time.time() - beg_time))
        beg_time = time.time()
        # Batched forward passes; the trailing partial batch is handled by
        # clamping `end` to total_images.
        batch_size = 32
        beg = 0
        end = 0
        features = np.zeros((total_images, 128))
        while end < total_images:
            end = min(beg + batch_size, total_images)
            features[beg:end] = sess.run(embeddings,
                                         {images_pl: images[beg:end]})
            beg = end
        print('extract features: %.2fsec' % (time.time() - beg_time))
        tpr, fpr, acc, vr, vr_std, far = test_utils.evaluate(features,
                                                             labels,
                                                             num_folds=10)
        # display
        auc = metrics.auc(fpr, tpr)
        # EER is where the ROC curve crosses the FPR = 1 - TPR diagonal.
        eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x),
                     0., 1.)
        print('Acc: %1.3f+-%1.3f' % (np.mean(acc), np.std(acc)))
        print('VR@FAR=%2.5f: %2.5f+-%2.5f' % (far, vr, vr_std))
        print('AUC: %1.3f' % auc)
        print('EER: %1.3f' % eer)
    sess.close()
def main(args):
    """Evaluate an ensemble of face models on LFW.

    For each (model_path, pair_suffix, weight) entry in the model list,
    restores the model, extracts embeddings for all LFW pairs, scales them by
    sqrt(weight), and concatenates them into one ensemble feature vector per
    image before scoring.

    Args:
        args: Namespace with `lfw_pairs`, `model_list`, `lfw_dir`,
            `image_size`, `lfw_batch_size`, and `num_folds`.
    """
    pairs = test_utils.read_pairs(args.lfw_pairs)
    model_list = test_utils.get_model_list(args.model_list)
    for t, model in enumerate(model_list):
        # get lfw pair filename
        paths, labels = test_utils.get_paths(args.lfw_dir, pairs, model[1])
        with tf.device('/gpu:%d' % (t + 1)):
            gpu_options = tf.GPUOptions(allow_growth=True)
            sess = tf.Session(
                config=tf.ConfigProto(gpu_options=gpu_options,
                                      log_device_placement=False,
                                      allow_soft_placement=True))
            with sess.as_default():
                print("[%d] model: %s" % (t, model[1]))
                # restore model
                test_utils.load_model(sess, model[0])
                # load data tensor
                images_pl = tf.get_default_graph().get_tensor_by_name(
                    'image_batch:0')
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    'embeddings:0')
                phase_train_pl = tf.get_default_graph().get_tensor_by_name(
                    'phase_train:0')
                image_size = args.image_size
                emb_size = embeddings.get_shape()[1]
                # extract feature
                batch_size = args.lfw_batch_size
                num_images = len(paths)
                # BUGFIX: use ceiling division so the trailing partial batch
                # is processed; floor division left the final
                # num_images % batch_size rows of emb_arr as zeros.
                num_batches = (num_images + batch_size - 1) // batch_size
                emb_arr = np.zeros((num_images, emb_size))
                for i in range(num_batches):
                    print('process %d/%d' % (i + 1, num_batches), end='\r')
                    beg_idx = i * batch_size
                    end_idx = min((i + 1) * batch_size, num_images)
                    images = test_utils.load_data(paths[beg_idx:end_idx],
                                                  image_size)
                    emb = sess.run(embeddings,
                                   feed_dict={images_pl: images,
                                              phase_train_pl: False})
                    emb_arr[beg_idx:end_idx, :] = emb
                print("\ndone.")
        # concate feaure: weight each model's embedding by sqrt(weight) so
        # the squared-distance contribution is proportional to the weight.
        if t == 0:
            emb_ensemble = emb_arr * math.sqrt(float(model[2]))
        else:
            emb_ensemble = np.concatenate(
                (emb_ensemble, emb_arr * math.sqrt(float(model[2]))), axis=1)
    print("ensemble feature:", emb_ensemble.shape)
    '''
    norm = np.linalg.norm(emb_ensemble, axis=1)
    for i in range(emb_ensemble.shape[0]):
        emb_ensemble[i] = emb_ensemble[i] / norm[i]
    '''
    tpr, fpr, acc, vr, vr_std, far = test_utils.evaluate(
        emb_ensemble, labels, num_folds=args.num_folds)
    # display
    auc = metrics.auc(fpr, tpr)
    # EER is where the ROC curve crosses the FPR = 1 - TPR diagonal.
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Acc: %1.3f+-%1.3f' % (np.mean(acc), np.std(acc)))
    print('VR@FAR=%2.5f: %2.5f+-%2.5f' % (far, vr, vr_std))
    print('AUC: %1.3f' % auc)
    print('EER: %1.3f' % eer)
def evaluate(sess, enque_op, imgpaths_pl, labels_pl, phase_train_pl,
             batch_size_pl, embeddings, label_batch, image_list, lfw_label,
             batch_size, num_folds, log_dir, step, summary_writer):
    """Evaluate the single-patch model on LFW and log results.

    Enqueues one epoch of image paths, collects embeddings batch by batch,
    scores them with ``test_utils.evaluate``, prints the results, and logs
    accuracy / validation-rate summaries to TensorBoard and to
    ``<log_dir>/lfw_result.txt``.

    Args:
        sess: Active TensorFlow session.
        enque_op: Op that enqueues (paths, labels) into the input pipeline.
        imgpaths_pl: Image-path placeholder.
        labels_pl: Label placeholder.
        phase_train_pl: Boolean placeholder; fed False for evaluation.
        batch_size_pl: Batch-size placeholder.
        embeddings: Embedding output tensor, shape (batch, dim).
        label_batch: Dequeued-label tensor paired with `embeddings`.
        image_list: LFW image paths.
        lfw_label: Ground-truth same/different flags (one per pair).
        batch_size: Evaluation batch size; must divide len(lfw_label)*2.
        num_folds: Cross-validation folds for test_utils.evaluate.
        log_dir: Directory receiving 'lfw_result.txt'.
        step: Global step recorded with the summaries.
        summary_writer: tf.summary.FileWriter for TensorBoard output.
    """
    print("evaluating on lfw...")
    start_time = time.time()
    # Enqueue one epoch of image paths and labels; labels are positions in
    # image_list, used below to restore enqueue order.
    labels_array = np.expand_dims(np.arange(0, len(image_list)), 1)
    paths_array = np.expand_dims(np.array(image_list), 1)
    sess.run(enque_op, {imgpaths_pl: paths_array, labels_pl: labels_array})

    embeddings_dim = embeddings.get_shape()[1]
    num_images = len(lfw_label) * 2
    # BUGFIX: message now describes the actual condition (divisibility by the
    # batch size, not evenness).
    assert num_images % batch_size == 0, \
        'Number of LFW images must be divisible by the batch size.'
    num_batches = num_images // batch_size
    emb_array = np.zeros((num_images, embeddings_dim))
    lab_array = np.zeros((num_images, ))
    for _ in range(num_batches):
        feed_dict = {phase_train_pl: False, batch_size_pl: batch_size}
        emb, lab = sess.run([embeddings, label_batch], feed_dict=feed_dict)
        # Scatter by dequeued label so rows end up in enqueue order.
        lab_array[lab] = lab
        emb_array[lab] = emb
    assert np.array_equal(
        lab_array, np.arange(num_images)), 'Wrong labels used for evaluation'

    _, _, acc, val, val_std, far = test_utils.evaluate(emb_array,
                                                       lfw_label,
                                                       num_folds=num_folds)
    print('acc: %1.3f+-%1.3f' % (np.mean(acc), np.std(acc)))
    # BUGFIX: '+=' was a typo for '+-' in the printed validation-rate line.
    print('vr : %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time

    # Summary
    summary = tf.Summary()
    summary.value.add(tag='lfw/acc', simple_value=np.mean(acc))
    summary.value.add(tag='lfw/vr', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as fp:
        fp.write('%d\t%.5f\t%.5f\n' % (step, np.mean(acc), val))
bidirectional=experiment_arguments.bidirectional)
# NOTE(review): the line above is the tail of a model-constructor call whose
# beginning lies outside this chunk; left untouched.

# Move model to device and load weights
model.to(device)
model.load_state_dict(torch.load(model_path))

# Convert data to torch tensors
training_eval = [tensors_from_pair(pair, equivariant_commands,
                                   equivariant_actions)
                 for pair in train_pairs]
testing_pairs = [tensors_from_pair(pair, equivariant_commands,
                                   equivariant_actions)
                 for pair in test_pairs]

# Compute accuracy and print some translation
if args.compute_train_accuracy:
    train_acc = test_accuracy(model, training_eval)
    # train_acc is a tensor here (.item() extracts the Python scalar)
    print("Model train accuracy: %s" % train_acc.item())
if args.compute_test_accuracy:
    test_acc = test_accuracy(model, testing_pairs)
    print("Model test accuracy: %s" % test_acc.item())
if args.print_param_nums:
    print("Model contains %s params" % model.num_params)
# Translate a few randomly chosen test pairs and show the source ('>'),
# reference ('='), and model output ('<') for manual inspection.
for i in range(args.print_translations):
    pair = random.choice(test_pairs)
    print('>', pair[0])
    print('=', pair[1])
    output_words = evaluate(model, equivariant_commands, equivariant_actions,
                            pair[0])
    output_sentence = ' '.join(output_words)
    print('<', output_sentence)
    print('')
# For the splits that introduce a new primitive, fine-tune the model on
# repeated copies of that single (source, target) pair before evaluating.
if experiment_arguments.split in ["add_book", "add_house"]:
    num_new_prim_pairs = int(args.p_new_prim * len(training_eval))
    new_prim_training_pairs = [
        tensors_from_pair(new_prim_pair, equivariant_eng, equivariant_fra)
    ] * num_new_prim_pairs
    model = train_new_prim(model, new_prim_training_pairs,
                           experiment_arguments)

# Report accuracy / BLEU as requested by the command-line flags.
if args.compute_train_accuracy:
    train_acc, train_bleu = test_accuracy(model, training_eval, True)
    print("Model train accuracy: %s" % train_acc)
    print("Model train bleu score: %s" % train_bleu)
if args.compute_test_accuracy:
    test_acc, test_bleu = test_accuracy(model, testing_pairs, True)
    print("Model test accuracy: %s" % test_acc)
    print("Model test bleu score: %s" % test_bleu)
if args.print_param_nums:
    print("Model contains %s params" % model.num_params)

# Translate a few randomly chosen test pairs for manual inspection:
# source ('>'), reference ('='), model output ('<').
for _ in range(args.print_translations):
    sample = random.choice(test_pairs)
    print('>', sample[0])
    print('=', sample[1])
    translated = evaluate(model, equivariant_eng, equivariant_fra, sample[0])
    print('<', ' '.join(translated))
    print('')
# For the splits that introduce a new primitive, fine-tune the model on
# repeated copies of that single (source, target) pair before evaluating.
if experiment_arguments.split in ["add_book", "add_house"]:
    base_pair = tensors_from_pair(new_prim_pair, eng_lang, fra_lang)
    repeat_count = int(args.p_new_prim * len(training_eval))
    new_prim_training_pairs = [base_pair] * repeat_count
    model = train_new_prim(model, new_prim_training_pairs,
                           experiment_arguments)

# Report accuracy / BLEU as requested by the command-line flags.
if args.compute_train_accuracy:
    train_acc, train_bleu = test_accuracy(model, training_eval, True)
    print("Model train accuracy: %s" % train_acc)
    print("Model train BLEU: %s" % train_bleu)
if args.compute_test_accuracy:
    test_acc, test_bleu = test_accuracy(model, testing_pairs, True)
    print("Model test accuracy: %s" % test_acc)
    print("Model test BLEU: %s" % test_bleu)
if args.print_param_nums:
    print("Model contains %s params" % model.num_params)

# Translate a few randomly chosen test pairs for manual inspection:
# source ('>'), reference ('='), model output ('<'), plus per-pair BLEU.
for _ in range(args.print_translations):
    sample = random.choice(test_pairs)
    print('>', sample[0])
    print('=', sample[1])
    translated, bleu_score = evaluate(model, eng_lang, fra_lang, sample[0],
                                      True, sample[1])
    print('<', ' '.join(translated))
    print('BLEU Score: ', bleu_score)
    print('')