def evaluate(sess, phase_train_placeholder, batch_size_placeholder,
        embeddings, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer,
             evaluate_mode, keep_probability_placeholder, best_acc, args, dataset):
    """Run face-verification evaluation on a pair dataset and log the results.

    Computes embeddings for ``image_paths`` in fixed-size batches, scores the
    pairs with ``lfw_ext`` (Euclidean distance or PCA+cosine similarity),
    writes TensorBoard summaries and a text log, and returns the summary
    metrics.

    Returns:
        (mean accuracy, validation rate, FAR) as floats.

    Raises:
        ValueError: if ``evaluate_mode`` is neither 'Euclidian' nor
            'similarity' (previously this crashed later with a NameError).
    """
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Evaluating face verification %s...'%dataset)
    nrof_images = len(actual_issame) * 2
    # Only whole batches are evaluated; images in a trailing partial batch are
    # dropped, and actual_issame is trimmed to match the enqueued pairs.
    nrof_batches = nrof_images // batch_size
    nrof_enque = batch_size * nrof_batches
    actual_issame = actual_issame[0:nrof_enque // 2]

    embedding_size = embeddings.get_shape()[1]
    emb_array = np.zeros((nrof_enque, embedding_size))

    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    for ii in range(nrof_batches):
        print('batch %d, batch size %d' % (ii, batch_size))
        start_index = ii * batch_size
        # Bound by nrof_enque (not nrof_images): only full batches are run,
        # so this min() never truncates — kept for safety/clarity.
        end_index = min((ii + 1) * batch_size, nrof_enque)
        paths_batch = image_paths[start_index:end_index]
        images = facenet_ext.load_data(paths_batch, False, False, args.image_size)

        feed_dict = {phase_train_placeholder: False,
                     batch_size_placeholder: batch_size,
                     keep_probability_placeholder: 1.0,
                     images_placeholder: images}
        emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

    if evaluate_mode == 'Euclidian':
        _, _, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = \
            lfw_ext.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)
    elif evaluate_mode == 'similarity':
        # Reduce embedding dimensionality before cosine scoring.
        pca = PCA(n_components=128)
        pca.fit(emb_array)
        emb_array_pca = pca.transform(emb_array)
        _, _, accuracy, val, val_std, far, fp_idx, fn_idx, best_threshold, val_threshold = \
            lfw_ext.evaluate_cosine(emb_array_pca, actual_issame, nrof_folds=nrof_folds)
    else:
        raise ValueError('Unknown evaluate_mode: %s' % evaluate_mode)

    for i in range(len(accuracy)):
        print('Accuracy: %1.3f@ best_threshold %1.3f' % (accuracy[i], best_threshold[i]))
    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time

    # Add validation accuracy/rates to the TensorBoard summary.
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='LFW/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='LFW/val_rate', simple_value=val)
    summary.value.add(tag='LFW/far_rate', simple_value=far)
    summary.value.add(tag='time/LFW', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'LFW_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val, far, best_acc))

    acc = np.mean(accuracy)
    return acc, val, far
# Beispiel #2
def rank_n(probe, gallery, data_dir, lfw_batch_size, sess):
    """Compute rank-1 and rank-n (n = 10) identification accuracy of probe
    images against a gallery using the identity embeddings of the loaded model.

    Returns:
        (rank1_acc, rank_n_acc).  NOTE: the second value is printed as
        'Rank 2 acc' for log compatibility but is actually rank-n with n = 10.
    """
    #########################   webcari    ##########################
    image_list_probe, label_list_id_probe \
        = facenet_ext.get_image_paths_and_labels_webcari_rank(probe, data_dir)
    image_list_gallery, label_list_id_gallery \
        = facenet_ext.get_image_paths_and_labels_webcari_rank(gallery, data_dir)

    ## Map the string id labels to integer id labels (case-insensitive).
    see_id = sorted(set(label_list_id_probe + label_list_id_gallery))
    See_id = [id_.upper() for id_ in see_id]
    # Dict lookup instead of repeated list.index(): same mapping (first
    # occurrence wins, matching .index semantics), O(1) per label.
    id_to_idx = {}
    for k, id_ in enumerate(See_id):
        id_to_idx.setdefault(id_, k)
    label_list_id_probe = [id_to_idx[label.upper()] for label in label_list_id_probe]
    label_list_id_gallery = [id_to_idx[label.upper()] for label in label_list_id_gallery]

    # Get input and output tensors from the restored graph.
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    embeddings_visual = tf.get_default_graph().get_tensor_by_name(
        "embeddings_expression:0")
    embeddings_cari = tf.get_default_graph().get_tensor_by_name(
        "embeddings_cari:0")
    keep_probability_placeholder = tf.get_default_graph().get_tensor_by_name(
        'keep_probability:0')
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        'phase_train:0')
    phase_train_placeholder_visual = tf.get_default_graph().get_tensor_by_name(
        'phase_train_expression:0')
    phase_train_placeholder_cari = tf.get_default_graph().get_tensor_by_name(
        'phase_train_cari:0')
    logits_visual = tf.get_default_graph().get_tensor_by_name('logits_expr:0')
    logits_cari = tf.get_default_graph().get_tensor_by_name('logits_cari:0')

    image_size = images_placeholder.get_shape()[1]
    embedding_size = embeddings.get_shape()[1]
    batch_size = lfw_batch_size

    def _embed(paths, what):
        # Batched forward pass over `paths`; returns the identity embeddings.
        # All heads are evaluated (as in the original code) but only the
        # identity embeddings are used for ranking.
        print('Runnning forward pass on %s images' % what)
        nrof_images = len(paths)
        nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
        emb_array = np.zeros((nrof_images, embedding_size))
        for i in range(nrof_batches):
            print("Test batch:%d/%d\n" % (i, nrof_batches))
            start_index = i * batch_size
            end_index = min((i + 1) * batch_size, nrof_images)
            paths_batch = paths[start_index:end_index]
            images = facenet_ext.load_data(paths_batch, False, False, image_size)
            feed_dict = {
                phase_train_placeholder: False,
                phase_train_placeholder_visual: False,
                phase_train_placeholder_cari: False,
                images_placeholder: images,
                keep_probability_placeholder: 1.0
            }
            emb_, _, _, _, _ = sess.run(
                [
                    embeddings, embeddings_visual, embeddings_cari,
                    logits_visual, logits_cari
                ],
                feed_dict=feed_dict)
            emb_array[start_index:end_index, :] = emb_
        return emb_array

    emb_array_probe = _embed(image_list_probe, 'probe')
    emb_array_gallery = _embed(image_list_gallery, 'gallery')

    # Squared Euclidean distance between every probe and every gallery
    # embedding, vectorized (replaces the original O(P*G) Python double loop).
    diff = emb_array_probe[:, None, :] - emb_array_gallery[None, :, :]
    dist_emb_prob_array = np.sum(np.square(diff), axis=2)

    # Rank-1: nearest gallery embedding per probe.
    rank1_idx = np.argmin(dist_emb_prob_array, 1)
    label_list_id_probe_predict = [label_list_id_gallery[i] for i in rank1_idx]
    correct_prediction = np.equal(label_list_id_probe_predict,
                                  label_list_id_probe)
    rank1_acc = np.mean(correct_prediction)
    print('Rank 1 acc: %2.3f' % rank1_acc)

    n = 10
    # argpartition puts the n nearest gallery indices (unordered) into the
    # first n columns of each row.
    rank_n_idx = np.argpartition(dist_emb_prob_array, n, 1)
    nrof_probe = len(label_list_id_probe)
    corr_n = 0
    for row in range(nrof_probe):
        # BUG FIX: the original grouped predictions with
        # correct_prediction[i::n], which strides across different probes'
        # predictions; each probe's n candidate labels are the consecutive
        # chunk belonging to that probe.
        candidates = [label_list_id_gallery[j] for j in rank_n_idx[row][:n]]
        if label_list_id_probe[row] in candidates:
            corr_n += 1
    # float(): guard against integer division under Python 2.
    rank2_acc = corr_n / float(nrof_probe)
    print('Rank 2 acc: %2.3f' % rank2_acc)

    return rank1_acc, rank2_acc
def main(args):
    """Extract identity/visual/caricature embeddings for every CAVI image with a
    pre-trained model and save per-branch feature matrices and their id labels
    as .npy files under a fresh timestamped log directory.

    NOTE(review): this ``def main`` is shadowed by a second ``def main`` defined
    later in this file, so this version is unreachable through the normal
    entry point unless renamed — confirm which one is intended.
    """

    # One log directory per run, named by the current timestamp.
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    print('log_dir: %s\n' % log_dir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    with tf.Graph().as_default():

        with tf.Session() as sess:

            ### all samples CAVI
            image_list_all, label_list_id_all, label_list_cari_all, nrof_classes_all = facenet_ext.get_image_paths_and_labels_cavi(
                args.data_dir)

            paths = image_list_all
            labels_actual = label_list_id_all

            # Load the model (metagraph + latest checkpoint).
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet_ext.get_model_filenames(
                os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet_ext.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors by name from the restored graph.
            #images_placeholder = tf.get_default_graph().get_tensor_by_name("image_batch:0")
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            embeddings_visual = tf.get_default_graph().get_tensor_by_name(
                "embeddings_expression:0")
            embeddings_cari = tf.get_default_graph().get_tensor_by_name(
                "embeddings_cari:0")
            keep_probability_placeholder = tf.get_default_graph(
            ).get_tensor_by_name('keep_probability:0')
            #weight_decay_placeholder = tf.get_default_graph().get_tensor_by_name('weight_decay:0')
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name('phase_train:0')
            phase_train_placeholder_visual = tf.get_default_graph(
            ).get_tensor_by_name('phase_train_expression:0')
            phase_train_placeholder_cari = tf.get_default_graph(
            ).get_tensor_by_name('phase_train_cari:0')

            logits_visual = tf.get_default_graph().get_tensor_by_name(
                'logits_expr:0')
            logits_cari = tf.get_default_graph().get_tensor_by_name(
                'logits_cari:0')
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            embeddings_visual_size = embeddings_visual.get_shape()[1]
            embeddings_cari_size = embeddings_cari.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Runnning forward pass on input images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            emb_visual_array = np.zeros((nrof_images, embeddings_visual_size))
            emb_cari_array = np.zeros((nrof_images, embeddings_cari_size))
            #emb_array = np.zeros((2*batch_size, embedding_size))
            logits_visual_size = logits_visual.get_shape()[1]
            logits_visual_array = np.zeros((nrof_images, logits_visual_size),
                                           dtype=float)
            logits_cari_size = logits_cari.get_shape()[1]
            logits_cari_array = np.zeros((nrof_images, logits_cari_size),
                                         dtype=float)
            # Batched inference; the final batch may be smaller than batch_size.
            for i in range(nrof_batches):
                print("Test batch:%d/%d\n" % (i, nrof_batches))
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet_ext.load_data(paths_batch, False, False,
                                               image_size)
                feed_dict = {
                    phase_train_placeholder: False,
                    phase_train_placeholder_visual: False,
                    phase_train_placeholder_cari: False,
                    images_placeholder: images,
                    keep_probability_placeholder: 1.0
                }
                #feed_dict = {phase_train_placeholder: False, images_placeholder: images}
                emb_, emb_visual_, emb_cari_, logits_visual_, logits_cari_ = sess.run(
                    [
                        embeddings, embeddings_visual, embeddings_cari,
                        logits_visual, logits_cari
                    ],
                    feed_dict=feed_dict)
                emb_array[start_index:end_index, :] = emb_
                emb_visual_array[start_index:end_index, :] = emb_visual_
                emb_cari_array[start_index:end_index, :] = emb_cari_
                logits_visual_array[start_index:end_index, :] = logits_visual_
                logits_cari_array[start_index:end_index, :] = logits_cari_

            # Split rows by the caricature flag: 0 appears to mean real photo
            # (visual), 1 caricature — presumably; TODO confirm against
            # get_image_paths_and_labels_cavi.
            len_emb = emb_array.shape[0]
            label_list_cari_all_ = label_list_cari_all[:len_emb]
            filter_visual = [x == 0 for x in label_list_cari_all_]
            filter_cari = [x == 1 for x in label_list_cari_all_]

            row_visual = list(compress(range(len_emb), filter_visual))
            row_cari = list(compress(range(len_emb), filter_cari))

            # c2v/v2c: cross-branch features (e.g. caricature images through
            # the visual branch) — naming inferred, verify with downstream use.
            emb_visual_array_real = emb_visual_array[row_visual]
            emb_c2v_array = emb_visual_array[row_cari]
            emb_cari_array_real = emb_cari_array[row_cari]
            emb_v2c_array = emb_cari_array[row_visual]

            label_id_emb_visual_real = list(
                np.array(label_list_id_all)[row_visual])
            label_id_emb_c2v = list(np.array(label_list_id_all)[row_cari])
            label_id_emb_cari_real = list(
                np.array(label_list_id_all)[row_cari])
            label_id_emb_v2c = list(np.array(label_list_id_all)[row_visual])

            # Persist feature matrices and matching labels for offline analysis.
            np.save(os.path.join(log_dir, 'features_emb_visual_real.npy'),
                    emb_visual_array_real)
            np.save(os.path.join(log_dir, 'features_emb_cari_real.npy'),
                    emb_cari_array_real)
            np.save(os.path.join(log_dir, 'features_emb_c2v.npy'),
                    emb_c2v_array)
            np.save(os.path.join(log_dir, 'features_emb_v2c.npy'),
                    emb_v2c_array)

            np.save(os.path.join(log_dir, 'label_id_emb_visual_real.npy'),
                    label_id_emb_visual_real)
            np.save(os.path.join(log_dir, 'label_id_emb_cari_real.npy'),
                    label_id_emb_cari_real)
            np.save(os.path.join(log_dir, 'label_id_emb_c2v.npy'),
                    label_id_emb_c2v)
            np.save(os.path.join(log_dir, 'label_id_emb_v2c.npy'),
                    label_id_emb_v2c)

    return
def main(args):
    """Evaluate face verification on the CAVI caricature test pairs and save
    accuracy/AUC, false-positive/false-negative pair lists, and the ROC curve
    to a timestamped log directory.
    """

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    print('log_dir: %s\n' % log_dir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    with tf.Graph().as_default():

        with tf.Session() as sess:

            def _flatten_pairs(pairs):
                # Expand (img1, img2, lab1, lab2, cari1, cari2) tuples back
                # into flat per-image lists.
                imgs, labels, caris = [], [], []
                for img1, img2, lab1, lab2, cari1, cari2 in pairs:
                    imgs += [img1, img2]
                    labels += [lab1, lab2]
                    caris += [cari1, cari2]
                return imgs, labels, caris

            #########################   CARI    ##########################
            ## See_test samples
            image_list_test, label_list_test, nrof_classes_test, label_verif, label_cari_test \
                = facenet_ext.get_image_paths_and_labels_cavi_fromfile(args.test_pairs, args.data_dir)

            ## Downsample the test pairs: keep every down_sample-th pair.
            down_sample = 20
            # list(zip(...)): in Python 3 a zip object is not subscriptable.
            xx = list(zip(image_list_test[0::2], image_list_test[1::2], label_list_test[0::2], label_list_test[1::2],
                          label_cari_test[0::2], label_cari_test[1::2]))
            yy = xx[0::down_sample]
            image_list_test, label_list_test, label_cari_test = _flatten_pairs(yy)
            label_verif = label_verif[0::down_sample]

            ## Shuffle the test pairs (pairs stay aligned with label_verif).
            # list(range(...)): range objects cannot be shuffled in Python 3.
            random_idx = list(range(len(label_verif)))
            random.shuffle(random_idx)
            xx = list(zip(image_list_test[0::2], image_list_test[1::2], label_list_test[0::2], label_list_test[1::2],
                          label_cari_test[0::2], label_cari_test[1::2]))
            yy = [xx[i] for i in random_idx]
            label_verif = [label_verif[i] for i in random_idx]
            image_list_test, label_list_test, label_cari_test = _flatten_pairs(yy)

            paths = image_list_test
            actual_issame = label_verif

            ###### Load the model #####
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet_ext.get_model_filenames(os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet_ext.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors by name from the restored graph.
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            embeddings_visual = tf.get_default_graph().get_tensor_by_name("embeddings_expression:0")
            embeddings_cari = tf.get_default_graph().get_tensor_by_name("embeddings_cari:0")
            keep_probability_placeholder = tf.get_default_graph().get_tensor_by_name('keep_probability:0')
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')
            phase_train_placeholder_visual = tf.get_default_graph().get_tensor_by_name('phase_train_expression:0')
            phase_train_placeholder_cari = tf.get_default_graph().get_tensor_by_name('phase_train_cari:0')

            logits_visual = tf.get_default_graph().get_tensor_by_name('logits_expr:0')
            logits_cari = tf.get_default_graph().get_tensor_by_name('logits_cari:0')
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings.  All heads are
            # evaluated (as in the original code) but only the identity
            # embeddings are used for verification.
            print('Runnning forward pass on input images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                print("Test batch:%d/%d\n" % (i, nrof_batches))
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet_ext.load_data(paths_batch, False, False, image_size)
                feed_dict = {phase_train_placeholder: False, phase_train_placeholder_visual: False, phase_train_placeholder_cari: False, images_placeholder: images, keep_probability_placeholder: 1.0}
                emb_, emb_visual_, emb_cari_, logits_visual_, logits_cari_ = sess.run([embeddings, embeddings_visual, embeddings_cari, logits_visual, logits_cari], feed_dict=feed_dict)
                emb_array[start_index:end_index, :] = emb_

            # BUG FIX: this print was mis-indented in the original (neither
            # loop-body nor function-body column), which is an IndentationError.
            print('Evaluate_model: %s' % args.evaluate_mode)

            #### Evaluation Face verification #############
            if args.evaluate_mode == 'Euclidian':
                tprs, fprs, accuracy, val, val_std, fp_idx, fn_idx, best_threshold = lfw_ext.evaluate(emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds, far=args.far)
            else:
                # Previously an unsupported mode crashed later with a
                # NameError on `accuracy`; fail fast instead.
                raise ValueError('Unsupported evaluate_mode: %s' % args.evaluate_mode)

            nrof_test_tp_pairs = sum(actual_issame)
            nrof_test_tn_pairs = len(actual_issame) - nrof_test_tp_pairs
            nrof_test_paths = len(actual_issame) * 2
            # paths_pairs shape: [2, number of pairs], each column is corresponding to a pair of images
            paths_pairs = [paths[0:nrof_test_paths:2], paths[1:nrof_test_paths:2]]
            paths_pairs_array = np.array(paths_pairs)
            fp_images_paths = paths_pairs_array[:, fp_idx]
            fn_images_paths = paths_pairs_array[:, fn_idx]
            _, nrof_fp_pairs = fp_images_paths.shape
            _, nrof_fn_pairs = fn_images_paths.shape

            ################### select the false positive/negative images ####################

            print('Accuracy: %1.3f+-%1.3f\n' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f\n' % (val, val_std, args.far))

            # AUC per cross-validation fold.
            auc = np.zeros(tprs.shape[0])
            for i in range(tprs.shape[0]):
                fpr = fprs[i]
                tpr = tprs[i]
                auc[i] = metrics.auc(fpr, tpr)

            auc_mean = np.mean(auc)
            auc_std = np.std(auc)
            print('Area Under Curve (AUC): %1.3f+-%1.3f\n' % (auc_mean, auc_std))

            with open(os.path.join(log_dir, 'validation_on_dataset.txt'), 'at') as f:
                print('Saving the evaluation results...\n')
                f.write('arguments: %s\n--------------------\n' % ' '.join(sys.argv))
                f.write('Average accuracy: %1.3f+-%1.3f\n' % (np.mean(accuracy), np.std(accuracy)))
                f.write('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f\n' % (val, val_std, args.far))
                f.write('Best threshold: %2.5f \n' % (best_threshold))
                f.write('Area Under Curve (AUC): %1.3f+-%1.3f\n' % (auc_mean, auc_std))
                f.write('------------- \n')
                f.write('False positive pairs: %d / %d -----------------------------------------\n' % ( \
                nrof_fp_pairs, nrof_test_tp_pairs))
                for i in range(nrof_fp_pairs):
                    f.write('%d %s\n' % (i, fp_images_paths[:, i]))
                print('Saving the False negative pairs images ...\n ')
                f.write('False negative pairs: %d / %d ---------------------------------------\n' % ( \
                nrof_fn_pairs, nrof_test_tn_pairs))
                for i in range(nrof_fn_pairs):
                    f.write('%d %s\n' % (i, fn_images_paths[:, i]))

            # Copy the false positive/negative images listed above into the log dir.
            false_images_list = os.path.join(log_dir, 'validation_on_dataset.txt')
            save_dir = log_dir
            save_false_images.save_false_images(false_images_list, save_dir)

            with open(os.path.join(log_dir, 'validation_on_dataset.txt'), 'at') as f:
                print('Saving the tpr, fpr of ROC ...\n ')
                f.write('ROC: tpr, fpr ---------------------------------------------\n')
                # NOTE(review): tpr/fpr here are the leftover loop variables
                # from the AUC loop above, i.e. the curve of the LAST fold only.
                for tp, fp in zip(tpr, fpr):
                    f.write('tpr/fpr: %f/%f\n' % (tp, fp))

            # Plot and save the (last-fold) ROC curve.
            fig = plt.figure()
            plt.plot(fpr, tpr, label='ROC')
            plt.title('Receiver Operating Characteristics')
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            plt.legend()
            plt.plot([0, 1], [0, 1], 'g--')
            plt.grid(True)
            #plt.show()
            fig.savefig(os.path.join(log_dir, 'roc.png'))