def eval_on_biwi(store_file, results_fol, store_each=20, overwrite=False):
    config.load_config()
    config.CONFIG["STORAGE"]["database_file"] = store_file

    aux_info = load_identities()
    face_rec = recognition.Recognition()
    dataset = BiwiDataset()

    # Resume a previous run: skip frames already covered by stored result chunks.
    cached_results = len(os.listdir(results_fol))
    start = int(store_each * cached_results)
    results = {
        "is_same": [],
        "score": [],
        "label": [],
        "roll": [],
        "yaw": [],
        "pitch": [],
    }

    # Eval
    for ctr, (iden, frame) in itertools.islice(enumerate(dataset), start, None):
        image_path, (center3D, angle) = dataset[iden, frame]
        image = cv2.imread(image_path)
        faces = face_rec.recognize(image)  # type: typing.List[datum.Datum]
        match = match_detection(faces, (center3D, angle))

        if match is None:
            results["score"].append(1.0)
            results["is_same"].append(False)
        else:
            results["score"].append(faces[match].match_score)
            results["is_same"].append(
                faces[match].identity == aux_info.iden[iden - 1]
            )
        results["label"].append(iden)

        results["roll"].append(angle[0])
        results["pitch"].append(angle[1])
        results["yaw"].append(angle[2])

        if ctr % 10 == 9:
            print(
                "\rImages processed: {}/~14000. Current folder: {}.".format(ctr, iden),
                end="",
            )

        if ctr % store_each == store_each - 1:
            df = pd.DataFrame(results)
            df.to_pickle(path.join(results_fol, "results_{}.pkl".format(ctr)))
            results = {
                "is_same": [],
                "score": [],
                "label": [],
                "roll": [],
                "yaw": [],
                "pitch": [],
            }

    df = pd.DataFrame(results)
    df.to_pickle(path.join(results_fol, "results_END.pkl"))
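

# A small helper sketch (not in the original module; the name is hypothetical):
# eval_on_biwi above writes results in chunks of `store_each` rows plus a final
# "results_END.pkl", so the full run can be rebuilt with pandas for analysis.
def load_biwi_results(results_fol):
    """Concatenate every chunked result pickle written by eval_on_biwi."""
    frames = [
        pd.read_pickle(path.join(results_fol, name))
        for name in sorted(os.listdir(results_fol))
        if name.endswith(".pkl")
    ]
    # Row order is not chronological, which does not matter for aggregate stats.
    return pd.concat(frames, ignore_index=True)

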
def main(
    flw_dir=LFW_ROOT + "flw_mtcnnpy_160",
    pairs_filename=LFW_ROOT + "pairs.txt",
    store_file="/home/sam/Desktop/face.pkl",
    store=1,
):
    if store == 2:
        with open(store_file, "rb") as f:
            embedding_list, y_true = pickle.load(f)
            y_true = y_true.flatten()
    else:
        config.load_config()

        encoder = encoding.FacialEncoder()
        # encoder = encoding_arc.EncodingArc()

        embedding_list = []
        y_true = []

        for (path0,
             path1), issame in lfw_utils.get_paths(flw_dir, pairs_filename):

            im0 = cv2.imread(path0)
            im1 = cv2.imread(path1)

            embs = encoder.predict([im0, im1])

            embedding_list.append(embs)
            y_true.append(issame)

        embedding_list = np.vstack(embedding_list)
        y_true = np.array(y_true)

        if store == 1:
            with open(store_file, "wb") as f:
                pickle.dump((embedding_list, y_true), f)

    tpr, fpr, accuracy, val, val_std, far = lfw_utils.evaluate(
        embedding_list, y_true)

    print("Accuracy: %2.5f+-%2.5f" % (np.mean(accuracy), np.std(accuracy)))
    print("Validation rate: %2.5f+-%2.5f @ FAR=%2.5f" % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print("Area Under Curve (AUC): %1.3f" % auc)
    eer = optimize.brentq(
        lambda x: 1.0 - x - interpolate.interp1d(fpr, tpr)(x), 0.0, 1.0)
    print("Equal Error Rate (EER): %1.3f" % eer)
def create_faces_dataset(in_dir, out_dir=None, out_file="database.pkl"):
    # Path fixing
    if out_dir is None:
        out_dir = in_dir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # Processing pipeline
    config.load_config()
    config.logger_config()
    det = detection.FacialDetector()
    enc = encoding_arc.EncodingArc()

    labels = []
    embeddings = []

    for label, file_list in files.image_folder_traversal(in_dir):
        logging.info("Adding {} to database".format(label))
        for file_name in file_list:
            image = cv2.imread(file_name)
            if image is None:
                continue
            try:
                # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                faces = det.extract_images(image)
                embedding = enc.predict(faces)
                if embedding.shape[0] > 1:
                    logging.warning(
                        "Multiple faces in image {}".format(file_name))
                    continue

            except Exception as e:
                logging.warning("Error in image {}".format(file_name))
                logging.warning(str(e))
                # os.remove(file_name)
                # logging.warn("File {} deleted.".format(file_name))
            else:
                logging.debug("Image added: {}".format(file_name))
                labels.append(label)
                embeddings.append(embedding[0])
    df = pd.DataFrame({"identities": labels, "embeddings": embeddings})

    out_path = os.path.join(out_dir, out_file)
    df.to_pickle(out_path)
    logging.info("Face embeddings saved to {}".format(out_path))
def create_biwi_db(out_path):
    dataset = BiwiDataset()

    # Processing pipeline
    config.load_config()
    # config.logger_config()
    detector = detection.FacialDetector()
    encoder = encoding_arc.EncodingArc()

    labels = []
    embeddings = []

    aux_info = load_identities()

    # Create faces database (MIN_ANGLES)
    for idx in range(len(aux_info)):
        label = aux_info.iden[idx]
        if label in labels:
            continue

        iden = aux_info.folder[idx]
        frame = aux_info.center_frame[idx]
        im_path, (center3D, angle) = dataset[int(iden), frame]

        image = cv2.imread(im_path)

        data = detector.predict(image, extract_image=True)
        face_match = match_detection(data, (center3D, angle))
        embedding = encoder.predict([data[face_match].image])

        labels.append(label)
        embeddings.append(embedding[0])

        if idx % 5 == 4:
            print("\rProgress: {}/{}".format(idx + 1, len(aux_info)), end="")

    df = pd.DataFrame({"identities": labels, "embeddings": embeddings})
    df.to_pickle(out_path)
    print("\rProgress: {}/{}".format(idx + 1, len(aux_info)), end="")
    # logging.info("Face embeddings saved to {}".format(out_path))
    return df
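

# Usage sketch (paths below are placeholders, not from the original code):
# build the BIWI reference database once, then point eval_on_biwi at it so that
# recognition loads those embeddings through CONFIG["STORAGE"]["database_file"].
if __name__ == "__main__":
    db_path = "/tmp/biwi_database.pkl"   # placeholder
    results_dir = "/tmp/biwi_results"    # placeholder
    os.makedirs(results_dir, exist_ok=True)
    create_biwi_db(db_path)
    eval_on_biwi(db_path, results_dir)

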
# IMAGE_PATH = "/home/sam/Pictures/IMG-20190419-WA0001.jpg"
IMAGE_PATH = "/home/sam/UMA/4/4_2/3-TFG/3-Workspace/face_recognition_ros/face_recognition_ros/data/database/family_dataset/sam/IMG_20190730_182021.jpg"
image = cv2.cvtColor(cv2.imread(IMAGE_PATH, 1), cv2.COLOR_BGR2RGB)

# RESOLUTION = (1280, 720) # 0.433
RESOLUTION = (640, 480)  # 0.213
METHOD = "mtcnn"

LOOPS = 50

image = cv2.resize(image, RESOLUTION)
# plt.imshow(image)

# In[3]:

conf = config.load_config()

detector = detection.FacialDetector(method=METHOD, conf=conf["DETECTION"])

# encoder = encoding.FacialEncoder(conf)
encoder = encoding_arc.EncodingArc(conf)

# matcher = default.FaceMatcher(conf)
# matcher = svm.SVMMatcher(conf)
# matcher = knn.KNNMatcher(conf)

# In[6]:

# for _ in range(LOOPS):
faces = detector.extract_datum(image)
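
# Timing sketch (not in the original cell): reuse the `detector`, `image`,
# `METHOD`, `RESOLUTION` and `LOOPS` defined above to report the mean per-call
# detection time at the chosen resolution.
import timeit

mean_s = timeit.timeit(lambda: detector.extract_datum(image), number=LOOPS) / LOOPS
print("Mean detection time ({}x{}, {}): {:.3f} s".format(
    RESOLUTION[0], RESOLUTION[1], METHOD, mean_s))
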
from __future__ import print_function

from scipy import misc
import sys
import os
import argparse
import numpy as np
import random
from time import sleep

import facenet

from face_recognition_ros import detection
from face_recognition_ros.utils import config

config.load_config()


def create_dataset_mtcnn(args):
    # Short random sleep so concurrently launched processes do not race when
    # creating the output directory below.
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    # src_path, _ = os.path.split(os.path.realpath(__file__))
    # facenet.store_revision_info(src_path, output_dir, " ".join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print("Loading detector")

    # Create and choose detector