Code example #1
Score: 0
File: enroll.py — Project: edvisees/Face-Recognition
def enroll_face(image,
                label,
                embeddings_path="face_embeddings.npy",
                labels_path="labels.pkl",
                down_scale=1.0):
    """Enroll a single face from *image* under *label*.

    Runs face detection on the image and proceeds only when exactly one
    face is found; its embedding is then extracted and appended to the
    stored embeddings/labels files.

    Args:
        image: Input image, in whatever format ``detect_faces`` accepts.
        label: Identity label to associate with the detected face.
        embeddings_path: Path of the saved embeddings file (``.npy``).
        labels_path: Path of the saved labels file (pickle).
        down_scale: Down-scaling factor forwarded to ``detect_faces``.

    Returns:
        True when the face was enrolled; False when zero or multiple
        faces were detected (a message is printed in both skip cases).
    """
    faces = detect_faces(image, down_scale)

    # Guard clauses first: validate the detection count before indexing
    # into the result, so faces[0] is only touched on the success path.
    if len(faces) < 1:
        print("[!] Skipping. No faces detected.")
        return False
    if len(faces) > 1:
        # TODO: verify with g.t. if taking multiple faces increases performance
        print("[!] Skipping. Multiple faces detected.")
        return False

    face = faces[0]
    face_embeddings = extract_face_embeddings(image, face, shape_predictor,
                                              face_recognizer)
    add_embeddings(face_embeddings,
                   label,
                   embeddings_path=embeddings_path,
                   labels_path=labels_path)
    return True
Code example #2
Score: 0
    def enroll_face(image,
                    label,
                    label1,
                    label2,
                    label3,
                    label4,
                    embeddings_path="face_embeddings.npy",
                    labels_path="labels.cpickle",
                    labels_path1="labels1.cpickle",
                    labels_path2="labels2.cpickle",
                    labels_path3="labels3.cpickle",
                    labels_path4="labels4.cpickle",
                    down_scale=1.0):
        """Enroll exactly one face from *image* under the given labels.

        Returns False when no face is detected and raises ValueError when
        more than one face is found; otherwise the single face's embedding
        is stored via add_embeddings and True is returned.

        NOTE(review): labels_path1..labels_path4 are accepted but never
        forwarded to add_embeddings (only labels_path is) — confirm
        whether that is intentional.
        """
        detected = detect_faces(image, down_scale)
        count = len(detected)

        # Reject anything other than exactly one detected face.
        if count < 1:
            return False
        if count > 1:
            raise ValueError("Multiple faces not allowed for enrolling")

        sole_face = detected[0]
        embedding = extract_face_embeddings(image, sole_face,
                                            shape_predictor, face_recognizer)
        add_embeddings(embedding,
                       label,
                       label1,
                       label2,
                       label3,
                       label4,
                       embeddings_path=embeddings_path,
                       labels_path=labels_path)
        return True
Code example #3
Score: 0
    # CLI arguments: the input image plus the embeddings/labels files
    # produced during enrollment. (`ap` is created above this excerpt.)
    ap.add_argument("-i", "--image", help="Path to image", required=True)
    ap.add_argument("-e", "--embeddings", help="Path to saved embeddings",
                    default="tmp/face_embeddings.npy")
    ap.add_argument("-l", "--labels", help="Path to saved labels",
                    default="tmp/labels.pkl")
    args = vars(ap.parse_args())

    # Load the enrolled embeddings matrix and its parallel labels pickle.
    embeddings = np.load(args["embeddings"])
    labels = pickle.load(open(args["labels"], 'rb'))
    # dlib models: 5-point landmark predictor + ResNet embedding network.
    shape_predictor = dlib.shape_predictor("models/"
                                           "shape_predictor_5_face_landmarks.dat")
    face_recognizer = dlib.face_recognition_model_v1("models/"
                                                     "dlib_face_recognition_resnet_model_v1.dat")

    # Keep the original BGR frame for drawing; detection/embedding is run
    # on the RGB-converted copy.
    image = cv2.imread(args["image"])
    image_original = image.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    faces = detect_faces(image)
    print("Detected {} faces".format(len(faces)))
    for face in faces:
        # Embed each detected face and look up its nearest enrolled label.
        embedding = extract_face_embeddings(image, face, shape_predictor, face_recognizer)
        label = recognize_face(embedding, embeddings, labels)
        # Draw the face's bounding box with the recognized name above it.
        # NOTE(review): label appears to be a (name, distance) tuple,
        # given label[0] below — confirm against recognize_face.
        (x1, y1, x2, y2) = face.left(), face.top(), face.right(), face.bottom()
        cv2.rectangle(image_original, (x1, y1), (x2, y2), (255, 120, 120), 2, cv2.LINE_AA)
        cv2.putText(image_original, label[0], (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)

    # Show the annotated frame until a key is pressed.
    cv2.imshow("Image", image_original)
    cv2.waitKey(0)
Code example #4
Score: 0
                def run(self):
                    """Continuously fetch frames from an IP-camera URL, recognize
                    faces against five enrolled label sets, annotate each frame,
                    and write the recognized names plus a timestamp to
                    firebase*.txt files in the working directory.

                    NOTE(review): `enter1` (URL entry widget) comes from an
                    enclosing/global scope not visible here — confirm it exists
                    when this runs.
                    """
                    import numpy as np
                    import datetime

                    # Timestamp is captured once at startup, not per frame —
                    # every firebase6.txt write repeats this same time.
                    now = datetime.datetime.now()

                    def recognize_face(embedding,
                                       embeddings,
                                       labels,
                                       labels1,
                                       labels2,
                                       labels3,
                                       labels4,
                                       threshold=0.5):
                        """Nearest-neighbour lookup of *embedding* in *embeddings*.

                        Returns (label, min_distance); label is "" when the
                        closest match is farther than *threshold*.

                        NOTE(review): labels1..labels4 are accepted but unused —
                        only *labels* is indexed below.
                        """
                        # Euclidean distance from the query to every enrolled row.
                        distances = np.linalg.norm(embeddings - embedding,
                                                   axis=1)
                        argmin = np.argmin(distances)
                        minDistance = distances[argmin]

                        if minDistance > threshold:
                            label = ""
                        else:
                            label = labels[argmin]

                        return (label, minDistance)

                    # NOTE(review): __name__ refers to the module-level global,
                    # so this branch only executes when the defining module is
                    # run as a script.
                    if __name__ == "__main__":

                        import cv2
                        import argparse
                        from face_embeddings import extract_face_embeddings
                        from face_detector import detect_faces
                        import cloudpickle as cPickle
                        import dlib
                        import urllib.request
                        import os

                        # `off` is the global stop flag checked by the loop below.
                        global off
                        off = False
                        a = os.getcwd()
                        url = enter1.get()
                        # Paths to the saved embeddings and the five label sets.
                        ap = argparse.ArgumentParser()
                        ap.add_argument("-i", "--image", default="4.jpg")
                        ap.add_argument("-e",
                                        "--embeddings",
                                        default="face_embeddings.npy")
                        ap.add_argument("-l",
                                        "--labels",
                                        default="labels.cpickle")
                        ap.add_argument("-l1",
                                        "--labels1",
                                        default="labels1.cpickle")
                        ap.add_argument("-l2",
                                        "--labels2",
                                        default="labels2.cpickle")
                        ap.add_argument("-l3",
                                        "--labels3",
                                        default="labels3.cpickle")
                        ap.add_argument("-l4",
                                        "--labels4",
                                        default="labels4.cpickle")
                        args = vars(ap.parse_args())

                        # Load the embeddings matrix and all five label pickles.
                        embeddings = np.load(args["embeddings"])
                        labels = cPickle.load(open(args["labels"], "rb"))
                        labels1 = cPickle.load(open(args["labels1"], "rb"))
                        labels2 = cPickle.load(open(args["labels2"], "rb"))
                        labels3 = cPickle.load(open(args["labels3"], "rb"))
                        labels4 = cPickle.load(open(args["labels4"], "rb"))
                        # dlib models: 5-point landmark predictor + ResNet embedder.
                        shape_predictor = dlib.shape_predictor(
                            "models/"
                            "shape_predictor_5_face_landmarks.dat")
                        face_recognizer = dlib.face_recognition_model_v1(
                            "models/"
                            "dlib_face_recognition_resnet_model_v1.dat")

                        # Main capture loop: runs until `off` is set elsewhere.
                        while (not off):
                            # Fetch one JPEG frame over HTTP and decode it.
                            imgResp = urllib.request.urlopen(url)
                            imgNp = np.array(bytearray(imgResp.read()),
                                             dtype=np.uint8)
                            image = cv2.imdecode(imgNp, -1)
                            # Keep a BGR copy for drawing; recognition runs on RGB.
                            image_original = image.copy()
                            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                            faces = detect_faces(image)
                            for face in faces:
                                embedding = extract_face_embeddings(
                                    image, face, shape_predictor,
                                    face_recognizer)
                                # One lookup per label set; only the third
                                # argument matters since recognize_face ignores
                                # the trailing label arguments (see NOTE above).
                                label = recognize_face(embedding, embeddings,
                                                       labels, labels1,
                                                       labels2, labels3,
                                                       labels4)
                                label1 = recognize_face(
                                    embedding, embeddings, labels1, labels,
                                    labels2, labels3, labels4)
                                label2 = recognize_face(
                                    embedding, embeddings, labels2, labels,
                                    labels1, labels3, labels4)
                                label3 = recognize_face(
                                    embedding, embeddings, labels3, labels,
                                    labels1, labels2, labels4)
                                label4 = recognize_face(
                                    embedding, embeddings, labels4, labels,
                                    labels1, labels2, labels3)
                                # Draw the box and the labels4 name on the frame.
                                (x1, y1, x2, y2) = face.left(), face.top(
                                ), face.right(), face.bottom()
                                cv2.rectangle(image_original, (x1, y1),
                                              (x2, y2), (255, 120, 120), 2)
                                cv2.putText(image_original, label4[0],
                                            (x1, y1 - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                            (255, 255, 0), 2)
                                # Dump each result to its firebase*.txt file.
                                # NOTE(review): files are reopened/overwritten per
                                # face and not opened via `with`; Windows-style
                                # "\\" path separators assume a Windows host.
                                file1 = open(a + "\\firebase1.txt", "w")
                                file2 = open(a + "\\firebase2.txt", "w")
                                file3 = open(a + "\\firebase3.txt", "w")
                                file4 = open(a + "\\firebase4.txt", "w")
                                file5 = open(a + "\\firebase5.txt", "w")
                                file6 = open(a + "\\firebase6.txt", "w")
                                file1.write(str(label1[0]))
                                file2.write(label[0])
                                file3.write(label4[0])
                                file4.write(label2[0])
                                file5.write(label3[0])
                                file6.write(
                                    now.strftime("%Y-%m-%d " + "%H:%M:%S"))
                                file1.close()
                                file2.close()
                                file3.close()
                                file4.close()
                                file5.close()
                                file6.close()
                            cv2.imshow("Image", image_original)
                            cv2.waitKey(1)
                            # Redundant with the `while` condition, but exits
                            # mid-iteration if `off` flipped during this frame.
                            if off == True:
                                break

                        cv2.destroyAllWindows()