# Example #1 (score: 0)
def match(clf, filename):
    """Detect faces in an image file and classify each one with *clf*.

    Parameters:
        clf: a fitted classifier exposing ``predict`` (e.g. sklearn SVC
            trained on 128-d face encodings).
        filename: path of the image to analyse.

    Returns:
        tuple ``(firstname, no)`` where ``firstname`` is the predicted
        label of the *last* detected face (``None`` if no face was found)
        and ``no`` is the number of faces detected.
    """
    # Load the test image with unknown faces into a numpy array
    test_image = face_recognition.load_image_file(filename)

    # Find all the faces in the test image using the default HOG-based model
    face_locations = face_recognition.face_locations(test_image)
    no = len(face_locations)
    print("\nNumber of faces detected: ", no)

    # Compute every encoding ONCE. The original called
    # face_encodings(test_image)[i] inside the loop, re-encoding ALL faces
    # on each iteration (O(n^2) over an expensive dlib call). Passing the
    # already-known locations also skips a redundant detection pass.
    encodings = face_recognition.face_encodings(test_image, face_locations)

    # Predict all the faces in the test image using the trained classifier
    firstname = None
    print("\n(☞゚ヮ゚)☞   ☜(゚ヮ゚☜)\n")
    print("Found:")
    for test_image_enc in encodings:
        name = clf.predict([test_image_enc])
        print(*name)
        firstname = str(name[0])
    return (firstname, no)
def train():
    """Train an SVC face classifier from images under ``train_dir/``.

    Expects one sub-directory per person, each containing image files of
    that person's face. Images with zero or multiple detected faces are
    skipped (with a notice), since their label would be ambiguous.

    Returns:
        The fitted ``svm.SVC`` classifier (also persisted to
        ``clf.joblib`` via joblib ``dump``).
    """
    # The training data is every face encoding from the known images;
    # the labels are the directory (person) names.
    encodings = []
    names = []

    # Training directory: one sub-directory per person
    for person in os.listdir("train_dir/"):
        person_dir = os.path.join("train_dir", person)
        # Guard against stray files (e.g. .DS_Store) at the top level,
        # which would make os.listdir below raise.
        if not os.path.isdir(person_dir):
            continue

        # Loop through each training image for the current person
        for person_img in os.listdir(person_dir):
            # Skip hidden/metadata files (.DS_Store and friends) generally,
            # not just the one macOS artifact.
            if person_img.startswith("."):
                continue
            face = face_recognition.load_image_file(
                os.path.join(person_dir, person_img)
            )
            face_bounding_boxes = face_recognition.face_locations(face)

            if len(face_bounding_boxes) == 1:
                # Reuse the detected box instead of re-detecting inside
                # face_encodings.
                face_enc = face_recognition.face_encodings(
                    face, face_bounding_boxes
                )[0]
                # Add face encoding for current image with corresponding
                # label (name) to the training data
                encodings.append(face_enc)
                names.append(person)
            else:
                # Don't silently drop unusable training images.
                print(
                    "Skipping", person_img, "- expected exactly one face,",
                    "found", len(face_bounding_boxes)
                )

    # Create and train the SVC classifier
    clf = svm.SVC(gamma="scale", probability=True)
    clf.fit(encodings, names)
    dump(clf, 'clf.joblib')
    return clf
# Example #3 (score: 0)
    def Face_Recognition_(self, frame):
        """Recognize known faces in a BGR video *frame* and annotate it.

        Matches each detected face against the module-level ``knownFE``
        encodings (with names ``knownN`` and titles ``knownT``), draws a
        labeled box per face on the frame, and returns the annotated
        frame plus the per-face names and titles.

        Returns:
            tuple ``(frame, face_names, face_titles)``.
        """
        # Detect on a quarter-size frame for speed; coordinates are
        # multiplied back by 4 before drawing.
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Use cvtColor instead of the negative-stride slice
        # small_frame[:, :, ::-1]: the slice yields a non-contiguous view,
        # which recent dlib/face_recognition versions reject. cvtColor
        # produces the same RGB pixels in a C-contiguous array.
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)
        face_names = []
        face_titles = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(knownFE, face_encoding)
            name = "Unknown"
            title = "Unknown"

            # First known encoding within tolerance wins.
            if True in matches:
                first_match_index = matches.index(True)
                name = knownN[first_match_index]
                title = knownT[first_match_index]

            face_names.append(name)
            face_titles.append(title)

        for (top, right, bottom,
             left), name, title in zip(face_locations, face_names,
                                       face_titles):
            # Scale coordinates back to the full-size frame (inverse of
            # the 0.25 resize above).
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Red box around the face, filled label strip underneath.
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name + ": " + title, (left + 6, bottom - 6),
                        font, 1.0, (255, 255, 255), 1)

        return frame, face_names, face_titles
# Example #4 (score: 0)
# Build the known-encoding database: one 128-d face encoding per detected
# face, paired with the id of the user it belongs to.
for i, user in enumerate(allUsers):
    print("[INFO] processing user {}/{}".format(i + 1, len(allUsers)))
    name = users.get_name_from_id(user)

    for imagePath in users.get_pictures_from_id(user):
        # OpenCV loads images in BGR order; dlib/face_recognition wants RGB.
        image = cv2.imread(imagePath)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Locate every face in the picture, then embed each one.
        boxes = face_recognition.face_locations(
            rgb, model=args["detection_method"])
        encodings = face_recognition.face_encodings(rgb, boxes)

        # One (encoding, user) entry per face found in this picture.
        knownEncodings.extend(encodings)
        knownUsers.extend([user] * len(encodings))

# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "users": knownUsers}