def __init__(
    self,
    desiredFaceWidth=160,
    predictor_path='models/shape_predictor_68_face_landmarks.dat',
):
    self.predictor = dlib.shape_predictor(predictor_path)
    self.fa = FaceAligner(self.predictor,
                          desiredFaceWidth=desiredFaceWidth,
                          desiredLeftEye=(0.32, 0.35))
    self.encoder = Encoder()
Example #2
def detect_face(img):
    # Load the landmark predictor and build the aligner (done on every
    # call here; in practice these could be created once and reused).
    sp = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    fa = FaceAligner(sp)
    #img = imutils.resize(img, width=500)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = dlib.get_frontal_face_detector()
    rects = detector(gray, 2)
    num_faces = len(rects)
    if num_faces == 0:
        return None
    # Align the first detected face and return it as a grayscale image.
    faceAligned = fa.align(img, gray, rects[0])
    image = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2GRAY)
    return image
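A minimal usage sketch for detect_face; the input and output paths below are placeholders, not part of the original example:

img = cv2.imread('photo.jpg')          # placeholder input path
aligned = detect_face(img)
if aligned is not None:
    cv2.imwrite('aligned_face.png', aligned)   # grayscale aligned crop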
Example #3
class EnhancedFacenet:
    def __init__(self, desiredFaceWidth=160,
                 predictor_path='models/shape_predictor_68_face_landmarks.dat'):
        self.predictor = dlib.shape_predictor(predictor_path)
        self.fa = FaceAligner(self.predictor, desiredFaceWidth=desiredFaceWidth, desiredLeftEye=(0.37, 0.33))
        self.encoder = Encoder()


    def alignAndEncode(self, img, gray, face_rect):
        face = self.fa.align(img, gray, face_rect)
        face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        return face, self.encoder.generate_embedding(face_rgb)
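A short usage sketch for EnhancedFacenet, assuming the Encoder class and the model files referenced above are available; the input path is a placeholder:

ef = EnhancedFacenet()
img = cv2.imread('person.jpg')                 # placeholder input path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
detector = dlib.get_frontal_face_detector()
rects = detector(gray, 2)
if rects:
    face, embedding = ef.alignAndEncode(img, gray, rects[0])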
Example #4
def align_faces(imagePaths: list):
    """
	Align faces found in imagePaths, 
	overwrite original with the aligned face
    - imagePaths: list of paths of images to align

	"""
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "models/shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceHeight=256)

    for i, imagePath in enumerate(imagePaths):
        print("[INFO] - Aligning face #{}".format(i))
        image = cv2.imread(imagePath)
        image = imutils.resize(image, width=800)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        rects = detector(gray, 2)
        for rect in rects:
            # Align from the original image/gray pair; note that only the
            # last detected face is kept, since the same path is overwritten.
            aligned = fa.align(image, gray, rect)
            cv2.imwrite(imagePath, aligned)
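A small usage sketch for align_faces; the 'photos/' directory is a placeholder:

import glob

align_faces(glob.glob('photos/*.jpg'))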
Example #5
def detect_face(img):
    sp = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    fa = FaceAligner(sp)
    #img = imutils.resize(img, width=500)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = dlib.get_frontal_face_detector()
    rects = detector(gray, 2)
    num_faces = len(rects)
    if num_faces == 0:
        return None
    # Keep the alignment of the largest detected face (by bounding box).
    max_w = 0
    max_h = 0
    for rect in rects:
        # extract the ROI of the *original* face, then align the face
        # using facial landmarks
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        #print(w," ",h)
        if w > max_w and h > max_h:
            max_w = w
            max_h = h
            faceAligned = fa.align(img, gray, rect)

    image = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2GRAY)
    return image
Example #6
import dlib
import cv2
import os
import glob
from skimage import io
import imutils
from imutils import face_utils
import numpy as np
from imutils.face_utils.facealigner import FaceAligner

sp = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(sp)

# load the input image, resize it, and convert it to grayscale
image = cv2.imread('cr71.png')
image = imutils.resize(image, width=800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# show the original input image and detect faces in the grayscale
# image
cv2.imshow("Input", image)
detector = dlib.get_frontal_face_detector()
rects = detector(gray, 2)
for rect in rects:
    # extract the ROI of the *original* face, then align the face
    # using facial landmarks
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)
    faceAligned = fa.align(image, gray, rect)
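    # Assumed continuation (not part of the original snippet): display the
    # original and aligned crops for comparison, one detected face at a time.
    cv2.imshow("Original", faceOrig)
    cv2.imshow("Aligned", faceAligned)
    cv2.waitKey(0)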