Example #1
	def __init__(self,landmark_model="large"):
		"""
			Load previously encoded faces and their features
		"""
		with open('../../data/face_recognize_features.json') as f:
			faces = json.load(f)

		face_list = []
		face_name_list = []
		for face in faces: 
			for encoding in face['encodings']:
				face_list.append(np.array(encoding))
				face_name_list.append(face['name'])

		self.face_list = np.array(face_list)
		self.face_name_list = face_name_list

		if landmark_model == "small":
			predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_5_point_model)
		else:
			predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
			self.pose_predictor = dlib.shape_predictor(predictor_68_point_model)
		
		face_recognition_model = face_recognition_models.face_recognition_model_location()
		self.face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
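A minimal usage sketch for the class above (the class name FaceRecognizer, the image path, and the 0.6 distance threshold are assumptions, not part of the original): detect a face with dlib's HOG detector, encode it with the models loaded in __init__, and match it against the stored encodings by Euclidean distance.

import dlib
import numpy as np

recognizer = FaceRecognizer(landmark_model="large")   # hypothetical class name
img = dlib.load_rgb_image("someone.jpg")              # placeholder image path

detector = dlib.get_frontal_face_detector()
rect = detector(img, 1)[0]                            # first detected face (assumes at least one)
landmarks = recognizer.pose_predictor(img, rect)
encoding = np.array(recognizer.face_encoder.compute_face_descriptor(img, landmarks, 1))

# Euclidean distance against every known encoding loaded in __init__
distances = np.linalg.norm(recognizer.face_list - encoding, axis=1)
best = int(np.argmin(distances))
name = recognizer.face_name_list[best] if distances[best] < 0.6 else "unknown"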
Example #2
    def align_face(self, image):
        faces = dlib.full_object_detections()
        (h, w) = image.shape[:2]
        # dlib.rectangle takes (left, top, right, bottom); use the whole frame as the face box
        loc = dlib.rectangle(0, 0, w, h)
        model = FRM.pose_predictor_five_point_model_location()
        sp = dlib.shape_predictor(model)
        faces.append(sp(image, loc))
        aligned = dlib.get_face_chip(image, faces[0], size=160)

        return aligned
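A short usage sketch, assuming aligner is an instance of the class above and the input is an RGB array already cropped tightly around one face (the method treats the whole frame as the face box). Because the shape predictor is rebuilt on every call, callers processing many frames may want to cache it outside the method.

import dlib

face_crop = dlib.load_rgb_image("face_crop.jpg")   # placeholder path, RGB array
chip = aligner.align_face(face_crop)               # 160x160 aligned face chip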
Example #3
def init_engine(mode='image', num_face=10, mask='all'):
    if mask == 'recognize':
        predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
        pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
        #predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
        #pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location()
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
        return pose_predictor_5_point, face_encoder
    else:
        face_detector = dlib.get_frontal_face_detector()
        return face_detector
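A minimal usage sketch; the two branches return differently shaped results, so callers unpack them accordingly (the variable names below are assumptions):

pose_predictor_5_point, face_encoder = init_engine(mask='recognize')  # recognition models
face_detector = init_engine(mask='all')                               # HOG face detector only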
Example #4
    def __init__(self,
                 employees,
                 resolution=(320, 240),
                 model_type="small",
                 nb_iters=1):
        # nb_iters - higher values are more precise but slower
        # model_type - "small" / "large"; "large" is more accurate but slower
        self.camera = picamera.PiCamera()
        self.camera.resolution = resolution
        self.np_output = np.zeros((resolution[1], resolution[0], 3),
                                  dtype=np.uint8)
        self.model = frm.face_recognition_model_location()
        self.encoder = dlib.face_recognition_model_v1(self.model)
        self.face_detector = dlib.get_frontal_face_detector()
        self.model_type = model_type
        self.nb_iters = nb_iters
        self.employees = employees

        self.predictor = frm.pose_predictor_five_point_model_location()
        if self.model_type == "large":
            self.predictor = frm.pose_predictor_model_location()

        self.load_faces()
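Note that self.predictor stores a model file path here, not a loaded predictor. A minimal sketch of a hypothetical method (not part of the original snippet) that loads it on demand and encodes the first detected face; frame is assumed to be an RGB numpy array, with dlib and numpy imported at module level:

    def encode_first_face(self, frame):
        # self.predictor holds the path stored in __init__
        predictor = dlib.shape_predictor(self.predictor)
        rects = self.face_detector(frame, self.nb_iters)
        if not rects:
            return None
        landmarks = predictor(frame, rects[0])
        return np.array(self.encoder.compute_face_descriptor(frame, landmarks))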
Example #5
import dlib
from PIL import ImageFile

try:
    import face_recognition_models
except Exception:
    print(
        "Please install `face_recognition_models` with this command before using `face_recognition`:\n"
    )
    print(
        "pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

ImageFile.LOAD_TRUNCATED_IMAGES = True

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
Example #6
import dlib
import numpy as np

try:
    import face_recognition_models
except Exception:
    print("Please install `face_recognition_models` with this command before using `face_recognition`:")
    print()
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
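A minimal sketch of how these module-level objects fit together (the image path is a placeholder): detect faces, predict landmarks, and produce one 128-d encoding per face.

img = dlib.load_rgb_image("person.jpg")
for rect in face_detector(img, 1):
    shape = pose_predictor_5_point(img, rect)
    descriptor = np.array(face_encoder.compute_face_descriptor(img, shape, 1))
    print(_rect_to_css(rect), descriptor.shape)   # (top, right, bottom, left), (128,)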
Example #7
import scipy.misc
import dlib
import numpy as np
import face_recognition_models as frm

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = frm.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = frm.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = frm.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = frm.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def css_to_rect(css):
    return dlib.rectangle(css[3], css[0], css[1], css[2])


def trim_css_to_bounds(css, image_shape):
    return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
Example #8
    def saveEncodings(self, verbose=True):

        # initialize face encoding parameters
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
        pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
        predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
        pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location()
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

        pose_predictor = pose_predictor_68_point

        # initialize a text file for saving images where faces are not found
        with open('waste_files.txt', 'w') as waste:
            waste.write("Images which are not suitable for training are-\n")

            try:
                # load previously saved encodings
                pickle_in = open(self.base_dir + "known_face_encodings.pickle",
                                 "rb")
                known_face_encodings = pickle.load(pickle_in)
                pickle_in.close()

                pickle_in = open(self.base_dir + "known_face_names.pickle",
                                 "rb")
                known_face_names = pickle.load(pickle_in)
                pickle_in.close()
                # filter out faces which are already trained
                temp = []
                for name in self.names:
                    if name not in known_face_names:
                        temp.append(name)

                self.names = temp

            except Exception:
                # declare encodings as empty
                known_face_encodings = []
                known_face_names = []
                #self.names = ['1231', '1232', '1234', '1238', '1242']#'1237', '1239','1235',

            print("[INFO] Encoding... ", self.names)
            if self.names != []:
                # looping through names to be trained
                for name in tqdm(self.names):
                    # clear the lists for new user
                    face_images = []
                    faces_locations = []
                    face_names = []

                    # load images of person to be trained
                    base = self.user_images
                    name = name.strip()
                    base = os.path.join(base, name)
                    # looping through images of person to be trained
                    for img_path in glob.glob(os.path.join(base, "*.jpg")):
                        # read image
                        image_data = cv2.imread(img_path)

                        ###############
                        # check face using resnet
                        ###############
                        blob = cv2.dnn.blobFromImage(image_data, 1.0,
                                                     (300, 300),
                                                     (104.0, 177.0, 123.0))
                        self.net2.setInput(blob)
                        detections = self.net2.forward()
                        (h, w) = image_data.shape[:2]
                        confidences = []
                        boxes = []
                        for i in range(0, detections.shape[2]):
                            confidence = detections[0, 0, i, 2]
                            if confidence > 0.98:
                                box = detections[0, 0, i, 3:7] * np.array(
                                    [w, h, w, h])
                                box = box.astype("int")
                                # startX, startY, endX, endY
                                confidences.append(float(confidence))
                                boxes.append([
                                    box[0], box[1], box[2] - box[0],
                                    box[3] - box[1]
                                ])

                        if len(boxes) > 0:
                            pass
                        else:
                            waste.write(img_path + "\n")
                            #print("[INFO] Image {} not suitable for training: Resnet filtered out".format(img_path))
                            continue

                        ###############
                        # face detection using yolo
                        ###############
                        # load model parameters
                        blob = cv2.dnn.blobFromImage(image_data,
                                                     1 / 255, (416, 416),
                                                     [0, 0, 0],
                                                     1,
                                                     crop=False)
                        self.net.setInput(blob)
                        # fetch predictions from model/network
                        layers_names = self.net.getLayerNames()
                        outs = self.net.forward([
                            layers_names[i[0] - 1]
                            for i in self.net.getUnconnectedOutLayers()
                        ])
                        # fetch size of image
                        (frame_height, frame_width) = image_data.shape[:2]
                        # declare overall confidence list
                        confidences = []
                        # declare bounding boxes list
                        boxes = []
                        face_locations = []
                        # looping through model predictions/ predictions for each grid cell
                        for out in outs:
                            # looping through detectors outputs for grid cell
                            for detection in out:
                                # fetch classifier probabilities for different classes
                                scores = detection[5:]
                                # fetch maximum probabilty class
                                class_id = np.argmax(scores)
                                # define confidence as maximum probability
                                confidence = scores[class_id]
                                # filter predictions based on confidence threshold
                                if confidence > self.yolo_conf_threshold:
                                    # fetch bounding box dimensions
                                    center_x = int(detection[0] * frame_width)
                                    center_y = int(detection[1] * frame_height)
                                    width = int(detection[2] * frame_width)
                                    height = int(detection[3] * frame_height)
                                    left = int(center_x - width / 2)
                                    top = int(center_y - height / 2)
                                    # append confidence in confidences list
                                    confidences.append(float(confidence))
                                    # append bounding box in bounding boxes list
                                    boxes.append([left, top, width, height])

                        # perform non maximum suppression of overlapping images
                        indices = cv2.dnn.NMSBoxes(boxes, confidences,
                                                   self.yolo_conf_threshold,
                                                   self.nms_threshold)

                        # fetch faces bounding boxes
                        for i in indices:
                            i = i[0]
                            box = boxes[i]
                            left = box[0]
                            top = box[1]
                            width = box[2]
                            height = box[3]
                            face_locations.append(
                                np.array([
                                    top, left + width +
                                    (width * self.margin // 100), top + height,
                                    left - (width * self.margin // 100)
                                ]))

                        if len(face_locations) != 1:
                            waste.write(img_path + "\n")
                            # If there are no people (or too many people) in a training image, skip the image.
                            if verbose:
                                pass
                                #print("[INFO] Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_locations) < 1 else "Found more than one face"))
                        else:
                            for face_location in face_locations:
                                if min(face_location) < 0:
                                    pass
                                    #print("[INFO] Image {} not suitable for training: Face is not in Boundary of Image".format(img_path))
                                else:
                                    ######################################################
                                    # histogram equalization
                                    frame1 = image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:face_location[1], :]
                                    img_to_yuv = cv2.cvtColor(
                                        frame1, cv2.COLOR_BGR2YUV)
                                    img_to_yuv[:, :, 0] = cv2.equalizeHist(
                                        img_to_yuv[:, :, 0])
                                    frame1 = cv2.cvtColor(
                                        img_to_yuv, cv2.COLOR_YUV2BGR)

                                    image_data[
                                        face_location[0]:face_location[2],
                                        face_location[3]:
                                        face_location[1], :] = frame1
                                    ###################################################
                                    # Add face encoding for current image to the training set
                                    faces_locations.append(face_locations[0])
                                    face_images.append(image_data)
                                    face_names.append(name)

                    faces_locations = [
                        self._css_to_rect(face_location)
                        for face_location in faces_locations
                    ]

                    raw_landmarks = []
                    for face_image, face_location in zip(
                            face_images, faces_locations):
                        faces = dlib.full_object_detections()
                        faces.append(pose_predictor(face_image, face_location))
                        raw_landmarks.append(faces)

                    if len(faces_locations) < 120:
                        print(
                            "[INFO]: %s skipped as the total number of usable images is < 120."
                            % name)
                        continue

                    # generate encoding for captured faces of user
                    encodings = list(np.array(face_encoder.compute_face_descriptor(batch_img=face_images, batch_faces=raw_landmarks, num_jitters=20))\
                                     .reshape((len(face_images), 128)))

                    # append new data in old data
                    known_face_encodings += encodings
                    known_face_names += face_names

                    # save the encodings after every iteration of distinct class
                    pickle_out = open(
                        self.base_dir + "known_face_names.pickle", "wb")
                    pickle.dump(known_face_names, pickle_out)
                    pickle_out.close()
                    pickle_out = open(
                        self.base_dir + "known_face_encodings.pickle", "wb")
                    pickle.dump(known_face_encodings, pickle_out)
                    pickle_out.close()
                    print("[INFO]: %s saved!" % name)

            else:
                print("Encoding Skipped!\n")
Example #9
# -*- coding: utf-8 -*-
import PIL.Image
import dlib
import numpy as np
import face_recognition_models as models
# import models

# ====================================================================================================================
# Load the model file locations provided by the models package
face_detector = dlib.get_frontal_face_detector()

point_68_predictor = models.pose_predictor_model_location()
point_68_pose = dlib.shape_predictor(point_68_predictor)

point_5_predictor = models.pose_predictor_five_point_model_location()
point_5_pose = dlib.shape_predictor(point_5_predictor)

face_detection_model = models.cnn_face_detector_model_location()
face_detector_tool = dlib.cnn_face_detection_model_v1(face_detection_model)

face_recognition_model = models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


# ====================================================================================================================
# Convert a dlib 'rect' object to (top, right, bottom, left) order
# param rect : a dlib 'rect' object
# return : tuple (top, right, bottom, left)
def _rect_to_css(rect):
    return rect.top(), rect.right(), rect.bottom(), rect.left()
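A short usage sketch for the CNN detector set up above (the image path is a placeholder). cnn_face_detection_model_v1 returns mmod detections, so the rectangle lives in the .rect attribute:

img = np.array(PIL.Image.open("group.jpg").convert("RGB"))
for det in face_detector_tool(img, 1):
    print(_rect_to_css(det.rect), det.confidence)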
Example #10
def main(mode='test', img_path='def'):
    t = time.perf_counter()
    classes = [
        'MXG', 'Sanaken', 'Zofinka', 'Toalk', 'Zissxzirsziiss', 'kiasummer'
    ]

    known_face_encodes = [
        np.loadtxt(MAIN_PATH + '/persons/MXG/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Sanaken/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zofinka/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Toalk/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zissxzirsziiss/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/kiasummer/fv.txt')
    ]

    known_face_encodes = np.reshape(known_face_encodes, (6, 5, 128))

    # get image
    if img_path == 'def':
        image = cv2.imread('team.jpg', 1)
    else:
        image = cv2.imread(img_path, 1)

    # output
    init_align_faces = []
    out_arr = []

    # get bboxes
    fd = FaceDetector()

    conf, faceboxes, mboxes = fd.get_faceboxes(image)

    color = [255, 255, 255]

    # border widths; set them all to 120
    top, bottom, left, right = [120] * 4

    image = cv2.copyMakeBorder(image,
                               top,
                               bottom,
                               left,
                               right,
                               cv2.BORDER_CONSTANT,
                               value=color)
    bbox_mark_image = image.copy()

    # get alignment model
    predictor_model = MAIN_PATH + "/models/shape_predictor_68_face_landmarks.dat"
    face_pose_predictor = dlib.shape_predictor(predictor_model)
    face_aligner = openface.AlignDlib(predictor_model)

    # init prediction model
    predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
    pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
    face_recognition_model = face_recognition_models.face_recognition_model_location()
    face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

    for i in range(len(faceboxes)):
        # get dlib rectangle from facebox
        face_rect = dlib.rectangle(faceboxes[i][0], faceboxes[i][1],
                                   faceboxes[i][2], faceboxes[i][3])

        # Get the face's pose
        pose_landmarks = face_pose_predictor(image, face_rect)

        # Use openface to calculate and perform the face alignment
        alignedFace = face_aligner.align(
            534,
            image,
            face_rect,
            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        alignedFace_out = cv2.resize(alignedFace,
                                     (faceboxes[i][2] - faceboxes[i][0],
                                      faceboxes[i][3] - faceboxes[i][1]))
        initFace_out = image[faceboxes[i][1]:faceboxes[i][3],
                             faceboxes[i][0]:faceboxes[i][2]]
        init_align_faces.append([initFace_out, alignedFace_out])

        # draw marks
        parts = dlib.full_object_detection.parts(pose_landmarks)
        FaceDetector.draw_marks(bbox_mark_image, parts)

        # get face landmarks for feature extraction
        # dlib.rectangle takes (left, top, right, bottom), i.e. width then height
        landmark_set = pose_predictor_5_point(
            alignedFace,
            dlib.rectangle(0, 0, alignedFace.shape[1], alignedFace.shape[0]))

        # get feature vector
        feature_vector = np.array(
            face_encoder.compute_face_descriptor(alignedFace, landmark_set, 1))

        # known_face_encode = np.loadtxt('persons/MXG/fv.txt')
        ind = compare_faces(known_face_encodes, feature_vector)
        if (ind != -1):
            face_class = classes[ind]
            colour = (0, 255, 0)
        else:
            face_class = "Unknown"
            colour = (0, 0, 255)
        sh = max(image.shape[0], image.shape[1])
        mult = sh / 500
        if mult < 1:
            mult = 1
        cv2.putText(image, face_class, (faceboxes[i][0], faceboxes[i][1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5 * mult, colour, 1)
        cv2.rectangle(bbox_mark_image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        cv2.rectangle(image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        #cv2.rectangle(image, (mboxes[i][0], mboxes[i][1]), (mboxes[i][2], mboxes[i][3]), (0, 255, 0))
        #out_arr.append({'x1': faceboxes[i][0], 'y1': faceboxes[i][1], 'x2': faceboxes[i][2], 'y2': faceboxes[i][3], 'class': face_class, 'conf': conf[i]})
        out_arr.append({
            'x1': mboxes[i][0],
            'y1': mboxes[i][1],
            'x2': mboxes[i][2],
            'y2': mboxes[i][3],
            'class': face_class,
            'conf': conf[i]
        })

    t = time.perf_counter() - t
    out_imgs = [image, bbox_mark_image, init_align_faces, t]
    if mode == 'test':
        cv2.imshow("Preview", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if mode == 'metr':
        #cv2.imshow("Preview", image)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        return out_arr
    if mode == 'process':
        return out_imgs
    if mode == 'def':
        return out_imgs
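compare_faces is called above but not defined in this example. A minimal sketch of one possible implementation, assuming known_face_encodes has shape (n_classes, n_samples, 128) as reshaped earlier and that the function returns the index of the best-matching class, or -1 when no distance falls under the threshold (the 0.6 value is an assumption):

def compare_faces(known_face_encodes, feature_vector, threshold=0.6):
    # distance of the new vector to every stored sample, per class
    dists = np.linalg.norm(known_face_encodes - feature_vector, axis=2)  # (n_classes, n_samples)
    per_class = dists.min(axis=1)
    best = int(np.argmin(per_class))
    return best if per_class[best] <= threshold else -1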