Code example #1
def recognize(frame: np.ndarray):
    recognized_faces = []
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)

    for face in faces:
        label = 'Unknown'
        color = (75, 74, 224)
        shape = shape_predictor(gray, face)  # 5 points
        aligned_face = dlib.get_face_chip(frame, shape)
        face_descriptor = model.compute_face_descriptor(aligned_face)
        face_descriptor = np.array(face_descriptor)
        distance = 1.0  # start above any plausible match distance
        for name in db_faces:
            verified_face = db_faces[name]
            distance = get_euclidean_distance(verified_face, face_descriptor)
            if distance <= THRESHOLD:
                label = name
                color = (229, 160, 21)
                break
        recognized_faces.append({
            'bbox': face,
            'descriptor': face_descriptor,
            'distance': distance,
            'color': color,
            'name': label,
            'shape': shape
        })

    return recognized_faces
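`get_euclidean_distance` and `THRESHOLD` are defined elsewhere in this project; a minimal sketch, assuming plain L2 distance over dlib's 128-D descriptors (0.6 is the cutoff dlib's own documentation suggests):

import numpy as np

THRESHOLD = 0.6  # dlib suggests ~0.6 for its ResNet face descriptors

def get_euclidean_distance(a: np.ndarray, b: np.ndarray) -> float:
    # Plain L2 distance between two 128-D descriptors
    return float(np.linalg.norm(a - b))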
Code example #2
def align_faces(image: np.ndarray, padding: float = 0.4, size: int = 140,
                predictor_path: str = os.path.join(
                    os.path.dirname(os.path.dirname(__file__)),
                    'model/shape_predictor_5_face_landmarks.dat')):
    """
    Get the aligned face from an image if exactly one face is detected,
    otherwise just resize the full image.

    Parameters
    ----------
    image          : numpy array with dtype uint8 and shape (H, W, 3)
        Image to be processed
    padding        : float
        Padding around the aligned face chip
    size           : int
        Size of the returned image
    predictor_path : string
        Path to the dlib facial landmark predictor

    Returns
    -------
    result : numpy array with dtype uint8 and shape (size, size, 3)
        Processed image
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    rects = detector(image, 1)
    result = None

    # if detect exactly 1 face, get aligned face
    if len(rects) == 1:
        shape = predictor(image, rects[0])
        result = dlib.get_face_chip(image, shape, padding=padding, size=size)

    # else use resized full image
    else:
        result = resize_square_image(image, size)
    return result
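`resize_square_image` is a project helper not shown here; a plausible sketch, assuming it pads to a square before resizing so faces are not distorted:

import cv2
import numpy as np

def resize_square_image(image: np.ndarray, size: int) -> np.ndarray:
    # Pad the shorter side with black so the image is square, then resize
    h, w = image.shape[:2]
    side = max(h, w)
    canvas = np.zeros((side, side, 3), dtype=np.uint8)
    top, left = (side - h) // 2, (side - w) // 2
    canvas[top:top + h, left:left + w] = image
    return cv2.resize(canvas, (size, size))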
Code example #3
def get_face_descriptor(image, box, show_face_chip=False):
    x, y, w, h = box
    shape = sp(image, dlib.rectangle(x, y, x + w, y + h))
    face_chip = dlib.get_face_chip(image, shape)
    if show_face_chip:
        # get_face_chip defaults to a 150x150 chip, so this overlay fits the corner
        image[0:150, 0:150] = face_chip
    return facerec.compute_face_descriptor(face_chip)
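`sp` and `facerec` here are module-level dlib models; a typical setup looks like the following (the file paths are assumptions; both models are available from dlib.net/files):

import dlib

sp = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')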
Code example #4
File: data.py  Project: BSolut/faceautoencoder
    def process(self, file_name, show_debug=True):
        img = cv2.imread(file_name)
        if self.is_gray(img):
            return False

        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        dets = self.faca_detector(img_rgb)
        if len(dets) == 0:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
            dets = []
            for (x, y, w, h) in faces:
                dets.append(dlib.rectangle(left=x, top=y, right=x + w, bottom=y + h))

        result = []
        for i, det in enumerate(dets):
            if det.right() - det.left() < self.cfg.IMAGE_SIZE-20:
                continue

            faces = dlib.full_object_detections()
            faces.append(self.shape_predictor(img_rgb, det))

            target = dlib.get_face_chip(img_rgb, faces[0], size=self.cfg.IMAGE_SIZE, padding=self.cfg.IMAGE_PADDING)
            target = cv2.cvtColor(target, cv2.COLOR_RGB2BGR)

            result.append(target)
            if show_debug:
                cv2.imshow('frame', cv2.resize(target, (self.cfg.IMAGE_SIZE*2, self.cfg.IMAGE_SIZE*2)))
                cv2.waitKey(1)

        return result
Code example #5
    def get_crop_faces_(self, img, boxes_set):
        crop_faces = []
        for boxes in boxes_set:
            rect = dlib.rectangle(boxes[0], boxes[1], boxes[2], boxes[3])
            full_object_detection = self.landmarks_5_detector(img, rect)
            crop_faces.append(dlib.get_face_chip(img, full_object_detection))
        return crop_faces
Code example #6
File: data.py  Project: ankur-rc/facerec_dml
def get_jittered_images(image_path, num_jitters=5, disturb_colors=False):

    face_detector = FaceDetector(face_area_threshold=0.0)
    landmark_predictor = LandmarkDetector()

    rgbImg = cv2.imread(image_path)
    grayImg = None
    if rgbImg is None:
        raise Exception("Image could not be loaded!")
    elif rgbImg.shape[2] == 3:
        grayImg = cv2.cvtColor(rgbImg, cv2.COLOR_BGR2GRAY)
    else:
        grayImg = rgbImg

    # Ask the detector to find the bounding boxes of each face.
    face_bb = face_detector.detect_unbounded(grayImg)

    landmarks = landmark_predictor.predict(bounding_box=face_bb[0],
                                           grayImg=grayImg)

    aligned_face = dlib.get_face_chip(cv2.cvtColor(grayImg,
                                                   cv2.COLOR_GRAY2RGB),
                                      landmarks,
                                      size=320,
                                      padding=1.0)

    jittered_images = dlib.jitter_image(aligned_face,
                                        num_jitters=num_jitters,
                                        disturb_colors=disturb_colors)

    return jittered_images
Code example #7
    def get_faces2(self, image, get_largest=True, return_shape=False):
        res = None
        shp = None
        _, _, pts = self.detector.detect(Image.fromarray(image), landmarks=True)
        if pts is not None:
            if get_largest:
                pts = [pts[0]]

            res = np.zeros((len(pts), self.out_size, self.out_size, 3), dtype=np.uint8)
            shp = []
            for i, pt in enumerate(pts):
                face = np.array((min(pt[:, 0]), min(pt[:, 1]), max(pt[:, 0]), max(pt[:, 1])))
                w, h = face[2] - face[0], face[3] - face[1]
                ex = np.abs(h - w) / 2.
                if h > w:
                    face[[0, 2]] = face[0] - ex, face[2] + ex
                else:
                    face[[1, 3]] = face[1] - ex, face[3] + ex

                ex = max(h, w) * 0.5
                face[:] = face[0] - ex, face[1] - ex, face[2] + ex, face[3] + ex

                rec = dlib.rectangle(*face.astype(int))  # dlib.rectangle expects integer coordinates
                shaped = self.shaper(image, rec)
                shp.append(shaped)
                aligned = dlib.get_face_chip(image, shaped, size=self.out_size)
                res[i] = aligned

        if return_shape:
            return res, shp, pts
        return res
Code example #8
    def __getitem__(self, idx):
        i = idx * batch_size

        length = min(batch_size, (len(self.samples) - i))
        batch_inputs = np.empty((3, length, img_size, img_size, channel), dtype=np.float32)
        batch_dummy_target = np.zeros((length, embedding_size * 3), dtype=np.float32)

        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target
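`aug_pipe` (an augmentation pipeline) and `preprocess_input` come from the project's utilities; a minimal stand-in for `preprocess_input`, assuming the backbone expects inputs scaled to [-1, 1] as many Keras models do:

import numpy as np

def preprocess_input(image):
    # Scale uint8 RGB pixels to the [-1, 1] range
    return image.astype(np.float32) / 127.5 - 1.0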
Code example #9
def extract_aligned_face(face_file_path):
    predictor_path = 'C:\\Dlib\\dlib-19.15.0\\python_examples\\shape_predictor_5_face_landmarks.dat'

    # Load all the models we need: a detector to find the faces, a shape predictor
    # to find face landmarks so we can precisely localize the face
    detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(predictor_path)

    # Load the image using Dlib
    img = dlib.load_rgb_image(face_file_path)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)

    num_faces = len(dets)
    if num_faces == 0:
        print(
            "Sorry, there were no faces found in '{}'".format(face_file_path))
        exit()

    # Find the 5 face landmarks we need to do the alignment.
    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(sp(img, detection))

    image = dlib.get_face_chip(img, faces[0], size=320)
    return image
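A short usage sketch (the file names are assumptions); note the RGB-to-BGR conversion before saving with OpenCV, since dlib.load_rgb_image returns RGB:

import cv2

chip = extract_aligned_face('person.jpg')  # input path is an assumption
cv2.imwrite('person_aligned.jpg', cv2.cvtColor(chip, cv2.COLOR_RGB2BGR))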
Code example #10
    def get_faces(self, image, get_largest=True, return_shape=False):
        res = None
        shp = None
        faces, _, pts = self.detector.detect(Image.fromarray(image), landmarks=True)
        if faces is not None:
            if get_largest:
                faces = [faces[0]]

            res = np.zeros((len(faces), self.out_size, self.out_size, 3), dtype=np.uint8)
            shp = []
            for i, face in enumerate(faces):
                # Adjust to square.
                w = face[2] - face[0]
                h = face[3] - face[1]
                ex = np.abs(h - w) / 2.
                if h > w:
                    face[[0, 2]] = face[0] - ex, face[2] + ex
                else:
                    face[[1, 3]] = face[1] - ex, face[3] + ex

                # Move rec to center of the face.
                cent_face = np.array((sum(pts[i, :, 0]) / 5, sum(pts[i, :, 1]) / 5))
                cent_rect = np.array(((face[0] + face[2]) / 2, (face[1] + face[3]) / 2))
                d = cent_face - cent_rect
                face[:] = face[0] + d[0], face[1] + d[1], face[2] + d[0], face[3] + d[1]

                rec = dlib.rectangle(*face.astype(int))  # dlib.rectangle expects integer coordinates
                shaped = self.shaper(image, rec)
                shp.append(shaped)
                aligned = dlib.get_face_chip(image, shaped, size=self.out_size)
                res[i] = aligned

        if return_shape:
            return res, shp, pts
        return res
Code example #11
    def face_update(self, _image_base64, _info):
        """
        Re-registers a single picture.
        :param _image_base64: image encoded in base64
        :param _info: a dict, e.g. {"user_id": "10098440", "group_id": "staff", "gender": "female", "user_info": "康佳慧"}
        :return: None, results are written into the MySQL database
        """
        self.cursor.execute('use face_rec;')
        image = base64_to_image(_image_base64)
        user_id = _info["user_id"]
        group_id = _info["group_id"]
        gender = _info["gender"]
        user_info = _info["user_info"]
        image_path = os.path.join(os.getcwd(), 'register_img', user_id + ".jpg")
        cv2.imwrite(image_path, image)
        # get the face's feature
        faces = self.detector(image, 1)
        if len(faces) != 1:
            print("There must be one and only one face in the image!")
            return 0
        shape = self.sp(image, faces[0])
        face_chip = dlib.get_face_chip(image, shape)
        # serialize the 128-D descriptor to raw bytes (tostring() is a deprecated alias)
        face_descriptor = np.array(self.facerec.compute_face_descriptor(face_chip)).tobytes()
        statement = """update users set group_id=%s, gender=%s, user_info=%s, face_feature=%s, image_path=%s,
        latest_modify_time=NOW() where user_id=%s;"""
        self.cursor.execute(statement, (group_id, gender, user_info, face_descriptor, image_path, user_id))
        self.conn.commit()
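Because the descriptor is serialized with tobytes(), reading it back from the database needs np.frombuffer with the matching dtype; a sketch (the helper name is an assumption):

import numpy as np

def decode_descriptor(raw: bytes) -> np.ndarray:
    # Inverse of np.array(descriptor).tobytes(): 128 float64 values
    return np.frombuffer(raw, dtype=np.float64)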
Code example #12
def face_detection_alignment(original_image):
    # Load the image using OpenCV
    bgr_img = cv2.imread(original_image)

    # Convert to RGB since dlib uses RGB images
    img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)

    # try the traditional HOG detector first, for efficiency
    dets = detector(img, 1)
    num_faces = len(dets)

    # If the HOG detector found no face, fall back to the CNN detector
    if num_faces == 0:
        cnn_dets = cnn_face_detector(img, 1)
        dets = dlib.rectangles()
        dets.extend([d.rect for d in cnn_dets])
        num_faces = len(dets)
        if num_faces == 0:
            print('There is no face in the image')
            return None

    # Find the 5 face landmarks we need to do the alignment.
    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(sp(img, detection))

    image = dlib.get_face_chip(img, faces[0], size=224, padding=0.3)
    cv_bgr_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # cv2.imshow('annotShow', cv_bgr_img)
    # cv2.waitKey(0)

    return cv_bgr_img
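The module-level detector, cnn_face_detector, and sp used above follow dlib's standard setup; a sketch with assumed model paths (the CNN weights are mmod_human_face_detector.dat from dlib.net/files):

import dlib

detector = dlib.get_frontal_face_detector()
cnn_face_detector = dlib.cnn_face_detection_model_v1('mmod_human_face_detector.dat')
sp = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')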
Code example #13
def compute_facial_landmarks(image):
    global face_predictor
    global face_detector

    if face_predictor is None:
        #face_detector = dlib.cnn_face_detection_model_v1("resources/mmod_human_face_detector.dat")
        face_detector = dlib.get_frontal_face_detector()
        face_predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")

    faces = face_detector(image, 1)
    blank_image = np.full(image.shape, 255, np.uint8)
    face_chip = None
    for (i, face) in enumerate(faces):
        #shape = face_predictor(image, face.rect)
        shape = face_predictor(image, face)
        shape_np = face_utils.shape_to_np(shape)

        #for (x, y) in shape_np:
            #cv2.circle(blank_image, (x,y), 0, (0, 0, 0))

        draw_facial_landmarks(blank_image, shape_np)

        face_chip = dlib.get_face_chip(blank_image, shape, 150, 0.33)
        face_chip = cv2.cvtColor(face_chip, cv2.COLOR_RGB2GRAY)
        
    return face_chip
Code example #14
def test_face_jitter():
    """
    This example shows how faces were jittered and augmented to create training
    data for dlib's face recognition model.  It takes an input image and
    disturbs the colors as well as applies random translations, rotations, and
    scaling.
    :return:
    """
    img_path = 'data/running_man.jpg'
    img = cv2.imread(img_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # get face
    hog_face_detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')

    rects, scores, idx = hog_face_detector.run(img_rgb, 2, 0)
    faces = dlib.full_object_detections()
    for rect in rects:
        faces.append(shape_predictor(img_rgb, rect))

    face_image = dlib.get_face_chip(img_rgb, faces[0], size=80)

    # jitter face
    jittered_images = dlib.jitter_image(face_image, num_jitters=4, disturb_colors=True)
    for idx, image in enumerate(jittered_images):
        image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow('jittered_image_{}'.format(idx), image_bgr)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #15
def test_face_alignment():
    """
    input: image containing faces
    output: aligned faces

    You can get the shape_predictor_5_face_landmarks.dat from:
    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2
    :return:
    """
    img_path = 'data/running_man.jpg'
    img = cv2.imread(img_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    hog_face_detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')

    rects, scores, idx = hog_face_detector.run(img_rgb, 2, 0)
    faces = dlib.full_object_detections()
    for rect in rects:
        faces.append(shape_predictor(img_rgb, rect))

    # get the aligned face images
    images = dlib.get_face_chips(img, faces, size=320)
    for ind, image in enumerate(images):
        print(ind)
        # image_patch = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow('patch{}'.format(ind), image)

    # get a single chip
    print('faces[0]', faces[0])
    image = dlib.get_face_chip(img, faces[0], size=80)
    cv2.imshow('single chip', image)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #16
def detect_face(detector, img, align=True):

    import dlib  # this dependency is optional, which is why it is imported locally

    home = str(Path.home())

    sp = detector["sp"]

    detected_face = None
    img_region = [0, 0, img.shape[0], img.shape[1]]

    face_detector = detector["face_detector"]
    detections = face_detector(img, 1)

    if len(detections) > 0:

        for idx, d in enumerate(detections):
            left = d.left()
            right = d.right()
            top = d.top()
            bottom = d.bottom()
            detected_face = img[top:bottom, left:right]
            img_region = [left, top, right - left, bottom - top]
            break  #get the first one

        if align:
            img_shape = sp(img, detections[0])
            detected_face = dlib.get_face_chip(img,
                                               img_shape,
                                               size=detected_face.shape[0])

    return detected_face, img_region
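Here detector is a dict assembled by the caller; a sketch of how it is presumably built (the landmark model path is an assumption):

import dlib

detector = {
    "face_detector": dlib.get_frontal_face_detector(),
    "sp": dlib.shape_predictor('shape_predictor_5_face_landmarks.dat'),
}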
Code example #17
def cropFace(root, target_path, Use_Count=False):
    formats = ['jpg', 'png', 'jpeg']
    for root, dirs, files in os.walk(root):
        for i in dirs:
            if not os.path.exists(os.path.join(target_path, i)):
                print('create dir:', i)
                os.mkdir(os.path.join(target_path, i))
        print('crop face and save....')
        if Use_Count:
            count = 0
        for i in files:
            path = os.path.join(root, i)
            emotion = path.split('/')[-2]
            if path.split('/')[-1].split('.')[-1].lower() in formats:
                img = cv2.imread(path)
                face_area = detector(img, 1)
                if len(face_area):
                    face_area = face_area[0]
                    # faces = dlib.full_object_detections()
                    # faces.append(sp(img,face_area))
                    face = dlib.get_face_chip(img, sp(img, face_area), size=224)
                    cv2.imwrite(os.path.join(target_path, emotion, i), face)
                    print('crop face: ', os.path.join(target_path, emotion, i))
                    # face.save(os.path.join(target_path,i))
            if Use_Count:
                count += 1
                if count == 200:
                    break
Code example #18
File: DlibWrapper.py  Project: mwaiton/deepface
def detect_face(face_detector, img):

	import dlib  # this dependency is optional, which is why it is imported locally

	home = str(Path.home())

	sp = dlib.shape_predictor(home+"/.deepface/weights/shape_predictor_5_face_landmarks.dat")

	detected_face = None
	img_region = [0, 0, img.shape[0], img.shape[1]]

	detections = face_detector(img, 1)

	if len(detections) > 0:

		for idx, d in enumerate(detections):
			left = d.left(); right = d.right()
			top = d.top(); bottom = d.bottom()
			detected_face = img[top:bottom, left:right]
			img_region = [left, top, right - left, bottom - top]
			break #get the first one

		img_shape = sp(img, detections[0])
		detected_face = dlib.get_face_chip(img, img_shape, size = detected_face.shape[0])

	return detected_face, img_region
Code example #19
    def getfacefeature(self, img):
        image = dlib.load_rgb_image(img)
        ## face alignment and cropping
        # face detection
        dets = self.detector(image, 1)
        if len(dets) == 1:
            # faces = dlib.full_object_detections()
            # landmark extraction
            shape = self.predictor(image, dets[0])
            print("Computing descriptor on aligned image ..")
            # face alignment
            images = dlib.get_face_chip(image, shape, size=self.img_size)

            self.point_draw(image, shape, 'before_image_warping', save=True)
            shapeimage = np.array(images).astype(np.uint8)
            dets = self.detector(shapeimage, 1)
            if len(dets) == 1:
                point68 = self.predictor(shapeimage, dets[0])
                self.point_draw(shapeimage,
                                point68,
                                'after_image_warping',
                                save=True)

            # compute the 128-D feature vector of the aligned face
            face_descriptor_from_prealigned_image = self.recognition.compute_face_descriptor(
                images)
            return face_descriptor_from_prealigned_image
        return None
Code example #20
    def extract_feature(self, img, show=False):
        has_face, package = self.get_face_locations(img)

        if not has_face:
            if show:
                show_image(img, None, 'No face')
            return
        else:
            rects, scores, idx = package
            rect_item = dict()
            for cur_rect, score, idx in zip(rects, scores, idx):
                size = cur_rect.bottom() - cur_rect.top()
                if not rect_item or size > rect_item['size']:
                    rect_item = {
                        'size': size,
                        'rect': cur_rect,
                        'score': score,
                        'direction': FaceDirection(idx).name,
                        'detect_obj': self.shape_predictor(img, cur_rect)
                    }

            if show:
                crop_img = dlib.get_face_chip(img,
                                              rect_item['detect_obj'],
                                              size=320)
                cv2.imshow('crop-align', crop_img)

                text = 'Score: {:.5f}, Direction: {}'.format(
                    rect_item['score'], rect_item['direction'])
                bbox = self.fix_bbox(img, rect_item['rect'])
                show_image(img, bbox, text)

            return self.get_face_encoding(img, rect_item['detect_obj'])
Code example #21
def face(img, alignment=False):
    '''
    Detect, crop, and optionally normalize (align) the face.
    Assumes the image contains exactly one frontal face.

    Parameters
    ----------
    img       : array, shape (height, width, 3), the image in RGB representation which contains the face
    alignment : boolean, default False, whether to align (normalize) the face
    '''

    rect = None

    # sanity check: if the detector cannot find a face, return None
    try:
        rect = dlib_detector(img, 0)[0]
    except IndexError:
        return None

    # without alignment, return the raw crop
    if not alignment:
        return img[rect.top():rect.bottom(), rect.left():rect.right()]

    faces = dlib.full_object_detections()
    faces.append(predictor(img, rect))
    img = dlib.get_face_chip(img, faces[0], 224)
    return img
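dlib_detector and predictor are module-level globals in the original project; a minimal sketch, assuming the 5-point landmark model:

import dlib

dlib_detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')  # path assumed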
Code example #22
File: main.py  Project: ahmetsina/Proje
def a_crop(imagePath):
    img = cv2.imread(imagePath)
    dets = detector(img, 1)

    num_faces = len(dets)
    if num_faces == 0:
        print("Sorry, there were no faces found in '{}'".format(imagePath))

    else:
        faces = dlib.full_object_detections()
        for detection in dets:
            faces.append(predictor(img, detection))
        filename = str(imagePath).split("/")[-1]
        print(filename)
        fname, ext = os.path.splitext(filename)
        # It is also possible to get a single chip
        image = dlib.get_face_chip(img, faces[0], size=64, padding=0.40)
        #file = "/Users/ahmetsina/PycharmProjects/GenderAnalyzer/FeatureExtraction/images/" +fname+ext
        cv2.imwrite(
            "./images/test_org_images/{}_aligned_cropped.jpg".format(fname),
            image)
        #cv2.imwrite(fname + ext, image)
        #os.system("mv {} FeatureExtraction/images/a_test_images/".format(fname+ext))

        cv2.waitKey()
    return num_faces
Code example #23
def detect_face(image_file_name):
    """
    Helper function to run the facial detection and alignment process using
    dlib.  Detects a given face and aligns it using dlib's 5 point landmark
    detector.


    Parameters
    ----------
    image_file_name : str
        image file location

    Returns
    -------
    list : [PIL.Image, tuple]
        Resized face image and nose tip location
    """
    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(
        './shape_predictor_5_face_landmarks.dat')
    image = dlib.load_rgb_image(image_file_name)
    dets = detector(image, 1)

    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(shape_predictor(image, detection))

    face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=300))
    landmarks = fr.face_landmarks(np.array(face_image))

    return [face_image, landmarks[0]['nose_tip'][2]]
Code example #24
    def detection(self, f):  # f: path to the image
        self.setup()

        print("Processing file: {}".format(f))
        img = io.imread(f)  # read the image at the given path into img
        dets = self.detector(img, 1)  # 1 = upsample the image once
        print("Number of faces detected: {}".format(
            len(dets)))  # report how many faces were found

        for k, d in enumerate(dets):  # d holds one face detection
            data = []
            data.append(d.left())  # face coordinates
            data.append(d.top())
            data.append(d.right())
            data.append(d.bottom())

            self.imageDataList.append(data)
            # extract the face embedding: a 128-element vector
            shape = self.sp(img, d)
            face_descriptor = self.facerec.compute_face_descriptor(img, shape)
            face_chip = dlib.get_face_chip(img, shape)
            face_descriptor_from_prealigned_image = self.facerec.compute_face_descriptor(
                face_chip)

            self.listToStr(face_descriptor_from_prealigned_image, f)
Code example #25
def processOneImage(filename):
    (predictor_path, face_rec_model_path, faces_folder_path,
     detector, sp, facerec, win) = initializeFaceRecognition()
    img = dlib.load_rgb_image(filename)

    win.clear_overlay()
    win.set_image(img)
    dets = detector(img, 1)
    #print("Number of faces detected: {}".format(len(dets)))

    # Now process each face we found.
    for k, d in enumerate(dets):
        #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        #   k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = sp(img, d)
        # Draw the face landmarks on the screen so we can see what face is currently being processed.
        win.clear_overlay()
        win.add_overlay(d)
        win.add_overlay(shape)
        face_descriptor = facerec.compute_face_descriptor(img, shape)
        #print(face_descriptor)
        #print("Computing descriptor on aligned image ..")

        # Let's generate the aligned image using get_face_chip
        face_chip = dlib.get_face_chip(img, shape)

        # Now we simply pass this chip (aligned image) to the api
        face_descriptor_from_prealigned_image = facerec.compute_face_descriptor(
            face_chip)
        return face_descriptor_from_prealigned_image  # note: returns after the first detected face
Code example #26
File: prep_data.py  Project: xieguochen/Agendernet
def align_and_save(path: str):
    """
    Get aligned face and save to disk

    Parameters
    ----------
    path : string
        path to image

    Returns
    -------
    integer
        flag: 1 if a face was successfully detected and saved, 0 otherwise
    """

    RES_DIR = '{}_aligned'.format(DATASET)
    if os.path.exists(os.path.join(RES_DIR, path)):
        return 1
    flname = os.path.join(DATASET, path)
    image = cv2.imread(flname)
    detector = dlib.get_frontal_face_detector()
    rects = detector(image, 0)
    # if detect exactly 1 face, get aligned face
    if len(rects) == 1:
        shape = predictor(image, rects[0])
        result = dlib.get_face_chip(image, shape, padding=0.4, size=140)
        folder = os.path.join(RES_DIR, path.split('/')[0])
        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        flname = os.path.join(RES_DIR, path)
        if not os.path.exists(flname):
            cv2.imwrite(flname, result)
        return 1
    return 0
Code example #27
    def face_alignment(self, img_path, padding):
        img = cv2.imread(str(img_path), 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        dets = self.detector(img, 1)
        if len(dets) == 0:
            return img
        face = self.sp(img, dets[0])
        return dlib.get_face_chip(img, face, 224, padding=padding)
Code example #28
def cropped_and_aligned(img,
                        detector,
                        shape_predictor,
                        size=224,
                        method='hog'):
    if method == 'hog':
        faceRects = detector(img, 0)
        if len(faceRects) < 1:
            raise ValueError('Could not find any faces.')
        elif len(faceRects) > 1:
            warn('Found {} faces, using the most central one'.format(
                len(faceRects)))

            h, w, d = img.shape
            best = 0
            best_distance = float('inf')
            for i in range(len(faceRects)):
                x1 = faceRects[i].left()
                y1 = faceRects[i].top()
                x2 = faceRects[i].right()
                y2 = faceRects[i].bottom()
                x = (x1 + x2) / 2
                y = (y1 + y2) / 2
                # squared distance from the image center (x is horizontal, so compare with w)
                dist = (w / 2 - x)**2 + (h / 2 - y)**2
                if dist < best_distance:
                    best_distance = dist
                    best = i

            rect = faceRects[best]
        else:
            rect = faceRects[0]

    elif method == 'cnn':
        dets = detector(img, 0)
        if len(dets) < 1:
            raise ValueError('Could not find any faces.')
        elif len(dets) > 1:
            warn(
                'Found {} faces, using the one with the highest confidence score.'
                .format(len(dets)))
            best = 0
            best_score = -999
            for i in range(len(dets)):
                if dets[i].confidence > best_score:
                    best = i
                    best_score = dets[i].confidence

            rect = dets[best].rect

        else:
            rect = dets[0].rect

    else:
        raise ValueError('unknown method: {}'.format(method))

    detection = rect
    image = dlib.get_face_chip(img, shape_predictor(img, detection), size=size)
    return image
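A short usage sketch for the HOG path (the image path and model path are assumptions):

import cv2
import dlib

img = cv2.cvtColor(cv2.imread('photo.jpg'), cv2.COLOR_BGR2RGB)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
chip = cropped_and_aligned(img, detector, predictor, size=224, method='hog')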
Code example #29
    def __extract_landmarks(self, train_image, detected_face):
        # Get the landmarks/parts for the face in box d.
        # shape = self.__shape_predictor_5_point(train_image, detected_face)
        shape = self.__shape_predictor_68_point(train_image, detected_face)

        # Let's generate the aligned image using get_face_chip
        face_chip = dlib.get_face_chip(train_image, shape)

        return face_chip
Code example #30
def detect(x):
    dets = detector(x, 1)
    boxes = dlib.full_object_detections()
    for i, detection in enumerate(dets):
        boxes.append(sp(x, detection))
    faces = []
    for i in range(len(boxes)):
        faces.append(dlib.get_face_chip(x, boxes[i], size=160))
    return {"faces": np.array(faces).astype('uint8')}
Code example #31
File: test_numpy_returns.py  Project: 20337112/dlib
def get_test_face_chip():
    rgb_img, shape = get_test_image_and_shape()
    return dlib.get_face_chip(rgb_img, shape)
Code example #32
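This snippet is from dlib's face_alignment.py example and starts mid-script; the preceding setup is roughly the following (the file paths are assumptions):

import dlib

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
face_file_path = 'faces/group_photo.jpg'  # path is an assumption
img = dlib.load_rgb_image(face_file_path)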
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

window = dlib.image_window()

# Get the aligned face images
# Optionally: 
# images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
    window.set_image(image)
    dlib.hit_enter_to_continue()

# It is also possible to get a single chip
image = dlib.get_face_chip(img, faces[0])
window.set_image(image)
dlib.hit_enter_to_continue()
Code example #33
File: face_jitter.py  Project: DEVESHTARASIA/dlib
    print("Sorry, we could not load '{}' as an image".format(face_file_path))
    exit()

# Convert to RGB since dlib uses RGB images
img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)

# Ask the detector to find the bounding boxes of each face.
dets = detector(img)

num_faces = len(dets)

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

# Get the aligned face image and show it
image = dlib.get_face_chip(img, faces[0], size=320)
cv_bgr_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('image', cv_bgr_img)
cv2.waitKey(0)

# Show 5 jittered images without data augmentation
jittered_images = dlib.jitter_image(image, num_jitters=5)
show_jittered_images(jittered_images)

# Show 5 jittered images with data augmentation
jittered_images = dlib.jitter_image(image, num_jitters=5, disturb_colors=True)
show_jittered_images(jittered_images)
cv2.destroyAllWindows()
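show_jittered_images is a small display helper from the same example script; a sketch that shows each jittered image in turn:

def show_jittered_images(jittered_images):
    # Display each jittered RGB image (converted to BGR for OpenCV)
    for idx, image in enumerate(jittered_images):
        cv2.imshow('jittered_image_{}'.format(idx), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    cv2.waitKey(0)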
Code example #34
#
#   This utility generates the test data required for the tests contained in test_numpy_returns.py
#
#   Also note that this utility requires Numpy which can be installed
#   via the command:
#       pip install numpy
import sys
import dlib
import numpy as np
import utils

if len(sys.argv) != 2:
    print(
        "Call this program like this:\n"
        "   ./generate_numpy_returns_test_data.py shape_predictor_5_face_landmarks.dat\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
    exit()


detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(sys.argv[1])

img = dlib.load_rgb_image("../../../examples/faces/Tom_Cruise_avp_2014_4.jpg")
dets = detector(img)
shape = predictor(img, dets[0])

utils.save_pickled_compatible(shape, "shape.pkl")

face_chip = dlib.get_face_chip(img, shape)
np.save("test_face_chip", face_chip)