Example #1
    def enumerate_faces(self, img, resize_wh=128):
        # Padding the image
        img = cv2.copyMakeBorder(img,
                                 PAD,
                                 PAD,
                                 PAD,
                                 PAD,
                                 cv2.BORDER_CONSTANT,
                                 value=255)
        grey = rgb2grey(img)

        # Sort larger faces first so that, when one person is detected as
        # two or more overlapping faces, the biggest detection wins
        faces = self.face_cascade.detectMultiScale(grey, 1.3, 5)
        faces = sorted(faces, key=lambda tup: tup[2], reverse=True)

        for face in faces:
            aligned_face = self.face_align(grey, face)
            aligned_face = cv2.bilateralFilter(aligned_face, 0, 5, 2)
            equaliser = cv2.createCLAHE(clipLimit=1.5)
            aligned_face = equaliser.apply(aligned_face)
            resized_aligned_face = cv2.resize(aligned_face,
                                              (resize_wh, resize_wh),
                                              interpolation=cv2.INTER_CUBIC)
            # shift the box back by PAD so its coordinates refer to the
            # original, unpadded image
            unpad_face = (face[0] - PAD, face[1] - PAD, face[2], face[3])

            # give back position (for plotting) and cropped image
            yield unpad_face, resized_aligned_face
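A usage sketch for the generator above, assuming it is called through the FacePipe class seen in Example #4 and that skimage.io supplies the bare imread/imsave calls used there; the file names are placeholders:

from skimage.io import imread, imsave    # assumed I/O backend

pipe = FacePipe()                        # FacePipe as constructed in Example #4
img = imread('group_photo.jpg')          # placeholder input path
for i, ((x, y, w, h), face_crop) in enumerate(pipe.enumerate_faces(img)):
    # the box has already been shifted back into the unpadded image's coordinates
    print('face %d at x=%d, y=%d, size %dx%d' % (i, x, y, w, h))
    imsave('face_%02d.png' % i, face_crop)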
Example #2
def img2vec(img):
    """Convert a 2D image into a flat vector."""
    assert 2 <= img.ndim <= 3, "Image must be 2D grey or RGB"
    assert img.shape[0] == img.shape[1], "Image must be square"
    img = rgb2grey(im2double(img))
    assert img.ndim == 2, \
        "Need grey image; got array with shape {}".format(img.shape)
    return img.flatten()
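A usage sketch, assuming img2vec above is in scope together with its rgb2grey/im2double helpers; stacking the flat vectors into an (n_samples, n_pixels) matrix is simply the layout most estimators (PCA for eigenfaces, classifiers, and so on) expect:

import numpy as np

crops = [np.random.rand(128, 128) for _ in range(10)]   # placeholder square grey crops
X = np.stack([img2vec(c) for c in crops])                # design matrix
print(X.shape)                                           # (10, 16384)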
Example #3
    def __call__(self, image):
        assert 2 <= image.ndim <= 3, \
            "Expecting 2D or 3D image, got shape %s" % (image.shape,)
        if image.ndim == 3:
            # collapse RGB down to a single grey channel
            rv = rgb2grey(image)
        else:
            rv = image
        if self.trailing_dim and rv.ndim < 3:
            # add a singleton channel dimension on the end
            rv = rv[..., None]
        return rv
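What the method does, spelled out with plain numpy; the rgb2gray import is an assumption (the project may define its own rgb2grey), and trailing_dim would be set on the owning object's constructor:

import numpy as np
from skimage.color import rgb2gray as rgb2grey   # assumed source of rgb2grey

rgb = np.random.rand(32, 32, 3)
grey = rgb2grey(rgb)             # (32, 32): RGB collapsed to one channel
with_channel = grey[..., None]   # (32, 32, 1): the trailing_dim=True output shape
print(grey.shape, with_channel.shape)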
Example #4
    print('Converting images in "%s"; writing results to "%s"' %
          (args.input_dir, args.output_dir))
    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass
    input_dir = os.path.abspath(args.input_dir)
    image_paths = get_paths(input_dir)
    pipe = FacePipe()
    for num, input_path in enumerate(image_paths, start=1):
        filename = os.path.basename(input_path)
        input_path_dirname = os.path.dirname(input_path)
        subdir = input_path_dirname[len(input_dir):]
        subdir = subdir.lstrip(os.path.sep)
        output_subdir = os.path.join(args.output_dir, subdir)
        os.makedirs(output_subdir, exist_ok=True)
        output_path = os.path.join(output_subdir, filename)
        print('\n%d. Processing "%s", storing in "%s"' %
              (num, input_path, output_path))
        im = imread(input_path)
        faces = list(pipe.enumerate_faces(im, args.size))
        if len(faces) == 0:
            print("Didn't detect any faces (!); not cropping at all")
            # this will stretch faces sometimes
            out_im = imresize(im, (args.size, args.size))
        else:
            if len(faces) > 1:
                print('Got %d faces; using the first only' % len(faces))
            _, out_im = faces[0]
        out_im = rgb2grey(out_im)
        imsave(output_path, out_im)
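This snippet starts partway through the script's main body and relies on an args namespace and a get_paths helper that are not shown; a minimal sketch of what they might look like (the argument names come from the code above, while the defaults and the extension whitelist are assumptions):

import argparse
import os


def get_paths(root):
    """Hypothetical stand-in for get_paths: yield every image file under root."""
    exts = ('.jpg', '.jpeg', '.png')
    for dirpath, _, filenames in os.walk(root):
        for name in sorted(filenames):
            if name.lower().endswith(exts):
                yield os.path.join(dirpath, name)


parser = argparse.ArgumentParser(
    description='Crop and align the largest detected face in each image')
parser.add_argument('input_dir', help='directory tree of input images')
parser.add_argument('output_dir', help='directory for cropped output images')
parser.add_argument('--size', type=int, default=128,
                    help='side length of the square output images')
args = parser.parse_args()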
Example #5
    def face_align(self, img, face):

        image_width = np.size(img, 1)
        image_height = np.size(img, 0)
        (x, y, w, h) = face

        # Target eye width as a fraction of the face box height
        EYEW_TARGET_RATIO = .25
        EYEW_TARGET = h * EYEW_TARGET_RATIO

        # Target mouth-to-eye distance as a fraction of the face box height
        MOUTH_EYE_TARGET_RATIO = .19
        MOUTH_EYE_TARGET = h * MOUTH_EYE_TARGET_RATIO

        # Target nose-to-eye distance as a fraction of the face box height
        NOSE_EYE_TARGET_RATIO = .12
        NOSE_EYE_TARGET = h * NOSE_EYE_TARGET_RATIO

        grey = rgb2grey(img)
        roi_grey = crop(grey, x, y, w, h)

        eye_pair = self._getEyePair(roi_grey)
        lEye, rEye = self._getEyes(roi_grey, face, eye_pair)
        mouth = self._getMouth(roi_grey)
        nose = self._getNose(roi_grey)

        # cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # If both eyes were detected, compute the roll angle, eye midpoint
        # and eye width directly from the two eye centres
        if lEye is not None and rEye is not None:
            eyeAngle = np.degrees(
                np.arctan((rEye.center[1] - lEye.center[1]) /
                          (rEye.center[0] - lEye.center[0])))
            mid_eye = np.mean([lEye.center, rEye.center], axis=0)
            eye_width = np.linalg.norm(lEye.center - rEye.center)

        # Fewer than two eyes detected: fall back to the eye-pair detection,
        # or to a fixed guess based on the face box
        else:
            eyeAngle = 0
            if eye_pair is not None:
                mid_eye = eye_pair.center
                eye_width = eye_pair.w * EYEPAIR_WIDTH_TO_EYE_WIDTH
            else:
                mid_eye = np.array([w * EYE_RATIO_WIDTH, h * EYE_RATIO_HEIGHT])
                eye_width = w * FACE_WIDTH_TO_EYE_WIDTH

        # Convert relative coordinate to absolute coordinate
        mid_eye_x = mid_eye[0] + x
        mid_eye_y = mid_eye[1] + y

        mouth_eye_dist = 0

        if mouth is not None:
            mouth_eye_dist = np.linalg.norm(mouth.center - mid_eye)

        nose_eye_dist = 0
        if nose is not None:
            nose_eye_dist = np.linalg.norm(nose.center - mid_eye)

        # Take the largest width and height implied by the eye width,
        # mouth-to-eye distance and nose-to-eye distance
        new_w = max(int((eye_width / EYEW_TARGET) * w),
                    int((mouth_eye_dist / MOUTH_EYE_TARGET) * w),
                    int((nose_eye_dist / NOSE_EYE_TARGET) * w))
        new_h = max(int((eye_width / EYEW_TARGET) * h),
                    int((mouth_eye_dist / MOUTH_EYE_TARGET) * h),
                    int((nose_eye_dist / NOSE_EYE_TARGET) * h))

        # Get homography matrix
        pts_src = np.array([[x, y], [x + w, y], [x, y + h], [x + w, y + h]])
        pts_dst = np.array([
            [mid_eye_x - new_w * EYE_RATIO_WIDTH,
             mid_eye_y - new_h * EYE_RATIO_HEIGHT],
            [mid_eye_x + new_w * (1 - EYE_RATIO_WIDTH),
             mid_eye_y - new_h * EYE_RATIO_HEIGHT],
            [mid_eye_x - new_w * EYE_RATIO_WIDTH,
             mid_eye_y + new_h * (1 - EYE_RATIO_HEIGHT)],
            [mid_eye_x + new_w * (1 - EYE_RATIO_WIDTH),
             mid_eye_y + new_h * (1 - EYE_RATIO_HEIGHT)],
        ])

        homography, status = cv2.findHomography(pts_src, pts_dst)
        img = cv2.warpPerspective(img, homography,
                                  (image_width, image_height))

        # Rotation
        if eyeAngle != 0:
            rotMatrix = cv2.getRotationMatrix2D((mid_eye_x, mid_eye_y),
                                                eyeAngle, 1)
            img = cv2.warpAffine(img, rotMatrix, (image_width, image_height))

        return crop(img, int(mid_eye_x - new_w * EYE_RATIO_WIDTH),
                    int(mid_eye_y - new_h * EYE_RATIO_HEIGHT), new_w, new_h)
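face_align relies on a crop(img, x, y, w, h) helper and several module-level constants (PAD, EYE_RATIO_WIDTH, EYE_RATIO_HEIGHT, EYEPAIR_WIDTH_TO_EYE_WIDTH, FACE_WIDTH_TO_EYE_WIDTH) that none of the excerpts define; a minimal sketch under the assumption that crop clamps its window to the image bounds, with placeholder constant values rather than the project's real ones:

# Placeholder values; the real constants live elsewhere in the project.
PAD = 50
EYE_RATIO_WIDTH = 0.5              # horizontal position of the eye midpoint in the crop
EYE_RATIO_HEIGHT = 0.4             # vertical position of the eye midpoint in the crop
EYEPAIR_WIDTH_TO_EYE_WIDTH = 0.6   # eye-pair box width -> eye-centre distance
FACE_WIDTH_TO_EYE_WIDTH = 0.4      # face box width -> eye-centre distance


def crop(img, x, y, w, h):
    """Return the window img[y:y+h, x:x+w], clamped to the image bounds."""
    x, y = max(int(x), 0), max(int(y), 0)
    x_end = min(x + int(w), img.shape[1])
    y_end = min(y + int(h), img.shape[0])
    return img[y:y_end, x:x_end]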