def extract_eyes(cv2_image):
    """Return a list of sub-images containing the eyes cut from the image.

    The first element is the left eye, the second the right eye, each a
    numpy slice of ``cv2_image``. Returns ``None`` when no face is found.
    """
    global _face_detector, _face_predictor
    # lazy one-time initialisation of the dlib detector/predictor
    if not _detectors_are_initialised():
        _initialize_detectors()
    gray_image = Utils.convert_to_gray_image(cv2_image)
    rects = _face_detector(gray_image, 0)
    if len(rects) > 0:
        # landmarks for the first detected face only
        shape = _face_predictor(gray_image, rects[0])
        shape = face_utils.shape_to_np(shape)
        eyes = []
        for eye in ["left_eye", "right_eye"]:
            # landmark index range delimiting this eye's contour
            (eye_start, eye_end) = face_utils.FACIAL_LANDMARKS_IDXS[eye]
            contour = shape[eye_start:eye_end]
            # axis-aligned bounding box of the contour:
            # upper-left point, then lower-right point
            start = [
                min(contour, key=lambda p: p[0])[0],
                min(contour, key=lambda p: p[1])[1]
            ]
            end = [
                max(contour, key=lambda p: p[0])[0],
                max(contour, key=lambda p: p[1])[1]
            ]
            # extract the current eye (numpy indexes rows=y first, cols=x second)
            eyes.append(cv2_image[start[1]:end[1], start[0]:end[0]])
        return eyes
    return None
def extract_face(cv2_image):
    """Return the face region cropped from the image.

    Only the first detected face is used. Returns ``None`` when no face
    is detected.
    """
    global _face_detector
    # lazy one-time initialisation of the dlib detector
    if not _detectors_are_initialised():
        _initialize_detectors()
    gray_image = Utils.convert_to_gray_image(cv2_image)
    rects = _face_detector(gray_image, 0)
    if len(rects) > 0:
        # only for the first face found
        (x, y, w, h) = face_utils.rect_to_bb(rects[0])
        # dlib may report a box that starts outside the frame for faces at
        # the border; a negative slice start would wrap around in numpy and
        # yield a garbage crop, so clamp the corner to the image.
        x, y = max(x, 0), max(y, 0)
        return cv2_image[y:y + h, x:x + w]
    return None
def extract_eye_strips(X):
    """Replace each image in `X` by its preprocessed eye strip.

    Each entry is cropped to the eye strip, resized to the configured strip
    dimensions, converted to grayscale and normalised to [0, 1]. Entries for
    which a step fails are left as ``None``. Returns a `np.array`.
    """
    for idx, image in enumerate(X):
        strip = face_detector.extract_eye_strip(image)
        X[idx] = strip
        # no face was detected in this image
        if strip is None:
            continue
        resized = resize_cv2_image(
            strip,
            fixed_dim=(Config.EYE_STRIP_WIDTH, Config.EYE_STRIP_HEIGHT))
        X[idx] = resized
        # resizing failed for this image
        if resized is None:
            continue
        # grayscale for now, then scale pixel values into [0, 1]
        gray = convert_to_gray_image(resized)
        X[idx] = np.true_divide(gray, 255)
    return np.array(X)
def extract_faces(X):
    """Replace each image in `X` by its preprocessed face crop.

    Each entry is cropped to the detected face, resized to the configured
    face dimensions, converted to grayscale and normalised to [0, 1].
    Entries for which a step fails are left as ``None``. Returns a `np.array`.
    """
    for idx, image in enumerate(X):
        face = face_detector.extract_face(image)
        X[idx] = face
        # no face was detected in this image
        if face is None:
            continue
        resized = resize_cv2_image(
            face,
            fixed_dim=(Config.FACE_WIDTH, Config.FACE_HEIGHT))
        X[idx] = resized
        # resizing failed for this image
        if resized is None:
            continue
        # grayscale for now, then scale pixel values into [0, 1]
        gray = convert_to_gray_image(resized)
        X[idx] = np.true_divide(gray, 255)
    return np.array(X)
def get_img_info(cv2_image):
    """Return a dictionary with necessary info about the image.

    Keys: ``"image"`` (the original image), ``"mouth_is_opened"`` and
    ``"eyes_are_opened"``. The latter two keep their placeholder defaults
    when no face is detected.
    """
    global _face_detector, _face_predictor
    # defaults returned unchanged when no face is found
    res = {
        "image": cv2_image,
        "mouth_is_opened": (None, 0),
        "eyes_are_opened": ((None, None), (0, 0)),
    }
    # lazy one-time initialisation of the dlib detector/predictor
    if not _detectors_are_initialised():
        _initialize_detectors()
    gray_image = Utils.convert_to_gray_image(cv2_image)
    rects = _face_detector(gray_image, 0)
    if len(rects) > 0:
        # landmarks for the first detected face only
        shape = _face_predictor(gray_image, rects[0])
        shape = face_utils.shape_to_np(shape)
        res["mouth_is_opened"] = _is_mouth_opened(shape)
        res["eyes_are_opened"] = _are_eyes_opened(shape)
    return res
def extract_eye_strip(cv2_image):
    """Return a horizontal image containing the two eyes cut from the image.

    The strip is the bounding box of both eye landmark groups, enlarged by
    20% horizontally and 60% vertically to capture more context. Returns
    ``None`` when no face is detected.
    """
    global _face_detector, _face_predictor
    # lazy one-time initialisation of the dlib detector/predictor
    if not _detectors_are_initialised():
        _initialize_detectors()
    gray_image = Utils.convert_to_gray_image(cv2_image)
    rects = _face_detector(gray_image, 0)
    if len(rects) > 0:
        # only for the first face found
        shape = _face_predictor(gray_image, rects[0])
        shape = face_utils.shape_to_np(shape)
        (left_eye_start, left_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (right_eye_start, right_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        # contiguous landmark index range covering both eyes
        start, end = min(left_eye_start, right_eye_start), max(left_eye_end, right_eye_end)
        strip = shape[start:end]
        # upper-left point, lower-right point of the bounding box
        start = [
            min(strip, key=lambda p: p[0])[0],
            min(strip, key=lambda p: p[1])[1]
        ]
        end = [
            max(strip, key=lambda p: p[0])[0],
            max(strip, key=lambda p: p[1])[1]
        ]
        # go a little outside the bounding box, to capture more details
        distance = (end[0] - start[0], end[1] - start[1])
        # 20 percent more details on the X axis, 60% more on the Y axis
        percents = [20, 60]
        for i in range(2):
            pad = int(percents[i] / 100 * distance[i])
            # clamp at 0: a negative slice start would wrap around in numpy
            # and produce an empty/garbage strip for faces near the border
            start[i] = max(start[i] - pad, 0)
            end[i] += pad
        return cv2_image[start[1]:end[1], start[0]:end[0]]
    return None
def extract_eyes_for_heatmap(cv2_image):
    """Return the two eye crops, enlarged by 40% in both axes, for heatmaps.

    The first element is the left eye, the second the right eye. Returns
    ``None`` when no face is detected.
    """
    global _face_detector, _face_predictor
    # lazy one-time initialisation of the dlib detector/predictor
    if not _detectors_are_initialised():
        _initialize_detectors()
    gray_image = Utils.convert_to_gray_image(cv2_image)
    rects = _face_detector(gray_image, 0)
    if len(rects) > 0:
        # landmarks for the first detected face only
        shape = _face_predictor(gray_image, rects[0])
        shape = face_utils.shape_to_np(shape)
        eyes = []
        for eye in ["left_eye", "right_eye"]:
            # landmark index range delimiting this eye's contour
            (eye_start, eye_end) = face_utils.FACIAL_LANDMARKS_IDXS[eye]
            contour = shape[eye_start:eye_end]
            # upper-left point, lower-right point of the contour's bounding box
            start = [
                min(contour, key=lambda p: p[0])[0],
                min(contour, key=lambda p: p[1])[1]
            ]
            end = [
                max(contour, key=lambda p: p[0])[0],
                max(contour, key=lambda p: p[1])[1]
            ]
            # increase a little bit the size of the eye (40% on each axis)
            distance = (end[0] - start[0], end[1] - start[1])
            percents = [40, 40]
            for i in range(2):
                pad = int(percents[i] / 100 * distance[i])
                # clamp at 0: a negative slice start would wrap around in
                # numpy and produce an empty/garbage crop near the border
                start[i] = max(start[i] - pad, 0)
                end[i] += pad
            # extract the current eye (rows = y, cols = x)
            eyes.append(cv2_image[start[1]:end[1], start[0]:end[0]])
        return eyes
    return None