Exemple #1
0
def construct_face_points(face: "Face") -> None:
    """Compute 3D reference points of the face (brows, chin) and its center.

    Reads ``face.gir_img`` (for the image shape), ``face.depth_img`` and
    ``face.landmarks``; stores the results in ``face.face_center`` and
    ``face.face_points``.  Coordinates are normalized to 0..1 and the third
    component is the depth sampled from ``face.depth_img``.
    """
    shape = face.gir_img.shape
    landmarks = face.landmarks

    # Component-wise integer average of a list of 2D points.
    avgl = lambda pts: tuple(int(sum(c) / len(c)) for c in zip(*pts))
    # Depth value at (x0, x1), with indices clamped into the image bounds.
    get_pixl = lambda x0, x1: face.depth_img[max(0, min(shape[0] - 1, x0)),
                                             max(0, min(shape[1] - 1, x1))]
    # Normalize a 2D point to 0..1 and attach its depth as third component.
    to3d = lambda p: (p[0] / shape[0], p[1] / shape[1], get_pixl(p[0], p[1]))

    def avgl3d(points: "list[tuple[int, int]]") -> np.ndarray:
        """Average the points in 3D, ignoring depth outliers (> 1 stdev)."""
        # Map 2D points to 3D by applying depth.
        points3d = np.array([(x / shape[0], y / shape[1], get_pixl(x, y))
                             for (x, y) in points])
        # Filter out the points too far from the mean depth.  At least one
        # point always lies within one stdev of the mean, so the selection
        # below is never empty.
        mean_depth = np.mean(points3d[:, 2])
        stdev = np.std(points3d[:, 2])
        points3d = points3d[np.abs(points3d[:, 2] - mean_depth) <= stdev]
        return points3d.sum(axis=0) / points3d.shape[0]

    chin_bottom = landmarks["chin"][6:11]

    right_brow = avgl(landmarks["right_eyebrow"])
    left_brow = avgl(landmarks["left_eyebrow"])
    forehead = avgl([right_brow, left_brow])
    right_brow3d = avgl3d(landmarks["right_eyebrow"])
    left_brow3d = avgl3d(landmarks["left_eyebrow"])
    # The chin is weighted 3x against the bottom lip to pull the point down.
    top_chin3d = avgl3d(landmarks["bottom_lip"] + 3 * chin_bottom)

    face.face_center = to3d(forehead)
    face.face_points = {"right_brow": right_brow3d,
                        "left_brow": left_brow3d,
                        "top_chin": top_chin3d}
Exemple #2
0
def drop_corner_values(face: Face) -> None:
    """
        Erase those pixels which are too close or too far to be treated as
        valuable data, then rescale both images into 0..1.
    """
    # Center depths around the median of the masked (face) region.
    depth_median = np.median(face.depth_img[face.mask])
    face.depth_img[face.mask] -= depth_median
    # NOTE(review): assumes the masked region has nonzero depth spread;
    # depth_stdev == 0 would zero the whole image below — confirm upstream.
    depth_stdev = np.std(face.depth_img[face.mask])
    # |allowed_dist| is the distance from the center in the maximum metric
    # inside which any depth value is allowed (never dropped).
    allowed_dist = 6
    for i in range(IMG_SIZE):
        for j in range(IMG_SIZE):
            if abs(i - IMG_SIZE // 2) < allowed_dist and abs(j - IMG_SIZE //
                                                             2) < allowed_dist:
                # Too close to the center of the face to drop those values
                # (might be a very pointy nose).
                continue
            # Outside the central window: treat anything beyond 2 stdevs
            # from the median as noise and zero it out.
            if abs(face.depth_img[i, j]) >= 2. * depth_stdev:
                face.depth_img[i, j] = 0

    # Re-normalize by the post-cleanup spread.
    depth_stdev = np.std(face.depth_img[face.mask])
    face.depth_img /= depth_stdev * 4

    # Clip the larger-magnitude tail so the depth range is symmetric
    # around zero before rescaling.
    mx = face.depth_img.max()
    mi = face.depth_img.min()
    if abs(mi) > abs(mx):
        face.depth_img = np.maximum(-abs(mx), face.depth_img)
    else:
        face.depth_img = np.minimum(abs(mi), face.depth_img)

    # Scale each dimension into interval 0..1
    rescale_one_dim(face.depth_img)
    rescale_one_dim(face.gir_img)
    face.depth_img *= DEPTH_TO_WIDTH_RATIO
def photo_to_greyd_face(gir_photo: np.ndarray, depth_photo: np.ndarray) -> Face:
    """ Converts full photo to just face image """
    # Locate face.  Input is scaled to uint8 0..255 as the library expects.
    face_coords = face_recognition.face_locations((gir_photo * 256).astype(np.uint8))
    # Process face detected by the library
    if len(face_coords) == 1:
        # NOTE(review): face_recognition returns (top, right, bottom, left),
        # so here x1=top, y1=right, x2=bottom, y2=left; the swap below turns
        # (y1, y2) into (left, right) — confirm against library docs.
        (x1, y1, x2, y2) = face_coords[0]
        margin = int(abs(x2-x1) * MARGIN_COEF)
        # Cut out RGB & D face images with margin |margin|,
        # clamping coordinates to the photo bounds.
        guard_x = lambda x: max(0, min(x, depth_photo.shape[0]))
        guard_y = lambda y: max(0, min(y, depth_photo.shape[1]))
        x1 = guard_x(x1-margin)
        x2 = guard_x(x2+margin)
        y1, y2 = y2, y1
        y1 = guard_y(y1-margin)
        y2 = guard_y(y2+margin)
        if x1 >= x2 or y1 >= y2:
            logging.warning("Face has non-positive area, returning (None, None)")
            return Face(None, None)
        depth_face = depth_photo[x1:x2, y1:y2]
        gir_face = gir_photo[x1:x2, y1:y2]
        # Resize both crops to the canonical square and normalize to 0..1.
        # NOTE(review): an all-zero crop would divide by zero here — verify
        # inputs are guaranteed non-degenerate.
        depth_face = tools.gray_image_resize(depth_face, (IMG_SIZE, IMG_SIZE))
        depth_face = depth_face/np.max(depth_face)
        gir_face = tools.gray_image_resize(gir_face, (IMG_SIZE, IMG_SIZE))
        gir_face /= np.max(gir_face)

        # Check if we can find any landmarks on the face. If not, it is useless.
        if not face_recognition.face_landmarks((gir_face * 256).astype(np.uint8)):
            return Face(None, None)

        return Face(gir_face, depth_face)
    else:
        # Face couldn't be detected
        return Face(None, None)
Exemple #4
0
def cut_around_mask(face: Face, color: float = BGCOLOR) -> None:
    """
        Erases contents of original image around mask.
        :param face: face whose images are modified in place
        :param color: value to fill outside the mask
    """
    # Boolean indexing replaces the per-pixel Python loop — one vectorized
    # pass instead of IMG_SIZE**2 iterations.  Assumes the images and the
    # mask share the (IMG_SIZE, IMG_SIZE) shape used throughout this module.
    outside = ~face.mask
    face.depth_img[outside] = color
    face.gir_img[outside] = color
Exemple #5
0
 def face_add(self, face_info):
     """
     Add a face to the face library.
     :param face_info: dict with user_id, face_name, face_class, base64_code
     :return: None; raises Exception on duplicates or remote failure
     """
     user_id = face_info.get('user_id')
     face_name = face_info.get('face_name')
     face_class = face_info.get('face_class')
     base64_code = face_info.get('base64_code')
     # Before enrolling, check whether this face is already registered.
     result = self.search_face_info(base64_code, user_id)
     if result and result.get('user_list'):
         for res in result.get('user_list'):
             if res.get('score') > FACE_ACCESS:
                 raise Exception("该人脸已经存在, 请勿重复录入")
     face = FaceDao.get_by_user_id_and_face_name(user_id, face_name)
     if face:
         raise Exception('face is exist')
     try:
         # Upload the image.
         face_url = FaceAuthUtils.base642imag(base64_code)
         face = Face.create(user_id, face_name, face_url, face_class)
         FaceDao.insert(face)
         params = {'image': base64_code, 'image_type': 'BASE64', 'group_id': user_id, 'user_id': face.id
             , 'quality_control': 'NORMAL'}
         access_token = RequestUtil.get_access_token()
         url = FACE_LIB_USER_ADD + "?access_token=" + str(access_token)
         resp = RequestUtil.send_post(url=url, params=json.dumps(params),
                                      headers={'content-type': 'application/json'})
         self.check_response(resp)
         db.session.commit()
     except Exception as e:
         db.session.rollback()
         FaceAuthUtils.save_exception(traceback.format_exc())
         # Python 3 exceptions have no .message attribute — the original
         # `e.message` raised AttributeError and hid the real error.
         raise Exception(str(e)) from e
Exemple #6
0
def generate_mask_from_skin(face: Face) -> None:
    """Build ``face.mask`` by thresholding skin color marks of the RGB image.

    Samples a probe grid from the central quarter of the image, takes the
    median skin mark as reference, and keeps pixels whose mark components
    lie within fixed bounds of that reference.
    """
    from itertools import product

    # np.bool / np.float were removed in NumPy >= 1.24; use the builtins.
    mask = np.zeros((IMG_SIZE, IMG_SIZE), dtype=bool)
    mark = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype=float)

    for x, y in product(range(IMG_SIZE), range(IMG_SIZE)):
        mark[x][y] = tools.rgb_skin_mark(*(face.rgb_img[x][y]))

    # Probe every 8th pixel in the central quarter of the image.
    probe = [
        mark[x][y]
        for x, y in product(range(2 * IMG_SIZE // 4, 3 * IMG_SIZE // 4, 8),
                            range(2 * IMG_SIZE // 4, 3 * IMG_SIZE // 4, 8))
    ]
    probe.sort(key=(lambda x: x[0]**2 + x[1]**2 + x[2]**2))

    # Mark of middle [probably] SKIN element
    mid_y, mid_cr, mid_cy = probe[len(probe) // 2]

    for x, y in product(range(IMG_SIZE), range(IMG_SIZE)):
        # Renamed from `y` to avoid shadowing the loop variable.
        lum, cr, cy = mark[x][y]
        l_bound = 0.918
        u_bound = 1.092
        mask[x][y] = (0.3 * mid_y <= lum <= 4 * mid_y) and (
            l_bound * mid_cr <= cr <=
            u_bound * mid_cr) and (l_bound * mid_cy <= cy <= u_bound * mid_cy)

    face.mask = mask

    # Display the mask if you want
    tools.show_image(tools.pic_with_applied_mask(face.rgb_img, mask))
def angle_from(face: Face) -> np.ndarray:
    """Return the rotation matrix derived from the face's reference points.

    Also stores the computed azimuth on ``face.azimuth`` as a side effect.
    """
    reference = (face.face_points["right_brow"],
                 face.face_points["top_chin"],
                 face.face_points["left_brow"])
    rotation, azimuth = calculate_rotation_matrix(*reference)
    face.azimuth = azimuth
    return rotation
Exemple #8
0
    def handle_user_session(self, id):
        """Create a Face record for the given open id if none exists yet.

        :param id: the open id of the user session
        :return: result of ``add_face`` on first sight of the id, else None
        """
        try:
            face = Face(unionId=None, faceCode=None, profile=None, openId=id)
            # `is 0` compared identity, not value (implementation-dependent
            # and a SyntaxWarning on modern Python) — use == for the count.
            if self.face_dao.count_face_by_open_id(id) == 0:
                return self.face_dao.add_face(face)

        except Exception as e:
            print("Happen:" + str(e))
Exemple #9
0
def hog_and_entropy(face: Face) -> Face:
    """Attach HOG and entropy-map features for both channels to the face.

    Computes features for the grey/IR image and the depth image, storing
    them as ``hog_*_image``, ``hog_*_fd`` and ``entropy_map_*_image``
    attributes.  Returns the same face for chaining.
    """
    for channel, img in (("gir", face.gir_img), ("depth", face.depth_img)):
        hog_img, descriptor = get_hog_of(img)
        setattr(face, "hog_%s_image" % channel, hog_img)
        setattr(face, "hog_%s_fd" % channel, descriptor)
        setattr(face, "entropy_map_%s_image" % channel,
                get_entropy_map_of(img))
    return face
Exemple #10
0
def recentre(face: Face) -> None:
    """Shift both images so the face center lands on CENTER_DEST.

    ``np.roll`` wraps pixels around the edges, so after each roll the
    wrapped-in band is zeroed out.
    """
    assert face.depth_img.shape == face.gir_img.shape
    logging.debug("\n\nRECENTRE")
    move_x = int((CENTER_DEST[0] - face.face_center[0]) * IMG_SIZE)
    move_y = int((CENTER_DEST[1] - face.face_center[1]) * IMG_SIZE)

    logging.debug("MOVE X MOVE Y %d %d" % (move_x, move_y))
    face.gir_img = np.roll(face.gir_img, move_x, axis=1)
    face.gir_img = np.roll(face.gir_img, move_y, axis=0)
    face.depth_img = np.roll(face.depth_img, move_x, axis=1)
    face.depth_img = np.roll(face.depth_img, move_y, axis=0)
    if move_x >= 0:
        # BUG FIX: the wrapped-around band after a positive roll is the
        # first |move_x| columns; the original `[:, move_x]` zeroed only
        # the single column at index move_x.
        face.gir_img[:, :move_x] = 0
        face.depth_img[:, :move_x] = 0
    else:
        face.gir_img[:, move_x:] = 0
        face.depth_img[:, move_x:] = 0
    if move_y >= 0:
        face.gir_img[:move_y, :] = 0
        face.depth_img[:move_y, :] = 0
    else:
        face.gir_img[move_y:, :] = 0
        face.depth_img[move_y:, :] = 0
Exemple #11
0
def generate_mask(face: Face, points: "list[tuple[float, float, float]]") -> None:
    """Build a boolean face mask by flood-filling from the image center.

    The (x, y) cells of ``points`` are marked as the boundary up front;
    everything they enclose around the image center is then filled.
    Stores the result in ``face.mask``.
    """
    # np.bool was removed in NumPy >= 1.24; plain bool is equivalent here.
    mask = np.zeros((IMG_SIZE, IMG_SIZE), dtype=bool)
    for (xs, ys, _) in points:
        mask[int(xs), int(ys)] = True
    starting_point = (IMG_SIZE // 2, IMG_SIZE // 2
                      )  # TODO: maybe some specific landmark (like nose)

    # Iterative DFS with an explicit stack: the recursive version needed
    # sys.setrecursionlimit(100000) and could still overflow the C stack.
    stack = [starting_point]
    while stack:
        px, py = stack.pop()
        if min(px, py) < 0 or max(px, py) >= IMG_SIZE:
            continue
        if mask[px, py]:
            continue
        mask[px, py] = True
        stack.extend(((px - 1, py), (px + 1, py), (px, py - 1), (px, py + 1)))

    face.mask = mask
Exemple #12
0
def preprocessing(face: Face,
                  trim_method: str = 'convex_hull') -> None:
    """Run the full preprocessing pipeline on the face, exactly once.

    Trims the face, cleans up and rescales depth values, and computes the
    reference face points.  Repeated calls are no-ops thanks to the
    ``preprocessed`` flag.
    """
    if face.preprocessed:
        return
    face.preprocessed = True

    # Cut the face region out of the full image.  To inspect intermediate
    # results, call face.show_grey_or_ir() / face.show_depth() between steps.
    trim_gird(face, method=trim_method)

    # Drop outlier depth values and rescale everything into 0..1.
    drop_corner_values(face)

    # Locate the face center and the points defining the face surface.
    construct_face_points(face)
Exemple #13
0
def show_all_faces():
    """Render the page listing every face stored in the collection."""
    all_faces = Face.find_all(face_collection)
    return render_template('show_all_faces.html', faces=all_faces)
Exemple #14
0
def rotate_gird_img(face: Face, rotation_matrix: np.ndarray):
    """Rotate the face's (grey-or-IR, depth) point cloud and re-render it.

    :param face: source face; its images are read, its face points are used
    :param rotation_matrix: 3x3 rotation applied to every (x, y, z) point
    :return: a new Face built from the rotated images
    """
    # NOTE(review): this adds "center" to face.face_points in place, but
    # normalize_face_points returns a (possibly new) dict and the `del`
    # below runs on that one — the original face.face_points may keep the
    # extra "center" key after this call.  Confirm whether that is intended.
    face_points = face.face_points
    face_points["center"] = face.face_center

    # First, we prepare the matrix X of points (x, y, z, grey or ir)
    points = _to_one_matrix(face)

    # Normalize x an y dimensions of |points|
    rescale_one_dim(points[:, :, 0])
    rescale_one_dim(points[:, :, 1])

    # Rotate every (x, y, z) triple by the given matrix.
    for i in range(IMG_SIZE):
        for j in range(IMG_SIZE):
            points[i, j, :3] = np.dot(rotation_matrix,
                                      points[i, j, :3].reshape(3,
                                                               1)).reshape(3)

    # Normalize once more after rotation
    face_points = normalize_face_points(points, face_points, rotation_matrix)
    _rescale(points)

    # Apply rotated image to (grey or ir) and depth photo, keeping the
    # nearest (largest-z) point when several map to the same pixel.
    gir_rotated = np.zeros((IMG_SIZE, IMG_SIZE))
    depth_rotated = np.zeros((IMG_SIZE, IMG_SIZE))
    for i in range(IMG_SIZE):
        for j in range(IMG_SIZE):
            if np.isnan(points[i, j, 0]) or np.isnan(points[i, j, 1]):
                logging.warning(
                    "Unexpected NaN in rotated image -- skipping invalid pixel"
                )
                continue
            x = int(points[i, j, 0] * (IMG_SIZE - 1))
            y = int(points[i, j, 1] * (IMG_SIZE - 1))

            # BUG FIX: bounds must be checked BEFORE indexing the mask —
            # x or y >= IMG_SIZE raised IndexError, and negative indices
            # silently wrapped around to the wrong mask pixel.
            if x < 0 or y < 0 or x >= IMG_SIZE or y >= IMG_SIZE:
                continue
            if not face.mask[x, y]:
                continue
            g = points[i, j, 3]
            z = points[i, j, 2]
            if depth_rotated[x, y] < z:
                gir_rotated[x, y] = g
                depth_rotated[x, y] = z

    # Fill holes left by the sparse forward mapping.
    for i in range(SMOOTHEN_ITER):
        gir_rotated = _smoothen(gir_rotated)
        depth_rotated = _smoothen(depth_rotated)

    # If you want to view the rotated image, use the following:
    # tools.show_image(gir_rotated)
    # tools.show_image(depth_rotated)
    # Or:
    # tools.show_3d_plot(points)

    rotated_face = Face(gir_rotated, depth_rotated)
    rotated_face.face_center = face_points["center"]
    del face_points["center"]
    rotated_face.face_points = face_points
    return rotated_face