def construct_face_points(face: Face) -> None:
    # Compute 3D anchor points (brows, chin, face center) from the 2D landmarks
    # and the depth image, and store them on |face|.
    shape = face.gir_img.shape
    landmarks = face.landmarks

    # Average a list of 2D points (component-wise, rounded to int).
    avgl = lambda x: tuple([int(sum(y) / len(y)) for y in zip(*x)])
    # Depth at (x0, x1), with indices clamped to the image bounds.
    get_pixl = lambda x0, x1: face.depth_img[
        max(0, min(shape[0] - 1, x0)), max(0, min(shape[1] - 1, x1))
    ]
    # Lift a 2D point to (x, y, depth), with x and y normalized to [0, 1].
    to3d = lambda x: (x[0] / shape[0], x[1] / shape[1], get_pixl(x[0], x[1]))

    def avgl3d(l: list) -> np.ndarray:  # l: list of (x, y) pixel coordinates
        # Map 2D points to 3D by applying depth
        points3d = np.array([(x / shape[0], y / shape[1], get_pixl(x, y)) for (x, y) in l])
        # Filter out points whose depth is more than one standard deviation from the mean
        me = np.mean(points3d[:, 2])
        stdev = np.std(points3d[:, 2])
        depths_good = np.abs(points3d[:, 2] - me) <= stdev
        points3d = points3d[depths_good]
        return points3d.mean(axis=0)

    chin_bottom = landmarks["chin"][6:11]
    # chin_left = landmarks["chin"][:6]
    # chin_right = landmarks["chin"][11:]
    right_brow = avgl(landmarks["right_eyebrow"])
    left_brow = avgl(landmarks["left_eyebrow"])
    forehead = avgl([right_brow, left_brow])
    # top_chin = avgl(landmarks["bottom_lip"] + chin_bottom + chin_bottom + chin_bottom)
    # left_cheek = avgl(chin_left + landmarks["left_eyebrow"] + landmarks["nose_tip"])
    # right_cheek = avgl(chin_right + landmarks["right_eyebrow"] + landmarks["nose_tip"])
    # face_center = avgl([right_brow] + [left_brow] + [top_chin])
    right_brow3d = avgl3d(landmarks["right_eyebrow"])
    left_brow3d = avgl3d(landmarks["left_eyebrow"])
    # Weight the bottom of the chin three times as heavily as the bottom lip.
    top_chin3d = avgl3d(landmarks["bottom_lip"] + chin_bottom + chin_bottom + chin_bottom)

    face.face_center = to3d(forehead)
    face.face_points = {
        "right_brow": right_brow3d,
        "left_brow": left_brow3d,
        "top_chin": top_chin3d,
    }
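

# The rotation step below builds its rotation matrix elsewhere in the project (see the
# commented-out _rx/_ry/_rz line in rotate_gird_img). The helper below is a hypothetical
# stand-in added purely for illustration: a minimal sketch of standard right-handed Euler
# rotations about the x, y and z axes, composed in the same order as that comment. It is
# not the project's own implementation.
def _example_rotation_matrix(theta_x: float, theta_y: float, theta_z: float) -> np.ndarray:
    cx, sx = np.cos(theta_x), np.sin(theta_x)
    cy, sy = np.cos(theta_y), np.sin(theta_y)
    cz, sz = np.cos(theta_z), np.sin(theta_z)
    rx = np.array([[1.0, 0.0, 0.0], [0.0, cx, -sx], [0.0, sx, cx]])
    ry = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]])
    rz = np.array([[cz, -sz, 0.0], [sz, cz, 0.0], [0.0, 0.0, 1.0]])
    # Same composition order as np.matmul(_rx(...), np.matmul(_ry(...), _rz(...))).
    return rx @ ry @ rz
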
def rotate_gird_img(face: Face, rotation_matrix: np.ndarray):
    face_points = face.face_points
    # Carry the face center through normalization as a temporary "center" key.
    face_points["center"] = face.face_center

    # First, we prepare the matrix X of points (x, y, z, grey or ir)
    points = _to_one_matrix(face)

    # Normalize x and y dimensions of |points|
    rescale_one_dim(points[:, :, 0])
    rescale_one_dim(points[:, :, 1])

    # Rotate around each axis
    # rotation_matrix = np.matmul(_rx(theta_x), np.matmul(_ry(theta_y), _rz(theta_z)))
    for i in range(IMG_SIZE):
        for j in range(IMG_SIZE):
            points[i, j, :3] = np.dot(rotation_matrix, points[i, j, :3].reshape(3, 1)).reshape(3)

    # Normalize once more after rotation
    face_points = normalize_face_points(points, face_points, rotation_matrix)
    _rescale(points)

    # Project the rotated points back onto the (grey or ir) and depth images,
    # keeping the point with the largest depth value when several land on the same pixel.
    gir_rotated = np.zeros((IMG_SIZE, IMG_SIZE))
    depth_rotated = np.zeros((IMG_SIZE, IMG_SIZE))
    for i in range(IMG_SIZE):
        for j in range(IMG_SIZE):
            if np.isnan(points[i, j, 0]) or np.isnan(points[i, j, 1]):
                logging.warning(
                    "Unexpected NaN in rotated image -- skipping invalid pixel"
                )
                continue
            x = int(points[i, j, 0] * (IMG_SIZE - 1))
            y = int(points[i, j, 1] * (IMG_SIZE - 1))
            # Check bounds before touching the mask to avoid indexing out of range.
            if x < 0 or y < 0 or x >= IMG_SIZE or y >= IMG_SIZE:
                continue
            if not face.mask[x, y]:
                continue
            g = points[i, j, 3]
            z = points[i, j, 2]
            if depth_rotated[x, y] < z:
                gir_rotated[x, y] = g
                depth_rotated[x, y] = z

    # Smooth the projected grey/ir and depth images.
    for i in range(SMOOTHEN_ITER):
        gir_rotated = _smoothen(gir_rotated)
        depth_rotated = _smoothen(depth_rotated)

    # If you want to view the rotated image, use the following:
    # tools.show_image(gir_rotated)
    # tools.show_image(depth_rotated)
    # Or:
    # tools.show_3d_plot(points)

    rotated_face = Face(gir_rotated, depth_rotated)
    rotated_face.face_center = face_points["center"]
    del face_points["center"]
    rotated_face.face_points = face_points
    return rotated_face
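

# Illustrative usage (a sketch, not part of the original pipeline). It assumes |face| is a
# Face whose gir_img, depth_img, landmarks and mask have already been populated by the
# loading code elsewhere in this project; the wrapper name and the default yaw angle are
# made up for demonstration only.
def _demo_rotate(face: Face, yaw: float = 0.15) -> Face:
    # Compute the 3D anchor points, then rotate the grey/ir image by a small yaw (radians).
    construct_face_points(face)
    return rotate_gird_img(face, _example_rotation_matrix(0.0, yaw, 0.0))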