frontal_pic_name = '00029ba010_960521' profile_pic_name = '00029pr010_940128' frontal_img = cv2.imread(os.path.join(r'..\data', frontal_pic_name + '.tif')) profile_img = cv2.imread(os.path.join(r'..\data', profile_pic_name + '.tif')) width = np.shape(frontal_img)[1] height = np.shape(frontal_img)[0] # marker.frontal_face_marker(os.path.join(r'..\data', frontal_pic_name + '.tif')) # s = 2 s = 2000 / height if height >= width else 2000 / width scale_param = 900 / height if height >= width else 900 / width # scale_param = 1 morphable_model = MorphableModel.load_model( r"..\py_share\py_sfm_shape_3448.bin") blendshapes = Blendshape.load_blendshapes( r"..\py_share\py_expression_blendshapes_3448.bin") landmark_mapper = LandmarkMapper.LandmarkMapper(r'..\py_share\ibug_to_sfm.txt') edge_topology = EdgeTopology.load_edge_topology( r'..\py_share\py_sfm_3448_edge_topology.json') contour_landmarks = contour_correspondence.ContourLandmarks() contour_landmarks.load(r'..\py_share\ibug_to_sfm.txt') model_contour = contour_correspondence.ModelContour() model_contour.load(r'..\py_share\sfm_model_contours.json') profile_landmark_mapper = LandmarkMapper.ProfileLandmarkMapper( r'..\py_share\profile_to_sfm.txt') frontal_landmarks = [] landmark_ids = list(map(str, range( 1, 69))) # generates the numbers 1 to 68, as strings landmarks = utils.read_pts(os.path.join(r'..\data', frontal_pic_name + '.pts')) for i in range(68):
def fit_blendshapes_to_landmarks_nnls(blendshapes, face_instance, affine_camera_matrix, landmarks, vertex_ids):
    """
    Fit blendshape coefficients to 2D landmarks for a given face shape instance.

    Solves a non-negative least-squares (NNLS) problem, so every returned
    coefficient is >= 0. The NNLS solver used here does not support any
    regularisation. The set-up mirrors fit_shape_to_landmarks_linear: the
    blendshapes play the role of the PCA basis, and the current face instance
    plays the role of the mean.

    Args:
        blendshapes: A vector with blendshapes to estimate the coefficients for.
        face_instance: A shape instance from which the blendshape coefficients
            should be estimated (i.e. the current mesh without expressions,
            e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
        affine_camera_matrix: A 3x4 affine camera matrix from model to screen-space.
        landmarks: 2D landmarks from an image to fit the blendshapes to.
        vertex_ids: The vertex ids in the model that correspond to the 2D points.

    Returns:
        The estimated blendshape-coefficients.
    """
    assert len(landmarks) == len(vertex_ids)

    num_coeffs = len(blendshapes)
    num_points = len(landmarks)

    # Stack all blendshapes as the columns of a single "basis" matrix.
    basis = Blendshape.to_matrix(blendshapes)

    # $\hat{V} \in R^{3N\times m-1}$: sub-select the 3 basis rows belonging to
    # each landmark vertex, and insert a zero row after every third row so we
    # end up with the homogeneous $\hat{V}_h \in R^{4N\times m-1}$.
    v_hat_h = np.zeros([4 * num_points, num_coeffs])
    for idx in range(num_points):
        base_row = vertex_ids[idx] * 3
        v_hat_h[idx * 4: idx * 4 + 3, :] = basis[base_row: base_row + 3, :]

    # Sparse block-diagonal matrix $P \in R^{3N\times 4N}$ with the affine
    # camera matrix repeated along the diagonal.
    rows = []
    cols = []
    vals = []
    cam_rows, cam_cols = np.shape(affine_camera_matrix)
    for idx in range(num_points):
        for r in range(cam_rows):
            for c in range(cam_cols):
                rows.append(3 * idx + r)
                cols.append(4 * idx + c)
                vals.append(affine_camera_matrix[r, c])
    p = sparse.coo_matrix((vals, (rows, cols)),
                          shape=(3 * num_points, 4 * num_points))

    # The landmarks in homogeneous coordinates, stacked into a $3N\times 1$
    # vector (the third coordinate of each triple stays 1).
    y = np.ones([3 * num_points])
    for idx in range(num_points):
        y[3 * idx: 3 * idx + 2] = landmarks[idx][:]

    # The current face instance with an added homogeneous coordinate:
    # (x_1, y_1, z_1, 1, x_2, ...)^t
    v_bar = np.ones([4 * num_points])
    for idx in range(num_points):
        base_row = vertex_ids[idx] * 3
        v_bar[4 * idx: 4 * idx + 3] = face_instance[base_row: base_row + 3]

    # Standard quadratic form: minimise ||a x + b||^2 subject to x >= 0.
    a = p.dot(v_hat_h)  # camera matrix times the basis
    b = p.dot(v_bar) - y  # camera matrix times the mean, minus the landmarks

    return optimize.nnls(a, -b)[0]
color_orthonormal_pca_basis = color_model.get_orthonormal_pca_basis() color_eigenvalues = color_model.get_eigenvalues() color_triangle_list = color_model.get_triangle_list() py_color_model = PcaModel.PcaModel(color_mean, color_orthonormal_pca_basis, color_eigenvalues, color_triangle_list) py_texture_coordinates = model.get_texture_coordinates() py_model = MorphableModel.MorphableModel(py_shape_model, py_color_model, py_texture_coordinates) MorphableModel.save_model(py_model, 'py_share/py_sfm_shape_3448.bin') py_blendshapes = [] for blendshape in blendshapes: py_blendshape = Blendshape.Blendshape(blendshape.name, blendshape.deformation) py_blendshapes.append(py_blendshape) o = open('py_share/py_expression_blendshapes_3448.bin', 'wb', -1) pickle.dump(py_blendshapes, o) o.close() # 转换share/sfm_3448_edge_topology.json文件 file = open('share/sfm_3448_edge_topology.json', 'r') outer_dict = json.load(file) inner_dict = outer_dict['edge_topology'] aj = [] av = [] for dic in inner_dict['adjacent_faces']: aj.append([dic['value0'], dic['value1']])
def fit_blendshapes_to_landmarks_linear(blendshapes, face_instance, affine_camera_matrix, landmarks,
                                        vertex_ids, lambda_p=500.0):
    """
    Fits blendshape coefficients to given 2D landmarks, given a current face shape instance.

    It's a linear, closed-form solution fitting algorithm, with regularisation
    (constraining the L2-norm of the coefficients). However, there is no
    constraint on the sign of the coefficients, so negative coefficients are
    allowed, which, with linear blendshapes (offsets), will most likely not be
    desired. Thus, prefer the NNLS variant.

    This algorithm is very similar to the shape fitting in
    fit_shape_to_landmarks_linear. Instead of the PCA basis, the blendshapes
    are used, and instead of the mean, a current face instance is used to do
    the fitting from.

    Args:
        blendshapes: A vector with blendshapes to estimate the coefficients for.
        face_instance: A shape instance from which the blendshape coefficients
            should be estimated (i.e. the current mesh without expressions,
            e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
        affine_camera_matrix: A 3x4 affine camera matrix from model to screen-space.
        landmarks: 2D landmarks from an image to fit the blendshapes to.
        vertex_ids: The vertex ids in the model that correspond to the 2D points.
        lambda_p: A regularisation parameter, constraining the L2-norm of the
            coefficients.

    Returns:
        The estimated blendshape-coefficients.
    """
    assert len(landmarks) == len(vertex_ids)

    num_blendshapes = len(blendshapes)
    num_landmarks = len(landmarks)

    # Copy all blendshapes into a "basis" matrix with each blendshape being a column:
    blendshapes_as_basis = Blendshape.to_matrix(blendshapes)

    # $\hat{V} \in R^{3N\times m-1}$: sub-select the rows of the basis matrix
    # associated with the $N$ feature points, and insert a row of zeros after
    # every third row, resulting in matrix $\hat{V}_h \in R^{4N\times m-1}$:
    v_hat_h = np.zeros([4 * num_landmarks, num_blendshapes])
    for i in range(num_landmarks):
        v_hat_h[i * 4: i * 4 + 3, :] = \
            blendshapes_as_basis[vertex_ids[i] * 3: vertex_ids[i] * 3 + 3, :]

    # Form a block diagonal matrix $P \in R^{3N\times 4N}$ in which the camera
    # matrix C (affine_camera_matrix) is placed on the diagonal:
    p = np.zeros([3 * num_landmarks, 4 * num_landmarks])
    for i in range(num_landmarks):
        p[3 * i: 3 * i + 3, 4 * i: 4 * i + 4] = affine_camera_matrix

    # The landmarks in matrix notation (in homogeneous coordinates), $3N\times 1$.
    # The third entry of every triple stays 1 (homogeneous coordinate).
    y = np.ones([3 * num_landmarks])
    for i in range(num_landmarks):
        y[3 * i] = landmarks[i][0]
        y[3 * i + 1] = landmarks[i][1]

    # The current face instance, with an added homogeneous coordinate:
    # (x_1, y_1, z_1, 1, x_2, ...)^t
    v_bar = np.ones([4 * num_landmarks])
    for i in range(num_landmarks):
        v_bar[4 * i] = face_instance[vertex_ids[i] * 3]
        v_bar[4 * i + 1] = face_instance[vertex_ids[i] * 3 + 1]
        v_bar[4 * i + 2] = face_instance[vertex_ids[i] * 3 + 2]

    # Bring into standard regularised quadratic form:
    a = p.dot(v_hat_h)  # camera matrix times the basis
    b = p.dot(v_bar) - y  # camera matrix times the mean, minus the landmarks
    at_a_reg = a.T.dot(a) + lambda_p * np.eye(num_blendshapes)
    rhs = -a.T.dot(b)

    # Pass rcond=None explicitly: calling lstsq without it triggers a
    # FutureWarning (NumPy >= 1.14 changed the default cut-off) while keeping
    # the same solver behaviour as the modern default.
    coefficients = np.linalg.lstsq(at_a_reg, rhs, rcond=None)[0]
    return coefficients