Example #1
def facial_landmarks_torch(alpha, delta, w, t, LM=True):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x, y, z. Given as list [theta_x, theta_y, theta_z]
    :param t: translation in x, y, z space. Given as list [translation_x, translation_y, translation_z]
    :param LM: if True, project only the 68 annotated landmarks; otherwise project every model vertex
    :return: (N, 2) tensor of projected 2D image coordinates
    """
    landmarks_idx = np.loadtxt("Landmarks68_model2017-1_face12_nomouth.anl",
                               dtype=int)

    pca = read_pca_model()

    if LM:
        G = get_face_point_cloud_torch(pca, alpha, delta)[landmarks_idx].t()
    else:
        G = get_face_point_cloud_torch(pca, alpha, delta).t()

    G_h = [G, torch.ones(G.shape[1]).view((1, -1))]
    G_h = torch.cat(G_h, dim=0)

    # get T matrix
    T = torch.eye(4)
    T[:3, :3] = rotation_matrix(w)
    T[:3, 3] = t

    # Get V and P matrices
    W = 172
    H = 162

    image_aspect_ratio = W / H
    angle = 10
    near = .1
    far = 10

    right, left, top, bottom = get_perspective(image_aspect_ratio, angle, near,
                                               far)

    V = get_V(right, left, top, bottom)

    V = torch.from_numpy(V).to(dtype=torch.float32)

    # perspective projection matrix for a symmetric frustum
    # (2n/(r-l) scales x, 2n/(t-b) scales y; the off-centre terms vanish)
    n, f = near, far
    P = torch.Tensor([[(2 * n) / (right - left), 0, 0, 0],
                      [0, (2 * n) / (top - bottom), 0, 0],
                      [0, 0, -(f + n) / (f - n), -(2 * f * n) / (f - n)],
                      [0, 0, -1, 0]])
    i = V @ P @ T @ G_h

    # homogeneous to Cartesian coordinates
    i = i / i[3, :].clone()

    # two-dimensional
    return i[:2, :].t()
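Examples #1, #4 and #5 call a rotation_matrix helper that is not shown in this collection. The following is only a minimal sketch, assuming w holds Euler angles [theta_x, theta_y, theta_z] in radians, that the intended composition is R = Rz @ Ry @ Rx, and that w is a 1-D tensor in the torch branch; the actual helper may use a different convention.

import numpy as np
import torch

def rotation_matrix(w, is_numpy=False):
    # sketch of the (assumed) Euler-angle rotation R = Rz @ Ry @ Rx
    if is_numpy:
        tx, ty, tz = w
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(tx), -np.sin(tx)],
                       [0, np.sin(tx), np.cos(tx)]])
        Ry = np.array([[np.cos(ty), 0, np.sin(ty)],
                       [0, 1, 0],
                       [-np.sin(ty), 0, np.cos(ty)]])
        Rz = np.array([[np.cos(tz), -np.sin(tz), 0],
                       [np.sin(tz), np.cos(tz), 0],
                       [0, 0, 1]])
        return Rz @ Ry @ Rx
    # torch branch: stays differentiable with respect to w
    tx, ty, tz = w[0], w[1], w[2]
    one, zero = torch.ones_like(tx), torch.zeros_like(tx)
    Rx = torch.stack([torch.stack([one, zero, zero]),
                      torch.stack([zero, torch.cos(tx), -torch.sin(tx)]),
                      torch.stack([zero, torch.sin(tx), torch.cos(tx)])])
    Ry = torch.stack([torch.stack([torch.cos(ty), zero, torch.sin(ty)]),
                      torch.stack([zero, one, zero]),
                      torch.stack([-torch.sin(ty), zero, torch.cos(ty)])])
    Rz = torch.stack([torch.stack([torch.cos(tz), -torch.sin(tz), zero]),
                      torch.stack([torch.sin(tz), torch.cos(tz), zero]),
                      torch.stack([zero, zero, one])])
    return Rz @ Ry @ Rx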
Example #2
def facial_landmarks(alpha, delta, w, t):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x, y, z. Given as list [theta_x, theta_y, theta_z]
    :param t: translation in x, y, z space. Given as list [translation_x, translation_y, translation_z]
    :return: (2, 68) array of projected 2D landmark coordinates
    """
    landmarks_idx = np.loadtxt("Landmarks68_model2017-1_face12_nomouth.anl",
                               dtype=int)

    pca = read_pca_model()
    G = get_face_point_cloud(pca, alpha, delta)[landmarks_idx].T

    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    # get T matrix
    T = np.eye(4)
    T[:3, :3] = get_rotation_matrix(w)
    T[:3, 3] = t

    # Get V and P matrices
    W = 255
    H = 255

    image_aspect_ratio = W / H
    angle = 10
    near = 300
    far = 2000

    right, left, top, bottom = get_perspective(image_aspect_ratio, angle, near,
                                               far)

    V = get_V(right, left, top, bottom)

    P = get_P(near, far, right, left, top, bottom)

    i = V @ P @ T @ G_h

    # cartesian
    i /= i[3, :]

    # two-dimensional
    return i[:2, :]
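The get_perspective and get_P helpers used above are not included in these snippets. Below is a plausible sketch only, assuming a symmetric frustum derived from a vertical field of view and the standard OpenGL-style projection matrix; the original helpers may interpret the angle differently.

import numpy as np

def get_perspective(aspect_ratio, fov_degrees, near, far):
    # symmetric view frustum from a vertical field of view
    # (far is unused here; it is kept only to match the call sites above)
    top = near * np.tan(np.radians(fov_degrees) / 2)
    bottom = -top
    right = top * aspect_ratio
    left = -right
    return right, left, top, bottom

def get_P(near, far, right, left, top, bottom):
    # standard perspective projection matrix
    return np.array([
        [2 * near / (right - left), 0, (right + left) / (right - left), 0],
        [0, 2 * near / (top - bottom), (top + bottom) / (top - bottom), 0],
        [0, 0, -(far + near) / (far - near), -2 * far * near / (far - near)],
        [0, 0, -1, 0],
    ])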
Example #3
def main():
    im = Image.open('faces_sparser_sampling.gif')
    frames = np.array([
        np.array(frame.copy().convert('RGB').getdata(),
                 dtype=np.uint8).reshape(frame.size[1], frame.size[0], 3)
        for frame in ImageSequence.Iterator(im)
    ])
    image = frames[0]  # use the first frame of the GIF as the target image

    target_landmarks = torch.from_numpy(
        detect_landmark(image)).to(dtype=torch.float)

    alpha, delta, w, t = np.load("best_params.npy", allow_pickle=True)

    estimated_all = get_final_landmarks(alpha, delta, w, t, target_landmarks)

    print(estimated_all.shape)

    plt.imshow(image)
    plt.show()

    plt.scatter(estimated_all.T[0], estimated_all.T[1])
    plt.imshow(image)
    plt.show()

    landmarks_idx = np.loadtxt("Landmarks68_model2017-1_face12_nomouth.anl",
                               dtype=int)
    estimated_landmarks = estimated_all[landmarks_idx]
    plt.scatter(estimated_landmarks.T[0], estimated_landmarks.T[1])
    plt.imshow(image)
    plt.show()

    tex = find_corresponding_texture(estimated_all, image)

    # get 3D point cloud
    p = read_pca_model()
    G = get_face_point_cloud(p, alpha.detach().numpy(), delta.detach().numpy())
    mesh = trimesh.base.Trimesh(vertices=G, faces=triangles, vertex_colors=tex)
    t = mesh_to_png("mesh.png", mesh)
    return t
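find_corresponding_texture is not defined in these snippets either. A minimal sketch of the usual idea, nearest-neighbour colour lookup at each projected vertex position, follows; the name and signature are taken from Example #3, and the original may interpolate instead.

import numpy as np

def find_corresponding_texture(points_2d, image):
    # points_2d: (N, 2) NumPy array of pixel coordinates (detach torch tensors
    # before calling); image: (H, W, 3) uint8 array
    pts = np.asarray(points_2d, dtype=float)
    h, w = image.shape[:2]
    x = np.clip(np.round(pts[:, 0]).astype(int), 0, w - 1)
    y = np.clip(np.round(pts[:, 1]).astype(int), 0, h - 1)
    return image[y, x]  # (N, 3) per-vertex colours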
Example #4
def rotate_face(angles):
    """
    Task 3.1
    :param angles: list of angles. each angle has three entries [theta_x, theta_y, theta_z]
    :return: None; a render of each rotated face is saved to ./results/rotation/
    """
    # sample face
    pca = read_pca_model()
    G = random_face_point_cloud(pca).T

    # transform to homogeneous coordinates
    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    for w in angles:
        w = np.array(w)
        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(w, is_numpy=True)

        # save resulting rotated face
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=mean_tex, triangles=triangles)
        mesh_to_png("./results/rotation/"+str(w)+".png", mesh)
    return
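A possible call for Example #4, assuming the angles are expected in radians and the ./results/rotation/ output directory already exists:

import numpy as np

# rotate the sampled face by -45, 0 and +45 degrees around the y axis
rotate_face([[0, np.deg2rad(-45), 0],
             [0, 0, 0],
             [0, np.deg2rad(45), 0]])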
Example #5
def facial_landmarks(alpha, delta, w, t):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x, y, z. Given as list [theta_x, theta_y, theta_z]
    :param t: translation in x, y, z space. Given as list [translation_x, translation_y, translation_z]
    :return: (2, 68) array of projected 2D landmark coordinates
    """
    landmarks_idx = np.loadtxt("./models/Landmarks68_model2017-1_face12_nomouth.anl", dtype=int)

    pca = read_pca_model()
    G = get_face_point_cloud(pca, alpha, delta).reshape((-1, 3))[landmarks_idx].T

    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    # get T matrix
    T = np.eye(4)
    T[:3, :3] = rotation_matrix(w, is_numpy=True)
    T[:3, 3] = t

    # Get V and P matrices
    W = H = 255

    # field-of-view angle of 10 degrees (cf. angle = 10 in the other examples)
    P = perspective_projection_matrix(W, H, 300, 2000, is_numpy=True)

    V = viewport_matrix(right=W, left=0, top=H, bottom=0, is_numpy=True)

    i = V @ P @ T @ G_h

    # cartesian
    i /= i[3, :]

    # two-dimensional
    return i[:2, :]
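viewport_matrix (and the get_V helper in Examples #1 and #2) is likewise not shown. Below is a sketch under the usual NDC-to-window convention, with the name and signature taken from Example #5; the depth mapping in particular is an assumption.

import numpy as np
import torch

def viewport_matrix(right, left, top, bottom, is_numpy=True):
    # map normalized device coordinates in [-1, 1] to window coordinates
    # spanning [left, right] x [bottom, top]; depth is mapped to [0, 1]
    cx, cy = (right + left) / 2.0, (top + bottom) / 2.0
    sx, sy = (right - left) / 2.0, (top - bottom) / 2.0
    V = np.array([[sx, 0.0, 0.0, cx],
                  [0.0, sy, 0.0, cy],
                  [0.0, 0.0, 0.5, 0.5],
                  [0.0, 0.0, 0.0, 1.0]])
    return V if is_numpy else torch.from_numpy(V)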
Example #6
def read_pca_model_torch():
    # load the PCA model and convert each numpy array in it to a torch tensor
    p = read_pca_model()
    for i in p:
        p[i] = torch.from_numpy(p[i])
    return p
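A short usage sketch for Example #6, assuming (as in Example #1) that the downstream PyTorch code works in float32:

pca = read_pca_model_torch()
# the underlying arrays are often float64; cast if the rest of the pipeline
# expects float32 tensors
pca = {k: v.to(dtype=torch.float32) for k, v in pca.items()}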