Example 1
    def forward(self, only_lm=True):
        """
        Forward pass.
        Aka: compute 2D landmarks with current Variables for input point cloud.

        :param input:  68, 3
        :param target: 68, 2
        :return:
        """
        # calculate current face point cloud
        G = get_face_point_cloud(self.p, self.alpha, self.delta).view(
            (-1, 3))  # 28588, 3

        # get current landmarks
        if only_lm:
            G = G[self.lm_indices]
        G_lm_h = torch.cat((G, torch.ones(G.shape[0], 1)),
                           dim=1)  # homogeneous coordinates (w = 1)

        lm = self.matrix_transformation() @ G_lm_h.t()

        lm /= lm.clone()[3, :]  # to Cartesian: divide by the homogeneous coordinate
        lm = lm.clone()[:2, :]  # keep only x, y

        return lm.t()
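The `matrix_transformation()` call above presumably composes the viewport, perspective, and object (rotation + translation) matrices, as in Examples 5 and 7 (`V @ P @ T`). A minimal NumPy sketch of the camera part of that composition, assuming a symmetric OpenGL-style frustum and a pixel viewport (the exact conventions in the repo may differ):

import numpy as np

def camera_matrix_sketch(width, height, near, far, fovy_deg=10.0):
    """Hypothetical sketch of the V @ P part of matrix_transformation().
    Assumes a symmetric frustum and OpenGL-style matrices; the object
    transform T (rotation w, translation t) would be right-multiplied."""
    aspect = width / height
    top = np.tan(np.radians(fovy_deg) / 2) * near
    right = top * aspect
    left, bottom = -right, -top

    # perspective projection matrix
    P = np.array([
        [2 * near / (right - left), 0, (right + left) / (right - left), 0],
        [0, 2 * near / (top - bottom), (top + bottom) / (top - bottom), 0],
        [0, 0, -(far + near) / (far - near), -2 * far * near / (far - near)],
        [0, 0, -1, 0],
    ])

    # viewport matrix: normalised device coordinates -> pixel coordinates
    V = np.array([
        [width / 2, 0, 0, width / 2],
        [0, height / 2, 0, height / 2],
        [0, 0, 0.5, 0.5],
        [0, 0, 0, 1],
    ])
    return V @ P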
Example 2
def test_optimization_multiple_images():
    image = Image.open('./images/faces_sparser_sampling.gif')

    lambda_alpha = 50
    lambda_delta = 0.3
    lr = .128
    num_epochs = 300
    num_frames = 100

    trained_model, frames = optimization(
        num_epochs=num_epochs,
        path='./images/faces_sparser_sampling.gif',
        shape=(image.width, image.height),
        num_frames=num_frames,
        lambda_alpha=lambda_alpha,
        lambda_delta=lambda_delta,
        lr=lr)

    pca = trained_model.p

    for frame_idx, frame in enumerate(frames):
        # obtain texture from mask on image
        points_2d = trained_model.forward(
            frame_idx, only_lm=False).detach().numpy()  # 28588, 2
        tex = np.array(texture(np.array(frame), points_2d))

        # look at 3D visualization of the estimated face shape
        points_3d = get_face_point_cloud(pca, trained_model.alpha,
                                         trained_model.delta[frame_idx]).view(
                                             (-1, 3))
        """    
        mesh = trimesh.base.Trimesh(vertices=points_3d.detach().numpy(), faces=triangles, vertex_colors=tex)
        mesh.show()
        """

        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(np.array([0, 0, 0]), is_numpy=True)
        # save resulting rotated face
        G = points_3d.detach().numpy().T
        G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=tex, triangles=triangles)
        mesh_to_png(f"./results/optimization/multiple/shapes/{frame_idx}.png",
                    mesh,
                    z_camera_translation=280)

        # show estimated landmarks
        landmarks = trained_model.forward(frame_idx)
        landmarks = landmarks.detach().numpy().T
        plt.figure(figsize=(600 / 100, 600 / 100), dpi=100)
        plt.scatter(landmarks[0], landmarks[1])
        plt.imshow(np.array(frame))
        plt.axis('off')
        plt.savefig(
            f"./results/optimization/multiple/landmarks/{frame_idx}.png",
            bbox_inches='tight',
            pad_inches=0,
            dpi=100)
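The `texture(...)` helper used above is not shown in these examples. A plausible minimal stand-in, assuming it simply looks up one image colour per projected vertex (nearest pixel, no interpolation):

import numpy as np

def sample_vertex_colors(image, points_2d):
    """Hypothetical stand-in for the texture() helper: nearest-pixel colour
    lookup per projected vertex. image is an (H, W, 3) array, points_2d an
    (N, 2) array of x, y image coordinates."""
    h, w = image.shape[:2]
    cols = np.clip(np.rint(points_2d[:, 0]).astype(int), 0, w - 1)
    rows = np.clip(np.rint(points_2d[:, 1]).astype(int), 0, h - 1)
    return image[rows, cols]  # (N, 3) per-vertex colours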
Example 3
def test_optimization_single_frame(path='./images/first_frame.png'):
    image = Image.open(path)

    lambda_alpha = 30
    lambda_delta = 0.3
    lr = .128
    num_epochs = 400
    num_frames = 1

    trained_model, frames = optimization(num_epochs=num_epochs,
                                         path=path,
                                         shape=(image.width, image.height),
                                         num_frames=num_frames,
                                         lambda_alpha=lambda_alpha,
                                         lambda_delta=lambda_delta,
                                         lr=lr)

    pca = trained_model.p

    # look at 3D visualization of the estimated face shape
    points_3d = get_face_point_cloud(pca, trained_model.alpha,
                                     trained_model.delta[0]).view(
                                         (-1, 3))  # 28588, 3

    # obtain texture from mask on image

    points_2d = trained_model.forward(
        0, only_lm=False).detach().numpy()  # 28588, 2
    tex = np.array(texture(np.array(image), points_2d))

    #mesh = trimesh.base.Trimesh(vertices=points_3d.detach().numpy(), faces=triangles, vertex_colors=tex)
    #mesh.show()

    # show estimated landmarks
    landmarks = trained_model.forward(0)
    landmarks = landmarks.detach().numpy().T
    plt.scatter(landmarks[0], landmarks[1])
    plt.imshow(np.array(frames[0]))
    plt.axis('off')
    plt.savefig(f"./results/optimization/landmarks.png",
                bbox_inches='tight',
                pad_inches=0,
                dpi=100)

    # show from different angles
    G = points_3d.detach().numpy().T
    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    for w in [[0, 0, 0], [0, -30, 0], [0, -45, 0], [0, -90, 0]]:
        w = np.array(w)
        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(w, is_numpy=True)
        # save resulting rotated face
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=tex, triangles=triangles)
        mesh_to_png(f"./results/optimization/single/tex_{w}.png",
                    mesh,
                    z_camera_translation=280)
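`rotation_matrix(w, is_numpy=True)` is assumed to build a 3x3 rotation from the Euler angles in `w`; a sketch under the assumption of degrees and an X-then-Y-then-Z composition (the repo's angle unit and axis order may differ):

import numpy as np

def euler_rotation_sketch(w_degrees):
    """Assumed behaviour of rotation_matrix(): R = Rz @ Ry @ Rx from
    [theta_x, theta_y, theta_z] given in degrees."""
    tx, ty, tz = np.radians(w_degrees)
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(tx), -np.sin(tx)],
                   [0, np.sin(tx), np.cos(tx)]])
    Ry = np.array([[np.cos(ty), 0, np.sin(ty)],
                   [0, 1, 0],
                   [-np.sin(ty), 0, np.cos(ty)]])
    Rz = np.array([[np.cos(tz), -np.sin(tz), 0],
                   [np.sin(tz), np.cos(tz), 0],
                   [0, 0, 1]])
    return Rz @ Ry @ Rx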
Example 4
def texturing():
    # Task 5
    image = Image.open('./images/first_frame.png')

    trained_model = optimization_one_image(300,
                                           image,
                                           lambda_alpha=45,
                                           lambda_delta=15,
                                           lr=.128)

    pca = trained_model.p

    # show estimated landmarks
    landmarks = trained_model.forward()
    landmarks = landmarks.detach().numpy().T
    plt.scatter(landmarks[0], landmarks[1])
    plt.imshow(np.array(image))
    plt.axis('off')
    plt.show()

    # show estimated total mask
    points_3d = get_face_point_cloud(pca, trained_model.alpha,
                                     trained_model.delta).view(
                                         (-1, 3))  # 28588, 3
    points_2d = trained_model.forward(
        only_lm=False).detach().numpy()  # 28588, 2

    plt.scatter(points_2d.T[0], points_2d.T[1])
    plt.imshow(np.array(image))
    plt.axis('off')
    plt.show()
    """
    # obtain texture from mask on image
    tex = np.array(texture(np.array(image), points_2d))

    # look at 3D visualization
    # mesh = trimesh.base.Trimesh(vertices=points_3d.detach().numpy(),faces=triangles,vertex_colors=new_texture)
    # mesh.show()

    # show from different angles
    G = points_3d.detach().numpy().T
    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    for w in [[0,10,0], [0,0,0], [0,-10,0]]:
        w = np.array(w)
        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(w, is_numpy=True)

        # save resulting rotated face
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=tex, triangles=triangles)
        mesh_to_png(f"./results/texturing/tex_{w}.png", mesh)

    #mesh = Mesh(vertices=, colors=tex, triangles=triangles)
    #mesh_to_png("./results/texturing.png", mesh)
    """
    return
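Throughout these examples, `lambda_alpha` and `lambda_delta` weight regularisers on the latent codes. A sketch of the energy that `optimization_one_image` / `optimization` presumably minimise, i.e. landmark reprojection error plus Tikhonov priors (the repo's exact weighting and normalisation may differ):

import torch

def fitting_loss_sketch(pred_lm, target_lm, alpha, delta, lambda_alpha, lambda_delta):
    """Assumed objective: mean squared landmark error plus L2 priors that keep
    the latent codes alpha and delta close to zero."""
    data_term = torch.mean(torch.sum((pred_lm - target_lm) ** 2, dim=1))
    reg_alpha = lambda_alpha * torch.sum(alpha ** 2)
    reg_delta = lambda_delta * torch.sum(delta ** 2)
    return data_term + reg_alpha + reg_delta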
Example 5
def facial_landmarks(alpha, delta, w, t):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x,y, z. Given as list [theta_x, theta_y, theta_z].
    :param t: translation in x,y,z space. Given as list [translation_x, translation_y, translation_z]
    :return:
    """
    landmarks_idx = np.loadtxt("Landmarks68_model2017-1_face12_nomouth.anl",
                               dtype=int)

    pca = read_pca_model()
    G = get_face_point_cloud(pca, alpha, delta)[landmarks_idx].T

    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    # get T matrix
    T = np.eye(4)
    T[:3, :3] = get_rotation_matrix(w)
    T[:3, 3] = t

    # Get V and P matrices
    W = 255
    H = 255

    image_aspect_ratio = W / H
    angle = 10
    near = 300
    far = 2000

    right, left, top, bottom = get_perspective(image_aspect_ratio, angle, near,
                                               far)

    V = get_V(right, left, top, bottom)

    P = get_P(near, far, right, left, top, bottom)

    i = V @ P @ T @ G_h

    # cartesian
    i /= i[3, :]

    # two-dimensional
    return i[:2, :]
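`get_face_point_cloud(pca, alpha, delta)` is assumed to evaluate the usual linear morphable-model combination of a mean shape and two scaled PCA bases; a sketch with assumed shapes (the repo's PCA container and scaling may differ):

import numpy as np

def face_point_cloud_sketch(mean, E_id, sigma_id, E_exp, sigma_exp, alpha, delta):
    """Assumed model: G = mean + E_id @ (alpha * sigma_id) + E_exp @ (delta * sigma_exp).
    Shapes assumed: mean (3N,), E_id (3N, 30), sigma_id (30,),
    E_exp (3N, 20), sigma_exp (20,)."""
    return mean + E_id @ (alpha * sigma_id) + E_exp @ (delta * sigma_exp)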
Example 6
def main():
    ## def texturing():
    im = Image.open('faces_sparser_sampling.gif')
    frames = np.array([
        np.array(frame.copy().convert('RGB').getdata(),
                 dtype=np.uint8).reshape(frame.size[1], frame.size[0], 3)
        for frame in ImageSequence.Iterator(im)
    ])
    image = frames[0]  # use the first frame as the input image

    target_landmarks = torch.from_numpy(
        detect_landmark(image)).to(dtype=torch.float)

    alpha, delta, w, t = [
        i for i in np.load("best_params.npy", allow_pickle=True)
    ]

    estimated_all = get_final_landmarks(alpha, delta, w, t, target_landmarks)

    #print(estimated_landmarks.shape)
    print(estimated_all.shape)

    plt.imshow(image)
    plt.show()

    plt.scatter(estimated_all.T[0], estimated_all.T[1])
    plt.imshow(image)
    plt.show()

    # estimated_landmarks is not defined in this snippet (see the commented-out
    # print above), so this plot is disabled to avoid a NameError:
    # plt.scatter(estimated_landmarks.T[0], estimated_landmarks.T[1])
    # plt.imshow(image)
    # plt.show()

    tex = find_corresponding_texture(estimated_all, image)

    # get 3D point cloud
    p = read_pca_model()
    G = get_face_point_cloud(p, alpha.detach().numpy(),
                             delta.detach().numpy()).reshape((-1, 3))  # (N, 3) vertices
    mesh = trimesh.base.Trimesh(vertices=G, faces=triangles, vertex_colors=tex)
    mesh_to_png("mesh.png", mesh)
    t = mesh_to_png("mesh.png", mesh)
    return t
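`detect_landmark(image)` is assumed to wrap a standard 68-point face landmark detector; a minimal sketch using dlib and its publicly available `shape_predictor_68_face_landmarks.dat` model (the repo may use a different detector):

import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def detect_landmark_sketch(image):
    """Return a (68, 2) array of x, y landmark coordinates for the first face
    found in an RGB uint8 image. Assumes at least one detectable face."""
    rect = detector(image, 1)[0]
    shape = predictor(image, rect)
    return np.array([[p.x, p.y] for p in shape.parts()])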
Example 7
def facial_landmarks(alpha, delta, w, t):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x,y, z. Given as list [theta_x, theta_y, theta_z].
    :param t: translation in x,y,z space. Given as list [translation_x, translation_y, translation_z]
    :return:
    """
    landmarks_idx = np.loadtxt(
        "./models/Landmarks68_model2017-1_face12_nomouth.anl", dtype=int)

    pca = read_pca_model()
    G = get_face_point_cloud(pca, alpha, delta).reshape((-1, 3))[landmarks_idx].T

    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    # get T matrix
    T = np.eye(4)
    T[:3, :3] = rotation_matrix(w, is_numpy=True)
    T[:3, 3] = t

    # Get V and P matrices
    W = H = 255

    # angle 10
    P = perspective_projection_matrix(W, H, 300, 2000, is_numpy=True)

    V = viewport_matrix(right=W, left=0, top=H, bottom=0, is_numpy=True)

    i = V @ P @ T @ G_h

    # cartesian
    i /= i[3, :]

    # two-dimensional
    return i[:2, :]
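A hypothetical call, just to show the expected shapes (the latent values, pose, and translation here are made up):

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    alpha = rng.uniform(-1, 1, 30)  # made-up latent coefficients
    delta = rng.uniform(-1, 1, 20)  # made-up latent coefficients
    lm = facial_landmarks(alpha, delta, w=[0, 10, 0], t=[0, 0, -500])
    print(lm.shape)  # expected: (2, 68)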
Example 8
from PIL import Image, ImageSequence
from optimization import optimization_one_image
from mesh_to_png import triangles, mean_tex, mesh_to_png
from data_def import Mesh

from morphable_model import get_face_point_cloud

source_image = Image.open('./images/expression.gif')
target = Image.open('./images/first_frame.png')

target_model = optimization_one_image(
    300,  # Can be changed to multiple images
    target,
    lambda_alpha=45,
    lambda_delta=15,
    lr=.128)
alpha = target_model.alpha

for i, frame in enumerate(ImageSequence.Iterator(source_image)):
    frame = frame.convert('RGB')
    model = optimization_one_image(300, frame, lr=.128)
    points = get_face_point_cloud(model.p, alpha, model.delta).view((-1, 3))
    mesh = Mesh(vertices=points.detach().numpy(),
                colors=mean_tex,
                triangles=triangles)
    mesh_to_png("./results/expression/frame_{}.png".format(i), mesh)