Example #1
def test_optimization_multiple_images():
    image = Image.open('./images/faces_sparser_sampling.gif')

    lambda_alpha = 50
    lambda_delta = 0.3
    lr = .128
    num_epochs = 300
    num_frames = 100

    trained_model, frames = optimization(
        num_epochs=num_epochs,
        path='./images/faces_sparser_sampling.gif',
        shape=(image.width, image.height),
        num_frames=num_frames,
        lambda_alpha=lambda_alpha,
        lambda_delta=lambda_delta,
        lr=lr)

    pca = trained_model.p

    for frame_idx, frame in enumerate(frames):
        # obtain texture from mask on image
        points_2d = trained_model.forward(
            frame_idx, only_lm=False).detach().numpy()  # 28588, 2
        tex = np.array(texture(np.array(frame), points_2d))

        # look at 3D visualization of the estimated face shape
        points_3d = get_face_point_cloud(pca, trained_model.alpha,
                                         trained_model.delta[frame_idx]).view(
                                             (-1, 3))
        """    
        mesh = trimesh.base.Trimesh(vertices=points_3d.detach().numpy(), faces=triangles, vertex_colors=tex)
        mesh.show()
        """

        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(np.array([0, 0, 0]), is_numpy=True)
        # save resulting rotated face
        G = points_3d.detach().numpy().T
        G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=tex, triangles=triangles)
        mesh_to_png(f"./results/optimization/multiple/shapes/{frame_idx}.png",
                    mesh,
                    z_camera_translation=280)

        # show estimated landmarks
        landmarks = trained_model.forward(frame_idx)
        landmarks = landmarks.detach().numpy().T
        plt.figure(figsize=(600 / 100, 600 / 100), dpi=100)
        plt.imshow(np.array(frame))
        plt.scatter(landmarks[0], landmarks[1])
        plt.axis('off')
        plt.savefig(
            f"./results/optimization/multiple/landmarks/{frame_idx}.png",
            bbox_inches='tight',
            pad_inches=0,
            dpi=100)
        plt.close()  # close the figure so they do not accumulate across frames
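
# Example #1 obtains per-vertex colours via texture(np.array(frame), points_2d).
# Below is a minimal sketch of what such a sampler could look like, assuming
# bilinear interpolation of the image at the projected 2D points; the project's
# own texture() helper may differ.
import numpy as np

def sample_texture(image, points_2d):
    """image: H x W x 3 uint8 array; points_2d: N x 2 array of (x, y) pixel coordinates."""
    h, w = image.shape[:2]
    x = np.clip(points_2d[:, 0], 0, w - 2)
    y = np.clip(points_2d[:, 1], 0, h - 2)
    x0, y0 = np.floor(x).astype(int), np.floor(y).astype(int)
    dx, dy = (x - x0)[:, None], (y - y0)[:, None]
    # blend the four neighbouring pixels
    top = (1 - dx) * image[y0, x0] + dx * image[y0, x0 + 1]
    bottom = (1 - dx) * image[y0 + 1, x0] + dx * image[y0 + 1, x0 + 1]
    return ((1 - dy) * top + dy * bottom).astype(np.uint8)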
Example #2
def test_optimization_single_frame(path='./images/first_frame.png'):
    image = Image.open(path)

    lambda_alpha = 30
    lambda_delta = 0.3
    lr = .128
    num_epochs = 400
    num_frames = 1

    trained_model, frames = optimization(num_epochs=num_epochs,
                                         path=path,
                                         shape=(image.width, image.height),
                                         num_frames=num_frames,
                                         lambda_alpha=lambda_alpha,
                                         lambda_delta=lambda_delta,
                                         lr=lr)

    pca = trained_model.p

    # look at 3D visualization of the estimated face shape
    points_3d = get_face_point_cloud(pca, trained_model.alpha,
                                     trained_model.delta[0]).view(
                                         (-1, 3))  # 28588, 3

    # obtain texture from mask on image

    points_2d = trained_model.forward(
        0, only_lm=False).detach().numpy()  # 28588, 2
    tex = np.array(texture(np.array(image), points_2d))

    # optional interactive preview of the reconstructed mesh:
    # mesh = trimesh.base.Trimesh(vertices=points_3d.detach().numpy(), faces=triangles, vertex_colors=tex)
    # mesh.show()

    # show estimated landmarks
    landmarks = trained_model.forward(0)
    landmarks = landmarks.detach().numpy().T
    plt.scatter(landmarks[0], landmarks[1])
    plt.imshow(np.array(frames[0]))
    plt.axis('off')
    plt.savefig(f"./results/optimization/landmarks.png",
                bbox_inches='tight',
                pad_inches=0,
                dpi=100)

    # show from different angles
    G = points_3d.detach().numpy().T
    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    for w in [[0, 0, 0], [0, -30, 0], [0, -45, 0], [0, -90, 0]]:
        w = np.array(w)
        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(w, is_numpy=True)
        # save resulting rotated face
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=tex, triangles=triangles)
        mesh_to_png(f"./results/optimization/single/tex_{w}.png",
                    mesh,
                    z_camera_translation=280)
Example #3
def rotate_face(angles):
    """
    Task 3.1
    :param angles: list of angles. each angle has three entries [theta_x, theta_y, theta_z]
    :return:
    """
    # sample face
    pca = read_pca_model()
    G = random_face_point_cloud(pca).T

    # transform to homogeneous coordinates
    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    for w in angles:
        w = np.array(w)
        # get T matrix for only rotation
        T = np.eye(4)
        T[:3, :3] = rotation_matrix(w, is_numpy=True)

        # save resulting rotated face
        mesh = Mesh(vertices=(T @ G_h)[:3].T, colors=mean_tex, triangles=triangles)
        mesh_to_png("./results/rotation/"+str(w)+".png", mesh)
    return
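
# Illustrative invocation of rotate_face (the angles below are arbitrary and
# assume the helper interprets them in degrees; ./results/rotation/ must exist):
rotate_face([[0, 0, 0], [0, 45, 0], [0, 90, 0], [45, 0, 0]])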
Example #4
def facial_landmarks(alpha, delta, w, t):
    """
    Construct facial landmarks from facial geometry latent parameters alpha, delta and object transformation w, t.

    :param alpha: array, 30dim
    :param delta: array, 20dim
    :param w: rotation angles around x,y, z. Given as list [theta_x, theta_y, theta_z].
    :param t: translation in x,y,z space. Given as list [translation_x, translation_y, translation_z]
    :return:
    """
    landmarks_idx = np.loadtxt("./models/Landmarks68_model2017-1_face12_nomouth.anl", dtype=int)

    pca = read_pca_model()
    G = get_face_point_cloud(pca, alpha, delta).reshape((-1, 3))[landmarks_idx].T

    G_h = np.append(G, np.ones(G.shape[1]).reshape((1, -1)), axis=0)

    # get T matrix
    T = np.eye(4)
    T[:3, :3] = rotation_matrix(w, is_numpy=True)
    T[:3, 3] = t

    # Get V and P matrices
    W = H = 255

    # angle 10
    P = perspective_projection_matrix(W, H, 300, 2000, is_numpy=True)

    V = viewport_matrix(right=W, left=0, top=H, bottom=0, is_numpy=True)

    i = V @ P @ T @ G_h

    # convert from homogeneous to cartesian coordinates
    i /= i[3, :]

    # two-dimensional
    return i[:2, :]
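
# Illustrative call: with zero latent parameters this should project the model's
# mean-face landmarks. The rotation, translation and output path below are
# arbitrary examples, and matplotlib is assumed to be imported as plt.
import numpy as np
import matplotlib.pyplot as plt

landmarks = facial_landmarks(np.zeros(30), np.zeros(20), w=np.array([0, 10, 0]), t=[0, 0, -500])
plt.scatter(landmarks[0], landmarks[1])
plt.gca().invert_yaxis()  # image y-axis points downwards
plt.savefig("./results/landmarks_mean_face.png")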
Example #5
# transformation = matrices.translation_matrix(-200, 0, -300)
# wireframes[0].transform(transformation)

FOCAL_LIMITS = 20., 500.
FOCAL_STEP = 2.
TRANSLATION_STEP = 10.
ROTATION_STEP = np.radians(0.8)

left_translation = matrices.translation_matrix(TRANSLATION_STEP, 0, 0)
right_translation = matrices.translation_matrix(-TRANSLATION_STEP, 0, 0)
forward_translation = matrices.translation_matrix(0, -TRANSLATION_STEP, 0)
backward_translation = matrices.translation_matrix(0, +TRANSLATION_STEP, 0)
up_translation = matrices.translation_matrix(0, 0, -TRANSLATION_STEP)
down_translation = matrices.translation_matrix(0, 0, +TRANSLATION_STEP)

counter_clockwise_rotation = matrices.rotation_matrix(ROTATION_STEP, 'z')
clockwise_rotation = matrices.rotation_matrix(-ROTATION_STEP, 'z')
up_rotation = matrices.rotation_matrix(ROTATION_STEP, 'x')
down_rotation = matrices.rotation_matrix(-ROTATION_STEP, 'x')
left_rotation = matrices.rotation_matrix(ROTATION_STEP * 2, 'y')
right_rotation = matrices.rotation_matrix(-ROTATION_STEP * 2, 'y')
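
# A hedged sketch of what the matrices helpers above are assumed to return:
# 4x4 homogeneous transforms in a column-vector convention. The function names
# below are hypothetical stand-ins; the actual matrices module may use a
# different convention or matrix size.
import numpy as np

def make_translation_matrix(dx, dy, dz):
    t = np.eye(4)
    t[:3, 3] = [dx, dy, dz]
    return t

def make_rotation_matrix(angle, axis):
    c, s = np.cos(angle), np.sin(angle)
    r = np.eye(4)
    if axis == 'x':
        r[1:3, 1:3] = [[c, -s], [s, c]]
    elif axis == 'y':
        r[0, 0], r[0, 2], r[2, 0], r[2, 2] = c, s, -s, c
    else:  # 'z'
        r[:2, :2] = [[c, -s], [s, c]]
    return r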

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    # poll the key state once per frame, outside the event loop,
    # so a held key keeps applying its transform
    keys = pygame.key.get_pressed()
    if keys[pygame.K_MINUS]:
        if focal - FOCAL_STEP > FOCAL_LIMITS[0]:
            focal -= FOCAL_STEP