def plot_error_hsv(all_images, test_images, test_landmarks, mean_image,
                   mean_landmarks, eig_faces, eig_landmarks, K, step):
    """Plot mean squared reconstruction error vs. number of eigenfaces.

    For p = 1, step, 2*step, ... up to K, reconstruct the shape-normalized
    test images from their top-p eigenface coefficients, warp each one back
    to its reconstructed landmark positions, and measure the MSE against the
    original (unwarped) test images.

    Args:
        all_images: flattened original images; the test split is assumed to
            start at index 800 -- TODO confirm against caller.
        test_images: shape-normalized (warped-to-mean) flattened test images.
        test_landmarks: flattened test landmarks (68 * 2 values per row).
        mean_image: mean of the training images (used for centering).
        mean_landmarks: mean of the training landmarks (used for centering).
        eig_faces: eigenface basis, one component per row.
        eig_landmarks: eigen-landmark basis, one component per row.
        K: maximum number of eigenfaces to evaluate.
        step: increment between successive p values (p = 1 is always
            evaluated first, even when step > 1).

    Side effect: shows a matplotlib scatter + line plot of error vs. p.
    """
    x = []
    y = []
    # Ground-truth images for the error measure (hard-coded 800/200 split).
    test_images_original = all_images[800:]
    # The landmark reconstruction does not depend on p, so hoist it out of
    # the loop instead of recomputing it on every iteration.
    pca_landmarks = np.matmul((test_landmarks - mean_landmarks),
                              eig_landmarks.T)
    recon_landmarks = np.matmul(pca_landmarks,
                                eig_landmarks) + mean_landmarks
    p = 1
    while p <= K:
        top_eig_faces = eig_faces[:p, ]
        # Project onto the top-p eigenfaces, then reconstruct appearance.
        pca_faces = np.matmul((test_images - mean_image), top_eig_faces.T)
        recon_faces = np.matmul(pca_faces, top_eig_faces) + mean_image
        # Warp each reconstructed face from the mean shape back to its
        # reconstructed landmark positions.
        final_recon_faces = []
        for i in range(len(recon_faces)):
            final_recon_faces.append(
                mywarper.warp(recon_faces[i].reshape(128, 128, 1),
                              mean_landmarks.reshape(68, 2),
                              recon_landmarks[i].reshape(68, 2)))
        final_recon_faces = np.array(final_recon_faces)
        # Flatten using the actual image count rather than a hard-coded 200.
        final_recon_faces = final_recon_faces.reshape(
            len(final_recon_faces), -1)
        # Pixel values are in [0, 255]; normalize before computing the MSE.
        error = np.mean(((final_recon_faces - test_images_original) / 255)**2)
        x.append(p)
        y.append(error)
        # Evaluate p = 1 first, then jump to multiples of step
        # (same sequence as the original p -= 1 / p += step dance).
        p = step if (p == 1 and step > 1) else p + step
    plt.scatter(x, y)
    plt.plot(x, y)
    plt.show()
# Example #2 (snippet separator; score: 0)
def warp_img_to_interpolated_landmarks(images, img_landmark, recon_landmarks):
    """Warp each image from img_landmark onto its reconstructed landmarks.

    Landmarks are flattened on input and reshaped to (68, 2) before warping;
    returns a list of warped images, one per entry in recon_landmarks.
    """
    return [
        warp(images[idx], img_landmark, np.reshape(flat_lm, (68, 2)))
        for idx, flat_lm in enumerate(recon_landmarks)
    ]
# Example #3 (snippet separator; score: 0)
def generate_aligned_images(image_folder, landmark_folder, im_file, LM_file,
                            aligned_folder, target_LM):
    """Warp every image onto target_LM and save it under aligned_folder.

    Image files and landmark .mat files are paired positionally, so the two
    lists must be the same length. Each warped image is written back using
    its original filename.
    """
    assert len(im_file) == len(
        LM_file), "Image number and landmark number don't match!"
    for img_name, lm_name in zip(im_file, LM_file):
        image = skimage.io.imread(join(image_folder, img_name))
        # Landmarks are stored under the 'lms' key of the .mat file.
        source_lm = scipy.io.loadmat(join(landmark_folder, lm_name))['lms']
        aligned = mywarper.warp(image, source_lm, target_LM)
        skimage.io.imsave(join(aligned_folder, img_name), aligned)
# Example #4 (snippet separator; score: 0)
def warp_imgs_to_landmarks(recon_appear_imgs, landmark_src, recon_landmarks):
    """Warp batched reconstructed images onto their reconstructed landmarks.

    recon_appear_imgs and recon_landmarks are parallel nested sequences of
    torch tensors (batches of CHW images / flattened landmarks). Returns a
    flat list of warped HWC numpy images.
    """
    warped = []
    for batch_idx, batch in enumerate(recon_appear_imgs):
        for img_idx, chw_img in enumerate(batch):
            # CHW tensor -> HWC numpy array, detached from the graph.
            hwc_img = chw_img.permute(1, 2, 0).detach().numpy()
            flat_lm = recon_landmarks[batch_idx][img_idx].detach().numpy()
            target_lm = np.reshape(flat_lm, (68, 2))
            warped.append(warp(hwc_img, landmark_src, target_lm))
    return warped
# --- Landmark PCA + shape-normalized eigenface pipeline (script level) ---
# NOTE(review): relies on globals defined earlier in the file
# (all_landmarks, train_landmarks, all_images) -- confirm against caller.
test_landmarks = all_landmarks[800:, ]
mean_landmarks = np.mean(train_landmarks, axis=0)
# Cache the landmark eigenvectors on disk so the SVD only runs once.
file_path = "eig_landmarks.npy"
if os.path.exists(file_path):
    all_eig_landmarks = np.load(file_path)
else:
    # SVD of the centered data transposed: the columns of u are the
    # principal directions (eigen-landmarks) of the landmark space.
    u, s, v = np.linalg.svd((train_landmarks - mean_landmarks).T)
    all_eig_landmarks = u
    np.save(file_path, all_eig_landmarks)
# Transpose so each ROW is one eigen-landmark, then keep the top 10.
all_eig_landmarks = all_eig_landmarks.T
eig_landmarks = all_eig_landmarks[:10, ]
# Warp every face to the mean landmark geometry so appearance PCA is
# computed on shape-normalized, single-channel 128x128 images.
warp_all_images = []
for i in range(len(all_images)):
    warp_all_images.append(
        mywarper.warp(all_images[i].reshape(128, 128, 1),
                      all_landmarks[i].reshape(68, 2),
                      mean_landmarks.reshape(68, 2)))
warp_all_images = np.array(warp_all_images)
# Flatten each warped image into a 128*128-dim row vector (1000 faces total).
warp_all_images = warp_all_images.reshape(1000, 128 * 128)
train_images = warp_all_images[:800, ]
test_images = warp_all_images[800:, ]
mean_image = np.mean(train_images, axis=0)
# Same on-disk caching scheme for the appearance (eigenface) basis.
file_path = "aligned_eig_faces.npy"
if os.path.exists(file_path):
    all_eig_faces = np.load(file_path)
else:
    u, s, v = np.linalg.svd((train_images - mean_image).T)
    all_eig_faces = u
    np.save(file_path, all_eig_faces)
# Rows are eigenfaces; keep the 50 leading components.
all_eig_faces = all_eig_faces.T
eig_faces = all_eig_faces[:50, ]
# Sanity-check the array shapes (notebook-style bare expression; the value
# is discarded when run as a plain script).
train_img_v.shape,test_img_v.shape,train_lm.shape,test_lm.shape

"""**Fisher Face Clasification with Geometry and Appearance together**"""

# PCA on Landmarks
# Center the training landmarks, then project onto the top 10 principal axes.
mean_lm = train_lm.mean(axis=0)
train_lm = train_lm - mean_lm
principles_lm,e_value_lm = pca(train_lm)
trainlm_projection = np.dot(train_lm,principles_lm)[:,:10]
trainlm_projection.shape

# PCA on Images
# Warp every training image to the mean landmark geometry in place, so the
# appearance PCA below runs on shape-normalized images.
for i in range(800):
  img = np.zeros([128,128,3])
  # Only channel 2 is populated -- presumably the V channel of an HSV
  # image; TODO confirm against the data loading code.
  img[:,:,2] = np.reshape(train_img_v[i],(128,128))
  # train_lm was centered above, so add mean_lm back to get real positions.
  img = warp(img,np.reshape((train_lm+mean_lm)[i,:],(68,2)),mean_lm.reshape(68,2))
  train_img_v[i] = img[:,:,2].reshape(1,-1)

# Center the shape-normalized appearance and keep the 50 leading components.
mean_img = train_img_v.mean(axis=0)
train_img_v = train_img_v-mean_img
principles_img,e_value_img = pca(train_img_v)
trainImg_projection = np.dot(train_img_v,principles_img)[:,:50]
trainlm_projection.shape,trainImg_projection.shape

## Fisher Face for Landmarks
# Class split: rows [0, 330) are one class and [330, 800) the other --
# presumably the two gender classes; TODO confirm against the loader.
mu_1 = trainlm_projection[:330,:].mean(axis=0)
mu_2 = trainlm_projection[330:800,:].mean(axis=0)
mu = trainlm_projection.mean(axis=0)

# Within-class scatter Sw (sum of both classes' scatter matrices) and
# between-class scatter Sb (outer products of the class-mean deviations),
# both computed in the 10-dim landmark PCA subspace.
Sw = np.dot((trainlm_projection[:330,:]-mu_1).T,(trainlm_projection[:330,:]-mu_1))+np.dot((trainlm_projection[330:800,:]-mu_2).T,(trainlm_projection[330:800,:]-mu_2))
Sb = np.matmul((mu_1-mu).reshape((10,1)),(mu_1-mu).reshape((1,10)))+np.matmul((mu_2-mu).reshape((10,1)),(mu_2-mu).reshape((1,10)))
# Load male and female landmarks and flatten each set to (N, 68 * 2).
all_male_landmarks = ld_landmarks("male_landmarks")
all_male_landmarks = all_male_landmarks.reshape(412, 68 * 2)

all_female_landmarks = ld_landmarks("female_landmarks")
all_female_landmarks = all_female_landmarks.reshape(588, 68 * 2)

# Mean landmark shape over all 1000 (412 male + 588 female) faces.
all_landmarks = np.append(all_male_landmarks, all_female_landmarks, axis=0)
mean_landmarks = np.mean(all_landmarks, axis=0)

# Remove this part of the code if you want non-warped images for the first question. Feed non-warped images into all_images instead

# Warp every male image (single channel, 128x128) to the mean landmark
# geometry, then flatten each warped image into a row vector.
warped_male_images = []
for i in range(len(all_male_images)):
    warped_male_images.append(
        mywarper.warp(all_male_images[i].reshape(128, 128, 1),
                      all_male_landmarks[i].reshape(68, 2),
                      mean_landmarks.reshape(68, 2)))
warped_male_images = np.array(warped_male_images)
warped_male_images = warped_male_images.reshape(412, 128 * 128)

# Same shape normalization for the female images.
warped_female_images = []
for i in range(len(all_female_images)):
    warped_female_images.append(
        mywarper.warp(all_female_images[i].reshape(128, 128, 1),
                      all_female_landmarks[i].reshape(68, 2),
                      mean_landmarks.reshape(68, 2)))
warped_female_images = np.array(warped_female_images)
warped_female_images = warped_female_images.reshape(588, 128 * 128)

###################################################################################################################################
def warp_imgs_to_recon_pos(imgs, landmark_mean, recon_test_landmarks):
    """Warp each single-channel image from the mean landmarks to its
    reconstructed landmark positions; returns a squeezed numpy array."""
    warped = [
        warp(np.expand_dims(image, axis=2), landmark_mean,
             recon_test_landmarks[idx])
        for idx, image in enumerate(imgs)
    ]
    return np.squeeze(warped)
def warp_imgs_to_mean(imgs, landmarks, landmark_mean):
    """Warp each single-channel image from its own landmarks to the mean
    landmark shape; returns a squeezed numpy array."""
    warped = [
        warp(np.expand_dims(image, axis=2), landmarks[idx], landmark_mean)
        for idx, image in enumerate(imgs)
    ]
    return np.squeeze(warped)
# Example #10 (snippet separator; score: 0)
# Display the figure built up by the preceding plotting calls.
plt.show()

# Per-image landmark reconstruction error using the top 50 principal
# components (reconst_face is defined elsewhere in this project).
err_lm = np.zeros(200)
for i in range(200):
    _, err_lm[i] = reconst_face(test_lm[i,:],principles_lm,50,mean_lm)

## 3

from mywarper import warp
from mywarper import plot

# Align the training and testing images to the mean landmark shape
train_lm_img = []
test_lm_img = []
for i in range(800):
    train_lm_img.append(warp(img_train[i],lm_train[i],mean_lm_pl))
for i in range(200):
    test_lm_img.append(warp(img_test[i],lm_test[i],mean_lm_pl))

# Flatten channel 2 of each warped image into a row vector -- presumably
# the V channel of an HSV image; TODO confirm.
train_v_lm = np.zeros((800,128*128))
test_v_lm = np.zeros((200,128*128))
for i in range(800):
    v = train_lm_img[i]
    train_v_lm[i,:] = np.reshape(v[:,:,2],(1,-1))
for i in range(200):
    v = test_lm_img[i]
    test_v_lm[i,:] = np.reshape(v[:,:,2],(1,-1))

# Do the PCA on the training images V channel
# (only the centering step happens here; the projection is done elsewhere).
mean_train_lm = train_v_lm.mean(axis=0)
train_v_lm = train_v_lm - mean_train_lm
# Device selection (select_device is a project helper).
USE_GPU = True
device = select_device(use_gpu=USE_GPU)

# Load images and convert from OpenCV's BGR channel order to RGB.
all_images_rgb = ld_images_drive("drive/My Drive/Project_1/images")
all_images = []
for i in range(len(all_images_rgb)):
    all_images.append(cv2.cvtColor(all_images_rgb[i], cv2.COLOR_BGR2RGB))
# Landmarks: 1000 faces x 68 points x 2 coords, flattened to (1000, 136).
all_landmarks = ld_landmarks_drive("drive/My Drive/Project_1/landmarks")
all_landmarks = all_landmarks.reshape(1000,136)
train_landmarks = all_landmarks[:800,]
mean_landmarks = np.mean(train_landmarks, axis=0)

# Warp all RGB images to the mean landmark geometry (shape normalization).
warp_all_images = []
for i in range(len(all_images)):
    warp_all_images.append(mywarper.warp(all_images[i].reshape(128,128,3),all_landmarks[i].reshape(68,2),mean_landmarks.reshape(68,2)))
warp_all_images = np.array(warp_all_images)
# Scale pixels to [0, 1]; landmarks are divided by the 128-pixel image size.
warp_all_images = warp_all_images / 255
train_images = warp_all_images[:800,]
test_images = warp_all_images[800:,]
all_landmarks = all_landmarks / 128
train_landmarks = all_landmarks[:800,]
test_landmarks = all_landmarks[800:,]

# Training hyper-parameters.
learning_rate = 7e-4
num_steps = 300
batch_size = 100
disp_step = 10

# TF1-style graph reset before (re)building the model.
tf.reset_default_graph()
# Example #12 (snippet separator; score: 0)
        recon = self.landmark_model.given_latent(fc)
        self.landmark_fc_recon.append(recon.data.cpu().numpy())

# Readers for face images (.jpg) and landmarks (.mat); filenames are
# zero-padded 6-digit indices. The first 800 samples form the train split.
face_images_reader = data_reader(args.image_dir, 6, '000000', '.jpg')
face_landmark_reader = data_reader(args.landmark_dir, 6, '000000', '.mat')
face_images_train, face_images_test = face_images_reader.read(split=800,read_type='image')
face_landmark_train, face_landmark_test = face_landmark_reader.read(split=800,read_type='landmark')

# NOTE(review): despite the name, this is an alias, not a copy -- both
# names refer to the same object, so mutating one mutates the other.
face_landmark_train_copy = face_landmark_train
# Mean training landmark shape, flattened per sample before averaging.
mean_lm_train=np.asarray(face_landmark_train_copy).reshape((800,-1)).mean(axis=0)

# Warp all train/test faces to the mean training landmark geometry.
warp_face_images_train = []
warp_face_images_test = []

for i in range(800):
  warp_face_images_train.append(warp(face_images_train[i],face_landmark_train[i],mean_lm_train.reshape(68,2)))

for i in range(200):
  warp_face_images_test.append(warp(face_images_test[i],face_landmark_test[i],mean_lm_train.reshape(68,2)))

# Datasets/loaders for the appearance autoencoder (shape-normalized images).
face_trainset = dataset_constructor(warp_face_images_train, transform=transforms.Compose([ImgToTensor()]))
face_testset = dataset_constructor(warp_face_images_test, transform=transforms.Compose([ImgToTensor()]))
face_trainloader = torch.utils.data.DataLoader(face_trainset,batch_size=args.batch_size,shuffle=True,num_workers=0)
face_testloader = torch.utils.data.DataLoader(face_testset,batch_size=args.batch_size,shuffle=False,num_workers=0)

# Datasets/loaders for the landmark autoencoder (raw, unwarped landmarks).
landmark_trainset = dataset_constructor(face_landmark_train, transform=transforms.Compose([LandmarkToTensor()]))
landmark_testset = dataset_constructor(face_landmark_test, transform=transforms.Compose([LandmarkToTensor()]))
landmark_trainloader = torch.utils.data.DataLoader(landmark_trainset,batch_size=args.batch_size,shuffle=True,num_workers=0)
landmark_testloader = torch.utils.data.DataLoader(landmark_testset,batch_size=args.batch_size,shuffle=False,num_workers=0)
a = autoencoder(args.appear_lr,args.landmark_lr,args.cuda)
# Example #13 (snippet separator; score: 0)
def warp_imgs_to_mean(imgs, landmarks, landmark_mean):
    """Warp every image from its own landmarks to the mean landmark shape."""
    warped = []
    for idx, image in enumerate(imgs):
        warped.append(warp(image, landmarks[idx], landmark_mean))
    return warped