def meta_train(datapath=None, epochs=10, size=0):
    """Meta-train the embedder/generator/discriminator on ``.vid`` files.

    Parameters
    ----------
    datapath : str or None
        Root directory searched recursively for ``*.vid`` pickle files.
        If None, the function returns immediately (no-op).
    epochs : int
        Number of training epochs (not used in the visible portion of the
        training loop).
    size : int
        Maximum number of video files to use; 0 means "use all files".
    """
    if datapath is None:
        return

    # Recursively collect every .vid file under datapath.
    files = [os.path.join(path, filename)
             for path, dirs, files in os.walk(datapath)
             for filename in files
             if filename.endswith('.vid')]

    # BUG FIX: the original fell through to `min(len(files))`, which raises
    # TypeError (min() of a bare int). A size of 0 means "no limit".
    size = min(len(files), size) if size != 0 else len(files)
    files = files[:size]

    # Placeholders for the training frame/landmark pair and the video-index
    # input (consistently tf.compat.v1; the original mixed v1/v2 APIs).
    x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='x_t')
    y = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='y_t')
    i_p = tf.compat.v1.placeholder(tf.float32, [1])

    # K (frame, landmark) placeholder pairs fed to the embedder.
    # NOTE(review): K is a module-level constant defined outside this view.
    frames_p = []
    for i in range(K):
        frames_p.append([
            tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='x' + str(i)),
            tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='y' + str(i)),
        ])

    embedder = Embedder()
    generator = Generator()
    discriminator = Discriminator(size)

    # Average embedding over the K support frames.
    e_h = tf.reduce_mean(
        tf.concat([embedder(frames_p[i][0], frames_p[i][1]) for i in range(K)],
                  axis=0),
        axis=0, keepdims=True)
    g = generator(y, e_h)
    r, d = discriminator(x, y, e_h, i_p)
    # BUG FIX: the original passed the stale Python loop index `i` instead of
    # the video-index placeholder `i_p`.
    r_hat, d_hat = discriminator(g, y, e_h, i_p)

    # BUG FIX: the original referenced undefined names `e` and `i`; the
    # embedding tensor is `e_h` and the index placeholder is `i_p`.
    loss_ge = lossGenerator(x, g, r_hat, e_h, discriminator.get_W(), d, d_hat, i_p)
    loss_d = lossDiscriminator(r, r_hat)

    # BUG FIX: AdamOptimizer's first constructor argument is the learning
    # rate, not the loss; the loss must be passed through .minimize().
    GE_opt = tf.compat.v1.train.AdamOptimizer().minimize(
        loss_ge, var_list=generator.var_list() + embedder.var_list())
    D_opt = tf.compat.v1.train.AdamOptimizer().minimize(
        loss_d, var_list=discriminator.var_list())

    sess = tf.compat.v1.Session()
    sess.run(tf.compat.v1.global_variables_initializer())

    for file in files:
        # The `with` block closes the handle the original `pickle.load(open(...))`
        # leaked. NOTE(review): unpickling is only safe on trusted, locally
        # generated data.
        with open(file, 'rb') as fh:
            dct = pickle.load(fh)
        # Preprocess 8 (frame, landmark) pairs: rasterize landmarks, then
        # normalize both images.
        for i in range(8):
            dct[i]['landmark'] = plot_landmarks(dct[i]['frame'], dct[i]['landmark'])
            dct[i]['landmark'] = normalize(dct[i]['landmark'])
            dct[i]['frame'] = normalize(dct[i]['frame'])
""" import pandas as pd import numpy as np import matplotlib.pyplot as plt from utils import get_landmark_matrix, get_procrustes, plot_landmarks plt.close('all') df_data = pd.read_csv('../data/landmark_dataset.csv') ls_coord_all = [] for i in range(len(df_data)): print('Processing face', i, '/', len(df_data), ' [', round((100 * i) / len(df_data), 2), '%]') ls_coord = list(df_data.iloc[i, 6:].values) landmarks = get_landmark_matrix(ls_coord) landmarks_standard = get_procrustes(landmarks) ls_coord_all.append( list(landmarks_standard[:, 0]) + list(landmarks_standard[:, 1])) coord_all = np.array(ls_coord_all) coord_all_mean = np.mean(coord_all, axis=0) landmarks_mean_face = get_landmark_matrix(coord_all_mean) plot_landmarks(landmarks_mean_face, axis=None, title='Mean face') np.save('../data/landmarks_mean_face.npy', landmarks_mean_face, allow_pickle=True, fix_imports=True)
# Live demo: capture up to 300 webcam frames, detect faces and landmarks, and
# show original vs. frontalized landmarks side by side.
# NOTE(review): `f`, `cap`, `detector`, `predictor`, `axes` and
# `frontalization_weights` are defined earlier in the file, outside this view.
while f < 300:  # for 300 frames
    print('frame', f)
    # Allow the user to quit early with 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    ret, frame = cap.read()  # capture a frame
    # BUG FIX: the original ignored `ret`; a failed grab yields frame=None and
    # cv2.cvtColor would raise. Stop cleanly instead.
    if not ret or frame is None:
        break
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(image)  # detect faces
    for face in faces:
        landmarks_raw = predictor(image, face)  # detect landmarks
        landmarks = get_landmark_array(landmarks_raw)
        # BUG FIX: the original called frontalize_landmarks BEFORE the
        # `landmarks is not None` guard, so a failed extraction crashed it.
        if landmarks is not None:
            landmarks_frontal = frontalize_landmarks(landmarks, frontalization_weights)
            axes[0].clear()
            plot_landmarks(landmarks, axis=axes[0], title='Original')
            axes[1].clear()
            plot_landmarks(landmarks_frontal, axis=axes[1], title='Frontalized')
            plt.pause(0.001)
    f += 1

# When everything done, release resources
cap.release()
cv2.destroyAllWindows()
# Visualize one face: the detected crop, the original landmarks, and the
# frontalized landmarks, in a single 1x3 figure.
# NOTE(review): `landmarks`, `landmarks_raw`, `image`, `i` and
# `frontalization_weights` come from enclosing code outside this view —
# this fragment presumably runs inside a per-face loop; confirm.
# BUG FIX: the original called frontalize_landmarks BEFORE the
# `landmarks is not None` check, defeating the guard.
if landmarks is not None:
    landmarks_frontal = frontalize_landmarks(landmarks, frontalization_weights)
    # initialize new image
    fig = plt.figure(figsize=(7, 3))
    plt.subplot(1, 3, 1)
    plt.title('Detected face')
    # Crop the detected face rectangle out of the frame for display.
    x1 = landmarks_raw.rect.left()
    y1 = landmarks_raw.rect.top()
    x2 = x1 + landmarks_raw.rect.width()
    y2 = y1 + landmarks_raw.rect.height()
    plt.imshow(image[y1:y2, x1:x2, :])
    plt.axis(False)
    plt.subplot(1, 3, 2)
    plt.title('Original landmarks')
    plt.subplot(1, 3, 3)
    plt.title('Frontalized landmarks')
    plt.suptitle('Face ' + str(i + 1))
    plt.tight_layout()
    # Draw the landmark plots onto the second and third axes.
    axes = fig.get_axes()
    plot_landmarks(landmarks, axis=axes[1])
    plot_landmarks(landmarks_frontal, axis=axes[2])
    plt.show()
def main():
    """Register a source image to a target image via ``am.anhir_method`` and
    report rTRE (relative target registration error) before and after, plus
    diagnostic overlay plots at both original and resampled resolutions.

    The four ``*_path`` variables below are template placeholders and must be
    filled in before running.

    Raises
    ------
    ValueError
        If any of the path placeholders is still ``None`` (the original
        crashed later with an opaque error inside the loaders).
    """
    source_path = None  # Source path
    target_path = None  # Target path
    source_landmarks_path = None  # Source landmarks path
    target_landmarks_path = None  # Target landmarks path

    # ROBUSTNESS: fail fast with an explicit message if paths were not set.
    paths = {
        'source_path': source_path,
        'target_path': target_path,
        'source_landmarks_path': source_landmarks_path,
        'target_landmarks_path': target_landmarks_path,
    }
    missing = [name for name, value in paths.items() if value is None]
    if missing:
        raise ValueError(
            'Set the following path(s) before running: ' + ', '.join(missing))

    source_landmarks = utils.load_landmarks(source_landmarks_path)
    target_landmarks = utils.load_landmarks(target_landmarks_path)
    source = utils.load_image(source_path)
    target = utils.load_image(target_path)

    # Run the registration pipeline (note the argument order: target first).
    (p_source, p_target, ia_source, ng_source, nr_source,
     i_u_x, i_u_y, u_x_nr, u_y_nr,
     warp_resampled_landmarks, warp_original_landmarks,
     return_dict) = am.anhir_method(target, source)

    transformed_source_landmarks = warp_original_landmarks(source_landmarks)
    (resampled_source_landmarks,
     transformed_resampled_source_landmarks,
     resampled_target_landmarks) = warp_resampled_landmarks(
        source_landmarks, target_landmarks)

    # rTRE at the original image resolution.
    y_size, x_size = np.shape(target)
    print("Initial original rTRE: ")
    utils.print_rtre(source_landmarks, target_landmarks, x_size, y_size)
    print("Transformed original rTRE: ")
    utils.print_rtre(transformed_source_landmarks, target_landmarks,
                     x_size, y_size)

    # rTRE at the resampled (pipeline-internal) resolution.
    y_size, x_size = np.shape(p_target)
    print("Initial resampled rTRE: ")
    utils.print_rtre(resampled_source_landmarks, resampled_target_landmarks,
                     x_size, y_size)
    print("Transformed resampled rTRE: ")
    utils.print_rtre(transformed_resampled_source_landmarks,
                     resampled_target_landmarks, x_size, y_size)
    print(return_dict)

    # Original-resolution overlay: source landmarks vs. target/transformed.
    plt.figure()
    plt.subplot(1, 2, 1)
    plt.imshow(source, cmap='gray')
    colors = utils.plot_landmarks(source_landmarks, "*")
    plt.axis('off')
    plt.subplot(1, 2, 2)
    plt.imshow(target, cmap='gray')
    utils.plot_landmarks(target_landmarks, "*", colors)
    utils.plot_landmarks(transformed_source_landmarks, ".", colors)
    plt.axis('off')

    # Resampled-resolution overlays plus the nonrigidly registered source.
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.imshow(p_target, cmap='gray')
    colors = utils.plot_landmarks(resampled_source_landmarks, "*")
    plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.imshow(p_source, cmap='gray')
    utils.plot_landmarks(resampled_target_landmarks, "*", colors)
    utils.plot_landmarks(transformed_resampled_source_landmarks, ".", colors)
    plt.axis('off')
    plt.subplot(1, 3, 3)
    plt.imshow(nr_source, cmap='gray')
    plt.axis('off')
    plt.show()
# NOTE(review): this fragment starts mid-file. `landmarks_pose`,
# `landmarks_frontal_flipped`, `ls_matrix_A`, `ls_matrix_Y` and `df_data` are
# defined earlier (outside this view), and the first three statements
# presumably run inside the per-face augmentation loop — confirm against the
# full file.

# Augment the training set with a mirrored copy of each face: mirrored posed
# landmarks go into A, mirrored frontal landmarks into Y.
landmarks_pose_flipped = mirror_landmarks(landmarks_pose)
ls_matrix_A.append(
    list(landmarks_pose_flipped[:, 0]) + list(landmarks_pose_flipped[:, 1]))
ls_matrix_Y.append(
    list(landmarks_frontal_flipped[:, 0]) + list(landmarks_frontal_flipped[:, 1]))

# prepare matrices
matrix_A = np.array(ls_matrix_A)
matrix_Y = np.array(ls_matrix_Y)
# Append a column of ones so the fit includes a constant (intercept) term.
interception = np.ones((len(matrix_A), 1))
matrix_A_1 = np.hstack((matrix_A, interception))  # adding interception

# Least Squares fit: solve matrix_A_1 @ W ~= matrix_Y for the weights W.
ls_fit = np.linalg.lstsq(a=matrix_A_1, b=matrix_Y, rcond=None)
# TODO: implement ridge regression (did not want to use SKlearn as dependency!)
frontalization_weights = ls_fit[0]
np.save('../data/frontalization_weights.npy', frontalization_weights,
        allow_pickle=True, fix_imports=True)

# simple test on a random face from the dataset to show the frontalization
ls_coord = list(df_data.iloc[30010, 6:].values)
landmarks = get_landmark_matrix(ls_coord)
plot_landmarks(landmarks, axis=None, title='Original')
landmarks_frontal = frontalize_landmarks(ls_coord, frontalization_weights)
plot_landmarks(landmarks_frontal, axis=None, title='Frontalized')