import os

import cv2
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (GraphDef / Session) as used below

# Project-local helpers assumed to be available in this module's scope:
# load_landmark, write_lmk, crop_image_and_process_landmark,
# detect_2D_landmark, face_segmentation.


def crop_img_by_lmk(
    lmk2D_txt_path,
    lmk3D_txt_path,
    lmk2D_crop_txt_path,
    lmk3D_crop_txt_path,
    img_dir,
    out_crop_dir,
    orig=False,
):
    """Crop each image around its landmarks and write the remapped 68-pt 2D and 86-pt 3D landmarks."""
    if not os.path.exists(out_crop_dir):
        os.makedirs(out_crop_dir)
    fopen2D = open(lmk2D_crop_txt_path, "w")
    fopen3D = open(lmk3D_crop_txt_path, "w")
    # Both landmark files are assumed to list the images in the same order.
    landmarks2D, images_name = load_landmark(lmk2D_txt_path, 68)
    landmarks3D, images_name = load_landmark(lmk3D_txt_path, 86)
    count = 0
    for i in range(len(images_name)):
        img_name = images_name[i]
        img = cv2.imread(os.path.join(img_dir, img_name))
        crop_img, prediction3D, prediction2D, ori_crop_img = crop_image_and_process_landmark(
            img, landmarks3D[i], landmarks2D[i], size=300, orig=orig
        )
        # Landmarks are remapped into the crop's coordinate frame before being written.
        write_lmk(img_name, np.reshape(prediction3D, [86, 2]), fopen3D)
        write_lmk(img_name, np.reshape(prediction2D, [68, 2]), fopen2D)
        cv2.imwrite(os.path.join(out_crop_dir, img_name), crop_img)
        if orig:
            # Also keep the unprocessed crop alongside the processed one.
            cv2.imwrite(
                os.path.join(out_crop_dir, img_name[:-4] + "_ori" + img_name[-4:]),
                ori_crop_img,
            )
        count += 1
        if count % 100 == 0 or count == len(images_name):
            print("has run crop_img_by_lmk: %d / %d" % (count, len(images_name)))
    fopen2D.close()
    fopen3D.close()
def detect_2Dlmk_all_imgs(graph_file, img_dir, lmk3D_txt_path, lmk2D_txt_path):
    """Run the frozen 68-point 2D landmark detector on every image and write the results."""
    with tf.Graph().as_default():
        # Load the frozen inference graph.
        graph_def = tf.GraphDef()
        with open(graph_file, "rb") as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            fopen = open(lmk2D_txt_path, "w")
            landmarks3D, images_name = load_landmark(lmk3D_txt_path, 86)
            count = 0
            for i in range(len(images_name)):
                img_name = images_name[i]
                img = cv2.imread(os.path.join(img_dir, img_name))
                lmk3D = landmarks3D[i]
                # The 86-point 3D landmarks guide the 68-point 2D detection.
                LMK2D_batch = detect_2D_landmark.detect_2Dlmk68(
                    np.array([lmk3D]), np.array([img]), sess
                )
                write_lmk(img_name, np.reshape(LMK2D_batch[0], [68, 2]), fopen)
                count += 1
                if count % 100 == 0 or count == len(images_name):
                    print("has run 68pt lmk: %d / %d" % (count, len(images_name)))
            fopen.close()
            return LMK2D_batch
def face_seg(graph_file, lmk3D_crop_txt_path, out_crop_dir, seg_dir):
    """Segment each cropped face; save the raw mask as .npy and a color visualization as JPEG."""
    if not os.path.exists(seg_dir):
        os.makedirs(seg_dir)
    landmarks3D, images_name = load_landmark(lmk3D_crop_txt_path, 86)
    with tf.Graph().as_default():
        # Load the frozen segmentation graph.
        graph_def = tf.GraphDef()
        with open(graph_file, "rb") as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            count = 0
            for i in range(len(images_name)):
                img_name = images_name[i]
                crop_img = cv2.imread(os.path.join(out_crop_dir, img_name))
                lmk3D = landmarks3D[i]
                SEG_batch, SEG_color_batch = face_segmentation.run_face_seg(
                    np.array([lmk3D]), np.array([crop_img]), sess
                )
                # Raw per-pixel labels as .npy, plus a color overlay for inspection.
                np.save(os.path.join(seg_dir, img_name[:-3] + "npy"), SEG_batch[0])
                cv2.imwrite(
                    os.path.join(seg_dir, img_name[:-4] + "_seg.jpg"),
                    SEG_color_batch[0],
                )
                count += 1
                if count % 100 == 0 or count == len(images_name):
                    print("has run face_seg: %d / %d" % (count, len(images_name)))
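# Minimal usage sketch, not part of the original functions above: it chains the three
# steps (68-pt landmark detection, landmark-based cropping, face segmentation).
# All paths and graph filenames below are hypothetical placeholders; only the
# function signatures are taken from the code in this module.
if __name__ == "__main__":
    img_dir = "data/images"                # hypothetical input image folder
    lmk3D_txt_path = "data/lmk86_3d.txt"   # hypothetical 86-pt 3D landmark file
    lmk2D_txt_path = "data/lmk68_2d.txt"
    lmk2D_crop_txt_path = "data/lmk68_2d_crop.txt"
    lmk3D_crop_txt_path = "data/lmk86_3d_crop.txt"
    out_crop_dir = "data/crop"
    seg_dir = "data/seg"

    # Step 1: detect 68-pt 2D landmarks, guided by the 86-pt landmarks.
    detect_2Dlmk_all_imgs("models/lmk68.pb", img_dir, lmk3D_txt_path, lmk2D_txt_path)

    # Step 2: crop images and remap both landmark sets into the crop frame.
    crop_img_by_lmk(
        lmk2D_txt_path,
        lmk3D_txt_path,
        lmk2D_crop_txt_path,
        lmk3D_crop_txt_path,
        img_dir,
        out_crop_dir,
        orig=False,
    )

    # Step 3: segment the cropped faces.
    face_seg("models/faceseg.pb", lmk3D_crop_txt_path, out_crop_dir, seg_dir)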