def align_image(filename):
    import os
    import sys
    import bz2
    from keras.utils import get_file
    from ffhq_dataset.face_alignment import image_align
    from ffhq_dataset.landmarks_detector import LandmarksDetector

    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'

    def unpack_bz2(src_path):
        data = bz2.BZ2File(src_path).read()
        dst_path = src_path[:-4]
        with open(dst_path, 'wb') as fp:
            fp.write(data)
        return dst_path

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for i, face_landmarks in enumerate(
            landmarks_detector.get_landmarks(filename), start=1):
        face_img_name = '%s.png' % os.path.splitext(filename)[0]
        aligned_face_path = face_img_name
        image_align(filename, aligned_face_path, face_landmarks)
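# Hedged usage sketch, not part of the snippet above: batch-align every image in a
# directory with align_image(). The directory name 'raw_images' is an assumption.
# Note that align_image() writes the aligned face next to the input as '<name>.png',
# so for a multi-face photo only the last detected face survives.
if __name__ == '__main__':
    import os
    for name in os.listdir('raw_images'):
        if not name.startswith('.'):
            align_image(os.path.join('raw_images', name))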
def align(images):
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = './raw_images'
    ALIGNED_IMAGES_DIR = './aligned_images'
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            # Skip images whose first aligned face already exists in the output directory
            fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
            if os.path.isfile(os.path.join(ALIGNED_IMAGES_DIR, fn)):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks,
                                output_size=1024, x_scale=1, y_scale=1, em_scale=0.1, alpha=False)
                    print('Wrote result %s' % aligned_face_path)
                except:
                    print("Exception in face alignment!")
        except:
            print("Exception in landmark detection!")
def main(arg1, arg2):
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    # landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_model_path = unpack_bz2(LANDMARKS_LOCAL_FILE)
    RAW_IMAGES_DIR = arg1
    ALIGNED_IMAGES_DIR = arg2
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        if not img_name.startswith('.'):
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            print(raw_img_path)
            for i, face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(raw_img_path), start=1):
                face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                image_align(raw_img_path, aligned_face_path, face_landmarks)
                print(aligned_face_path)
def align_module(R_images, A_images, raw_filename):
    # R_images is a directory (e.g. "static/image"); raw_filename is the file name
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    # print(K.tensorflow_backend._get_available_gpus())
    landmarks_model_path = "./shape_predictor_68_face_landmarks.dat"
    RAW_IMAGES_DIR = './static/' + R_images
    ALIGNED_IMAGES_DIR = './static/' + A_images
    print(RAW_IMAGES_DIR)
    print(ALIGNED_IMAGES_DIR)
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    print("align module is running in GAN!")
    # for img_name in os.listdir(RAW_IMAGES_DIR):
    #     raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
    raw_img_path = os.path.join(RAW_IMAGES_DIR, raw_filename)
    print("raw_img_path = " + raw_img_path)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
        # face_img_name = '%s.png' % (os.path.splitext(raw_filename)[0])
        face_img_name = raw_filename
        print("test face img name is " + face_img_name)
        # build the output path from the face image name
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
        print("align module is running in GAN!4")
        image_align(raw_img_path, aligned_face_path, face_landmarks)
    print("align module is finished in GAN!")
def main(args):
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    # landmarks_model_path, RAW_IMAGES_DIR and ALIGNED_IMAGES_DIR are expected to be
    # defined at module level; the commented-out lines show how they were set previously.
    # landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))
    # print('called align_images with args', args)
    # RAW_IMAGES_DIR = args[0]
    # ALIGNED_IMAGES_DIR = args[1]
    # print(RAW_IMAGES_DIR)
    # print(ALIGNED_IMAGES_DIR)
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [x for x in os.listdir(RAW_IMAGES_DIR) if x[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
def pic_cut(path1, path2):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    # get_landmarks() yields one landmark list per detected face; align the first face only
    for face_landmarks in landmarks_detector.get_landmarks(path1):
        image_align(path1, path2, face_landmarks)
        break
def align(self, raw_img_dir, aligned_img_dir):
    RAW_IMAGES_DIR = raw_img_dir
    ALIGNED_IMAGES_DIR = aligned_img_dir
    landmarks_detector = LandmarksDetector(self.landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
def processimage(img_name, paths=[sys.argv[1], sys.argv[2]]):
    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    landmarks_model_path = 'C:\\Users\\spiorf\\.keras\\temp\\shape_predictor_68_face_landmarks.dat'
    # unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2', LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    raw_img_path = os.path.join(paths[0], img_name)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
        face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
        print(face_img_name)
        aligned_face_path = os.path.join(paths[1], face_img_name)
        image_align(raw_img_path, aligned_face_path, face_landmarks)
        print(face_img_name + " ok")
    return img_name
def align(self):
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = self.raw_dir
    ALIGNED_IMAGES_DIR = self.src_dir
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [f for f in os.listdir(RAW_IMAGES_DIR) if f[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
def extract(img_name):
    try:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        n = 0
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
            n += 1
        print("processed " + img_name + " produced " + str(n) + " aligned images from it")
    except Exception as e:
        print(img_name + " failed, too bad")
        print(e)
def align_images(raw_images_dir: str, aligned_images_dir: str):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [f for f in os.listdir(raw_images_dir) if f[0] not in '._']:
        raw_img_path = os.path.join(raw_images_dir, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(aligned_images_dir, face_img_name)
            os.makedirs(aligned_images_dir, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
def align_images(raw_dir, aligned_dir):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = raw_dir
    ALIGNED_IMAGES_DIR = aligned_dir
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
def alignfaces():
    u64rawfaces = request.args.get('u64rawfaces')
    if len(u64rawfaces) > 16 * 1024 * 1024:
        abort(413)
    retval = None
    with tempfile.NamedTemporaryFile(dir=".") as raw_input:
        PIL.Image.open(io.BytesIO(base64.urlsafe_b64decode(u64rawfaces))).convert("RGB").save(
            raw_input.name, 'jpeg', quality=90)
        face_landmarks = [x for x in landmarks_detector.get_landmarks(raw_input.name)]
        faces = [io.BytesIO() for f in face_landmarks]
        for i, facepng in enumerate(faces):
            image_align(raw_input.name, facepng, face_landmarks[i])
            facepng.seek(0)
            webp = io.BytesIO()
            PIL.Image.open(facepng).save(webp, "webp")
            webp.seek(0)
            faces[i] = webp.getvalue()
        faces.sort(key=len, reverse=True)
        retval = b"\n".join([base64.urlsafe_b64encode(f) for f in faces])
    return retval
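# Hedged client sketch for the alignfaces() handler above. The service URL and route
# ('http://localhost:5000/alignfaces') are assumptions; only the 'u64rawfaces' query
# parameter and the newline-separated urlsafe-base64 WebP response come from the handler.
import base64
import requests  # third-party HTTP client, assumed available


def align_remote(image_path, url='http://localhost:5000/alignfaces'):
    with open(image_path, 'rb') as f:
        payload = base64.urlsafe_b64encode(f.read()).decode('ascii')
    resp = requests.get(url, params={'u64rawfaces': payload})
    resp.raise_for_status()
    # Each response line is one aligned face as urlsafe-base64 WebP, largest first.
    return [base64.urlsafe_b64decode(line) for line in resp.content.split(b'\n') if line]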
def align2(img):
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    # `img` is the path to a single raw image; derive the output name from it
    raw_img_path = img
    img_name = os.path.basename(img)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(img), start=1):
        face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
        os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
        image_align(raw_img_path, aligned_face_path, face_landmarks)
def align_images(img_name, landmarks_detector):
    print('Aligning %s ...' % img_name)
    try:
        aligned_face_path = None
        print('Getting landmarks...')
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
        if os.path.isfile(fn):
            return None
        print(f'raw_img_path: {raw_img_path}')
        landmarks = list(landmarks_detector.get_landmarks(raw_img_path))
        if len(landmarks) == 0:
            print("No landmarks detected..... \nExiting.....")
            return None
        for i, face_landmarks in enumerate(landmarks, start=1):
            print("me")
            try:
                face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                image_align(raw_img_path, aligned_face_path, face_landmarks,
                            output_size=args['model_res'], x_scale=1, y_scale=1,
                            em_scale=0.1, alpha=False)
                print('Wrote result %s' % aligned_face_path)
            except:
                print("Exception in face alignment!")
        print(f'aligned_face_path: {os.listdir(ALIGNED_IMAGES_DIR)}')
        print(f'raw_face_path: {os.listdir(RAW_IMAGES_DIR)}')
        return aligned_face_path
    except Exception as inst:
        print(type(inst))  # the exception instance
        print(inst.args)   # arguments stored in .args
        print(inst)
        print("Exception in landmark detection!")
def main():
    '''
    Detect face(s), extract landmarks, and align face(s) from the given raw image(s)
    parameters: None
    output: Aligned face image at 1024x1024 resolution
    '''
    parser = argparse.ArgumentParser(
        description='Extract and align faces from the raw images',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('src_dir', help='Source directory with raw images')
    parser.add_argument(
        'dest_dir', help='Destination directory for storing aligned images')
    args, other_args = parser.parse_known_args()

    # Paths to the saved 68-facial-landmark model, the raw images directory,
    # and the aligned images directory
    landmarks_model_path = '../cache/shape_predictor_68_face_landmarks.dat'
    RAW_IMAGES_DIR = os.path.join('../', args.src_dir)
    ALIGNED_IMAGES_DIR = os.path.join('../', args.dest_dir)
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)

    # Try detecting faces and extracting landmarks; report an error otherwise
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            print('Getting landmarks...')
            # get_landmarks() yields one landmark list per detected face; use the first
            faces = list(landmarks_detector.get_landmarks(raw_img_path))
            if not faces:
                raise ValueError('No faces detected in %s' % img_name)
            face_landmarks = faces[0]
            try:
                print('Starting face alignment...')
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, img_name)
                image_align(raw_img_path, aligned_face_path, face_landmarks)
                print('Wrote result at %s' % aligned_face_path)
            except:
                print("\n\tException in face alignment!\n")
        except ValueError:
            print("\n\tEither failed to detect landmarks or NO faces present!\n")
def run(LANDMARKS_MODEL_URL):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = "./pytorch_stylegan_encoder/raw_images"
    ALIGNED_IMAGES_DIR = "./pytorch_stylegan_encoder/aligned_images"
    faces = []
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            faces.append(face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
    return faces

# if __name__ == "__main__":
def align():
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    print("align!")
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = "../media/photos/"
    ALIGNED_IMAGES_DIR = "../media/aligned_photos/"
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
import bz2

from tensorflow.keras.utils import get_file
from ffhq_dataset.landmarks_detector import LandmarksDetector
from ffhq_dataset.face_alignment import image_align

LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'


def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path


if __name__ == "__main__":
    print('Start')
    # landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(
        '/Users/michaelko/Code/coreml-playground/lib/shape_predictor_68_face_landmarks.dat')
    face_landmarks = landmarks_detector.get_landmarks('/Users/michaelko/Downloads/miya.png')
    for i, face_landmarks in enumerate(
            landmarks_detector.get_landmarks('/Users/michaelko/Downloads/miya.png'), start=1):
        image_align('/Users/michaelko/Downloads/miya.png',
                    '/Users/michaelko/Downloads/miya1.png', face_landmarks)
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path


if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [f for f in os.listdir(RAW_IMAGES_DIR) if f[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            img = image_align(raw_img_path, face_landmarks)
            img.save(aligned_face_path)
    return dst_path


if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [f for f in os.listdir(RAW_IMAGES_DIR) if f[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks, output_size=1024)
from PIL import Image
import numpy as np

LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'


def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path


if __name__ == '__main__':
    img_path = sys.argv[1]
    file_path = os.path.splitext(img_path)[0]
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    face_img_path = file_path + '_face.png'
    for i, face_landmarks in enumerate(
            landmarks_detector.get_landmarks(img_path), start=1):
        image_align(img_path, face_img_path, face_landmarks)
def align(args, other_args, path_A, path_B):
    start_ = time.time()
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = [path_A, path_B]
    ALIGNED_IMAGES_DIR = args.aligned_dir
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in list(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            if args.find_faces:
                print('Getting landmarks...')
                landmarks = list(landmarks_detector.get_landmarks(img_name))
                assert len(landmarks) == 1
            else:
                landmarks = [[(89, 230), (90, 258), (91, 287), (93, 317), (104, 344), (122, 368),
                              (144, 387), (171, 406), (203, 414), (236, 409), (262, 392), (284, 370),
                              (302, 345), (310, 317), (312, 289), (312, 260), (311, 233), (114, 214),
                              (129, 199), (149, 192), (170, 193), (190, 202), (228, 201), (248, 192),
                              (268, 190), (287, 196), (299, 210), (210, 222), (211, 241), (212, 260),
                              (212, 280), (184, 290), (197, 294), (211, 300), (225, 294), (238, 288),
                              (144, 227), (155, 223), (167, 222), (179, 228), (167, 232), (154, 231),
                              (241, 227), (251, 222), (264, 221), (275, 226), (265, 230), (252, 230),
                              (153, 323), (174, 321), (194, 320), (211, 323), (226, 319), (243, 320),
                              (261, 323), (244, 344), (227, 350), (211, 352), (194, 350), (173, 343),
                              (159, 324), (195, 326), (211, 327), (226, 326), (255, 324), (226, 340),
                              (211, 342), (194, 341)]]
            for i, face_landmarks in enumerate(landmarks, start=1):
                try:
                    face_img_name = '%s.png' % (os.path.basename(
                        os.path.splitext(img_name)[0]))
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    print('Starting face alignment...')
                    image_align(img_name, aligned_face_path, face_landmarks,
                                output_size=args.output_size, x_scale=args.x_scale,
                                y_scale=args.y_scale, em_scale=args.em_scale,
                                alpha=args.use_alpha, find_faces=args.find_faces)
                    print('Wrote result %s' % aligned_face_path)
                except Exception as e:
                    print("Exception in face alignment!", str(e))
        except Exception as e:
            print("Exception in landmark detection!", str(e))
    end_ = time.time()
    logging.info(
        'The time it takes for the face recognition clipping : %.2f s' % (end_ - start_))
def main():
    parser = argparse.ArgumentParser(description='Project real-world images into StyleGAN2 latent space')
    parser.add_argument('src_dir', help='Directory with aligned images for projection')
    parser.add_argument('dst_dir', help='Output directory')
    parser.add_argument('--tmp-dir', default='.stylegan2-tmp', help='Temporary directory for tfrecords and video frames')
    parser.add_argument('--network-pkl', default='http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl', help='StyleGAN2 network pickle filename')
    parser.add_argument('--vgg16-pkl', default='vgg16_zhang_perceptual.pkl', help='VGG16 network pickle filename')
    parser.add_argument('--num-steps', type=int, default=1000, help='Number of optimization steps')
    parser.add_argument('--initial-learning-rate', type=float, default=0.1, help='Initial learning rate')
    parser.add_argument('--initial-noise-factor', type=float, default=0.05, help='Initial noise factor')
    parser.add_argument('--verbose', type=bool, default=False, help='Verbose output')
    parser.add_argument('--video', type=bool, default=False, help='Render video of the optimization process')
    parser.add_argument('--video-mode', type=int, default=1, help='Video mode: 1 for optimization only, 2 for source + optimization')
    parser.add_argument('--video-size', type=int, default=1024, help='Video size (height in px)')
    parser.add_argument('--video-fps', type=int, default=25, help='Video framerate')
    parser.add_argument('--video-codec', default='libx264', help='Video codec')
    parser.add_argument('--video-bitrate', default='5M', help='Video bitrate')
    args = parser.parse_args()

    print('1. Align images')
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = 'raw'
    ALIGNED_IMAGES_DIR = 'aligned'
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [x for x in os.listdir(RAW_IMAGES_DIR) if x[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)

    print('Loading networks from "%s"...' % args.network_pkl)
    print('2. Project images')
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl=args.vgg16_pkl,
        num_steps=args.num_steps,
        initial_learning_rate=args.initial_learning_rate,
        initial_noise_factor=args.initial_noise_factor,
        verbose=args.verbose
    )
    proj.set_network(Gs)

    src_files = sorted([os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir) if f[0] not in '._'])
    for src_file in src_files:
        project_image(proj, src_file, args.dst_dir, args.tmp_dir, video=args.video)
        if args.video:
            render_video(
                src_file, args.dst_dir, args.tmp_dir, args.num_steps, args.video_mode,
                args.video_size, args.video_fps, args.video_codec, args.video_bitrate
            )
        shutil.rmtree(args.tmp_dir)

    latent_dir = Path("generated")
    latents = latent_dir.glob("*.npy")

    print('3. Blend networks')
    # NOTE: blended_url must be assigned one of the network pickles below before this runs.
    # blended_url = 'ffhq-cartoon-blended.pkl'
    # blended_url = "AlfredENeuman24_ADA-VersatileFaces36_ADA_v2-blended-64.pkl"
    # ffhq_url = "stylegan2-ffhq-config-f.pkl"
    _, _, Gs_blended = pretrained_networks.load_networks(blended_url)
    # _, _, Gs = pretrained_networks.load_networks(ffhq_url)
    for latent_file in latents:
        print("latent_file:", latent_file)
        latent = np.load(latent_file)
        latent = np.expand_dims(latent, axis=0)
        synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False),
                                minibatch_size=8)
        images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
        file_name = latent_file.parent / (f"{latent_file.stem}-toon.jpg")
        Image.fromarray(images.transpose((0, 2, 3, 1))[0], 'RGB').save(file_name)
        img = PIL.Image.open(file_name)
        imgcat(img)
# LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'


def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path


if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    landmarks_model_path = unpack_bz2('./shape_predictor_68_face_landmarks.dat.bz2')
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                img.save(aligned_face_path, 'PNG')
                print('Wrote result %s' % aligned_face_path)
            else:
                for i, face_landmarks in enumerate(ld, start=1):
                    try:
                        print('Starting face alignment...')
                        face_img_name = '%s_%02d.png' % (
                            os.path.splitext(img_name)[0], i)
                        aligned_face_path = os.path.join(
                            ALIGNED_IMAGES_DIR, face_img_name)
                        image_align(raw_img_path, aligned_face_path, face_landmarks,
                                    output_size=args.output_size, x_scale=args.x_scale,
                                    y_scale=args.y_scale, em_scale=args.em_scale,
                                    alpha=args.use_alpha)
                        print('Wrote result %s' % aligned_face_path)
                        break  # only use first face found!
                    except:
                        print("Exception in face alignment!")
        except:
            print("Exception in landmark detection!")

    # release memory
    del landmarks_detector
    gc.collect()

    ref_images = None
    if args.use_aligned == 1:
def main(raw_dir, aligned_dir):
    """
    Extracts and aligns all faces from images using DLib and a function from
    original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    parser = argparse.ArgumentParser(
        description='Align faces from input images',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--output_size', default=1024,
                        help='The dimension of images for input to the model', type=int)
    parser.add_argument('--x_scale', default=1, help='Scaling factor for x dimension', type=float)
    parser.add_argument('--y_scale', default=1, help='Scaling factor for y dimension', type=float)
    parser.add_argument('--em_scale', default=0.1, help='Scaling factor for eye-mouth distance', type=float)
    parser.add_argument('--use_alpha', default=False, help='Add an alpha channel for masking', type=bool)
    args, other_args = parser.parse_known_args()

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = raw_dir
    ALIGNED_IMAGES_DIR = aligned_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
            if os.path.isfile(fn):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks,
                                output_size=args.output_size, x_scale=args.x_scale,
                                y_scale=args.y_scale, em_scale=args.em_scale,
                                alpha=args.use_alpha)
                    print('Wrote result %s' % aligned_face_path)
                except:
                    print("Exception in face alignment!")
        except:
            print("Exception in landmark detection!")
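# Hedged usage sketch for the main() variant above: the raw and aligned directories are
# passed as function arguments, while the alignment options (--output_size, --x_scale,
# --y_scale, --em_scale, --use_alpha) are read from the command line through
# parse_known_args(). The directory names below are assumptions.
if __name__ == '__main__':
    main('raw_images', 'aligned_images')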