Example #1
def align_image(filename):
    import os
    import sys
    import bz2
    from keras.utils import get_file
    from ffhq_dataset.face_alignment import image_align
    from ffhq_dataset.landmarks_detector import LandmarksDetector

    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'

    def unpack_bz2(src_path):
        data = bz2.BZ2File(src_path).read()
        dst_path = src_path[:-4]
        with open(dst_path, 'wb') as fp:
            fp.write(data)
        return dst_path

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for i, face_landmarks in enumerate(
            landmarks_detector.get_landmarks(filename), start=1):
        face_img_name = '%s.png' % os.path.splitext(filename)[0]
        aligned_face_path = face_img_name
        image_align(filename, aligned_face_path, face_landmarks)
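
A minimal usage sketch for the helper above, assuming the ffhq_dataset package from the StyleGAN encoder repository is importable and that 'portrait.jpg' is a hypothetical local photo:

# Writes the aligned result next to the source image as 'portrait.png'.
align_image('portrait.jpg')
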
Example #2
def main(arg1, arg2):
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    #landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                           LANDMARKS_MODEL_URL, cache_subdir='temp'))

    landmarks_model_path = unpack_bz2(LANDMARKS_LOCAL_FILE)

    RAW_IMAGES_DIR = arg1
    ALIGNED_IMAGES_DIR = arg2

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        if not img_name.startswith('.'):
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            print(raw_img_path)
            for i, face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(raw_img_path), start=1):
                face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0],
                                                 i)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR,
                                                 face_img_name)
                image_align(raw_img_path, aligned_face_path, face_landmarks)
                print(aligned_face_path)
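
Example #2 relies on module-level names that the snippet itself does not define (os, unpack_bz2, LANDMARKS_LOCAL_FILE, LandmarksDetector, image_align). A sketch of the assumed preamble, modeled on Examples #1, #19 and #24; the LANDMARKS_LOCAL_FILE path is a placeholder for a locally downloaded copy of the dlib model:

import os
import bz2
from keras.utils import get_file
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector

LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
# Placeholder: a pre-downloaded copy of the compressed landmarks model.
LANDMARKS_LOCAL_FILE = './shape_predictor_68_face_landmarks.dat.bz2'

def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path
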
Example #3
def align_module(R_images, A_images, raw_filename):
    # R_images is the directory (e.g. "static/image")
    # raw_filename is the name of the image file

    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    #print(K.tensorflow_backend._get_available_gpus())

    landmarks_model_path="./shape_predictor_68_face_landmarks.dat"
    RAW_IMAGES_DIR = './static/' + R_images
    ALIGNED_IMAGES_DIR = './static/' + A_images

    print(RAW_IMAGES_DIR)
    print(ALIGNED_IMAGES_DIR)
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    print("align module is running in GAN!")
    # for img_name in os.listdir(RAW_IMAGES_DIR):
    #     raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
    raw_img_path = os.path.join(RAW_IMAGES_DIR, raw_filename)
    print("raw_img_path = "+raw_img_path)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
        #face_img_name = '%s.png' % (os.path.splitext(raw_filename)[0])
        face_img_name = raw_filename
        print("test face img name is " + face_img_name)
        # build the destination path from the face image name
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)

        print("align module is running in GAN!4")

        image_align(raw_img_path, aligned_face_path, face_landmarks)
    print("align module is finished in GAN!")
Example #4
def main(args):
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    #     landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                                LANDMARKS_MODEL_URL, cache_subdir='temp'))
    #     print('called align_images with args', args)
    #     RAW_IMAGES_DIR = args[0]
    #     ALIGNED_IMAGES_DIR = args[1]
    #     print(RAW_IMAGES_DIR)
    #     print(ALIGNED_IMAGES_DIR)

    # landmarks_model_path, RAW_IMAGES_DIR and ALIGNED_IMAGES_DIR are assumed to
    # be defined at module level (e.g. by the commented-out lines above).
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [
            x for x in os.listdir(RAW_IMAGES_DIR) if x[0] not in '._'
    ]:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
Example #5
def pic_cut(path1, path2):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    # get_landmarks() yields one landmark set per detected face; align each
    # (every face is written to the same output path, so the last one wins).
    for face_landmarks in landmarks_detector.get_landmarks(path1):
        image_align(path1, path2, face_landmarks)
    def align(self, raw_img_dir, aligned_img_dir):
        RAW_IMAGES_DIR = raw_img_dir
        ALIGNED_IMAGES_DIR = aligned_img_dir

        landmarks_detector = LandmarksDetector(self.landmarks_model_path)
        for img_name in os.listdir(RAW_IMAGES_DIR):
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
                face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)

                image_align(raw_img_path, aligned_face_path, face_landmarks)
def processimage(img_name,paths = [sys.argv[1],sys.argv[2]] ):
	LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
	landmarks_model_path = 'C:\\Users\\spiorf\\.keras\\temp\\shape_predictor_68_face_landmarks.dat' #unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',LANDMARKS_MODEL_URL, cache_subdir='temp'))
	landmarks_detector = LandmarksDetector(landmarks_model_path)
	raw_img_path = os.path.join(paths[0], img_name)
	
	for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
		face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
		print(face_img_name)
		aligned_face_path = os.path.join(paths[1], face_img_name)
		image_align(raw_img_path, aligned_face_path, face_landmarks)
		print(face_img_name + "ok")		
	return img_name
Example #8
  def align(self):
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = self.raw_dir
    ALIGNED_IMAGES_DIR = self.src_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [f for f in os.listdir(RAW_IMAGES_DIR) if f[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
Example #9
def init_dependencies():
    tfl.init_tf()
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='cache'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    ff_model = None  # avoid a NameError below when no ResNet checkpoint is available
    if os.path.exists(args['load_resnet']):
        print("Loading ResNet Model:")
        ff_model = load_model(args['load_resnet'])

    with open(args['model_dir'], 'rb') as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)

    generator = Generator(Gs_network,
                          args['batch_size'],
                          clipping_threshold=args['clipping_threshold'],
                          tiled_dlatent=args['tile_dlatents'],
                          model_res=args['model_res'],
                          randomize_noise=args['randomize_noise'])

    perceptual_model = PerceptualModel(args,
                                       perc_model=None,
                                       batch_size=args['batch_size'])
    perceptual_model.build_perceptual_model(generator, discriminator_network)
    return landmarks_detector, ff_model, generator, perceptual_model
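
For context, init_dependencies reads its configuration from a module-level args dict; a hedged sketch of the keys it expects (values and file names below are illustrative, not taken from the original):

args = {
    'load_resnet': 'data/finetuned_resnet.h5',              # optional encoder weights; skipped if missing
    'model_dir': 'karras2019stylegan-ffhq-1024x1024.pkl',   # pickled (G, D, Gs) StyleGAN networks
    'batch_size': 1,
    'clipping_threshold': 2.0,
    'tile_dlatents': False,
    'model_res': 1024,
    'randomize_noise': False,
}
landmarks_detector, ff_model, generator, perceptual_model = init_dependencies()
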
Example #10
def align_images(raw_images_dir: str, aligned_images_dir: str):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [
            f for f in os.listdir(raw_images_dir) if f[0] not in '._'
    ]:
        raw_img_path = os.path.join(raw_images_dir, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(aligned_images_dir, face_img_name)
            os.makedirs(aligned_images_dir, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
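
A typical call for the function above, assuming a preamble like the one sketched after Example #2 (the directory names are placeholders):

# Writes <name>_01.png, <name>_02.png, ... into aligned_images, one file per detected face.
align_images('raw_images', 'aligned_images')
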
Example #11
def align_images(raw_dir, aligned_dir):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = raw_dir
    ALIGNED_IMAGES_DIR = aligned_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)

            image_align(raw_img_path, aligned_face_path, face_landmarks)
Example #12
def main():
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    parser = argparse.ArgumentParser(
        description=
        'Find latent representation of reference images using perceptual loss')
    parser.add_argument('src_file', help='Image to replace faces on')
    parser.add_argument('face_path',
                        help='Location of the faces to be replaced')
    parser.add_argument('mask_path', help='Location of the masks for the face')
    parser.add_argument(
        'face_landmarks_path',
        help='File locations of the Numpy Arrays of the face locations')
    parser.add_argument('dst_file', help='Output file location')

    args, other_args = parser.parse_known_args()

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    #landmarks_model_path = unpack_bz2('models/shape_predictor_68_face_landmarks.dat.bz2')
    landmarks_detector = LandmarksDetector(landmarks_model_path)

    #Copy file to new location to work on it
    copyfile(args.src_file, args.dst_file)

    #Replace every face that has a mask
    for f in os.listdir(args.face_path):
        if f.endswith(".png") and f.startswith(
                os.path.splitext(os.path.basename(args.src_file))[0]):
            filename = os.path.splitext(f)[0]
            for i, generated_face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(args.face_path +
                                                     filename + ".png"),
                    start=1):
                face_replace(args.dst_file, args.face_path + filename + ".png",
                             args.mask_path + filename + ".png",
                             args.face_landmarks_path + filename + ".npy",
                             generated_face_landmarks, args.dst_file)
    print("Done!")
Example #13
def align2(img):
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(img),
                                       start=1):
        # 'img' is the path to the raw image; derive the output name from it.
        face_img_name = '%s_%02d.png' % (os.path.splitext(os.path.basename(img))[0], i)
        aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
        os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
        image_align(img, aligned_face_path, face_landmarks)
Example #14
def main():
    '''
    Detect face(s), extract landmarks, and align face(s) from the given raw image(s)
    parameters: None
    output: Aligned face image at 1024x1024 resolution
    '''
    parser = argparse.ArgumentParser(
        description='Extract and align faces from the raw images',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('src_dir', help='Source directory with raw images')
    parser.add_argument(
        'dest_dir', help='Destination directory for storing aligned images')

    args, other_args = parser.parse_known_args()

    # Specify paths to the saved 68-Facial Landmark model, raw images directory, and aligned images directory
    landmarks_model_path = '../cache/shape_predictor_68_face_landmarks.dat'
    RAW_IMAGES_DIR = os.path.join('../', args.src_dir)
    ALIGNED_IMAGES_DIR = os.path.join('../', args.dest_dir)
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)

    # Try detecting faces and extracting landmarks; throw an error, otherwise
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            print('Getting landmarks...')
            # get_landmarks() yields one landmark set per detected face,
            # so iterate rather than passing the generator to image_align().
            for face_landmarks in landmarks_detector.get_landmarks(raw_img_path):
                try:
                    print('Starting face alignment...')
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks)
                    print('Wrote result at %s' % aligned_face_path)
                except:
                    print("\n\tException in face alignment!\n")
        except ValueError:
            print(
                "\n\tEither failed to detect landmarks or NO faces present!\n")
def run(LANDMARKS_MODEL_URL):
    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = "./pytorch_stylegan_encoder/raw_images"
    ALIGNED_IMAGES_DIR = "./pytorch_stylegan_encoder/aligned_images"

    faces = []
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):

            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            faces.append(face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)
    return faces

Example #16
def align():
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    print("align!")

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = "../media/photos/"
    ALIGNED_IMAGES_DIR = "../media/aligned_photos/"

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)

            image_align(raw_img_path, aligned_face_path, face_landmarks)
Example #17
def align(images):
    RAW_IMAGES_DIR = './raw_images'
    ALIGNED_IMAGES_DIR = './aligned_images'

    # landmarks_model_path is assumed to be defined at module level.
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
            if os.path.isfile(fn):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks, output_size=1024, x_scale=1, y_scale=1, em_scale=0.1, alpha=False)
                    print('Wrote result %s' % aligned_face_path)
                except:
                    print("Exception in face alignment!")
        except:
            print("Exception in landmark detection!")
Example #18
                        type=float)
    parser.add_argument('--use_alpha',
                        default=False,
                        help='Add an alpha channel for masking',
                        type=bool)

    args, other_args = parser.parse_known_args()

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = args.raw_dir
    ALIGNED_IMAGES_DIR = args.aligned_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (
                os.path.splitext(img_name)[0], 1)
            if os.path.isfile(fn):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (
                        os.path.splitext(img_name)[0], i)
Example #19
import bz2
from tensorflow.keras.utils import get_file
from ffhq_dataset.landmarks_detector import LandmarksDetector
from ffhq_dataset.face_alignment import image_align

LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'

def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path

if __name__ == "__main__":
    print('Start')

    # landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))

    landmarks_detector = LandmarksDetector('/Users/michaelko/Code/coreml-playground/lib/shape_predictor_68_face_landmarks.dat')

    # get_landmarks() is a generator; the loop below consumes it directly,
    # so no separate eager call is needed.
    for i, face_landmarks in enumerate(landmarks_detector.get_landmarks('/Users/michaelko/Downloads/miya.png'), start=1):
        image_align('/Users/michaelko/Downloads/miya.png', '/Users/michaelko/Downloads/miya1.png', face_landmarks)
Example #20
    # landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
    #                                            LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]

    log_data = False
    if len(sys.argv) > 3:
        LOG_FILE = sys.argv[3]
        log_data = True

    if log_data:
        logging.basicConfig(level=logging.INFO, filename=LOG_FILE)

    # landmarks_detector = LandmarksDetector(landmarks_model_path)
    landmarks_detector = LandmarksDetector(
        '/Users/michaelko/Code/backup/lib/shape_predictor_68_face_landmarks.dat'
    )
    cnt = -1
    file_list = [x for x in os.listdir(RAW_IMAGES_DIR) if x[0] not in '._']
    file_list.sort()
    if log_data:
        logging.info(' There are ' + str(len(file_list)) + ' files')
    for img_name in file_list:
        cnt = cnt + 1
        res_string = "Processing image num " + str(cnt) + " " + img_name
        if log_data:
            logging.info(res_string)
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        num_landmarks = 0
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            # Standard per-face alignment body, as in the other examples in this collection.
            num_landmarks += 1
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)

if __name__ == "__main__":


	with concurrent.futures.ProcessPoolExecutor() as executor:
		# Get a list of files to process
		paths = ['','']
		paths[0] = sys.argv[1]
		paths[1] = sys.argv[2]
		LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
		landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
												   LANDMARKS_MODEL_URL, cache_subdir='temp'))
		print(landmarks_model_path)
		RAW_IMAGES_DIR = paths[0]
		ALIGNED_IMAGES_DIR = paths[1]
		landmarks_detector = LandmarksDetector(landmarks_model_path)
		image_files = os.listdir(paths[0])
		# Process the list of files, but split the work across the process pool to use all CPUs.
		for image_file in zip(image_files, executor.map(processimage, image_files)):
			print(f"{image_file}")
Example #22
def align(args, other_args, path_A, path_B):
    start_ = time.time()
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = [path_A, path_B]
    ALIGNED_IMAGES_DIR = args.aligned_dir
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in list(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            if args.find_faces:
                print('Getting landmarks...')
                landmarks = list(landmarks_detector.get_landmarks(img_name))
                assert len(landmarks) == 1
            else:
                landmarks = [[(89, 230), (90, 258), (91, 287), (93, 317),
                              (104, 344), (122, 368), (144, 387), (171, 406),
                              (203, 414), (236, 409), (262, 392), (284, 370),
                              (302, 345), (310, 317), (312, 289), (312, 260),
                              (311, 233), (114, 214), (129, 199), (149, 192),
                              (170, 193), (190, 202), (228, 201), (248, 192),
                              (268, 190), (287, 196), (299, 210), (210, 222),
                              (211, 241), (212, 260), (212, 280), (184, 290),
                              (197, 294), (211, 300), (225, 294), (238, 288),
                              (144, 227), (155, 223), (167, 222), (179, 228),
                              (167, 232), (154, 231), (241, 227), (251, 222),
                              (264, 221), (275, 226), (265, 230), (252, 230),
                              (153, 323), (174, 321), (194, 320), (211, 323),
                              (226, 319), (243, 320), (261, 323), (244, 344),
                              (227, 350), (211, 352), (194, 350), (173, 343),
                              (159, 324), (195, 326), (211, 327), (226, 326),
                              (255, 324), (226, 340), (211, 342), (194, 341)]]
            for i, face_landmarks in enumerate(landmarks, start=1):
                try:
                    face_img_name = '%s.png' % (os.path.basename(
                        os.path.splitext(img_name)[0]))
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR,
                                                     face_img_name)
                    print('Starting face alignment...')
                    image_align(img_name,
                                aligned_face_path,
                                face_landmarks,
                                output_size=args.output_size,
                                x_scale=args.x_scale,
                                y_scale=args.y_scale,
                                em_scale=args.em_scale,
                                alpha=args.use_alpha,
                                find_faces=args.find_faces)
                    print('Wrote result %s' % aligned_face_path)
                except Exception as e:
                    print("Exception in face alignment!", str(e))
        except Exception as e:
            print("Exception in landmark detection!", str(e))

    end_ = time.time()
    logging.info(
        'Face detection and alignment took %.2f s' % (end_ - start_))
Example #23
def main():
    parser = argparse.ArgumentParser(description='Project real-world images into StyleGAN2 latent space')
    parser.add_argument('src_dir', help='Directory with aligned images for projection')
    parser.add_argument('dst_dir', help='Output directory')
    parser.add_argument('--tmp-dir', default='.stylegan2-tmp', help='Temporary directory for tfrecords and video frames')
    parser.add_argument('--network-pkl', default='http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl', help='StyleGAN2 network pickle filename')
    parser.add_argument('--vgg16-pkl', default='vgg16_zhang_perceptual.pkl', help='VGG16 network pickle filename')
    parser.add_argument('--num-steps', type=int, default=1000, help='Number of optimization steps')
    parser.add_argument('--initial-learning-rate', type=float, default=0.1, help='Initial learning rate')
    parser.add_argument('--initial-noise-factor', type=float, default=0.05, help='Initial noise factor')
    parser.add_argument('--verbose', type=bool, default=False, help='Verbose output')
    parser.add_argument('--video', type=bool, default=False, help='Render video of the optimization process')
    parser.add_argument('--video-mode', type=int, default=1, help='Video mode: 1 for optimization only, 2 for source + optimization')
    parser.add_argument('--video-size', type=int, default=1024, help='Video size (height in px)')
    parser.add_argument('--video-fps', type=int, default=25, help='Video framerate')
    parser.add_argument('--video-codec', default='libx264', help='Video codec')
    parser.add_argument('--video-bitrate', default='5M', help='Video bitrate')
    args = parser.parse_args()


    print('1. Align images')
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = 'raw'
    ALIGNED_IMAGES_DIR = 'aligned'

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in [x for x in os.listdir(RAW_IMAGES_DIR) if x[0] not in '._']:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
            os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)
            image_align(raw_img_path, aligned_face_path, face_landmarks)


    print('Loading networks from "%s"...' % args.network_pkl)
    print('2. Project images')
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl             = args.vgg16_pkl,
        num_steps             = args.num_steps,
        initial_learning_rate = args.initial_learning_rate,
        initial_noise_factor  = args.initial_noise_factor,
        verbose               = args.verbose
    )
    proj.set_network(Gs)

    src_files = sorted([os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir) if f[0] not in '._'])
    for src_file in src_files:
        project_image(proj, src_file, args.dst_dir, args.tmp_dir, video=args.video)
        if args.video:
            render_video(
                src_file, args.dst_dir, args.tmp_dir, args.num_steps, args.video_mode,
                args.video_size, args.video_fps, args.video_codec, args.video_bitrate
            )
        shutil.rmtree(args.tmp_dir)
    latent_dir = Path("generated")
    latents = latent_dir.glob("*.npy")

    print('3. Blend networks')

    # blended_url = 'ffhq-cartoon-blended.pkl'
    blended_url = "AlfredENeuman24_ADA-VersatileFaces36_ADA_v2-blended-64.pkl"
    # ffhq_url = "stylegan2-ffhq-config-f.pkl"

    _, _, Gs_blended = pretrained_networks.load_networks(blended_url)
    #_, _, Gs = pretrained_networks.load_networks(ffhq_url)

    for latent_file in latents:
        print("latent_file:",latent_file)
        latent = np.load(latent_file)
        latent = np.expand_dims(latent,axis=0)
        synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
        images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
        file_name = latent_file.parent / (f"{latent_file.stem}-toon.jpg")
        Image.fromarray(images.transpose((0,2,3,1))[0], 'RGB').save(file_name)
        img = PIL.Image.open(file_name)
        imgcat(img)
Example #24
from ffhq_dataset.landmarks_detector import LandmarksDetector

#LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'


def unpack_bz2(src_path):
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path


if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """

    landmarks_model_path = unpack_bz2('./shape_predictor_68_face_landmarks.dat.bz2')
    RAW_IMAGES_DIR = sys.argv[1]
    ALIGNED_IMAGES_DIR = sys.argv[2]

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)

            image_align(raw_img_path, aligned_face_path, face_landmarks)
Example #25
tflib.init_tf()
generator_network, discriminator_network, Gs_network = pretrained_networks.load_networks(model_url)
generator = Generator(Gs_network, 1, randomize_noise=False)

app = Flask(__name__)

def u64latents_to_latents(u64latents):
    # A (1, 18, 512) float32 latent is 1*18*512*4 bytes; urlsafe base64 turns
    # every 3 bytes into 4 characters, which gives the expected string length.
    if len(u64latents) != 1 * 18 * 512 * 4 * 4 / 3:
        return None
    return np.ndarray((1, 18, 512), dtype=np.float32, buffer=base64.urlsafe_b64decode(u64latents))

def dict_as_namedtuple(d, name=''):
    return namedtuple(name, [name for name in d])(**d)

landmarks_path = "shape_predictor_68_face_landmarks.dat"
landmarks_detector = LandmarksDetector(landmarks_path)

@app.route('/healthz', methods=['GET'])
@cross_origin()
def healthz():
    return subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE).stdout.decode('utf-8')

@app.route('/alignfaces', methods=['GET'])
@cross_origin()
def alignfaces():
    u64rawfaces = request.args.get('u64rawfaces')
    if len(u64rawfaces) > 16 * 1024 * 1024:
        abort(413)
    retval = None
    with tempfile.NamedTemporaryFile(dir=".") as raw_input:
        PIL.Image.open(io.BytesIO(base64.urlsafe_b64decode(u64rawfaces))).convert("RGB").save(raw_input.name, 'jpeg', quality=90)
    os.mkdir(args.aligned_dir)

# initialize TensorFlow
print('Initializing TensorFlow...')
env = EasyDict()  # Environment variables, set by the main program in train.py.
env.TF_CPP_MIN_LOG_LEVEL = '1'  # Print warnings and errors, but disable debug info.
env.CUDA_VISIBLE_DEVICES = args.gpu  # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use. change to '0' if first GPU is better
os.environ.update(env)
tf_config = EasyDict()  # TensorFlow session config, set by tfutil.init_tf().
tf_config[
    'graph_options.place_pruned_graph'] = True  # False (default) = Check that all ops are available on the designated device.
tf_config['gpu_options.allow_growth'] = True
tfutil.init_tf(tf_config)

if args.use_aligned == 1:
    landmarks_detector = LandmarksDetector(args.landmarks_model_path)
    aligned_face_path = None
    ALIGNED_IMAGES_DIR = args.aligned_dir
    for img_name in os.listdir(args.src_dir):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(args.src_dir, img_name)
            fn = face_img_name = '%s_%02d.png' % (
                os.path.splitext(img_name)[0], 1)
            if os.path.isfile(fn):
                continue
            print('Getting landmarks...')
            # Materialize the generator so the face count can be checked.
            ld = list(landmarks_detector.get_landmarks(raw_img_path))
            if len(ld) == 0:
                print(
                    "Cannot get landmarks so use original image as aligned image")
Example #27
def main(raw_dir, aligned_dir):
    """
    Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    parser = argparse.ArgumentParser(
        description='Align faces from input images',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--output_size',
                        default=1024,
                        help='The dimension of images for input to the model',
                        type=int)
    parser.add_argument('--x_scale',
                        default=1,
                        help='Scaling factor for x dimension',
                        type=float)
    parser.add_argument('--y_scale',
                        default=1,
                        help='Scaling factor for y dimension',
                        type=float)
    parser.add_argument('--em_scale',
                        default=0.1,
                        help='Scaling factor for eye-mouth distance',
                        type=float)
    parser.add_argument('--use_alpha',
                        default=False,
                        help='Add an alpha channel for masking',
                        type=bool)

    args, other_args = parser.parse_known_args()

    landmarks_model_path = unpack_bz2(
        get_file('shape_predictor_68_face_landmarks.dat.bz2',
                 LANDMARKS_MODEL_URL,
                 cache_subdir='temp'))
    RAW_IMAGES_DIR = raw_dir
    ALIGNED_IMAGES_DIR = aligned_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (
                os.path.splitext(img_name)[0], 1)
            if os.path.isfile(fn):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(
                    landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (
                        os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR,
                                                     face_img_name)
                    image_align(raw_img_path,
                                aligned_face_path,
                                face_landmarks,
                                output_size=args.output_size,
                                x_scale=args.x_scale,
                                y_scale=args.y_scale,
                                em_scale=args.em_scale,
                                alpha=args.use_alpha)
                    print('Wrote result %s' % aligned_face_path)
                except:
                    print("Exception in face alignment!")
        except:
            print("Exception in landmark detection!")