def main(mode, face_id_model_root, id_features_dir, font_path):
    print('Loading models...')
    det_models = load_detect_faces_models()
    face_id_model = load_face_id_model(model_root=face_id_model_root)
    id_npy = load_id_files(id_features_dir)

    crop_size = 112
    max_size = 1024
    reference = get_reference_facial_points(default_square=True)
    font = ImageFont.FreeTypeFont(font=font_path, size=24)

    print('Starting image processing...')
    if mode == Mode.DEMO:
        demo(det_models=det_models,
             face_id_model=face_id_model,
             reference=reference,
             crop_size=crop_size,
             id_npy=id_npy,
             max_size=max_size,
             font=font)
    elif mode == Mode.FILE:
        process_files(input_dir=STREAM_DIR,
                      output_dir=RESULT_DIR,
                      det_models=det_models,
                      face_id_model=face_id_model,
                      reference=reference,
                      crop_size=crop_size,
                      id_npy=id_npy,
                      max_size=max_size,
                      font=font)
    else:
        raise ValueError('Invalid mode: {}'.format(mode))
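
# A minimal invocation sketch for main(). The Mode enum comes from the code
# above; the model and font paths below are hypothetical stand-ins, not from
# the original:
#
#     main(mode=Mode.DEMO,
#          face_id_model_root='./models/face_id',
#          id_features_dir='./id_features',
#          font_path='./fonts/DejaVuSans.ttf')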
def detect_one_face_align(img):
    crop_size = 112
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    bounding_boxes = []
    landmarks = []
    try:
        bounding_boxes, landmarks = detect_faces(img)
    except Exception as e:
        print(e)

    # If the landmarks cannot be detected, the image is discarded.
    if len(landmarks) == 0:
        return None, bounding_boxes

    # detect_faces returns each face's landmarks as (x1..x5, y1..y5); regroup
    # them into five (x, y) pairs for the first face only.
    facial5point = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
    warped_face = warp_and_crop_face(np.array(img), facial5point, reference,
                                     crop_size=(crop_size, crop_size))
    img_warped = Image.fromarray(warped_face)
    return img_warped, bounding_boxes
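
# Usage sketch for detect_one_face_align(), assuming the names it relies on
# (PIL.Image, numpy as np, detect_faces, warp_and_crop_face,
# get_reference_facial_points) are in scope; 'photo.jpg' is a hypothetical path.
def _demo_detect_one_face_align():
    image = Image.open('photo.jpg').convert('RGB')
    aligned, boxes = detect_one_face_align(image)
    if aligned is not None:
        # aligned is a 112x112 PIL image ready for the embedding backbone
        aligned.save('photo_aligned.jpg')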
def detect_face_align(img):
    crop_size = 112  # the backbone's input_size must be [112, 112] or [224, 224]
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    bounding_boxes = []
    warped_face = []
    img_warped = []
    facial5point = []
    landmarks = []
    try:
        bounding_boxes, landmarks = detect_faces(img)
    except Exception as e:
        print(e)

    # If the landmarks cannot be detected, the image is discarded.
    if len(landmarks) == 0:
        return img_warped, bounding_boxes

    # Align every detected face, not just the first one.
    for i in range(len(landmarks)):
        facial5point.append([[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)])
        warped_face.append(warp_and_crop_face(np.array(img), facial5point[i], reference,
                                              crop_size=(crop_size, crop_size)))
        img_warped.append(Image.fromarray(warped_face[i]))
    return img_warped, bounding_boxes
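
# Usage sketch for the multi-face variant: it returns parallel lists of aligned
# crops and bounding boxes. 'group.jpg' and the output names are hypothetical.
def _demo_detect_face_align():
    image = Image.open('group.jpg').convert('RGB')
    aligned_faces, boxes = detect_face_align(image)
    for i, face in enumerate(aligned_faces):
        face.save('face_{:02d}.jpg'.format(i))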
def align_68(img, crop_size, model):
    # model is a 68-point landmark detector; the sentinel value 4 signals failure.
    bbox = model.face_detector.detect_from_image(img)
    if bbox is None:
        return 4
    landmarks = model.get_landmarks(img, bbox)
    if landmarks is None:
        return 4
    landmarks = landmarks[0]

    # Reduce the 68 landmarks to the five reference points used for warping.
    left_eye = np.mean(landmarks[36:42, :], axis=0).astype(int)
    right_eye = np.mean(landmarks[42:48, :], axis=0).astype(int)
    nose = np.mean(landmarks[28:35, :], axis=0).astype(int)
    left_mouth = landmarks[48].astype(int)
    right_mouth = landmarks[54].astype(int)

    # settings
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale
    facial5points = [left_eye, right_eye, nose, left_mouth, right_mouth]
    warped_face = warp_and_crop_face(np.array(img), facial5points, reference,
                                     crop_size=(crop_size, crop_size))
    img_warped = Image.fromarray(warped_face)
    return img_warped
def __init__(self, device, crop_size=112):  # default format is N x H x W x D
    super().__init__()
    self.detection = Detection(device)
    self.embedding = Embedding(device)
    # Freeze the embedding network and keep it in eval mode; it is inference-only.
    for parameter in self.embedding.parameters():
        parameter.requires_grad = False
    self.embedding = self.embedding.eval()
    self.crop_size = crop_size
    self.scale = crop_size / 112.
    self.reference = get_reference_facial_points(default_square=True) * self.scale
def __init__(self, cpu=True, crop_size=112, weights_path="./weights/backbone_ir50_asia.pth"):
    self.cpu = cpu
    self.crop_size = crop_size
    self.weights_path = weights_path
    torch.set_grad_enabled(False)
    self.device = torch.device('cpu' if self.cpu else 'cuda:0')

    # Feature Extraction Model
    self.arcface_r50_asian = IR_50([self.crop_size, self.crop_size])
    self.arcface_r50_asian.load_state_dict(
        torch.load(weights_path, map_location='cpu' if self.cpu else 'cuda'))
    self.arcface_r50_asian.eval()
    self.arcface_r50_asian.to(self.device)

    # Align
    self.scale = self.crop_size / 112.
    self.reference = get_reference_facial_points(default_square=True) * self.scale

    # Facial Detection Model
    self.face_detector = FaceDetector()
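
# Downstream of this pipeline, ArcFace embeddings are typically compared with
# cosine similarity. A minimal sketch, assuming numpy is imported as np and
# emb_a, emb_b are the feature vectors produced by arcface_r50_asian for two
# aligned 112x112 crops:
def cosine_similarity(emb_a, emb_b):
    emb_a = emb_a / np.linalg.norm(emb_a)
    emb_b = emb_b / np.linalg.norm(emb_b)
    # Higher values mean more likely the same identity; the decision threshold
    # is dataset-dependent and must be calibrated.
    return float(np.dot(emb_a, emb_b))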
def align(img, crop_size, img_landmarks):
    landmarks = img_landmarks[0]
    # Reduce the 68 landmarks to the five reference points used for warping.
    left_eye = np.mean(landmarks[36:42, :], axis=0).astype(int)
    right_eye = np.mean(landmarks[42:48, :], axis=0).astype(int)
    nose = np.mean(landmarks[28:35, :], axis=0).astype(int)
    left_mouth = landmarks[48].astype(int)
    right_mouth = landmarks[54].astype(int)

    # settings
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale
    facial5points = [left_eye, right_eye, nose, left_mouth, right_mouth]
    warped_face = warp_and_crop_face(np.array(img), facial5points, reference,
                                     crop_size=(crop_size, crop_size))
    img_warped = Image.fromarray(warped_face)
    return img_warped
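
# Both align_68() and align() rely on the standard 68-point iBUG landmark
# layout: indices 36-41 outline the left eye, 42-47 the right eye, 28-34 span
# the nose, and 48 / 54 are the left and right mouth corners. A self-contained
# shape check with a dummy landmark array (values arbitrary, for shape only):
def _demo_five_point_reduction():
    landmarks = np.zeros((68, 2))   # one face, 68 (x, y) points
    left_eye = np.mean(landmarks[36:42, :], axis=0).astype(int)
    assert left_eye.shape == (2,)   # each reference point is an (x, y) pair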
def align_(img, crop_size, model_root, filter=False):
    # check if img is PIL; convert from numpy if not
    if type(img) == np.ndarray:
        img = Image.fromarray(img)

    # paths to the MTCNN detector weights (O-Net, P-Net, R-Net)
    op = "{}/onet.npy".format(model_root)
    pp = "{}/pnet.npy".format(model_root)
    rp = "{}/rnet.npy".format(model_root)

    # settings
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    try:  # Handle exception
        _, landmarks = detect_faces(img, ppath=pp, opath=op, rpath=rp)
    except Exception:
        print("Image is discarded due to exception!")
        return 4
    # If the landmarks cannot be detected, the img will be discarded
    if len(landmarks) == 0:
        print("Image is discarded due to non-detected landmarks!")
        return 4

    if filter:
        return True
    facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
    warped_face = warp_and_crop_face(np.array(img), facial5points, reference,
                                     crop_size=(crop_size, crop_size))
    img_warped = Image.fromarray(warped_face)
    return img_warped
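
# Usage sketch for align_(): with filter=True it only reports whether a face is
# detectable; otherwise it returns the aligned crop, or the sentinel 4 on
# failure. './mtcnn_weights' and 'photo.jpg' are hypothetical paths.
def _demo_align_():
    result = align_(Image.open('photo.jpg'), crop_size=112,
                    model_root='./mtcnn_weights')
    if result != 4:
        result.save('photo_aligned.jpg')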
import argparse
import os
from glob import glob

from PIL import Image
from tqdm import tqdm

# detect_faces and get_reference_facial_points are assumed to be importable
# from the accompanying alignment package.

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="face alignment")
    parser.add_argument("-source_root", "--source_root", help="specify your source dir",
                        default="./data/test", type=str)
    parser.add_argument("-dest_root", "--dest_root", help="specify your destination dir",
                        default="./data/test_Aligned", type=str)
    parser.add_argument("-crop_size", "--crop_size",
                        help="specify size of aligned faces, align and crop with padding",
                        default=112, type=int)
    args = parser.parse_args()
    source_root = args.source_root  # specify your source dir
    dest_root = args.dest_root  # specify your destination dir
    crop_size = args.crop_size  # specify size of aligned faces, align and crop with padding
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    cwd = os.getcwd()  # delete '.DS_Store' existing in the source_root
    os.chdir(source_root)
    os.system("find . -name '*.DS_Store' -type f -delete")
    os.chdir(cwd)

    if not os.path.isdir(dest_root):
        os.mkdir(dest_root)

    for image_name in tqdm(glob("{}/*".format(source_root))):
        print("Processing\t{}".format(image_name))
        img = Image.open(image_name)
        try:  # Handle exception
            _, landmarks = detect_faces(img)
        except Exception:
            print("{} is discarded due to exception!".format(image_name))
            continue
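
# Example invocation of the script above, mirroring its defaults; the file
# name face_align.py is an assumption:
#
#     python face_align.py -source_root ./data/test \
#                          -dest_root ./data/test_Aligned \
#                          -crop_size 112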
default="/media/hyo/文档/VIS-NIR/colorferet/VIS_aligned/", type=str) parser.add_argument( "-crop_size", "--crop_size", help="specify size of aligned faces, align and crop with padding", default=112, type=int) args = parser.parse_args() source_root = args.source_root # specify your source dir dest_root = args.dest_root # specify your destination dir crop_size = args.crop_size # specify size of aligned faces, align and crop with padding scale = crop_size / 112. # reference = get_reference_facial_points(default_square = True) * scale original_5_points = get_reference_facial_points(default_square=True) # original_5_points[:, 1] = original_5_points[:, 1] - 15 reference = (original_5_points) * scale cwd = os.getcwd() # delete '.DS_Store' existed in the source_root os.chdir(source_root) os.system("find . -name '*.DS_Store' -type f -delete") os.chdir(cwd) if not os.path.isdir(dest_root): os.mkdir(dest_root) for image_name in tqdm(os.listdir(source_root)): # if not os.path.isdir(os.path.join(dest_root, subfolder)): # os.mkdir(os.path.join(dest_root, subfolder)) # for image_name in os.listdir(os.path.join(source_root, subfolder)): print("Processing\t{}".format(os.path.join(source_root, image_name)))
import os
import random

from PIL import Image
import matplotlib.pyplot as plt
import torch
from torchvision import transforms
import scipy.misc as m

crop_size = 112
reference = get_reference_facial_points(default_square=True) * crop_size / 112.0

TARGET_ROOT = '/media/hyo/文档/VIS-NIR/SKETCH/cropped_sketch'
SOURCE_ROOT_LIST = [
    '/media/hyo/文档/VIS-NIR/SKETCH/CUFS/AR_sketch/sketch',
    '/media/hyo/文档/VIS-NIR/SKETCH/CUFS/CUHK_testing_sketch/sketch',
    '/media/hyo/文档/VIS-NIR/SKETCH/CUFS/CUHK_training_sketch/sketch',
    '/media/hyo/文档/VIS-NIR/SKETCH/CUFS/XM2VTS_sketch/sketch',
    '/media/hyo/文档/VIS-NIR/SKETCH/CUFSF/original_sketch'
]

count = 0
random.shuffle(SOURCE_ROOT_LIST)
for source_root in SOURCE_ROOT_LIST:
    img_list = os.listdir(source_root)
    for img_name in img_list:
        img = Image.open(os.path.join(source_root, img_name))