def load_noonan_val_pair(path, rootdir, transform, image_size=[112, 112]):
    # path: path for original images, str
    # rootdir: path to store images and issame_list, Path
    mtcnn = MTCNN()
    print('mtcnn loaded')
    if not rootdir.exists():
        rootdir.mkdir()
    images = os.listdir(path)
    comb_size = len(images) * (len(images) - 1) / 2
    pairs = combinations(images, 2)
    data = bcolz.fill([int(comb_size * 2), 3, image_size[0], image_size[1]],
                      dtype=np.float32, rootdir=rootdir, mode='w')
    issame_list = np.zeros(int(comb_size))
    i = 0
    for pair in pairs:
        img0 = Image.open(os.path.join(path, pair[0]))
        if img0.size != (112, 112):
            img0 = mtcnn.align(img0)
        img1 = Image.open(os.path.join(path, pair[1]))
        if img1.size != (112, 112):
            img1 = mtcnn.align(img1)
        data[2 * i, ...] = transform(img0)
        data[2 * i + 1, ...] = transform(img1)
        if (('noonan' in pair[0] and 'noonan' in pair[1]) or
                ('normal' in pair[0] and 'normal' in pair[1])):
            issame_list[i] = 1
        i += 1
        if i % 1000 == 0:
            print('loading noonan', i)
    print(data.shape)
    np.save(str(rootdir) + '_list', np.array(issame_list))
    return data, issame_list
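# Usage sketch (hedged): the directory names and the torchvision transform below
# are illustrative assumptions, not values from the original code. Assumes the
# imports used by load_noonan_val_pair (os, numpy as np, bcolz, PIL.Image,
# itertools.combinations, MTCNN) are already in scope.
if __name__ == '__main__':
    from pathlib import Path
    from torchvision import transforms

    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    # Builds every image pair under the source folder, caches the aligned
    # tensors with bcolz, and writes <rootdir>_list.npy next to the cache.
    data, issame = load_noonan_val_pair('data/noonan_raw',
                                        Path('data/noonan_val'),
                                        val_transform)
    print(data.shape, int(issame.sum()))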
def alignMain(args):
    LOG_DIR = '../Dataset/LogError/'
    os.makedirs(os.path.dirname(LOG_DIR) + "/", exist_ok=True)
    f = open(LOG_DIR + 'AlignError.txt', 'w')
    if not os.path.exists(args.rawdataDir):
        raise UserWarning("Input dataset directory does not exist!")
    os.makedirs(os.path.dirname(args.outputDir) + "/", exist_ok=True)
    dataset = load_dataset(args.rawdataDir)
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    detector = MTCNN(image_size=args.size, device=device)
    for i in range(len(dataset)):
        print("Working in folder " + dataset[i].name + " with index " + str(i), end='')
        for path in dataset[i].paths:
            _, file_name = os.path.split(path)
            imgBGR = cv2.imread(path)
            thumbnails = detector.align(img=imgBGR, select_largest=True)
            if thumbnails is not None:
                thumbnail = thumbnails[-1]
                # thumbnail = cv2.cvtColor(np.float32(thumbnail), cv2.COLOR_RGB2BGR)
                OUTPUT_DIR = args.outputDir + dataset[i].name + "/"
                os.makedirs(os.path.dirname(OUTPUT_DIR) + "/", exist_ok=True)
                out_path = "{}{}".format(OUTPUT_DIR, file_name)
                cv2.imwrite(out_path, thumbnail)
            else:
                f.write("Unable to detect faces: {}/{}.\n".format(dataset[i].name, file_name))
        print(' ... Done!')
    f.close()
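# Usage sketch (hedged): the CLI flags and default directories below are
# illustrative assumptions; the real script's argument parser may differ.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Align raw face images with MTCNN')
    parser.add_argument('--rawdataDir', default='../Dataset/Raw/')
    parser.add_argument('--outputDir', default='../Dataset/Aligned/')
    parser.add_argument('--size', type=int, default=160)
    alignMain(parser.parse_args())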
def get_sg_faces(source_path, save_path):
    # sg: stylegan
    # os.makedirs(save_path, exist_ok=True)
    fake_images = [item for item in os.listdir(source_path)]
    mtcnn = MTCNN()
    for img in fake_images:
        # the last two params: conf.face_limit, conf.min_face_size
        bboxes, _ = mtcnn.align_multi(Image.open(source_path + os.sep + img), 1, 30)
        if bboxes.shape[0] != 1:
            print(img + ':', bboxes.shape)
        else:
            new_img = mtcnn.align(Image.open(source_path + os.sep + img), (112, 112))
            subfolder = os.path.basename(img).split('_')[0]
            os.makedirs(save_path + os.sep + subfolder, exist_ok=True)
            new_img.save(os.path.join(save_path, subfolder, img))
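# Usage sketch (hedged): the folder names are placeholders. Images whose names
# share a prefix before the first underscore end up in one sub-folder per
# prefix under the save path.
if __name__ == '__main__':
    get_sg_faces('data/stylegan_raw', 'data/stylegan_aligned')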
def register(user_id):
    data_path = Path('data')
    save_path = data_path / 'facebank' / user_id
    fetch_path = data_path / 'dataset' / user_id
    images = load_images_from_folder(fetch_path)
    print(images)
    if not save_path.exists():
        save_path.mkdir()
    mtcnn = MTCNN()
    face_id = user_id
    count = 0
    for img in images:
        frame = img
        p = Image.fromarray(frame[..., ::-1])  # BGR to RGB
        try:
            warped_face = np.array(mtcnn.align(p))[..., ::-1]
            cv2.imwrite("data/facebank/" + str(face_id) + '/' + str(face_id) +
                        '_' + str(count) + ".jpg", warped_face)
            count += 1
            # cv2.imwrite(str(save_path/'{}.jpg'.format(str(datetime.now())[:-7].replace(":","-").replace(" ","-"))), warped_face)
        except:
            result = {
                "_result": "Error",
                "_message": "Unable to detect the face"
            }
    if count == len(images):
        result = {
            "_result": "success",
            "_message": "User Registered Successfully"
        }
    conf = get_config(False)
    learner = face_learner(conf, True)
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    # print('learner loaded')
    targets, names = prepare_facebank(conf, learner.model, mtcnn, user_id)
    # print('facebank updated')
    return result
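# Usage sketch (hedged): 'demo_user' is a hypothetical id; it assumes raw images
# already exist under data/dataset/demo_user/ and that the face_learner weights
# referenced above are available.
if __name__ == '__main__':
    print(register('demo_user'))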
def get_train_dataset_gan(imgs_folder, target_folder, target_size):
    # if 'divided' in imgs_folder:
    for sub_folder in os.listdir(imgs_folder):
        os.makedirs(target_folder + os.sep + sub_folder, exist_ok=True)
    # else:
    #     os.makedirs(target_folder, exist_ok=True)
    imgs = []
    for root, _, fnames in sorted(os.walk(imgs_folder)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                imgs.append((path, fname))
    mtcnn = MTCNN()
    for img in imgs:
        # the last two params: conf.face_limit, conf.min_face_size
        bboxes, _ = mtcnn.align_multi(Image.open(img[0]), 1, 30)
        if bboxes.shape[0] > 0:
            new_img = mtcnn.align(Image.open(img[0]), target_size)
            # if 'divided' in imgs_folder:
            new_img.save(target_folder + os.sep + '/'.join(img[0].strip().split(os.sep)[-2:]))
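# Usage sketch (hedged): the folder names are placeholders; (112, 112) matches
# the crop size used by the other alignment helpers in this code.
if __name__ == '__main__':
    get_train_dataset_gan('data/gan_divided', 'data/gan_divided_aligned', (112, 112))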
def align_images(base_dir, output_dir):
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    mtcnn = MTCNN()
    patients_list = os.listdir(base_dir)
    for ppl in patients_list:
        patient_folder = os.path.join(base_dir, ppl)
        new_folder = os.path.join(output_dir, ppl)
        if not os.path.exists(new_folder):
            os.mkdir(new_folder)
            print("Creating new folder:", new_folder)
        img_list = os.listdir(patient_folder)
        for img in img_list:
            image = Image.open(os.path.join(patient_folder, img)).convert('RGB')
            if image.size != (112, 112):
                new_image = mtcnn.align(image)
                # new_image = image.resize((112, 112))
                new_image.save(os.path.join(new_folder, img))
                print(new_image.size)
                print('Saving', img)
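# Usage sketch (hedged): the directory names are placeholders; every patient
# sub-folder of the base directory is mirrored into the output directory with
# MTCNN-aligned crops.
if __name__ == '__main__':
    align_images('data/patients_raw', 'data/patients_aligned')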
if isSuccess:
    frame_text = cv2.putText(frame,
                             'Press t to take a picture, q to quit.....',
                             (10, 100),
                             cv2.FONT_HERSHEY_SIMPLEX,
                             2, (0, 255, 0), 3, cv2.LINE_AA)
    cv2.imshow("My Capture", frame_text)
    # take a picture when 't' is pressed
    if cv2.waitKey(1) & 0xFF == ord('t'):
        p = Image.fromarray(frame[..., ::-1])
        try:
            warped_face = np.array(mtcnn.align(p))[..., ::-1]
            bboxes, faces = mtcnn.align_multi(p, conf.face_limit, conf.min_face_size)
            print('-----------box count {}'.format(len(bboxes)))
            bboxes = bboxes[:, :-1]  # shape: [10, 4], only keep the 10 highest-confidence faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            for box in bboxes:
                frame_text = cv2.rectangle(frame_text, (box[0], box[1]),
                                           (box[2], box[3]), (0, 0, 255), 6)
            cv2.imwrite(str(save_path / '{}.jpg'.format(
                str(datetime.now())[:-7].replace(":", "-").replace(" ", "-"))), warped_face)
        except:
            print('no face captured')

if cv2.waitKey(1) & 0xFF == ord('q'):
    break
mtcnn = MTCNN()
print('mtcnn loaded')

# initial camera
img = []
img.append(cv2.imread('data/input/evans/evans_p.jpg'))
img.append(cv2.imread('data/input/hermsworth/hermsworth_p.jpg'))
img.append(cv2.imread('data/input/jeremy/jeremy.jpg'))
img.append(cv2.imread('data/input/mark/mark.jpg'))
img.append(cv2.imread('data/input/olsen/olsen.jpg'))

faces = []
re_img = Image.fromarray(img[0][..., ::-1])  # BGR to RGB
re_img = mtcnn.align(re_img)
tolist_face = np.array(re_img).tolist()
URL = server + "register"
json_feed = {'face_image': tolist_face}
response = requests.post(URL, json=json_feed)
print(response)

tolist_face = img[1].tolist()
URL = server + "getframe"
json_feed = {'face_list': tolist_face}
response = requests.post(URL, json=json_feed)

'''
for i in range(5):
    image = Image.fromarray(img[i][..., ::-1])  # bgr to rgb
    # image = Image.fromarray(img[i])
    print('facebank updated')
else:
    targets, ftoid, idinfo = load_facebank(conf)
    print('facebank loaded')

faces = []
predfns = []
with open(args.file) as f:
    imgfiles = list(map(str.strip, f.readlines()))
for imgfn in imgfiles:
    try:
        face = Image.open(imgfn)
    except:
        print('cannot open query image file {}'.format(imgfn))
        continue
    try:
        face = mtcnn.align(face)
    except:
        print('mtcnn failed for {}'.format(imgfn))
    face = face.resize((112, 112), Image.ANTIALIAS)
    # data = np.array((cv2.cvtColor(np.asarray(face), cv2.COLOR_RGB2GRAY),) * 3).T
    # face = Image.fromarray(data)
    data = np.array(face)
    face = Image.fromarray(data[:, :, ::-1])
    faces.append(face)
results, score, d = learner.infer(conf, faces, targets, args.tta)
print(score)
for idx, imgfn in enumerate(imgfiles):
    i = results[idx]
    print("For {} found face {}".format(
        imgfn, "Unknown" if i == -1 else idinfo[i][1]))
    print(d[idx], d[idx][i])
IMAGE_PATH1 = '../Dataset/Test/3.jpg'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
detector = MTCNN(image_size=160, device=device)

model = MobileNetV2(128, alpha=1.0)
model.load_weights(TRAINED_MODEL).expect_partial()
with open(SVM_MODEL, 'rb') as infile:
    (svm_model, class_names) = pickle.load(infile)

img = cv2.imread(IMAGE_PATH1)
if max(img.shape[0], img.shape[1]) > 900:
    scale_percent = 900 / max(img.shape[0], img.shape[1])
    img = img_resize(img, scale_percent)

# faces = detector.align(img=img, select_largest=False, save_path='../../Test/')
faces = detector.align(img=img, select_largest=False)
if faces is not None:
    for face in faces:
        face = np.float32(face)
        face_embedding = model(face, True)
        predictions = svm_model.predict_proba(face_embedding)
        best_class_idxs = np.argmax(predictions, axis=1)
        best_class_probabilities = predictions[np.arange(len(best_class_idxs)), best_class_idxs]
        for i in range(len(best_class_idxs)):
            print('%4d %s: %.3f' % (i, class_names[best_class_idxs[i]], best_class_probabilities[i]))
img1 = cv2.imread(IMAGE_PATH1)
if max(img1.shape[0], img1.shape[1]) > 900:
    scale_percent = 900 / max(img1.shape[0], img1.shape[1])
    img1 = img_resize(img1, scale_percent)

img2 = cv2.imread(IMAGE_PATH2)
if max(img2.shape[0], img2.shape[1]) > 900:
    scale_percent = 900 / max(img2.shape[0], img2.shape[1])
    img2 = img_resize(img2, scale_percent)

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
detector = MTCNN(image_size=160, device=device)

model = MobileNetV2(128, alpha=1.0)
model.load_weights(TRAINED_MODEL)

faces1 = detector.align(img=img1, select_largest=True)
faces2 = detector.align(img=img2, select_largest=True)

if faces1 is not None:
    for face1 in faces1:
        face1 = np.float32(face1)
        face_embedding1 = model(face1, True)
        print(face_embedding1)

if faces2 is not None:
    for face2 in faces2:
        face2 = np.float32(face2)
        face_embedding2 = model(face2, True)
        print(face_embedding2)