def load_imgs_labels_core(img_path, bbox, img_size, normalizer=None, label_ext=".pts"):
    img = cv2.imread(img_path)
    face = cv2.resize(get_face(img, bbox, need_to_convert_to_int=True),
                      (img_size, img_size))
    if normalizer is not None:
        face = normalizer.transform(face)
    label_path = os.path.splitext(img_path)[0] + label_ext
    if label_ext == '.wdpts':
        landmark = wdpts_process(label_path)
    elif label_ext == '.opts':
        landmark, occlu = opts_process(label_path, bbox, img_size)
        return face, landmark, occlu
    elif label_ext == '.pts':
        landmark = pts_process(label_path, bbox, img_size)
    else:
        raise ValueError("unsupported label extension: {}".format(label_ext))
    return face, landmark
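# The pts_process helper called above is defined elsewhere in the project. A
# minimal sketch of such a parser is given below, assuming an ibug-style .pts
# file (header lines followed by "x y" pairs), a bbox of the form
# (x_min, y_min, x_max, y_max), and landmarks rescaled into the resized face
# crop; the project's real helper may differ in these details. numpy is
# assumed imported as np, as in the rest of this module.
def pts_process_sketch(label_path, bbox, img_size):
    points = []
    with open(label_path) as f:
        for line in f:
            parts = line.strip().split()
            # keep only lines that parse as "x y" coordinate pairs
            if len(parts) == 2:
                try:
                    points.append([float(parts[0]), float(parts[1])])
                except ValueError:
                    continue
    landmark = np.asarray(points, dtype=np.float32)
    x_min, y_min, x_max, y_max = bbox
    # shift to the bbox origin and scale into the img_size x img_size crop
    landmark[:, 0] = (landmark[:, 0] - x_min) / max(x_max - x_min, 1e-6) * img_size
    landmark[:, 1] = (landmark[:, 1] - y_min) / max(y_max - y_min, 1e-6) * img_size
    return landmark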
def make_compare_data():
    emb_dict = load()
    img_dict = {}
    for folder_name in os.listdir(DATA_PATH):
        if folder_name in emb_dict.keys():
            continue
        print(folder_name)
        folder_path = DATA_PATH + folder_name + '/'
        img_paths = np.random.choice(
            [file_name for file_name in os.listdir(folder_path)],
            3, replace=False)
        img_dict[folder_name] = []
        for i, img_path in enumerate(img_paths):
            img_path = folder_path + img_path
            face = utils.get_face(img_path)[0]
            face = prewhiten(face)
            img_dict[folder_name].append(face)
            misc.imsave(COMPARE_PATH + folder_name + '_' + str(i + 1) + '.png', face)
    new_emb_dict = get_embedding(img_dict, make_data=True)
    emb_dict.update(new_emb_dict)
    np.save(COMPARE_PATH + NAME + '.npy', emb_dict)
def predict_image():
    """Gets an image file via POST request, feeds the image to the FaceNet model,
    and compares the resulting embedding with the embeddings database.
    The image file is not stored.

    An html page is then rendered showing the prediction result.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return "No file part"
        file = request.files['file']
        filename = file.filename
        if filename == "":
            return "No selected file"
        if file and allowed_file(filename=filename, allowed_set=allowed_set):
            # Read image file as numpy array of RGB dimension
            img = imread(name=file, mode='RGB')
            # Detect and crop a 160 x 160 image containing a human face in the image file
            img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
            # If a human face is detected
            if img is not None:
                embedding = forward_pass(
                    img=img,
                    session=facenet_persistent_session,
                    images_placeholder=images_placeholder,
                    embeddings=embeddings,
                    phase_train_placeholder=phase_train_placeholder,
                    image_size=image_size)
                embedding_dict = load_embeddings()
                if embedding_dict:
                    # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                    identity = identify_face(embedding=embedding, embedding_dict=embedding_dict)
                    return render_template('predict_result.html', identity=identity)
                else:
                    return render_template(
                        'predict_result.html',
                        identity="No embedding files detected! Please upload image files for embedding!")
            else:
                return render_template(
                    'predict_result.html',
                    identity="Operation was unsuccessful! No human face was detected.")
    else:
        return "POST HTTP method required!"
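# identify_face above is defined elsewhere in the project. A minimal sketch of
# a nearest-neighbour lookup over the stored embeddings is shown below,
# assuming embedding_dict maps a person name to a numpy embedding vector and
# using a hypothetical distance threshold of 1.1; the real helper may use a
# different threshold and return format.
def identify_face_sketch(embedding, embedding_dict, threshold=1.1):
    best_name, best_dist = None, float('inf')
    for name, db_embedding in embedding_dict.items():
        # euclidean distance between the query embedding and a stored one
        dist = np.linalg.norm(embedding - db_embedding)
        if dist < best_dist:
            best_name, best_dist = name, dist
    if best_dist < threshold:
        return best_name
    return "Unknown face"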
def get_face(user_id):
    user = get_user_by_id(user_id)
    result = {}
    if user.face_path is None:
        result[RSP_CODE] = RC_NO_FACE
    else:
        result[RSP_CODE] = RC_SUCESS
        result['face'] = utils.get_face(user.face_path)
    return result
def get_image():
    """Gets an image file via POST request, feeds the image to the FaceNet model, then saves
    both the original image and its resulting embedding from the FaceNet model in their designated folders.

        'uploads' folder: for image files
        'embeddings' folder: for embedding numpy files.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return render_template("warning.html", status="No 'file' field in POST request!")
        file = request.files['file']
        filename = file.filename
        if filename == "":
            return render_template("warning.html", status="No selected file!")
        if file and allowed_file(filename=filename, allowed_set=allowed_set):
            filename = secure_filename(filename=filename)
            # Read image file as numpy array of RGB dimension
            img = imread(name=file, mode='RGB')
            # Detect and crop a 160 x 160 image containing a human face in the image file
            img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
            # If a human face is detected
            if img is not None:
                embedding = forward_pass(
                    img=img,
                    session=facenet_persistent_session,
                    images_placeholder=images_placeholder,
                    embeddings=embeddings,
                    phase_train_placeholder=phase_train_placeholder,
                    image_size=image_size)
                # Save cropped face image to 'uploads/' folder
                save_image(img=img, filename=filename, uploads_path=uploads_path)
                # Remove file extension from image filename for numpy file storage being based on image filename
                filename = remove_file_extension(filename=filename)
                # Save embedding to 'embeddings/' folder
                save_embedding(embedding=embedding, filename=filename, embeddings_path=embeddings_path)
                return render_template("upload_result.html",
                                       status="Image uploaded and embedded successfully!")
            else:
                return render_template("upload_result.html",
                                       status="Image upload was unsuccessful! No human face was detected!")
    else:
        return render_template("warning.html", status="POST HTTP method required!")
def get_image():
    if request.method == 'POST':
        if 'file' not in request.files:
            return "No file part"
        file = request.files['file']
        filename = file.filename
        if filename == "":
            return "No selected file"
        if file and allowed_file(filename=filename, allowed_set=allowed_set):
            # Read image file as numpy array of RGB dimension
            img = io.imread(fname=file, mode='RGB')
            # Detect and crop a 160 x 160 image containing a human face in the image file
            img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
            # If a human face is detected
            if img is not None:
                embedding = forward_pass(
                    img=img,
                    session=facenet_persistent_session,
                    images_placeholder=images_placeholder,
                    embeddings=embeddings,
                    phase_train_placeholder=phase_train_placeholder,
                    image_size=image_size)
                # Save cropped face image to 'uploads/' folder
                save_image(img=img, filename=filename, uploads_path=uploads_path)
                # Remove file extension from image filename for numpy file storage being based on image filename
                filename = remove_file_extension(filename=filename)
                # Save embedding to 'embeddings/' folder
                save_embedding(embedding=embedding, filename=filename, embeddings_path=embeddings_path)
                return render_template("upload_result.html",
                                       status="Image uploaded and embedded successfully!")
            else:
                return render_template("upload_result.html",
                                       status="Image upload was unsuccessful! No human face was detected.")
    else:
        return "POST HTTP method required!"
def predict_image():
    """Gets an image file via POST request, detects and crops the face, and compares it
    against the embeddings database. The image file is not stored.

    The prediction result is returned as a plain-text response.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return "No 'file' field in POST request!"
        file = request.files['file']
        filename = file.filename
        if filename == "":
            return "No selected file!"
        if file and allowed_file(filename=filename, allowed_set=allowed_set):
            # Read image file as numpy array of RGB dimension
            img = imread(name=file, mode='RGB')
            # Detect and crop a 160 x 160 image containing a human face in the image file
            img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
            # If a human face is detected
            if img is not None:
                if embedding_dict:
                    # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                    identity = who_is_it(img, embedding_dict, FRmodel)
                    return identity
                else:
                    return "No embedding files detected! Please upload image files for embedding!"
            else:
                return "Operation was unsuccessful! No human face was detected!"
    else:
        return "POST HTTP method required!"
def _load_imgs(self):
    """Load imgs"""
    for index in range(len(self.landmarks)):
        # load img
        if self.color:
            img = cv2.imread(self.img_paths[index])
        else:
            img = cv2.imread(self.img_paths[index], cv2.IMREAD_GRAYSCALE)
        if img is None:
            logger("{} img read error".format(self.img_paths[index]))
            continue
        bbox = [int(_) for _ in self.bboxes[index]]
        # normalize landmark
        landmark = self.landmarks[index]
        landmark_normalized = normalize_data(landmark)
        # data augment
        face = cv2.resize(get_face(img, bbox), (self.img_size, self.img_size))
        name = self.img_paths[index].lstrip(
            data_param['img_root_dir']).replace("/", "_")
        faces, landmarks, occlusions, names = \
            data_aug(face=face, landmark=landmark_normalized, name=name)
        self.faces.extend(faces)
        self.aug_landmarks.extend(landmarks)
        self.occlusions.extend(occlusions)
        self.names.extend(names)
        if self.print_debug and (index + 1) % 500 == 0:
            logger("processed {} images".format(index + 1))
    self.data_size = len(self.occlusions)
    for index in range(len(self.aug_landmarks)):
        self.aug_landmarks[index] = np.multiply(
            self.aug_landmarks[index],
            np.array([self.img_size, self.img_size]))
    del self.landmarks
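# normalize_data above is not shown in this snippet. Since _load_imgs later
# multiplies the augmented landmarks back by img_size, it presumably maps
# landmark coordinates into [0, 1]. A sketch under that assumption (the real
# helper may normalize by the bounding box rather than the landmark extent):
def normalize_data_sketch(landmark):
    landmark = np.asarray(landmark, dtype=np.float32)
    min_xy = landmark.min(axis=0)
    max_xy = landmark.max(axis=0)
    # scale each coordinate into [0, 1] relative to the landmark extent
    return (landmark - min_xy) / np.maximum(max_xy - min_xy, 1e-6)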
face_detector = mtcnn.MTCNN()
face_encoder = load_model(encoder_model)

encoding_dict = dict()
for person_name in os.listdir(people_dir):
    person_dir = os.path.join(people_dir, person_name)
    encodes = []
    for img_name in os.listdir(person_dir):
        img_path = os.path.join(person_dir, img_name)
        img = cv2.imread(img_path)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = face_detector.detect_faces(img_rgb)
        if results:
            # keep the detection with the largest bounding-box area
            res = max(results, key=lambda b: b['box'][2] * b['box'][3])
            face, _, _ = get_face(img_rgb, res['box'])
            face = normalize(face)
            face = cv2.resize(face, required_size)
            encode = face_encoder.predict(np.expand_dims(face, axis=0))[0]
            encodes.append(encode)
    if encodes:
        # average-like pooling: sum the per-image encodings, then L2-normalize
        encode = np.sum(encodes, axis=0)
        encode = l2_normalizer.transform(np.expand_dims(encode, axis=0))[0]
        encoding_dict[person_name] = encode

for key in encoding_dict.keys():
    print(key)

with open(encodings_path, 'bw') as file:
    pickle.dump(encoding_dict, file)
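# Quick sanity check of the pickle produced above. This is only an illustrative
# snippet; encodings_path comes from the surrounding script, and the embedding
# dimension (e.g. 128 or 512 depending on the FaceNet model) is an assumption.
with open(encodings_path, 'rb') as f:
    loaded = pickle.load(f)
for person, vec in loaded.items():
    print(person, vec.shape)  # one L2-normalized vector per person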
test_res_path = 'data/results/friends.jpg'
recognition_t = 0.3
required_size = (160, 160)

encoding_dict = load_pickle(encodings_path)
face_detector = mtcnn.MTCNN()
face_encoder = load_model(encoder_model)

img = cv2.imread(test_img_path)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = face_detector.detect_faces(img_rgb)
for res in results:
    face, pt_1, pt_2 = get_face(img_rgb, res['box'])
    encode = get_encode(face_encoder, face, required_size)
    encode = l2_normalizer.transform(np.expand_dims(encode, axis=0))[0]
    # find the closest known identity under the recognition threshold
    name = 'unknown'
    distance = float("inf")
    for db_name, db_encode in encoding_dict.items():
        dist = cosine(db_encode, encode)
        if dist < recognition_t and dist < distance:
            name = db_name
            distance = dist
    if name == 'unknown':
        cv2.rectangle(img, pt_1, pt_2, (0, 0, 255), 2)
        cv2.putText(img, name, pt_1, cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
    else:
        # recognized-face branch; the original snippet is truncated here, so this
        # completion mirrors the 'unknown' branch, drawing the match in green
        cv2.rectangle(img, pt_1, pt_2, (0, 255, 0), 2)
        cv2.putText(img, '{} ({:.2f})'.format(name, distance), pt_1,
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
# presumably followed by saving the annotated image, e.g. cv2.imwrite(test_res_path, img)
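# The get_face variant used in the two snippets above (taking an image and an
# MTCNN 'box') is defined elsewhere. A plausible sketch, assuming
# box = (x, y, width, height) as returned by mtcnn and clamping to the image
# bounds:
def get_face_sketch(img, box):
    x, y, w, h = box
    x, y = max(x, 0), max(y, 0)   # MTCNN can return slightly negative coordinates
    pt_1 = (x, y)                 # top-left corner of the crop
    pt_2 = (x + w, y + h)         # bottom-right corner of the crop
    face = img[y:y + h, x:x + w]
    return face, pt_1, pt_2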
def get_embedding(data, make_data=False):
    return_data = None
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.09)
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            # load model
            load_model('facenet.pb')
            if make_data:
                img_dict = data
                emb_dict = {}
                for key, imgs in img_dict.items():
                    images = np.array(imgs)
                    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    emb = sess.run(embeddings, feed_dict=feed_dict)
                    emb_dict[key] = emb
                return_data = emb_dict
            else:
                img = data
                face = utils.get_face(img, path=False)
                if len(face) == 0:
                    return
                else:
                    face = face[0]
                face = prewhiten(face)
                images = np.array([face])
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb = sess.run(embeddings, feed_dict=feed_dict)
                return_data = emb[0]
    return return_data
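# Example use of get_embedding above. The file name is a hypothetical
# placeholder; with make_data=False the function expects a raw image (a face
# is detected and prewhitened internally), while make_data=True expects a dict
# of prewhitened face arrays, as built in make_compare_data, and returns a
# {name: embeddings} dict.
query_img = cv2.imread('query_face.jpg')  # hypothetical input image (BGR, as read by cv2)
query_emb = get_embedding(query_img)      # 1-D embedding vector, or None if no face was found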
def get_image():
    """Gets one or more image files via POST request, feeds each image to the FaceNet model,
    then saves both the cropped face image and its resulting embedding in their designated folders.

        'uploads' folder: for image files
        'embeddings' folder: for embedding numpy files.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return "No 'file' field in POST request!"
        list_success = []
        for file in request.files.getlist('file'):
            filename = file.filename
            if filename == "":
                return "No selected file!"
            if file and allowed_file(filename=filename, allowed_set=allowed_set):
                filename = secure_filename(filename=filename)
                try:
                    # Read image file as numpy array of RGB dimension
                    img = imread(name=file, mode='RGB')
                    # Detect and crop a 160 x 160 image containing a human face in the image file
                    img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
                    # If a human face is detected
                    if img is not None:
                        embedding = img_to_encoding(img, FRmodel)
                        # Save cropped face image to 'uploads/' folder
                        save_image(img=img, filename=filename, uploads_path=uploads_path)
                        # Remove file extension from image filename for numpy file storage being based on image filename
                        filename = remove_file_extension(filename=filename)
                        # Save embedding to 'embeddings/' folder
                        save_embedding(embedding=embedding, filename=filename, embeddings_path=embeddings_path)
                        embedding_dict[filename] = embedding
                        list_success.append(filename)
                except Exception:
                    return ('Error processing ' + str(filename) +
                            '; images uploaded and embedded successfully: ' + str(len(list_success)))
        return "Images uploaded and embedded successfully: " + str(len(list_success))
    else:
        return "POST HTTP method required!"
def detail():
    '''
    Detects text and face in Aadhaar Card
    '''
    if request.method == 'POST':
        # saving current timestamp
        current_time = str(datetime.datetime.now()).replace('-', '_').replace(':', '_')

        # The type of image i.e. Front or Back image
        image_type1 = 'Front'
        image_type2 = 'Back'

        # Path for Front image and the face image that will be cropped
        filename1 = uploads_path + image_type1 + '/' + current_time + '.jpg'
        photo_path = uploads_path + image_type1 + '/' + 'faces' + '/' + current_time + '.png'

        # Path for Back image and the face image that will be cropped
        filename2 = uploads_path + image_type2 + '/' + current_time + '.jpg'
        crop_path = uploads_path + image_type2 + '/temp/' + current_time + '.png'

        # if the Front folder (in uploads) doesn't already exist, create it
        if not os.path.exists(uploads_path + image_type1):
            os.mkdir(uploads_path + image_type1)
            # directory for saving faces in the id cards
            os.mkdir(uploads_path + image_type1 + '/' + 'faces')

        # if the Back folder (in uploads) doesn't already exist, create it
        if not os.path.exists(uploads_path + image_type2):
            os.mkdir(uploads_path + image_type2)
            os.mkdir(uploads_path + image_type2 + '/temp')

        # variable to store details extracted from card
        details = {}

        # get Front Card Photo from user
        photo1 = request.files['photo-front']
        photo1.save(filename1)

        # get Back Card Photo from user
        photo2 = request.files['photo-back']
        photo2.save(filename2)

        print("Processing Front Image ......")
        # Process The Front Card Image
        data, photo_path = recognise_text(filename1, photo_path)
        details = get_labels_from_aadhar(data)
        print("Processing Front Image ...... DONE")

        print("Processing Back Image .......")
        # Process The Back Card Image
        crop_aadhar(filename2, crop_path)
        data2, photo_path2 = recognise_text(crop_path, 'none')
        details.update(get_address(data2))
        print("Processing Back Image ....... DONE")
        os.remove(crop_path)

        data_dict = {
            'status': True,
            'fields': details,
            'image_path_front': filename1,
            'image_path_back': filename2,
            'photo_path': photo_path
        }

        print("save into json files")
        # the json file where the output must be stored
        with open('myfile.json', 'a+') as out_file:
            json.dump(data_dict, out_file, indent=6)

        img = imread(name=photo_path, mode='RGB')

        print("Processing Face Image .......")
        # Detect and crop a 160 x 160 image containing a human face in the image file
        img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
        embedding = forward_pass(
            img=img,
            session=facenet_persistent_session,
            images_placeholder=images_placeholder,
            embeddings=embeddings,
            phase_train_placeholder=phase_train_placeholder,
            image_size=image_size)
        print("Processing Face Image ....... DONE")

        # Save The Face embedding as the name of the Person
        filename = data_dict['fields']['Name']
        filename = secure_filename(filename=filename)
        # Save embedding to 'embeddings/' folder
        save_embedding(embedding=embedding, filename=filename, embeddings_path=embeddings_path)

        # Write the Raw and Cleaned Text detected from the Card
        with open('outputs.txt', 'a+') as f:
            f.write("##########################################################################\n\n")
            f.write('######################## Raw Output for Front Card Image #############################\n\n')
            for value in data:
                f.write(str(value) + '\n')
            f.write("##########################################################################\n\n")
            f.write('######################## Raw Output for Back Card Image #############################\n\n')
            for value in data2:
                f.write(str(value) + '\n')
            f.write('\n\n######################## Cleaned Output #############################\n\n')
            for key, value in details.items():
                f.write(str(key) + ' : ' + str(value) + '\n')
            f.write("##########################################################################\n\n")

        return jsonify(data_dict)
    else:
        # if not POST, terminate
        return jsonify({'status': False})