def main(character):
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    # ------------- load data
    # e.g. d:\characters\richardson\face\richardson_t10
    image_folder = os.path.join(character.swap_head_dir)
    print('\nImage Folder: ', image_folder)

    # e.g. d:\characters\richardson\vertices\richardson_t10
    save_folder = os.path.join(character.vertdir)
    print('\nSave Folder: ', save_folder)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    # total_num = len(image_path_list)
    # print(total_num)

    for i, image_path in enumerate(image_path_list):
        get_vert(image_path, save_folder, prn)
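
# `get_vert` is not defined in this snippet. A minimal sketch of what such a
# helper could look like, assuming PRNet's standard API (prn.process /
# prn.get_vertices) and that vertices are saved as .npy next to the image
# name; this is an illustration, not the author's implementation.
def get_vert(image_path, save_folder, prn):
    from skimage.io import imread

    name = os.path.splitext(os.path.basename(image_path))[0]
    image = imread(image_path)[..., :3]      # drop alpha channel if present
    pos = prn.process(image)                 # regress the position map (dlib detection)
    if pos is None:                          # no face detected
        return
    vertices = prn.get_vertices(pos)         # (n_points, 3) vertex array
    np.save(os.path.join(save_folder, name + '.npy'), vertices)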
def generate_depth_images():
    rootDir = "./dataset/"
    depthImages = []
    allFilenames = []
    os.environ['CUDA_VISIBLE_DEVICES'] = "-1"  # only CPU for now
    prn = PRN(is_dlib=True)
    for root, dirs, files in os.walk(rootDir):
        for name in dirs:
            print(os.path.join(root, name))
            depthImage = generate_depth_image(os.path.join(root, name), prn)
            depthImages.append(depthImage)
    return depthImages
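
# `generate_depth_image` is not shown here. One plausible sketch: run PRNet on
# the first image found in the folder and scatter the regressed vertices'
# z-values into a normalized depth map. The folder layout ('*.jpg') and the
# rendering approach are assumptions, not the author's implementation.
def generate_depth_image(folder, prn):
    from glob import glob
    from skimage.io import imread

    image_paths = sorted(glob(os.path.join(folder, '*.jpg')))
    if not image_paths:
        return None
    image = imread(image_paths[0])
    pos = prn.process(image)                 # position map via face detection
    if pos is None:
        return None
    vertices = prn.get_vertices(pos)
    h, w = image.shape[:2]
    depth = np.zeros((h, w), dtype=np.float32)
    x = np.clip(vertices[:, 0].astype(np.int32), 0, w - 1)
    y = np.clip(vertices[:, 1].astype(np.int32), 0, h - 1)
    z = vertices[:, 2]
    depth[y, x] = (z - z.min()) / (z.max() - z.min())  # normalize to [0, 1]
    return depth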
class Recognition:
    """
    Class for face recognition using library (https://github.com/YadiraF/PRNet).
    """

    def __init__(self, image_size=(96, 96), data_path=r".\data\ms_faces",
                 embeddings_path=r".\data\pos.json", train=False):
        self.image_size = image_size
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
        self.prn = PRN(is_dlib=True)
        if train:
            names = os.listdir(data_path)
            name_face_path = {}
            for name in names:
                faces = os.listdir(os.path.join(data_path, name))
                for face in faces:
                    path = os.path.join(data_path, name, face)
                    if name in name_face_path:
                        name_face_path[name].append(path)
                    else:
                        name_face_path[name] = [path]
            self.embeddings = dict.fromkeys(name_face_path.keys())
            for name in name_face_path.keys():
                name_vertices = []
                for img_path in name_face_path[name]:
                    image = read_rgb(img_path)
                    image = cv2.resize(image, self.image_size,
                                       interpolation=cv2.INTER_CUBIC)
                    n, m, _ = image.shape
                    pos = self.prn.process(input=image,
                                           image_info=np.array([0, m, 0, n]))
                    vertices = self.prn.get_vertices(pos)
                    name_vertices.append(vertices.tolist())
                self.embeddings[name] = name_vertices
            with open(embeddings_path, mode="w") as f:
                json.dump(self.embeddings, f, indent=4)
        else:
            with open(embeddings_path, mode="r") as f:
                self.embeddings = json.load(f)

    def get_embedding(self, image):
        """
        :param image: Detected face.
        :return: Array of vertices with shape=(43867, 3).
        """
        image = cv2.resize(image, self.image_size, interpolation=cv2.INTER_CUBIC)
        n, m, _ = image.shape
        pos = self.prn.process(image, image_info=np.array([0, m, 0, n]))
        return self.prn.get_vertices(pos)

    # def hausdorff_distance(self, embedding1, embedding2):
    #     return max(directed_hausdorff(embedding1, embedding2),
    #                directed_hausdorff(embedding2, embedding1))[0]

    def hausdorff_distance(self, embeddings, img_emb):
        """
        Compute the Hausdorff distance between a set of embeddings and the
        current embedding.

        :param embeddings: Set of embeddings to compare against.
        :param img_emb: Current embedding to compare with.
        :return: A list of distances between the embeddings and the current
            image embedding.
        """
        dists = []
        for emb in embeddings:
            dists.append(max(directed_hausdorff(emb, img_emb),
                             directed_hausdorff(img_emb, emb))[0])
        return dists

    def find_closest_mean(self, image):
        """
        Find the average distance to every person in the database.

        :param image: Detected face.
        :return: Name of the most likely person.
        """
        image_embedding = self.get_embedding(image)
        confidences = {}
        for name, embeddings in self.embeddings.items():
            confidences[name] = np.mean(
                self.hausdorff_distance(embeddings, image_embedding))
        print("result:", min(confidences, key=confidences.get))
        print(confidences)
        return min(confidences, key=confidences.get)
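
# A minimal usage sketch for the Recognition class above: with train=False it
# loads the precomputed embeddings and matches a detected face against them.
# The image path is a placeholder.
if __name__ == "__main__":
    recognizer = Recognition(train=False)
    face = read_rgb("some_detected_face.jpg")  # hypothetical face crop
    predicted_name = recognizer.find_closest_mean(face)
    print(predicted_name)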
def initialize():
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # 0 default GPU, -1 for CPU
    prn = PRN(is_dlib=True)
    return prn
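
# A short usage sketch for initialize(): regress a position map for one image
# and extract vertices/landmarks via PRNet's API, as the other snippets in
# this section do. The image path is a placeholder.
from skimage.io import imread

prn = initialize()
image = imread("face.jpg")              # hypothetical input image
pos = prn.process(image)                # dlib face detection + PRNet
if pos is not None:
    vertices = prn.get_vertices(pos)    # dense 3D vertices
    landmarks = prn.get_landmarks(pos)  # 68 3D landmarks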
class face_collect():

    def __init__(self):
        self.face_detect = face_recognition.MTCNN()
        self.face_net = face_recognition.facenetEmbedding()
        self.prn = PRN()
        self.bg_list = []
        self.gesture_count = 0

    def face_rect(self, image=None):
        bboxes, landmarks = self.face_detect.detect_face(image, fixed="height")
        return bboxes, landmarks

    def face_512_vector(self, face_images=None):
        face_images = image_processing.get_prewhiten_images(face_images)
        pred_emb = self.face_net.get_embedding(face_images)
        return pred_emb

    def CLAHE(self, img, clipLimit=2.0, tileGridSize=(4, 4)):
        # img = img.astype(np.float32)
        clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
        new_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        new_bilateral_image = cv2.bilateralFilter(new_image, 4, 75, 75)
        # contrast-limited adaptive histogram equalization
        new_image = clahe.apply(new_image)
        new_bilateral_image = clahe.apply(new_bilateral_image)
        new_image = cv2.cvtColor(new_image, cv2.COLOR_GRAY2BGR)
        new_bilateral_image = cv2.cvtColor(new_bilateral_image, cv2.COLOR_GRAY2BGR)
        return new_image, new_bilateral_image

    def extension_img(self, bg_list=None):
        person = Person()
        euc_dists = []
        cos_dists = []
        image_list = []
        print('Processing data, please wait...')
        for image in bg_list:
            [w, h, c] = image.shape
            for scale in np.arange(0.4, 1.7, 0.6):
                bg_image = image_processing.resize_image(
                    image, int(w * scale), int(h * scale))
                for angle in np.arange(-30, 31, 15):
                    rotate_bg_image = tools.rotate_bound(bg_image, angle)
                    bboxes, landmarks = self.face_rect(rotate_bg_image)
                    if len(bboxes) == 0:
                        print("-----no face")
                    else:
                        new_images = image_processing.get_bboxes_image(
                            rotate_bg_image, bboxes, landmarks,
                            resize_width, resize_height)
                        # for clipLimit in np.arange(0.5, 3, 0.5):
                        new_image, bilateral_image = self.CLAHE(new_images[0],
                                                                clipLimit=2)
                        image_list.append(new_image)
                        image_list.append(bilateral_image)
                        new_clahe_image, clahe_bilateral_image = self.CLAHE(
                            np.fliplr(new_images[0]))
                        image_list.append(new_clahe_image)
                        image_list.append(clahe_bilateral_image)
                        cv2.imshow("789", clahe_bilateral_image)
                        cv2.waitKey(1)
        image_emb = self.face_512_vector(image_list)
        face_data = image_emb.tolist()
        person.face_data = [{
            'yaw': '{}'.format(0),
            'pitch': '{}'.format(0),
            'face_data': face_data
        }]
        person.euc_dists = euc_dists
        person.cos_dists = cos_dists
        print('Generating model, please wait...')
        return person

    def live_test(self, image):
        try:
            bboxes, landmarks = self.face_rect(image)
            images = image_processing.get_bboxes_image(image, bboxes,
                                                       landmarks, 256, 256)
            face = images[0]
            prn_face = face / 255.
            pos = self.prn.net_forward(prn_face)
            vertices = self.prn.get_vertices(pos)
            camera_matrix, pose = estimate_pose.estimate_pose(vertices)
            l_r, u_d, _ = pose[0], pose[1], pose[2]
            if self.gesture_count == 0:
                if abs(l_r) > 0.087 or abs(u_d) > 0.187:
                    if l_r < 0:
                        print("Suggest turning your head slightly to the right")
                    else:
                        # the original printed the same advice for both signs;
                        # the opposite direction was presumably intended here
                        print("Suggest turning your head slightly to the left")
                    if u_d < 0:
                        print("Suggest tilting your head up slightly")
                    else:
                        print("Suggest lowering your head slightly")
                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1
            if self.gesture_count == 1:
                if u_d > -0.35:
                    print("Please lower your head slowly")
                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1
            if self.gesture_count == 2:
                if l_r < 0.44:
                    print("Please turn your head to the right slowly")
                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1
            if self.gesture_count == 3:
                if l_r > -0.44:
                    print(l_r)
                    print("Please turn your head to the left slowly")
                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1
            print(self.gesture_count)
            return self.gesture_count
        except:
            print("-----no face")
            return self.gesture_count

    def create_face_vector(self, image=None, user_name='user_name_test',
                           real_name='real_name_test'):
        gesture_count = self.live_test(image)
        if gesture_count <= 3:
            return False
        person = self.extension_img(self.bg_list)
        if person is None:
            return False
        Person.create_table()
        Person.delete().where(Person.user_name == user_name).execute()
        p_id = Person.insert({
            'user_name': user_name,
            'real_name': real_name,
            'face_data': json.dumps(person.face_data),
            'euc_dists': json.dumps(person.euc_dists),
            'cos_dists': json.dumps(person.cos_dists),
        }).execute()
        classifier_model.classify()
        print('Data processing finished, next steps...')
        return True
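
# A minimal usage sketch for face_collect: feed webcam frames through
# create_face_vector() until the four head poses have been captured. The
# camera index and user names are placeholders.
collector = face_collect()
cap = cv2.VideoCapture(0)
done = False
while not done:
    ret, frame = cap.read()
    if not ret:
        break
    done = collector.create_face_vector(frame, user_name='alice',
                                        real_name='Alice')
cap.release()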
def main(args):
    if args.saveOutput:
        args.calculateAllPointsError = True
        args.calculateKeypointsError = False

    # init CUDA
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # ID of GPU, -1 for CPU

    # init PRN
    prn = PRN(is_dlib=True)

    # init ground truth model info
    mat_file_content = loadmat('./../data/model_info.mat')
    ground_truth_keypoint_indices = np.array(mat_file_content['keypoints'])[0]
    all_vertices_indices = np.loadtxt('./../data/face_ind.txt').astype(np.int32)

    # validate image folders
    image_folder = args.inputFolder
    ground_truth_folder = args.groundTruthFolder
    assert os.path.exists(image_folder) and os.path.exists(ground_truth_folder)

    # get front and angled image paths with matching face angle
    print('\nextracting image data from input folder...')
    image_path_dict = get_300W_front_and_profile_image_paths_dict(
        image_folder, image_angle2=args.sideAngle)

    # sample k random image ids from image path dictionary
    sampled_face_ids = random.sample(list(image_path_dict.keys()), k=args.k)
    sampled_face_ids_length = len(sampled_face_ids)
    not_outlier = 0
    individual_error_values = []

    # iterate over each image pair and find error
    for i, face_id in enumerate(sampled_face_ids):
        print('\nface id:', face_id)
        print('', sampled_face_ids_length - i, 'face(s) left..')

        # read images
        image1_path = image_path_dict[face_id][0]  # front image
        image2_path = image_path_dict[face_id][1]  # angled/profile image
        image1 = get_image(image1_path)
        image2 = get_image(image2_path)
        assert image1.shape == image2.shape
        h = image1.shape[0]  # height

        # get position maps from images
        print('getting position map 2...')
        position_map2 = get_pos_from_image(image2, prn)
        if position_map2 is None:
            print('could not detect face')
            continue
        print('getting position map 1...')
        position_map1 = get_pos_from_image(image1, prn)
        if position_map1 is None:
            print('could not detect face')
            continue

        # get vertices from position maps
        # start = time.time()
        raw_vertices1, vertices1 = get_vertices_from_pos(h, position_map1, prn)
        # end = time.time()
        # print('single pass:', ...)
        # reconstructed by symmetry with the front image (a span of the
        # original source was masked with '******' here):
        raw_vertices2, vertices2 = get_vertices_from_pos(h, position_map2, prn)
        # The rest of the masked span extracted keypoints1/keypoints2 and
        # loaded keypoints_ground_truth, vertices_ground_truth and
        # raw_vertices_ground_truth for this face id, and when ground truth
        # was missing it ran:
        #     print('could not find ground truth data')
        #     continue

        # get initial alignment matrices, found empirically
        init_align1 = get_initial_alignment_trans_matrix_front()
        init_align2 = get_initial_alignment_trans_matrix_front()
        if (args.sideAngle > 45 and args.sideAngle < 90):
            init_align2 = get_initial_alignment_trans_matrix_left()
        elif (args.sideAngle < -45 and args.sideAngle > -90):
            init_align2 = get_initial_alignment_trans_matrix_right()

        # align keypoints and vertices with ground truth
        if args.calculateKeypointsError:
            _, interocular_distance, keypoints1 = align_vertices(
                keypoints1, keypoints_ground_truth, init_align=init_align1)
            _, interocular_distance, keypoints2 = align_vertices(
                keypoints2, keypoints_ground_truth, init_align=init_align2)
        elif args.calculateAllPointsError:
            _, interocular_distance, keypoints1, vertices1 = align_keypoints_and_vertices(
                keypoints1, keypoints_ground_truth, vertices1,
                init_align=init_align1)
            _, interocular_distance, keypoints2, vertices2 = align_keypoints_and_vertices(
                keypoints2, keypoints_ground_truth, vertices2,
                init_align=init_align2)

        # calculate and align average of predicted keypoints and vertices
        if args.calculateKeypointsError:
            keypoints_average = np.array([keypoints2, keypoints1]).mean(axis=0)
            _, interocular_distance, keypoints_average = align_vertices(
                keypoints_average, keypoints_ground_truth)
            keypoints_experimental_average = get_weighted_average(
                keypoints2, keypoints1)
            _, interocular_distance, keypoints_experimental_average = align_vertices(
                keypoints_experimental_average, keypoints_ground_truth)
        elif args.calculateAllPointsError:
            vertices_average = np.array([vertices1, vertices2]).mean(axis=0)
            _, interocular_distance, vertices_average = align_vertices(
                vertices_average, vertices_ground_truth)

        # calculate ground truth error for front, side and average
        if args.calculateKeypointsError:
            nse_1 = normalized_squared_error(keypoints1, keypoints_ground_truth,
                                             interocular_distance)
            nse_2 = normalized_squared_error(keypoints2, keypoints_ground_truth,
                                             interocular_distance)
            nse_a = normalized_squared_error(keypoints_average,
                                             keypoints_ground_truth,
                                             interocular_distance)
            # nse_ea = normalized_squared_error(keypoints_experimental_average,
            #                                   keypoints_ground_truth,
            #                                   interocular_distance)
        if args.calculateAllPointsError:
            nse_1 = normalized_squared_error(vertices1, vertices_ground_truth,
                                             interocular_distance)
            nse_2 = normalized_squared_error(vertices2, vertices_ground_truth,
                                             interocular_distance)
            nse_a = normalized_squared_error(vertices_average,
                                             vertices_ground_truth,
                                             interocular_distance)

        # check if result is outlier
        if (np.mean(nse_2) < np.mean(nse_1) * 10) and (np.mean(nse_1) < np.mean(nse_2) * 10):
            # individual_error_values.append([nse_1, nse_2, nse_a, nse_ea])
            individual_error_values.append([nse_1, nse_2, nse_a])
            print('success calculating error')
            not_outlier += 1
        else:
            print('bad ICP fit or outlier prediction')

        # save output
        if args.saveOutput:
            colors = prn.get_colors(image1, raw_vertices1)
            plt.imsave('results/' + face_id + 'front.jpg', image1)
            plt.imsave('results/' + face_id + 'side.jpg', image2)
            write_obj_with_colors('results/' + face_id + '_average.obj',
                                  vertices1, prn.triangles, colors)
            np.savetxt('results/' + face_id + '_front.txt', vertices1,
                       delimiter=';')
            np.savetxt('results/' + face_id + '_side.txt', vertices2,
                       delimiter=';')
            np.savetxt('results/' + face_id + '_average.txt', vertices_average,
                       delimiter=';')
            np.savetxt('results/' + face_id + '_ground_truth.txt',
                       raw_vertices_ground_truth, delimiter=';')

        print('NME front face:', np.average(nse_1, axis=0))
        print('NME side face:', np.average(nse_2, axis=0))
        print('NME average:', np.average(nse_a, axis=0))
        # print('NME exp average:', np.average(nse_ea, axis=0))

    print((sampled_face_ids_length - not_outlier) * 100 / sampled_face_ids_length,
          '% of faces with no detected face, outlier prediction or bad ICP fit')

    # plot results
    NME_values = np.average(individual_error_values, axis=0)
    print('Mean NME front face: ', np.average(NME_values[0]))
    print('Mean NME side face: ', np.average(NME_values[1]))
    print('Mean NME average: ', np.average(NME_values[2]))
    plot_hist_of_distances(individual_error_values)
    plot_individual_keypoint_distances(individual_error_values)
    plot_ced_curves(individual_error_values)
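
# `normalized_squared_error` is not shown in this snippet. A minimal sketch of
# the usual 300W-style normalized error: per-point Euclidean distance between
# prediction and ground truth, divided by the interocular distance (the exact
# squaring/normalization in the original may differ).
def normalized_squared_error(predicted, ground_truth, interocular_distance):
    distances = np.linalg.norm(predicted - ground_truth, axis=1)
    return distances / interocular_distance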
# NOTE: this snippet starts mid-class; the class header and __init__ below are
# reconstructed from context (a temporary working directory created with
# tempfile.mkdtemp), the Python 2 print statements are converted to print(),
# and the imports the fragment needs are added.
import os
from shutil import rmtree
from tempfile import mkdtemp


class TempDir:

    def __init__(self):
        self.tempdir = mkdtemp()
        print('Created temporary directory ' + self.tempdir)

    def __del__(self):
        print('Removing temporary directory ' + self.tempdir)
        rmtree(self.tempdir)

    def join(self, fname):
        return os.path.join(self.tempdir, fname)

    def cleanup(self):
        for f in os.listdir(self.tempdir):
            os.remove(self.join(f))


tempdir = TempDir()
prn = PRN(is_dlib=True)


def prnet(image_path):
    image = imread(image_path)
    [h, w, c] = image.shape
    if c > 3:
        image = image[:, :, :3]
    max_size = max(image.shape[0], image.shape[1])
    if max_size > 1000:
        image = rescale(image, 1000. / max_size)
        image = (image * 255).astype(np.uint8)
    pos = prn.process(image)  # use dlib to detect face
    image = image / 255.
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import tensorflow as tf
from tensorflow.python.framework import tensor_util


def find_face_bounding_box(boxes, scores):
    min_score_thresh = 0.7
    for i in range(0, boxes.shape[0]):
        if scores[i] > min_score_thresh:
            return tuple(boxes[i].tolist())


# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # GPU number, -1 for CPU
prn = PRN(is_dlib=False)

save_folder = 'PRNet/TestImages/results'

# List of the strings that are used to add the correct label for each box.
PATH_TO_LABELS = './tensorflow_face_detection/protos/face_label_map.pbtxt'
NUM_CLASSES = 2
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

channel = grpc.insecure_channel('0.0.0.0:8500')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
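
# A sketch of how the stub above could be queried for face detection boxes.
# The model name 'face_detector' is a placeholder, and the tensor names follow
# the standard TF object-detection serving signature; both are assumptions
# about the deployed model.
import numpy as np


def detect_face(image_np, stub):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'face_detector'      # hypothetical model name
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.make_tensor_proto(image_np[np.newaxis, ...]))  # add batch dimension
    result = stub.Predict(request, 10.0)           # 10-second timeout
    boxes = tensor_util.MakeNdarray(result.outputs['detection_boxes'])[0]
    scores = tensor_util.MakeNdarray(result.outputs['detection_scores'])[0]
    return find_face_bounding_box(boxes, scores)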
def main(character):
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    # ------------- load data
    # source images: D:\source\raupach\judgeC\raupach_judgeC_001
    # vertices:      D:\characters\judgeC\vertices\judgeC_t00t
    # saving to:     D:\characters\judgeC\src\align\raupach_judgeC_s001_t00t\obj
    image_folder = character.srcdir
    vertices_dir = character.vertdir
    save_folder = character.align_obj_dir
    print('\nImages from: ', image_folder)
    print('\nVertices from: ', vertices_dir)
    print('\nSaving to: ', save_folder, '\n\n')
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # make a sorted list of all the source images
    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    # total_num = len(image_path_list)
    image_path_list = sorted(image_path_list)

    # make a sorted list of all the reference vertices
    types = ('*.npy', '*.jpg')
    vert_path_list = []
    for files in types:
        vert_path_list.extend(glob(os.path.join(vertices_dir, files)))
    # total_num_vert = len(vert_path_list)
    vert_path_list = sorted(vert_path_list)
    print(vert_path_list)

    # iterate over the source images and repose with corresponding vertices
    for i, image_path in enumerate(image_path_list):
        name = image_path.strip().split('\\')[-1][:-4]
        print("\n%s\nALIGNED WITH\n%s\n" % (image_path_list[i], vert_path_list[i]))

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if True:  # if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # square input (was shape[1] == shape[2], which compared width
            # against the channel axis)
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1,
                                0, image.shape[0] - 1])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue
        vertices = prn.get_vertices(pos)

        # takes the nth file in the vertices directory to "frontalize"
        # the source image
        can_vert = vert_path_list[i]
        save_vertices = align(vertices, can_vert)
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        colors = prn.get_colors(image, vertices)
        # save the 3D face (can be opened with MeshLab)
        write_obj(os.path.join(save_folder, name + '.obj'),
                  save_vertices, colors, prn.triangles)
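
# `align` is not defined in this snippet; it maps the regressed vertices onto
# the reference vertices stored in a .npy file. A minimal sketch using a rigid
# Procrustes (Kabsch) fit; the author's implementation may use ICP or a
# similarity transform instead.
def align(vertices, can_vert_path):
    target = np.load(can_vert_path)        # reference (canonical) vertices
    src_mean = vertices.mean(axis=0)
    tgt_mean = target.mean(axis=0)
    src = vertices - src_mean
    tgt = target - tgt_mean
    # Kabsch: optimal rotation from the SVD of the covariance matrix
    U, _, Vt = np.linalg.svd(src.T @ tgt)
    R = U @ Vt
    if np.linalg.det(R) < 0:               # avoid reflections
        U[:, -1] *= -1
        R = U @ Vt
    return src @ R + tgt_mean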