def load_align():
    thresh = config.thresh
    min_face_size = config.min_face
    stride = config.stride
    test_mode = config.test_mode
    detectors = [None, None, None]
    # model locations
    model_path = ['../align/model/PNet/', '../align/model/RNet/', '../align/model/ONet']
    batch_size = config.batches
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if test_mode in ["RNet", "ONet"]:
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    if test_mode == "ONet":
        ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh)
    return mtcnn_detector
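# A minimal usage sketch for load_align(), not part of the original code: it assumes
# config and the checkpoint directories under '../align/model/' are available, and
# 'test.jpg' is a hypothetical image path.
if __name__ == '__main__':
    mtcnn_detector = load_align()
    image = cv2.imread('test.jpg')
    boxes_c, _ = mtcnn_detector.detect(image)
    print('detected %d face(s)' % boxes_c.shape[0])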
def test_aux_net(name_list, dataset_path, prefix, epoch, batch_size, test_mode="rnet",
                 thresh=[0.6, 0.6, 0.7], min_face_size=24, vis=False):
    detectors = [None, None, None]
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    # load pnet model
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    # load rnet model
    if test_mode in ["rnet", "onet"]:
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    # load onet model
    if test_mode == "onet":
        ONet = Detector(O_AUX_Net, 48, batch_size[2], model_path[2], aux_idx=3)
        detectors[2] = ONet
    mtcnn_detector = JDAPDetector(detectors=detectors, min_face_size=min_face_size, threshold=thresh)
    fin = open(name_list, 'r')
    fout = open('/home/dafu/workspace/FaceDetect/tf_JDAP/evaluation/onet/onet_wider_landmark_pose_test.txt', 'w')
    lines = fin.readlines()
    test_image_id = 0
    for line in lines:
        test_image_id += 1
        related_name = line.strip().split()[0]
        if '.jpg' not in related_name:
            related_name += '.jpg'
        print(test_image_id, related_name)
        image_name = os.path.join(dataset_path, related_name)
        image = cv2.imread(image_name)
        src_boxes, cal_boxes, land_reg, pose_reg = mtcnn_detector.detect(image, aux_idx=3)
        box_num = cal_boxes.shape[0]
        write_str = line + str(box_num) + '\n'
        proposal_side = src_boxes[:, 2] - src_boxes[:, 0]
        # convert pose regression from radians to degrees
        pose_reg = pose_reg * 180 / 3.14
        for i in range(box_num):
            bbox = cal_boxes[i, :4]
            score = cal_boxes[i, 4]
            x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
            write_str += ' '.join(str(x) for x in [x1, y1, x2 - x1 + 1, y2 - y1 + 1]) + ' %.4f' % score + '\n'
            if vis:
                pose_info = "%.2f %.2f %.2f" % (pose_reg[i][0], pose_reg[i][1], pose_reg[i][2])
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 200), 2)
                cv2.putText(image, pose_info, (x1, y2), 1, 1, (200, 200, 0), 2)
                for land_id in range(5):
                    point_x = int(land_reg[i][land_id * 2] * proposal_side[i] + src_boxes[i][0])
                    point_y = int(land_reg[i][land_id * 2 + 1] * proposal_side[i] + src_boxes[i][1])
                    # point_x = int(land_reg[i][land_id * 2] * w + x1)
                    # point_y = int(land_reg[i][land_id * 2 + 1] * h + y1)
                    cv2.circle(image, (point_x, point_y), 2, (200, 0, 0), 2)
        fout.write(write_str)
        if vis:
            cv2.imshow("a", image)
            cv2.waitKey(0)
def main(args):
    """Generate the training input of the next network from PNet or RNet detections."""
    size = args.input_size
    batch_size = config.batches
    min_face_size = config.min_face
    stride = config.stride
    thresh = config.thresh
    scale_factor = config.scale_factor
    # model locations
    model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
    if size == 12:
        net = 'PNet'
        save_size = 24
    elif size == 24:
        net = 'RNet'
        save_size = 48
    # raw image data directory
    base_dir = 'data/WIDER_train/'
    # output directory for the cropped images
    data_dir = 'data/%d' % (save_size)
    neg_dir = os.path.join(data_dir, 'negative')
    pos_dir = os.path.join(data_dir, 'positive')
    part_dir = os.path.join(data_dir, 'part')
    for dir_path in [neg_dir, pos_dir, part_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    detectors = [None, None, None]
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if net == 'RNet':
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    filename = 'data/wider_face_train_celeba.txt'
    # read_anno (in utils) maps each image to its ground-truth boxes
    data = read_anno(base_dir, filename)
    mtcnn_detector = MtcnnDetector(detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh)
    # save_path = data_dir
    # save_file = os.path.join(save_path, 'detections.pkl')
    # if not os.path.exists(save_file):
    # wrap the data in an iterator
    print('loading data')
    test_data = TestLoader(data['images'])
    detections, _ = mtcnn_detector.detect_face(test_data)
    print('detection finished')
    # with open(save_file, 'wb') as f:
    #     pickle.dump(detections, f, 1)
    print('start generating images')
    save_hard_example(save_size, data, neg_dir, pos_dir, part_dir, detections)
def t_net(anno_file, data_dir, image_size, epoch, batch_size, thresh, slide_window,
          test_mode="PNet", min_face_size=25, stride=2):
    prefix = [config.ROOT_DIR + '/trained_models/PNet',
              config.ROOT_DIR + '/trained_models/RNet',
              config.ROOT_DIR + '/trained_models/ONet']
    detectors = [None, None, None]
    print("Test model: ", test_mode)
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    print(model_path[0])
    if slide_window:
        PNet = Detector(P_Net, 12, batch_size[0], model_path[0])
    else:
        PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if test_mode in ["RNet", "ONet"]:
        print("==================================", test_mode)
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    if test_mode == "ONet":
        print("==================================", test_mode)
        ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    data = read_annotation(anno_file)
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh, slide_window=slide_window)
    print("==================================")
    print('load test data')
    test_data = TestLoader(data['images'][:10])
    print('finish loading')
    print('start detecting....')
    detections, _ = mtcnn_detector.detect_face(test_data)
    print('finish detecting')
    save_net = 'RNet'
    if test_mode == "PNet":
        save_net = "RNet"
    elif test_mode == "RNet":
        save_net = "ONet"
    save_path = os.path.join(data_dir, save_net)
    print('save_path is :')
    print(save_path)
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    save_file = os.path.join(save_path, "detections.pkl")
    with open(save_file, 'wb') as f:
        pickle.dump(detections, f, 1)
    save_hard_example(data, save_path, image_size)
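# A minimal invocation sketch for t_net(), not from the original script: the annotation
# file, epochs, batch sizes and thresholds below are placeholders chosen to match values
# used elsewhere in this section, not verified settings.
t_net(anno_file='data/wider_face_train_celeba.txt',
      data_dir='data/',
      image_size=24,
      epoch=[18, 14, 16],
      batch_size=[2048, 256, 16],
      thresh=[0.6, 0.6, 0.7],
      slide_window=False,
      test_mode="PNet")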
def test_single_net(prefix, epoch, stage, attribute='landmark_pose'):
    model_path = '%s-%s' % (prefix, epoch)
    # pick the detector that matches the network input size (stage)
    if stage == 12:
        detector = FcnDetector(P_Net, model_path)
    elif stage == 24:
        detector = Detector(R_Net, 24, 1, model_path)
    elif stage == 48:
        if 'landmark_pose' in attribute:
            detector = Detector(O_AUX_Net, 48, 1, model_path, aux_idx=3)
        else:
            detector = Detector(O_Net, 48, 1, model_path)
    return detector
def test_single_net(prefix, epoch, stage, attribute='landmark_pose'):
    model_path = '%s-%s' % (prefix, epoch)
    # pick the detector that matches the network input size (stage)
    if stage == 12:
        detector = FcnDetector(P_Net, model_path)
    elif stage == 24:
        detector = Detector(R_Net, 24, 1, model_path)
    elif stage == 48:
        if 'landmark_pose' in attribute:
            detector = Detector(O_AUX_Net, 48, 1, model_path, aux_idx=3)
            #detector = Detector(JDAP_48Net_Landmark_Pose_Mean_Shape, 48, 1, model_path, aux_idx=3)
        elif 'landmark' in attribute:
            detector = Detector(JDAP_48Net_Landmark_Mean_Shape, 48, 1, model_path, aux_idx=1)
            #detector = Detector(JDAP_48Net_Landmark, 48, 1, model_path, aux_idx=1)
        elif 'pose' in attribute:
            detector = Detector(JDAP_48Net_Pose, 48, 1, model_path, aux_idx=2)
        else:
            detector = Detector(O_Net, 48, 1, model_path)
    return detector
def _init_model(self, prefix, epoch):
    test_mode, batch_size, is_ERC, thresh, min_face_size = self.fix_param
    detectors = [None, None, None]
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    # load pnet model
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    self.aux_idx = 0
    # load rnet model
    if "onet" in test_mode or "rnet" in test_mode:
        if is_ERC:
            self.aux_idx = 4
            RNet = Detector(R_Net_ERC, 24, batch_size[1], model_path[1], self.aux_idx)
        else:
            #RNet = Detector(M_Net, 18, batch_size[1], model_path[1])
            RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    # load onet model
    if "onet" in test_mode:
        if 'landmark_pose' in test_mode:
            self.aux_idx = 3
            #ONet = Detector(JDAP_48Net_Landmark_Pose_Dynamic_Shape, 48, batch_size[2], model_path[2], self.aux_idx)
            ONet = Detector(JDAP_48Net_Landmark_Pose_Mean_Shape, 48, batch_size[2], model_path[2], self.aux_idx)
            #ONet = Detector(O_AUX_Net, 48, batch_size[2], model_path[2], self.aux_idx)
            #ONet = Detector(A_Net, 36, batch_size[2], model_path[2], self.aux_idx)
        elif 'landmark' in test_mode:
            self.aux_idx = 1
            #ONet = Detector(JDAP_48Net_Landmark, 48, batch_size[2], model_path[2], self.aux_idx)
            ONet = Detector(JDAP_48Net_Landmark_Mean_Shape, 48, batch_size[2], model_path[2], self.aux_idx)
        elif 'pose' in test_mode:
            self.aux_idx = 2
            ONet = Detector(JDAP_48Net_Pose, 48, batch_size[2], model_path[2], self.aux_idx)
            #ONet = Detector(JDAP_48Net_Pose_Branch, 48, batch_size[2], model_path[2], self.aux_idx)
        else:
            #ONet = Detector(A_Cls_Net, 36, batch_size[2], model_path[2])
            ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    jdap_detector = JDAPDetector(detectors=detectors, is_ERC=False,
                                 min_face_size=min_face_size, threshold=thresh)
    return jdap_detector
def gen_mtcnn_model():
    test_mode = "ONet"
    thresh = [0.9, 0.6, 0.7]
    min_face_size = 24
    stride = 2
    slide_window = False
    shuffle = False
    detectors = [None, None, None]
    prefix = ['E:\\AIcode\\face\\model\\MTCNN_model\\PNet_landmark\\PNet',
              'E:\\AIcode\\face\\model\\MTCNN_model\\RNet_landmark\\RNet',
              'E:\\AIcode\\face\\model\\MTCNN_model\\ONet_landmark\\ONet']
    epoch = [18, 14, 16]
    batch_size = [2048, 256, 16]
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    # load pnet model
    if slide_window:
        PNet = Detector(P_Net, 12, batch_size[0], model_path[0])
    else:
        PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    # load rnet model
    if test_mode in ["RNet", "ONet"]:
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    # load onet model
    if test_mode == "ONet":
        ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh, slide_window=slide_window)
    return mtcnn_detector
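# A minimal usage sketch for gen_mtcnn_model(), not part of the original code: it assumes
# the MTCNN_model checkpoints referenced above exist; 'sample.jpg' and 'sample_det.jpg'
# are hypothetical paths. The detected boxes are drawn the same way as in test_aux_net().
detector = gen_mtcnn_model()
img = cv2.imread('sample.jpg')
boxes_c, _ = detector.detect(img)
for box in boxes_c:
    x1, y1, x2, y2, score = box
    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 200), 2)
cv2.imwrite('sample_det.jpg', img)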
thresh = [0.9, 0.6, 0.7]
min_face_size = 24
stride = 2
slide_window = False
shuffle = False
detectors = [None, None, None]
prefix = [FLAGS.ckpt + '/PNet_landmark/PNet',
          FLAGS.ckpt + '/RNet_landmark/RNet',
          FLAGS.ckpt + '/ONet_landmark/ONet']
epoch = [18, 14, 16]
batch_size = [2048, 256, 16]
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
# load pnet model
if slide_window:
    PNet = Detector(P_Net, 12, batch_size[0], model_path[0],
                    FLAGS.num_inter_threads, FLAGS.num_intra_threads)
else:
    PNet = FcnDetector(P_Net, model_path[0], FLAGS.num_inter_threads, FLAGS.num_intra_threads)
detectors[0] = PNet
# load rnet model
if test_mode in ["RNet", "ONet"]:
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1],
                    FLAGS.num_inter_threads, FLAGS.num_intra_threads)
    detectors[1] = RNet
# load onet model
if test_mode == "ONet":
    ONet = Detector(O_Net, 48, batch_size[2], model_path[2],
                    FLAGS.num_inter_threads, FLAGS.num_intra_threads)
    detectors[2] = ONet
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                               stride=stride, threshold=thresh, slide_window=slide_window)
gt_imdb = []
def load_and_align_data(image_size=160, coordinate=None):
    print('Creating networks and loading parameters')
    # model_path, batch_size, min_face_size, thresh and image_files are assumed to be
    # module-level settings defined elsewhere in the original script
    detectors = [None, None, None]
    # load P_Net
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    # load R_Net
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet
    # load O_Net
    ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
    detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size, threshold=thresh)
    img_list = []
    test_data = TestLoader(image_files)
    all_boxes, landmarks = mtcnn_detector.detect_face(test_data)
    action_num = int(len(coordinate) / 4)
    # one flag per detected face in the last image, one flag per action region
    face_state = [False] * len(all_boxes[-1])
    action_state = [False] * action_num
    for i in range(len(all_boxes[-1])):
        for j in range(action_num):
            if action_state[j]:
                continue
            box = all_boxes[-1][i]
            # a face satisfies action j when its box lies inside the j-th coordinate region
            if box[0] > coordinate[4 * j] and box[1] > coordinate[4 * j + 1] \
                    and box[2] < coordinate[4 * j + 2] and box[3] < coordinate[4 * j + 3]:
                face_state[i] = True
                action_state[j] = True
    count = 0
    for image in image_files:
        img = misc.imread(os.path.expanduser(image), mode='RGB')
        for bbox in all_boxes[count]:
            # crop the face box
            cropped = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
            # resize the face
            aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
            # prewhiten the face
            prewhitened = facenet.prewhiten(aligned)
            img_list.append(prewhitened)
        count += 1
    images = np.stack(img_list)
    return images, face_state
def main():
    thresh = config.thresh
    min_face_size = config.min_face
    stride = config.stride
    test_mode = config.test_mode
    detectors = [None, None, None]
    # model locations
    model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
    batch_size = config.batches
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if test_mode in ["RNet", "ONet"]:
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    if test_mode == "ONet":
        ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh)
    out_path = config.out_path
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # input images
    path = config.test_dir
    # get image classes and paths
    dataset = get_dataset(path)
    random.shuffle(dataset)
    bounding_boxes_filename = os.path.join(out_path, 'bounding_boxes.txt')
    with open(bounding_boxes_filename, "w") as text_file:
        for cls in tqdm(dataset):
            output_class_dir = os.path.join(out_path, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
            random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                # get the bare file name, e.g. '001'
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir, filename + '.jpg')
                if not os.path.exists(output_filename):
                    try:
                        img = cv2.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img is None or img.ndim < 3:
                            print('unusable image "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        img = img[:, :, 0:3]
                        # get face boxes with MTCNN
                        try:
                            boxes_c, _ = mtcnn_detector.detect(img)
                        except:
                            print('failed to detect image: {}'.format(image_path))
                            continue
                        # number of face boxes
                        num_box = boxes_c.shape[0]
                        if num_box > 0:
                            det = boxes_c[:, :4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[:2]
                            if num_box > 1:
                                if config.detect_multiple_faces:
                                    for i in range(num_box):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    # keep only the most confident face when several are found
                                    score = boxes_c[:, 4]
                                    index = np.argmax(score)
                                    det_arr.append(det[index, :])
                            else:
                                det_arr.append(np.squeeze(det))
                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                bb = [int(max(det[0], 0)), int(max(det[1], 0)),
                                      int(min(det[2], img_size[1])), int(min(det[3], img_size[0]))]
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                try:
                                    scaled = cv2.resize(cropped, (config.image_size, config.image_size),
                                                        interpolation=cv2.INTER_LINEAR)
                                except:
                                    print('failed to resize image: {}, box size {},{},{},{}'.format(
                                        image_path, bb[0], bb[1], bb[2], bb[3]))
                                    continue
                                filename_base, file_extension = os.path.splitext(output_filename)
                                if config.detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(filename_base, file_extension)
                                cv2.imwrite(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' %
                                                (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('unable to align image "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
slide_window = False
shuffle = False
detectors = [None, None, None]
prefix = ['model/MTCNN_model/PNet_landmark/PNet',
          'model/MTCNN_model/RNet_landmark/RNet',
          'model/MTCNN_model/ONet_landmark/ONet']
epoch = [18, 14, 16]
batch_size = [2048, 256, 16]
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
# load pnet model
if slide_window:
    PNet = Detector(P_Net, 12, batch_size[0], model_path[0])
else:
    PNet = FcnDetector(P_Net, model_path[0])
detectors[0] = PNet
# load rnet model
if test_mode in ["RNet", "ONet"]:
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet
# load onet model
if test_mode == "ONet":
    ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
    detectors[2] = ONet
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                               stride=stride, threshold=thresh, slide_window=slide_window)
def align_face(path='../pictures/'):
    thresh = config.thresh
    min_face_size = config.min_face
    stride = config.stride
    test_mode = config.test_mode
    detectors = [None, None, None]
    # model locations
    model_path = ['../align/model/PNet/', '../align/model/RNet/', '../align/model/ONet']
    batch_size = config.batches
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if test_mode in ["RNet", "ONet"]:
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    if test_mode == "ONet":
        ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
        detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                                   stride=stride, threshold=thresh)
    # collect image classes and paths
    img_paths = os.listdir(path)
    class_names = [a.split('.')[0] for a in img_paths]
    img_paths = [os.path.join(path, p) for p in img_paths]
    scaled_arr = []
    class_names_arr = []
    for image_path, class_name in zip(img_paths, class_names):
        img = cv2.imread(image_path)
        # cv2.imshow('', img)
        # cv2.waitKey(0)
        try:
            boxes_c, _ = mtcnn_detector.detect(img)
        except:
            print('failed to detect image: {}'.format(image_path))
            continue
        # number of face boxes
        num_box = boxes_c.shape[0]
        if num_box > 0:
            det = boxes_c[:, :4]
            det_arr = []
            img_size = np.asarray(img.shape)[:2]
            if num_box > 1:
                # keep only the most confident face when several are found
                score = boxes_c[:, 4]
                index = np.argmax(score)
                det_arr.append(det[index, :])
            else:
                det_arr.append(np.squeeze(det))
            for i, det in enumerate(det_arr):
                det = np.squeeze(det)
                bb = [int(max(det[0], 0)), int(max(det[1], 0)),
                      int(min(det[2], img_size[1])), int(min(det[3], img_size[0]))]
                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                # resize to 160x160 and normalize to roughly [-1, 1]
                scaled = (cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_LINEAR) - 127.5) / 128.0
                scaled_arr.append(scaled)
                class_names_arr.append(class_name)
        else:
            print('unable to align image "%s"' % image_path)
    scaled_arr = np.asarray(scaled_arr)
    class_names_arr = np.asarray(class_names_arr)
    return scaled_arr, class_names_arr
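# A minimal usage sketch for align_face(), not part of the original code: it assumes
# '../pictures/' holds one image per identity named '<person>.jpg' and that config is loaded.
faces, names = align_face('../pictures/')
print(faces.shape)   # expected (N, 160, 160, 3): one normalized 160x160 crop per aligned image
print(names)         # the class name (file stem) for each successfully aligned face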
net = 'SNet'
save_size = 96
# raw image data directory
base_dir = 'data/WIDER_train/'
# output directories for the cropped images
neg_dir = join('data', str(save_size), 'negative')
pos_dir = join('data', str(save_size), 'positive')
part_dir = join('data', str(save_size), 'part')
for dir_path in [neg_dir, pos_dir, part_dir]:
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
detectors = [None, None, None]
PNet = FcnDetector(P_Net, model_path[0], using_cpu=True)
detectors[0] = PNet
if net == 'RNet':
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet
filename = 'data/wider_face_train_celeba.txt'
# read_anno (in utils) maps each image to its ground-truth boxes
data = read_anno(base_dir, filename)
# using MTCNN Detector to generate all boxes of all images
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                               stride=stride, threshold=thresh, scale_factor=scale_factor)
# test_mode = config.test_mode
test_mode = 'ONet'
thresh = [0.6, 0.7, 0.9]
min_face_size = 24
stride = 2
detectors = [None, None, None]
scale_factor = 0.79
# model locations
model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
batch_size = config.batches
detectors[0] = FcnDetector(P_Net, model_path[0])  # detector for PNet
if test_mode in ['RNet', 'ONet']:
    detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])
if test_mode == 'ONet':
    detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])
# use the three detectors to construct the cascaded MTCNN detector
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                               stride=stride, threshold=thresh, scale_factor=scale_factor)
out_path = join('validate', test_mode) + '/'