def convert_image_to_bcolz(pair_filename, image_dir, save_dir, input_size=[112, 112]):
    from torchvision import transforms as trans
    import bcolz
    transform = trans.Compose(
        [trans.ToTensor(), trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    faces_list1, faces_list2, issames_data = read_pair_data(pair_filename)
    print("have {} pair".format(len(issames_data)))
    print("have {} pair".format(len(faces_list1)))
    issames_data = np.array(issames_data)
    issames_data = np.where(issames_data > 0, True, False)
    data = bcolz.fill(shape=[len(faces_list1) + len(faces_list2), 3, input_size[0], input_size[1]],
                      dtype=np.float32, rootdir=save_dir, mode='w')
    for i, (face1_path, face2_path, issame) in enumerate(zip(faces_list1, faces_list2, issames_data)):
        # pred_id, pred_scores = faceRec.predict(faces)
        # alternatively, use get_faces_embedding() to obtain the embeddings and compare them with compare_embedding()
        if image_dir:
            face1_path = os.path.join(image_dir, face1_path)
            face2_path = os.path.join(image_dir, face2_path)
        face1 = image_processing.read_image_gbk(face1_path, colorSpace="BGR")
        face2 = image_processing.read_image_gbk(face2_path, colorSpace="BGR")
        face1 = image_processing.resize_image(face1, resize_height=input_size[0], resize_width=input_size[1])
        face2 = image_processing.resize_image(face2, resize_height=input_size[0], resize_width=input_size[1])
        # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        # image_processing.cv_show_image("image_dict", img)
        face1 = Image.fromarray(face1.astype(np.uint8))
        face2 = Image.fromarray(face2.astype(np.uint8))
        data[i * 2, ...] = transform(face1)
        data[i * 2 + 1, ...] = transform(face2)
        if i % 100 == 0:
            print('loading bin', i)
    print(data.shape)
    np.save(str(save_dir) + '_list', issames_data)
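A minimal usage sketch for convert_image_to_bcolz, assuming an LFW-style pairs file with one face1/face2/issame triple per line; all paths below are hypothetical:

if __name__ == "__main__":
    pair_filename = "./data/pair_data.txt"   # hypothetical pairs file
    image_dir = "./data/faces"               # hypothetical root folder of the face images
    save_dir = "./data/faces_bcolz"          # bcolz rootdir that will be created
    convert_image_to_bcolz(pair_filename, image_dir, save_dir, input_size=[112, 112])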
def face_recognition_image(model_path, dataset_path, filename, image_path):
    # load the face embedding database
    dataset_emb, names_list = load_dataset(dataset_path, filename)
    # initialize the MTCNN face detector
    face_detect = face_recognition.Facedetection()
    # initialize facenet
    face_net = face_recognition.facenetEmbedding(model_path)

    image = image_processing.read_image_gbk(image_path)
    # run detection to get bounding boxes and landmarks for cropping
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
    if bboxes == [] or landmarks == []:
        print("-----no face")
        exit(0)
    # print("-----image have {} faces".format(len(bboxes)))
    face_images = image_processing.get_bboxes_image(image, bboxes, resize_height, resize_width)
    face_images = image_processing.get_prewhiten_images(face_images)
    pred_emb = face_net.get_embedding(face_images)
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb, names_list)
    # build the face bounding box / recognition result labels
    show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
    print(show_info)
def face_recognition_image(model_path, dataset_path, filename, image_path):
    # load the face embedding database
    dataset_emb, names_list = load_dataset(dataset_path, filename)
    # initialize the MTCNN face detector
    face_detect = face_recognition.FaceDetection()
    # initialize facenet
    face_net = face_recognition.facenetEmbedding(model_path)
    # read the image to be recognized
    image = image_processing.read_image_gbk(image_path)
    print("image_processing.read_image_gbk:", type(image), image.shape)
    # <class 'numpy.ndarray'>, (616, 922, 3), (height, width, channels)
    # run detection to get bounding boxes and landmarks for cropping
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks = face_detect.get_square_bboxes(
        bboxes, landmarks, fixed="height")  # use the box height as reference to get square boxes
    if bboxes == [] or landmarks == []:
        print("-----no face")
        exit(0)
    print("-----image have {} faces".format(len(bboxes)))
    face_images = image_processing.get_bboxes_image(
        image, bboxes, resize_height, resize_width)  # crop the face regions given by bboxes
    face_images = image_processing.get_prewhiten_images(face_images)  # normalize the face images
    pred_emb = face_net.get_embedding(face_images)  # compute the facenet embeddings
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb, names_list)
    # draw the face bounding boxes and recognition results on the image
    show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
    image_processing.show_image_bboxes_text("face_reco", image, bboxes, show_info)
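A minimal calling sketch for face_recognition_image, assuming the module-level resize_height and resize_width globals used above are already defined; all paths below are hypothetical:

if __name__ == '__main__':
    model_path = 'models/facenet_model'              # hypothetical facenet model path
    dataset_path = 'dataset/emb/faceEmbedding.npy'   # hypothetical saved embedding database
    filename = 'dataset/emb/name.txt'                # hypothetical label file matching the embeddings
    image_path = 'dataset/test_images/1.jpg'         # hypothetical test image
    face_recognition_image(model_path, dataset_path, filename, image_path)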
def get_face_embedding(model_path, files_list, names_list):
    # compute the embedding data
    colorSpace = "RGB"
    face_detect = face_rec.FaceDetection()
    face_net = face_rec.FacenetEmbedding(model_path)
    embeddings = []
    label_list = []
    for image_path, name in zip(files_list, names_list):
        print("processing image :{}".format(image_path))
        image = image_processing.read_image_gbk(image_path, colorSpace=colorSpace)
        if not isinstance(image, np.ndarray):
            continue
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image have {} faces".format(len(bboxes)))
            continue
        face_images = image_processing.get_bboxes_image(image, bboxes, resize_height, resize_width)
        face_images = image_processing.get_prewhiten_images(face_images, normalization=True)
        pred_emb = face_net.get_embedding(face_images)
        embeddings.append(pred_emb)
        label_list.append(name)
    return embeddings, label_list
def label_test(image_dir, filename, class_names):
    basename = os.path.basename(filename)[:-len('.txt')] + ".bmp"
    image_path = os.path.join(image_dir, basename)
    image = image_processing.read_image_gbk(image_path)
    data = file_processing.read_data(filename, split=" ")
    label_list, rect_list = file_processing.split_list(data, split_index=1)
    label_list = [l[0] for l in label_list]
    name_list = file_processing.decode_label(label_list, class_names)
    image_processing.show_image_rects_text("object2", image, rect_list, name_list)
def get_face_embedding(model_path, files_list, names_list):
    '''
    Compute the face embedding data.
    :param files_list: list of image paths
    :param names_list: list of names, one per entry in files_list
    :return:
    '''
    # color space to convert to: RGB or BGR
    colorSpace = "RGB"
    # initialize the MTCNN face detector
    face_detect = face_recognition.FaceDetection()
    # initialize facenet
    face_net = face_recognition.facenetEmbedding(model_path)

    embeddings = []  # holds the face embedding database
    label_list = []  # holds the face labels, one per entry in embeddings
    for image_path, name in zip(files_list, names_list):
        print("processing image: {}".format(image_path))
        image = image_processing.read_image_gbk(image_path, colorSpace=colorSpace)
        # run face detection to obtain the bounding boxes
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
        # image_processing.show_image_boxes("image", image, bboxes)
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image total {} faces".format(len(bboxes)))
            continue
        # crop the face regions
        face_images = image_processing.get_bboxes_image(
            image, bboxes, resize_height, resize_width)
        # preprocess (prewhiten/normalize) the face images
        face_images = image_processing.get_prewhiten_images(face_images, normalization=True)
        # compute the face embeddings
        pred_emb = face_net.get_embedding(face_images)
        embeddings.append(pred_emb)
        # either image_list or names_list can be saved as the face label;
        # for testing it is handier to save image_list, so you know which image a detected face resembles
        # label_list.append(image_path)
        label_list.append(name)
    return embeddings, label_list
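A hedged sketch of how the embeddings returned above could be persisted as a face database; file_processing.gen_files_labels is borrowed from the evaluation snippet further below, while the helper name and the output paths are hypothetical:

def create_face_embedding_sketch(model_path, dataset_dir, out_emb_path, out_label_path):
    # list the images and their labels (one label per image path)
    files_list, names_list = file_processing.gen_files_labels(dataset_dir)
    embeddings, label_list = get_face_embedding(model_path, files_list, names_list)
    print("embedding size: {}, label size: {}".format(len(embeddings), len(label_list)))
    # save the stacked embeddings next to their label file
    np.save(out_emb_path, np.asarray(embeddings))
    with open(out_label_path, 'w') as f:
        f.write('\n'.join(label_list))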
def select_image(self):
    img_name, img_type = QFileDialog.getOpenFileName(
        self, "打开图片", "./data/test", "*.jpg;;*.png;;All Files(*)")
    if not img_name:
        return
    fixed_img = osjoin(cache_path, img_name.split('/')[-1])
    shutil.copy(img_name, fixed_img)
    image = image_processing.read_image_gbk(fixed_img)
    image = image_fix(image)
    cv2.imwrite(fixed_img, image)
    iw, ih = image.shape[1], image.shape[0]
    rw, rh = self.rec_result.width(), self.rec_result.height()
    w, h = image_processing.scaled_to(iw, ih, rw, rh)
    img = QtGui.QPixmap(fixed_img).scaled(w, h)
    self.rec_result.setAlignment(Qt.AlignCenter)
    self.rec_result.setPixmap(img)
    os.remove(fixed_img)
def convert_image_format(image_dir, dest_dir, resize_width=None, dest_format='.jpg'):
    image_id = file_processing.get_sub_directory_list(image_dir)
    for id in image_id:
        image_list = file_processing.get_files_list(
            os.path.join(image_dir, id), postfix=['*.jpg', "*.jpeg", '*.png', "*.JPG"])
        print("processing :{}".format(id))
        for src_path in image_list:
            basename = os.path.basename(src_path).split('.')[0]
            image = image_processing.read_image_gbk(src_path, resize_width=resize_width)
            dest_path = file_processing.create_dir(dest_dir, id, basename + dest_format)
            file_processing.create_file_path(dest_path)
            image_processing.save_image(dest_path, image)
def classify_faces(dataset_path):
    # sort face images into three groups (0 faces, 1 face, n > 1 faces), plus unreadable images,
    # and copy them into separate folders for a quick manual screening afterwards
    face_detect = face_rec.FaceDetection()
    classify_root_path = './data/classify'
    classify_path = [osjoin(classify_root_path, x) for x in ('NA', '0', '1', 'n')]
    for tp in classify_path:
        if os.path.exists(tp):
            continue
        os.mkdir(tp)
    paths = os.listdir(dataset_path)
    for img_path in paths:
        real_path = osjoin(dataset_path, img_path)
        image = image_processing.read_image_gbk(real_path, colorSpace='RGB')
        if not isinstance(image, np.ndarray):
            out_put_path = osjoin(classify_path[0], img_path)
        else:
            bboxes, landmarks = face_detect.detect_face(image)
            bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
            if bboxes == [] or landmarks == []:
                out_put_path = osjoin(classify_path[1], img_path)
            elif len(bboxes) >= 2 or len(landmarks) >= 2:
                out_put_path = osjoin(classify_path[3], img_path)
            else:
                out_put_path = osjoin(classify_path[2], img_path)
        shutil.copy(real_path, out_put_path)
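A minimal usage sketch for classify_faces; the dataset path is hypothetical, and './data/classify' has to exist beforehand because the function only calls os.mkdir on its subfolders:

if __name__ == '__main__':
    os.makedirs('./data/classify', exist_ok=True)  # ensure the parent folder exists
    classify_faces('./data/raw_faces')             # hypothetical folder of unscreened face images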
def face_recognition_for_bzl(model_path, test_dataset, filename):
    # load the face embedding database
    dataset_emb, names_list = predict.load_dataset(dataset_path, filename)
    print("loading dataset...\n names_list:{}".format(names_list))
    # initialize the MTCNN face detector
    face_detect = face_recognition.Facedetection()
    # initialize facenet
    face_net = face_recognition.facenetEmbedding(model_path)

    # get the paths and labels of the test images
    filePath_list, label_list = file_processing.gen_files_labels(test_dataset)
    label_list = [name.split('_')[0] for name in label_list]
    print("filePath_list:{},label_list{}".format(len(filePath_list), len(label_list)))

    right_num = 0
    wrong_num = 0
    detection_num = 0
    test_num = len(filePath_list)
    for image_path, label_name in zip(filePath_list, label_list):
        print("image_path:{}".format(image_path))
        # read the image
        image = image_processing.read_image_gbk(image_path)
        # face detection
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image have {} faces".format(len(bboxes)))
            continue
        # crop the face regions
        face_images = image_processing.get_bboxes_image(
            image, bboxes, resize_height, resize_width)
        face_images = image_processing.get_prewhiten_images(face_images, normalization=True)
        pred_emb = face_net.get_embedding(face_images)
        pred_name, pred_score = predict.compare_embadding(pred_emb, dataset_emb, names_list, threshold=1.3)
        # draw the face bounding boxes and recognition results on the image
        # show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
        # image_processing.show_image_text("face_recognition", image, bboxes, show_info)

        index = 0
        pred_name = pred_name[index]
        pred_score = pred_score[index]
        if pred_name == label_name:
            right_num += 1
        else:
            wrong_num += 1
        detection_num += 1
        print("-------------label_name:{},pred_name:{},score:{:3.4f},status:{}".format(
            label_name, pred_name, pred_score, (label_name == pred_name)))
    # accuracy on the detected faces
    accuracy = right_num / detection_num
    # missed-detection rate
    misdetection = (test_num - detection_num) / test_num
    print("-------------right_num/detection_num:{}/{},accuracy rate:{}".format(
        right_num, detection_num, accuracy))
    print("-------------misdetection/all_num:{}/{},misdetection rate:{}".format(
        (test_num - detection_num), test_num, misdetection))