Example #1
def face_recognition_image(model_path, dataset_path, filename, image_path):
    # Load the face database
    dataset_emb, names_list = load_dataset(dataset_path, filename)

    # Initialize the MTCNN face detector
    face_detect = face_recognition.FaceDetection()

    # Initialize FaceNet
    face_net = face_recognition.facenetEmbedding(model_path)

    # Read the image to be recognized
    image = image_processing.read_image_gbk(image_path)
    print("image_processing.read_image_gbk:", type(image),
          image.shape)  # <class 'numpy.ndarray'>, (616, 922, 3), (height, width, channels)

    # Get the bounding boxes and landmarks used for cropping
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks = face_detect.get_square_bboxes(
        bboxes, landmarks, fixed="height")  # use the height as the base to get equal-width (square) boxes
    if bboxes == [] or landmarks == []:
        print("-----no face")
        exit(0)
    print("-----image have {} faces".format(len(bboxes)))

    face_images = image_processing.get_bboxes_image(
        image, bboxes, resize_height, resize_width)  # crop the face regions according to bboxes
    face_images = image_processing.get_prewhiten_images(face_images)  # normalize the images
    pred_emb = face_net.get_embedding(face_images)  # extract FaceNet features
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb,
                                              names_list)

    # Draw the face boxes and recognition results on the image
    show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
    image_processing.show_image_bboxes_text("face_reco", image, bboxes,
                                            show_info)
Example #2
def face_recognition_image(model_path, dataset_path, filename, image_path):
    # Load the face database
    dataset_emb, names_list = load_dataset(dataset_path, filename)
    # Initialize the MTCNN face detector
    face_detect = face_recognition.Facedetection()
    # Initialize FaceNet
    face_net = face_recognition.facenetEmbedding(model_path)

    image = image_processing.read_image_gbk(image_path)
    # Get the bounding boxes and landmarks used for cropping
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks = face_detect.get_square_bboxes(bboxes,
                                                      landmarks,
                                                      fixed="height")
    if bboxes == [] or landmarks == []:
        print("-----no face")
        exit(0)
    # print("-----image have {} faces".format(len(bboxes)))
    face_images = image_processing.get_bboxes_image(image, bboxes,
                                                    resize_height,
                                                    resize_width)
    face_images = image_processing.get_prewhiten_images(face_images)
    pred_emb = face_net.get_embedding(face_images)
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb,
                                              names_list)
    # Draw the face boxes and recognition results on the image
    show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
    print(show_info)
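
compare_embadding is what turns the FaceNet embeddings into names and scores in Examples #1 and #2. Below is a hedged sketch of the usual 1:N matching step, assuming dataset_emb is an (N, D) array, Euclidean distance as the score, and an illustrative threshold and "unknown" label; the project's actual function may differ.

import numpy as np

def compare_embadding(pred_emb, dataset_emb, names_list, threshold=1.1):
    """Match each query embedding against the database by Euclidean distance."""
    dataset_emb = np.asarray(dataset_emb)
    pred_names, pred_scores = [], []
    for emb in np.atleast_2d(pred_emb):
        dists = np.linalg.norm(dataset_emb - emb, axis=1)
        best = int(np.argmin(dists))
        # Above the threshold the face is treated as unknown.
        pred_names.append(names_list[best] if dists[best] < threshold else "unknown")
        pred_scores.append(float(dists[best]))
    return pred_names, pred_scores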
Example #3
def faceRecognition(photo):
    """
    :param photo: the photo sent from the front end (raw, with no preprocessing)
    :type photo: numpy.ndarray
    :return: recognition result
    :rtype:
    """
    # Face detection and alignment
    # bboxes: face box info (top-left x/y and bottom-right x/y)
    # landmarks: five facial keypoints (left/right eye, nose tip, left/right mouth corners)
    bboxes, landmarks, image = mtcnnDetection.photo_calculation_and_processing(
        photo)
    # The algorithm requires 160x160 crops as input to FaceNet
    face_images = image_processing.get_bboxes_image(image, bboxes, 160, 160)
    face_images = image_processing.get_prewhiten_images(face_images)
    # Generate 128-dimensional face embeddings for face_images
    pred_emb = face_net.get_embedding(face_images)

    # Call the database module to get the name list names_list and the embedding array dataset_emb (not implemented yet)
    # Dictionary form: {name: [feature0, feature1, feature2]}
    db = database.Dao()
    dic = db.nameFeature_dic()
    fF = feature.Feature()
    show_info = fF.calculateDistance(pred_emb, dic, threshold)
    print(show_info)
    if show_info is not "unkonw":
        """签到成功"""
        pass
    else:
        """签到失败"""
Example #4
    def createFeature(self, photos):
        """
            创建人脸特征,返回人脸特征向量列表
        :param photos:图片列表
        :type photos:list
        :return:embeddings列表
        :rtype:list
        """
        embeddings = []  # 用于保存人脸特征数据库
        for photo in photos:
            image = image_processing.change_image(photo)
            # Run face detection to obtain bounding boxes
            bboxes, landmarks = face_detect.detect_face(image, False)
            # Returns the face boxes and the 5 keypoints
            bboxes = face_detect.get_square_bboxes(bboxes, landmarks)
            # Get the face regions
            face_images = image_processing.get_bboxes_image(
                image, bboxes, resize_height, resize_width)
            # Preprocess the faces (normalization)
            face_images = image_processing.get_prewhiten_images(
                face_images, normalization=True)
            # Get the face features
            pred_emb = face_net.get_embedding(face_images)
            embeddings.append(pred_emb)

        embeddings = np.asarray(embeddings)  # convert the list to a NumPy array
        print(embeddings)
        return embeddings
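
get_prewhiten_images appears in every example right before get_embedding. The standard FaceNet recipe is per-image prewhitening (zero mean, unit variance with a lower bound on the std); the following is a minimal sketch under that assumption (the normalization flag seen in some examples is not modeled here).

import numpy as np

def prewhiten(image):
    image = np.asarray(image, dtype=np.float32)
    mean, std = image.mean(), image.std()
    std_adj = np.maximum(std, 1.0 / np.sqrt(image.size))  # avoid dividing by ~0
    return (image - mean) / std_adj

def get_prewhiten_images(images):
    return [prewhiten(img) for img in images]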
Example #5
def get_face_embedding(model_path, files_list, names_list):
    # Compute the embedding data
    colorSpace = "RGB"
    face_detect = face_rec.FaceDetection()
    face_net = face_rec.FacenetEmbedding(model_path)

    embeddings = []
    label_list = []
    for image_path, name in zip(files_list, names_list):
        print("processing image :{}".format(image_path))
        image = image_processing.read_image_gbk(image_path, colorSpace=colorSpace)
        if not isinstance(image, np.ndarray): continue
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image have {} faces".format(len(bboxes)))
            continue
        face_images = image_processing.get_bboxes_image(image, bboxes, resize_height, resize_width)
        face_images = image_processing.get_prewhiten_images(face_images, normalization=True)
        pred_emb = face_net.get_embedding(face_images)
        embeddings.append(pred_emb)
        label_list.append(name)
    return embeddings,label_list
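
get_face_embedding returns the (embeddings, label_list) pair that load_dataset reads back in the recognition examples. One plausible way to persist and reload that pair is sketched below; the .npy plus text-file layout is an assumption, not necessarily the project's actual format.

import numpy as np

def save_dataset(dataset_path, filename, embeddings, label_list):
    # Stack the per-image embeddings into an (N, D) matrix and save it.
    # Assumes dataset_path ends with ".npy".
    np.save(dataset_path, np.vstack(embeddings))
    with open(filename, "w", encoding="utf-8") as f:
        f.write("\n".join(label_list))

def load_dataset(dataset_path, filename):
    dataset_emb = np.load(dataset_path)
    with open(filename, "r", encoding="utf-8") as f:
        names_list = [line.strip() for line in f if line.strip()]
    return dataset_emb, names_list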
Example #6
def face_recognition_video(model_path, dataset_path, filename, video_path):
    # Load the face database
    dataset_emb, names_list = load_dataset(dataset_path, filename)
    # Initialize the MTCNN face detector
    face_detect = face_recognition.Facedetection()
    # Initialize FaceNet
    face_net = face_recognition.facenetEmbedding(model_path)

    cap = cv2.VideoCapture(video_path)  # open the video source
    face_cnt = 0  # frame counter: run recognition only on some of the captured frames
    while cap.isOpened():
        ok, frame = cap.read()
        face_cnt += 1
        if not ok:
            break
        if(face_cnt % 5 == 0):
            frame = image_processing.read_image1(frame)
            bboxes, landmarks = face_detect.detect_face(frame)
            bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")
            if bboxes == [] or landmarks == []:
                print("-----no face")
                continue
            print("-----image have {} faces".format(len(bboxes)))
            face_images = image_processing.get_bboxes_image(frame, bboxes, resize_height, resize_width)
            face_images = image_processing.get_prewhiten_images(face_images)
            pred_emb = face_net.get_embedding(face_images)
            pred_name, pred_score = compare_embadding(pred_emb, dataset_emb, names_list)
            # Draw the face boxes and recognition results on the image
            show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
            print("showinfo:", show_info)
            image_processing.show_image_bboxes_text("face_recognition", frame, bboxes, show_info)

    cap.release()
    cv2.destroyWindow("face_recognition")
Example #7
    def do_something(self, frame):
        frame = np.rot90(frame, -1)
        h, w, d = frame.shape
        resize_height = int(h / 2)
        frame = image_processing.resize_image(frame, resize_height=resize_height)
        bboxes_list = [[0, 150, w, h]]
        frame = image_processing.get_bboxes_image(frame, bboxes_list)[0]
        out_frame = np.asarray(frame)
        cv2.imshow("image", out_frame)
        # cv2.imwrite("image.png", out_frame)
        cv2.waitKey(3)
        return out_frame
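
do_something passes only resize_height to image_processing.resize_image, so the helper presumably preserves the aspect ratio. A small sketch of such a resize, assuming OpenCV; the real helper may behave differently.

import cv2

def resize_image(image, resize_height=None, resize_width=None):
    h, w = image.shape[:2]
    if resize_height and not resize_width:
        resize_width = int(w * resize_height / h)   # keep the aspect ratio
    elif resize_width and not resize_height:
        resize_height = int(h * resize_width / w)
    if not (resize_height and resize_width):
        return image
    return cv2.resize(image, (resize_width, resize_height))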
Example #8
def face_recognition_image(face_detect, face_net, compared_face_path, idcard_face_path):
    # Initialize the MTCNN face detector
    # face_detect = face_recognition.Facedetection()
    # Initialize FaceNet
    # face_net = face_recognition.facenetEmbedding(model_path)

    # Whether the face data is valid
    valid = False

    compared_face = image_processing.read_image(compared_face_path)
    # Get the bounding boxes and landmarks used for cropping
    bboxes, landmarks = face_detect.detect_face(compared_face)
    bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")

    if bboxes == [] or landmarks == []:
        return valid, 'No face detected'
    elif len(bboxes) > 1:
        return valid, 'Multiple faces detected'

    compared_face_images = image_processing.get_bboxes_image(compared_face, bboxes, resize_height, resize_width)
    compared_face_images = image_processing.get_prewhiten_images(compared_face_images)
    compared_face_emb = face_net.get_embedding(compared_face_images)

    idcard_face = image_processing.read_image(idcard_face_path)
    # Get the bounding boxes and landmarks used for cropping
    bboxes, landmarks = face_detect.detect_face(idcard_face)
    bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")

    if bboxes == [] or landmarks == []:
        return valid, 'Failed to recognize the face on the ID photo, please upload it again'
    elif len(bboxes) > 1:
        return valid, 'Failed to recognize the face on the ID photo, please upload it again'

    valid = True
    idcard_face_images = image_processing.get_bboxes_image(idcard_face, bboxes, resize_height, resize_width)
    idcard_face_images = image_processing.get_prewhiten_images(idcard_face_images)
    idcard_face_emb = face_net.get_embedding(idcard_face_images)

    compare_res = compare_embadding(compared_face_emb, idcard_face_emb)
    return valid, compare_res
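
Note that Example #8 calls compare_embadding with just two embeddings (a 1:1 verification of the live face against the ID-card face), unlike the 1:N signature used elsewhere. A hedged sketch of such a variant, again assuming Euclidean distance and an illustrative threshold:

import numpy as np

def compare_embadding(emb1, emb2, threshold=1.1):
    """Return a human-readable verdict for a 1:1 face comparison."""
    dist = float(np.linalg.norm(np.asarray(emb1) - np.asarray(emb2)))
    if dist < threshold:
        return "Same person (distance {:.3f})".format(dist)
    return "Different person (distance {:.3f})".format(dist)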
Example #9
    def extension_img(self, bg_list=None):

        person = Person()
        euc_dists = []
        cos_dists = []
        image_list = []
        print('Processing data, please wait...')
        for image in bg_list:

            [w, h, c] = image.shape
            for scale in np.arange(0.4, 1.7, 0.6):
                bg_image = image_processing.resize_image(
                    image, int(w * scale), int(h * scale))

                for angle in np.arange(-30, 31, 15):
                    rotate_bg_image = tools.rotate_bound(bg_image, angle)

                    bboxes, landmarks = self.face_rect(rotate_bg_image)

                    if len(bboxes) == 0:
                        print("-----no face")
                    else:

                        new_images = image_processing.get_bboxes_image(
                            rotate_bg_image, bboxes, landmarks, resize_width,
                            resize_height)
                        # for clipLimit in np.arange(0.5, 3, 0.5):

                        new_image, bilateral_image = self.CLAHE(new_images[0],
                                                                clipLimit=2)
                        image_list.append(new_image)
                        image_list.append(bilateral_image)
                        new_clahe_image, clahe_bilateral_image = self.CLAHE(
                            np.fliplr(new_images[0]))
                        image_list.append(new_clahe_image)
                        image_list.append(clahe_bilateral_image)
                        cv2.imshow("789", clahe_bilateral_image)
                        cv2.waitKey(1)

        image_emb = self.face_512_vector(image_list)
        face_data = image_emb.tolist()
        person.face_data = [{
            'yaw': '{}'.format(0),
            'pitch': '{}'.format(0),
            'face_data': face_data
        }]
        person.euc_dists = euc_dists
        person.cos_dists = cos_dists
        print('Generating the model, please wait...')
        return person
Example #10
    def get_image_crop(self, bounding_boxes, image, size=48):
        if not isinstance(image, np.ndarray):
            rgb_image = np.asarray(image)
        else:
            rgb_image = image
        # resize
        bboxes = bounding_boxes[:, :4]
        scores = bounding_boxes[:, 4:]
        num_boxes = len(bboxes)
        img_boxes = np.zeros((num_boxes, 3, size, size), 'float32')
        for i, box in enumerate(bboxes):
            img_box = image_processing.get_bboxes_image(rgb_image, [box], resize_height=size, resize_width=size)
            img_box = img_box[0]
            img_boxes[i, :, :, :] = box_utils._preprocess(img_box)
        return img_boxes
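
box_utils._preprocess has to return an array that fits the (3, size, size) slot prepared above, so it presumably rescales pixel values and moves channels first. A guess at that step, following the common MTCNN convention; treat it as an assumption rather than the library's actual code.

import numpy as np

def _preprocess(img):
    img = np.asarray(img, dtype=np.float32)
    img = (img - 127.5) * 0.0078125   # map [0, 255] roughly onto [-1, 1]
    return img.transpose((2, 0, 1))   # HWC -> CHW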
Example #11
def get_face_embedding(model_path, files_list, names_list):
    '''
    Compute the embedding data
    :param files_list: list of image paths
    :param names_list: list of names, matching files_list one-to-one
    :return:
    '''
    # Color space to convert to: RGB or BGR
    colorSpace = "RGB"
    # Initialize the MTCNN face detector
    face_detect = face_recognition.FaceDetection()
    # Initialize FaceNet
    face_net = face_recognition.facenetEmbedding(model_path)

    embeddings = []  # holds the face feature database
    label_list = []  # face label names, matching embeddings one-to-one
    for image_path, name in zip(files_list, names_list):
        print("processing image: {}".format(image_path))

        image = image_processing.read_image_gbk(image_path,
                                                colorSpace=colorSpace)
        # Run face detection to obtain bounding boxes
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes,
                                                          landmarks,
                                                          fixed="height")
        # image_processing.show_image_boxes("image",image,bboxes)
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image total {} faces".format(len(bboxes)))
            continue
        # Get the face region
        face_images = image_processing.get_bboxes_image(
            image, bboxes, resize_height, resize_width)
        # Preprocess the face (normalization)
        face_images = image_processing.get_prewhiten_images(face_images,
                                                            normalization=True)
        # Get the face features
        pred_emb = face_net.get_embedding(face_images)

        embeddings.append(pred_emb)
        # You can save either image_list or names_list as the face labels
        # For testing it is handy to save image_list, so you can see which image a detected face matched
        # label_list.append(image_path)
        label_list.append(name)
    return embeddings, label_list
Example #12
File: app.py  Project: Rivarrl/face_rec
def image_fix(image):
    # Process the image (main logic)
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks = face_detect.get_square_bboxes(bboxes,
                                                      landmarks,
                                                      fixed='height')
    if bboxes and landmarks:
        face_images = image_processing.get_bboxes_image(
            image, bboxes, resize_height, resize_width)
        face_images = image_processing.get_prewhiten_images(face_images)
        pred_emb = face_net.get_embedding(face_images)
        pred_name, pred_score = file_processing.compare_embedding(
            pred_emb, dataset_embedding, name_list)
        show_info = [
            n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)
        ]
        image = image_processing.get_image_bboxes_text_han(
            image, bboxes, show_info)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image
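
get_image_bboxes_text_han apparently draws boxes with Chinese ("han") labels, which cv2.putText cannot render; Pillow is the usual workaround. A minimal sketch along those lines, with the font path as a placeholder assumption, not the project's actual implementation.

import numpy as np
from PIL import Image, ImageDraw, ImageFont

def get_image_bboxes_text_han(image, bboxes, texts,
                              font_path="simhei.ttf", font_size=20):
    pil_img = Image.fromarray(image)
    draw = ImageDraw.Draw(pil_img)
    font = ImageFont.truetype(font_path, font_size)
    for (x1, y1, x2, y2), text in zip(bboxes, texts):
        draw.rectangle([x1, y1, x2, y2], outline=(0, 255, 0), width=2)
        # Put the label just above the box, clamped to the top of the image.
        draw.text((x1, max(int(y1) - font_size - 2, 0)), text,
                  fill=(255, 0, 0), font=font)
    return np.asarray(pil_img)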
Example #13
def face_recognition_image():
    """
    Args:
        model_path (): path to the pretrained FaceNet model
        dataset_path (): path to the face feature database
        filename (): path to the txt file describing each image
    """
    global model_path, dataset_path, filename
    # Load the face feature database
    dataset_emb, names_list = load_dataset(dataset_path, filename)
    bboxes, landmarks, image = photo_calculation_and_processing()
    if bboxes == [] or landmarks == []:  # bboxes holds the face box info: top-left x/y and bottom-right x/y
        print("-----no face")
        exit(0)
    print("-----image have {} faces".format(len(bboxes)))
    face_images = image_processing.get_bboxes_image(
        image, bboxes, resize_height,
        resize_width)  # resize the faces to 160x160 as required by FaceNet
    face_images = image_processing.get_prewhiten_images(face_images)
    pred_emb = face_net.get_embedding(face_images)  # generate the face embeddings for face_images
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb,
                                              names_list)
Example #14
def face_recognition_image_nn(dataset_emb, names_list, face_detect, face_net,
                              image):
    # Get the bounding boxes and landmarks used for cropping
    image = image_processing.read_image_gbk_nn(image)
    bboxes, landmarks = face_detect.detect_face(image)
    bboxes, landmarks, nn_bboxes = face_detect.get_square_bboxes_nn(
        bboxes, landmarks, fixed="height")
    if bboxes == [] or landmarks == []:
        # print("-----no face")
        return None, None
    # print("-----image have {} faces".format(len(bboxes)))
    face_images = image_processing.get_bboxes_image(image, bboxes,
                                                    resize_height,
                                                    resize_width)
    face_images = image_processing.get_prewhiten_images(face_images)
    pred_emb = face_net.get_embedding(face_images)
    pred_name, pred_score = compare_embadding(pred_emb, dataset_emb,
                                              names_list)
    # Draw the face boxes and recognition results on the image
    # show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
    # print(show_info)
    return pred_name, nn_bboxes
Example #15
def faceRecognition(photo_path):
    """
    Args:
        model_path (): path to the pretrained FaceNet model
        dataset_path (): path to the face feature database
        filename (): path to the txt file describing each image
        photo_path: path of the image to recognize
    """
    # Face detection and alignment
    # bboxes: face box info (top-left x/y and bottom-right x/y)
    # landmarks: five facial keypoints (left/right eye, nose tip, left/right mouth corners)
    bboxes, landmarks, image = mtcnnDetection.photo_calculation_and_processing(
        photo_path)
    if bboxes == [] or landmarks == []:
        # No face in the image
        print("-----no face")
        return
    print("-----image have {} faces".format(len(bboxes)))
    # The algorithm requires 160x160 crops as input to FaceNet
    face_images = image_processing.get_bboxes_image(image, bboxes, 160, 160)
    face_images = image_processing.get_prewhiten_images(face_images)
    # Generate 128-dimensional face embeddings for face_images
    pred_emb = face_net.get_embedding(face_images)
    print(pred_emb)
Example #16
            print(frame_count, "/", total_frame_count, end=' ')  # 当前第几帧
        # if (frame_count % frame_interval) == 0:    # 跳帧处理,解决算法和采集速度不匹配
        if frame_count > -1:
            frame = np.asanyarray(frame)
            if normalization:
                frame = image_processing.image_normalization(frame)

            # print("frame:", type(frame), frame.shape)    # <class 'numpy.ndarray'> (480, 640, 3),(高,宽,通道)
            bboxes, landmarks = face_detect.detect_face(frame)
            bboxes, landmarks = face_detect.get_square_bboxes(bboxes, landmarks, fixed="height")  # 以高为基准,获得等宽的矩形
            if bboxes == [] or landmarks == []:
                print("-----no face")
            else:
                print("-----now have {} faces".format(len(bboxes)))
                # print("bboxes:", bboxes)
                face_images = image_processing.get_bboxes_image(frame, bboxes, resize_height, resize_width)  # crop the face regions according to bboxes
                face_images = image_processing.get_prewhiten_images(face_images)  # normalize the images
                pred_emb = face_net.get_embedding(face_images)  # extract FaceNet features
                pred_name, pred_score = compare_embadding(pred_emb, dataset_emb, names_list)

                # Draw the face boxes and recognition results on the image
                boxes_name = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
                for name, box in zip(boxes_name, bboxes):
                    box = [int(b) for b in box]
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2, 8, 0)
                    # cv2.putText(frame, name, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)

                    zh_cn_nums = general_util.get_zhcn_number(name)  # number of Chinese characters (a Chinese char is ~20 px wide, an ASCII char ~10 px)
                    t_size = (20 * zh_cn_nums + 10 * (len(name) - zh_cn_nums), 22)
                    c2 = box[0] + t_size[0], box[1] - t_size[1] - 3  # y coordinate; the extra -3 leaves a little space above the text
                    cv2.rectangle(frame, (box[0], box[1]), c2, (0, 0, 255), -1)  # filled
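
The fragment above sizes the red label bar by counting Chinese characters by hand and stops before the text is actually drawn. For ASCII-only names, cv2.getTextSize can measure the label instead; the following is a self-contained sketch of that alternative (for Chinese labels you would still need the Pillow approach sketched under Example #12), offered as an illustration rather than the original code.

import cv2

def draw_label(frame, box, name, color=(0, 0, 255)):
    """Draw a filled label bar above `box` and write `name` inside it."""
    x1, y1 = int(box[0]), int(box[1])
    (tw, th), baseline = cv2.getTextSize(name, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
    cv2.rectangle(frame, (x1, y1 - th - baseline - 3), (x1 + tw, y1), color, -1)
    cv2.putText(frame, name, (x1, y1 - baseline),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
    return frame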
Example #17
    def live_test(self, image):

        try:
            bboxes, landmarks = self.face_rect(image)
            images = image_processing.get_bboxes_image(image, bboxes,
                                                       landmarks, 256, 256)
            face = images[0]
            prn_face = face / 255.
            pos = self.prn.net_forward(prn_face)

            vertices = self.prn.get_vertices(pos)

            camera_matrix, pose = estimate_pose.estimate_pose(vertices)
            l_r, u_d, _ = pose[0], pose[1], pose[2]

            if self.gesture_count == 0:
                if abs(l_r) > 0.087 or abs(u_d) > 0.187:

                    if l_r < 0:
                        print("Please turn your head slightly to the right")
                    else:
                        print("Please turn your head slightly to the left")

                    if u_d < 0:
                        print("Please raise your head slightly")
                    else:
                        print("Please lower your head slightly")

                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1
            if self.gesture_count == 1:
                if u_d > -0.35:

                    print("请缓慢低头")

                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1

            if self.gesture_count == 2:
                if l_r < 0.44:
                    print("请缓慢向右转头")

                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1

            if self.gesture_count == 3:
                if l_r > -0.44:
                    print(l_r)
                    print("请缓慢向左转头")

                else:
                    self.bg_list.append(image)
                    self.gesture_count += 1

            print(self.gesture_count)
            return self.gesture_count

        except:

            print("-----no face")
            return self.gesture_count
Example #18
def face_recognition_for_bzl(model_path, test_dataset, filename):
    # Load the face database
    dataset_emb, names_list = predict.load_dataset(dataset_path, filename)
    print("loading dataset...\n names_list:{}".format(names_list))
    # Initialize the MTCNN face detector
    face_detect = face_recognition.Facedetection()
    # Initialize FaceNet
    face_net = face_recognition.facenetEmbedding(model_path)

    # Get the paths and labels of the test images
    filePath_list, label_list = file_processing.gen_files_labels(test_dataset)
    label_list = [name.split('_')[0] for name in label_list]
    print("filePath_list:{},label_list{}".format(len(filePath_list),
                                                 len(label_list)))

    right_num = 0
    wrong_num = 0
    detection_num = 0
    test_num = len(filePath_list)
    for image_path, label_name in zip(filePath_list, label_list):
        print("image_path:{}".format(image_path))
        # Read the image
        image = image_processing.read_image_gbk(image_path)
        # Face detection
        bboxes, landmarks = face_detect.detect_face(image)
        bboxes, landmarks = face_detect.get_square_bboxes(bboxes,
                                                          landmarks,
                                                          fixed="height")
        if bboxes == [] or landmarks == []:
            print("-----no face")
            continue
        if len(bboxes) >= 2 or len(landmarks) >= 2:
            print("-----image have {} faces".format(len(bboxes)))
            continue
        # Get the face box region
        face_images = image_processing.get_bboxes_image(
            image, bboxes, resize_height, resize_width)
        face_images = image_processing.get_prewhiten_images(face_images,
                                                            normalization=True)

        pred_emb = face_net.get_embedding(face_images)
        pred_name, pred_score = predict.compare_embadding(pred_emb,
                                                          dataset_emb,
                                                          names_list,
                                                          threshold=1.3)
        # Draw the face boxes and recognition results on the image
        # show_info = [n + ':' + str(s)[:5] for n, s in zip(pred_name, pred_score)]
        # image_processing.show_image_text("face_recognition", image, bboxes, show_info)

        index = 0
        pred_name = pred_name[index]
        pred_score = pred_score[index]
        if pred_name == label_name:
            right_num += 1
        else:
            wrong_num += 1
        detection_num += 1
        print(
            "-------------label_name:{},pred_name:{},score:{:3.4f},status:{}".
            format(label_name, pred_name, pred_score,
                   (label_name == pred_name)))
    # Accuracy
    accuracy = right_num / detection_num
    # Missed-detection rate
    misdetection = (test_num - detection_num) / test_num
    print("-------------right_num/detection_num:{}/{},accuracy rate:{}".format(
        right_num, detection_num, accuracy))
    print(
        "-------------misdetection/all_num:{}/{},misdetection rate:{}".format(
            (test_num - detection_num), test_num, misdetection))
Example #19
            face_landmarks = [[landmark[j], landmark[j + 5]] for j in range(5)]
            landmarks_list.append(face_landmarks)
        landmarks = np.asarray(landmarks_list)
        return landmarks


if __name__ == "__main__":
    # img = Image.open('some_img.jpg')  # modify the image path to yours
    # bounding_boxes, landmarks = detect_faces(img)  # detect bboxes and landmarks for all faces in the image
    # show_results(img, bounding_boxes, landmarks)  # visualize the results
    # image_path = "/media/dm/dm/project/dataset/face_recognition/NVR/JPEGImages/2000.jpg"
    image_path = "/media/dm/dm1/FaceRecognition/torch-Face-Recognize-Pipeline/data/dataset2/zhoujielun/zhoujielun_1.jpg"

    image = image_processing.read_image(image_path, colorSpace="RGB")
    mt = MTCNN()
    bbox_score, landmarks = mt.detect(image)
    bboxes, scores, landmarks = mt.adapter_bbox_score_landmarks(bbox_score, landmarks)
    # image_processing.show_image_boxes("image",image,bboxes)
    # image_processing.show_landmark_boxes("image", image, landmarks, bboxes)
    # image_processing.show_landmark_boxes("image2", image, landmarks, bboxes)
    faces = image_processing.get_bboxes_image(image, bboxes)
    # landmarks2 = mt.landmarks_forward(bbox_score, image)
    # bboxes, scores, landmarks2 = mt.adapter_bbox_score_landmarks(bbox_score, landmarks2)
    # image_processing.show_landmark_boxes("image2", image, landmarks2, bboxes)

    for face in faces:
        image_processing.cv_show_image("face", face)
        image_processing.show_landmark_boxes("image", image, landmarks, bboxes)
        landmarks = mt.face_landmarks_forward([face])
        image_processing.show_landmark("landmark", face, landmarks)