    def __init__(self):
        # Create an mtcnn object: builds an MTCNN model
        # used to detect faces in an image
        self.mtcnn_model = mtcnn()
        # Confidence thresholds for the three MTCNN stages (P-Net, R-Net, O-Net)
        self.threshold = [0.5, 0.8, 0.9]

        # Load the facenet model,
        # which converts a detected face into a 128-dimensional vector
        self.facenet_model = InceptionResNetV1()
        # model.summary()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces in the database:
        #   known_face_encodings stores the encoded faces,
        #   known_face_names stores the corresponding names
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")

        self.known_face_encodings = []

        self.known_face_names = []

        # Iterate over all faces to obtain a 128-d feature vector and a name for each
        for face in face_list:
            # Split e.g. obama.jpg on '.' and keep the part before the extension as the name
            name = face.split(".")[0]
            # Read the face image
            img = cv2.imread("./face_dataset/" + face)
            # Convert the image from BGR to RGB
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert the boxes to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 input image;
            # take the first detected face
            rectangle = rectangles[0]
            # Record the 5 landmark points, rescaled into the 160x160 crop,
            # to be used for face alignment
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160
            # Crop out the face
            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))
            # Align the face
            new_img, _ = utils.Alignment_1(crop_img, landmark)
            # Add a batch dimension
            new_img = np.expand_dims(new_img, 0)
            # Pass the detected face through the facenet model to extract its 128-d feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
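
utils.rect2square is not shown in these snippets. A minimal sketch of what it plausibly does (the actual helper may differ): expand each detection box to a square around its own center, so the later 160x160 resize does not distort the face.

import numpy as np

def rect2square(rectangles):
    # Hypothetical sketch of utils.rect2square.
    # rectangles: (n, 15) array of x1, y1, x2, y2, score, 10 landmark coords.
    w = rectangles[:, 2] - rectangles[:, 0]
    h = rectangles[:, 3] - rectangles[:, 1]
    side = np.maximum(w, h)
    # Move the top-left corner so the square stays centered on the box
    rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - side * 0.5
    rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - side * 0.5
    # Bottom-right corner = new top-left corner + side length
    rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([side], 2, axis=0).T
    return rectangles
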
Example No. 2
    def __init__(self):
        # Create an mtcnn object used to detect faces in an image
        self.mtcnn_model = mtcnn()
        # Confidence thresholds for the three MTCNN stages
        self.threshold = [0.5, 0.8, 0.9]

        # Load facenet, used to convert a detected face into a 128-dimensional vector
        self.facenet_model = InceptionResNetV1()
        # model.summary()  # print the model summary
        model_path = './model_data/facenet_keras.h5'  # weights file
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces in the database:
        #   known_face_encodings stores the encoded faces,
        #   known_face_names stores the corresponding names,
        #   and the images in the face_dataset folder form the face database
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")  # list the face images in the folder
        self.known_face_encodings = []  # the encoded faces
        self.known_face_names = []  # the face names, taken from the image file names

        # Iterate over the faces
        for face in face_list:
            # The image file name (without extension) is used as the person's name
            name = face.split(".")[0]
            # cv2.imread returns the image in BGR order
            img = cv2.imread("./face_dataset/" + face)
            # Color-space conversion:
            # cv2.COLOR_BGR2RGB converts BGR to RGB,
            # cv2.COLOR_BGR2GRAY would convert BGR to grayscale
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces in the image
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert the rectangles to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 image
            rectangle = rectangles[0]
            # Record the landmarks, rescaled into the 160x160 crop
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160

            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            # Resize to 160x160
            crop_img = cv2.resize(crop_img, (160, 160))
            new_img, _ = utils.Alignment_1(crop_img, landmark)
            new_img = np.expand_dims(new_img, 0)

            # Pass the detected face through the facenet model to extract its 128-d feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)
            # print(face_encoding)
            # Store the 128-d feature vector
            self.known_face_encodings.append(face_encoding)
            # Store the file name
            self.known_face_names.append(name)
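
utils.calc_128_vec is also external to these snippets. A minimal sketch, assuming the usual facenet recipe (per-image standardization, forward pass, L2 normalization); the actual pre-processing may differ:

import numpy as np

def calc_128_vec(model, img):
    # Hypothetical sketch of utils.calc_128_vec; img has shape (1, 160, 160, 3).
    # Per-image standardization ("pre-whitening"), common for facenet inputs
    mean = np.mean(img, axis=(1, 2, 3), keepdims=True)
    std = np.maximum(np.std(img, axis=(1, 2, 3), keepdims=True),
                     1.0 / np.sqrt(img[0].size))
    face_img = (img - mean) / std
    # Forward pass through InceptionResNetV1 -> shape (1, 128)
    pre = model.predict(face_img)
    # L2-normalize so faces can be compared with plain Euclidean distance
    pre = pre / np.linalg.norm(pre, axis=-1, keepdims=True)
    return np.reshape(pre, [128])
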
Example No. 3
    def recognize(self, draw):
        #-----------------------------------------------#
        #   Face recognition:
        #   first locate the faces, then match them against the database
        #-----------------------------------------------#
        height, width, _ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

        # Detect faces
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)

        if len(rectangles) == 0:
            return

        # Convert the boxes to squares and clip them to the image bounds
        rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))
        rectangles[:, 0] = np.clip(rectangles[:, 0], 0, width)
        rectangles[:, 1] = np.clip(rectangles[:, 1], 0, height)
        rectangles[:, 2] = np.clip(rectangles[:, 2], 0, width)
        rectangles[:, 3] = np.clip(rectangles[:, 3], 0, height)
        #-----------------------------------------------#
        #   Encode the detected faces
        #-----------------------------------------------#
        face_encodings = []
        for rectangle in rectangles:
            # Rescale the 5 landmark points into the 160x160 crop
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] - rectangle[1]) * 160

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))

            new_img, _ = utils.Alignment_1(crop_img, landmark)
            new_img = np.expand_dims(new_img, 0)

            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)
            face_encodings.append(face_encoding)

        face_names = []
        for face_encoding in face_encodings:
            # Compare this face against every face in the database
            matches = utils.compare_faces(self.known_face_encodings, face_encoding, tolerance=0.6)
            name = "Unknown"
            # Find the closest known face
            face_distances = utils.face_distance(self.known_face_encodings, face_encoding)
            # Take the index of that closest face
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = self.known_face_names[best_match_index]
            face_names.append(name)

        rectangles = rectangles[:, 0:4]
        #-----------------------------------------------#
        #   Draw the boxes and names
        #-----------------------------------------------#
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)

            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left, bottom - 15), font, 0.75, (255, 255, 255), 2)
        return draw
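
utils.compare_faces and utils.face_distance are not shown either; their usage above mirrors the helpers of the same names in the face_recognition library. A sketch under that assumption (Euclidean distance, match = distance within tolerance):

import numpy as np

def face_distance(known_encodings, encoding):
    # Euclidean distance from the query encoding to each known encoding
    if len(known_encodings) == 0:
        return np.empty(0)
    return np.linalg.norm(np.array(known_encodings) - encoding, axis=1)

def compare_faces(known_encodings, encoding, tolerance=0.6):
    # A known face "matches" when its distance is within the tolerance
    return list(face_distance(known_encodings, encoding) <= tolerance)

recognize() then takes the closest known face (np.argmin) and accepts its name only if that face also passed the tolerance check.
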
Example No. 4
    def __init__(self):
        # Create an mtcnn object
        # to detect faces in an image
        self.mtcnn_model = mtcnn()
        # Confidence thresholds for the three MTCNN stages
        self.threshold = [0.5, 0.8, 0.9]

        # Load facenet,
        # which converts a detected face into a 128-dimensional vector
        self.facenet_model = InceptionResNetV1()
        # model.summary()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        # -----------------------------------------------#
        #   Encode the faces in the database:
        #   known_face_encodings stores the encoded faces,
        #   known_face_names stores the corresponding names
        # -----------------------------------------------#
        face_list = os.listdir("face_dataset")
        # The encoded faces
        self.known_face_encodings = []
        # The names of the encoded faces
        self.known_face_names = []

        for face in face_list:
            name = face.split(".")[0]

            img = cv2.imread("./face_dataset/" + face)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert the boxes to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 image
            rectangle = rectangles[0]
            # Record the landmarks, rescaled into the 160x160 crop
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160

            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))

            new_img, _ = utils.Alignment_1(crop_img, landmark)

            new_img = np.expand_dims(new_img, 0)
            # Pass the detected face through the facenet model to extract its 128-d feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
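
All of these __init__ variants assume MTCNN finds at least one face in every database image; rectangles[0] raises an IndexError on an image with no detection. A small defensive sketch for the loop body, right after detectFace:

rectangles = self.mtcnn_model.detectFace(img, self.threshold)
if len(rectangles) == 0:
    # No face found in this database image: skip it instead of crashing
    print("no face detected in " + face)
    continue
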
Example No. 5
    def __init__(self):
        #-------------------------#
        #   Create the mtcnn model,
        #   used to detect faces
        #-------------------------#
        self.mtcnn_model = mtcnn()
        self.threshold = [0.5, 0.6, 0.8]

        #-----------------------------------#
        #   Load facenet,
        #   which converts a detected face into a 128-dimensional vector
        #-----------------------------------#
        self.facenet_model = InceptionResNetV1()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces in the database:
        #   known_face_encodings stores the encoded faces,
        #   known_face_names stores the corresponding names
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")
        self.known_face_encodings = []
        self.known_face_names = []
        for face in face_list:
            name = face.split(".")[0]
            img = cv2.imread("./face_dataset/" + face)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #---------------------#
            #   Detect faces
            #---------------------#
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)
            #---------------------#
            #   Convert the boxes to squares
            #---------------------#
            rectangles = utils.rect2square(np.array(rectangles))
            #-----------------------------------------------#
            #   facenet expects a 160x160 image;
            #   use the landmarks to align the face
            #-----------------------------------------------#
            rectangle = rectangles[0]
            landmark = np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])
            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img, _ = utils.Alignment_1(crop_img, landmark)
            crop_img = np.expand_dims(cv2.resize(crop_img, (160, 160)), 0)
            #--------------------------------------------------------------------#
            #   Pass the detected face through the facenet model to extract
            #   its 128-d feature vector
            #--------------------------------------------------------------------#
            face_encoding = utils.calc_128_vec(self.facenet_model, crop_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
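
utils.Alignment_1 is external as well; the "two eyes" debug window in the next example suggests it rotates the crop so the two eye landmarks become horizontal. A sketch of that idea (the real helper may differ in detail, but it also returns the rotated landmarks):

import math
import cv2
import numpy as np

def Alignment_1(img, landmark):
    # Hypothetical sketch: landmark[0] and landmark[1] are the two eyes
    dx = landmark[0, 0] - landmark[1, 0]
    dy = landmark[0, 1] - landmark[1, 1]
    # Rotation angle that puts the eye line on the horizontal
    angle = 0 if dx == 0 else math.degrees(math.atan(dy / dx))
    center = (img.shape[1] // 2, img.shape[0] // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1)
    new_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
    # Apply the same rotation to the landmark points
    pts = np.hstack([landmark, np.ones((landmark.shape[0], 1))])
    new_landmark = pts @ np.array(M).T
    return new_img, new_landmark
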
Example No. 6
# Load facenet (this excerpt assumes img and rectangles were produced
# by an earlier MTCNN detection step, as in the examples above)
facenet_model = InceptionResNetV1()
model_path = './model_data/facenet_keras.h5'
facenet_model.load_weights(model_path)

for rectangle in rectangles:
    if rectangle is not None:
        # Rescale the 5 landmark points into the 160x160 crop
        landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
            [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                        rectangle[1]) * 160

        # Crop in the y direction and the x direction
        crop_img = img[int(rectangle[1]):int(rectangle[3]),
                       int(rectangle[0]):int(rectangle[2])]
        # After resizing, the crop can be fed into facenet
        crop_img = cv2.resize(crop_img, (160, 160))
        cv2.imshow("before", crop_img)
        new_img, _ = utils.Alignment_1(crop_img, landmark)
        cv2.imshow("two eyes", new_img)

        # std_landmark = np.array([[54.80897114,59.00365493],
        #                         [112.01078961,55.16622207],
        #                         [86.90572522,91.41657571],
        #                         [55.78746897,114.90062758],
        #                         [113.15320624,111.08135986]])
        # crop_img = img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
        # crop_img = cv2.resize(crop_img,(160,160))
        # new_img,_ = utils.Alignment_2(crop_img,std_landmark,landmark)
        # cv2.imshow("affine",new_img)
        new_img = np.expand_dims(new_img, 0)
        feature1 = utils.calc_128_vec(facenet_model, new_img)

cv2.waitKey(0)
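
The commented-out Alignment_2 call hints at a second alignment strategy: map the detected landmarks onto a fixed 160x160 template with a similarity transform. A sketch using OpenCV's estimator (the original helper may solve the least-squares fit by hand):

import cv2
import numpy as np

def Alignment_2(img, std_landmark, landmark):
    # Hypothetical sketch: similarity transform (rotation + scale +
    # translation) from the detected landmarks to the template positions
    M, _ = cv2.estimateAffinePartial2D(np.float32(landmark),
                                       np.float32(std_landmark))
    new_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
    pts = np.hstack([landmark, np.ones((landmark.shape[0], 1))])
    new_landmark = pts @ M.T
    return new_img, new_landmark
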
Example No. 7
    #------------------------------#
    #   Convert the boxes to squares
    #------------------------------#
    rectangles = utils.rect2square(np.array(rectangles))

    #------------------------------#
    #   Load facenet
    #------------------------------#
    model_path = './model_data/facenet_keras.h5'
    facenet_model = InceptionResNetV1()
    facenet_model.load_weights(model_path)

    for rectangle in rectangles:
        #---------------#
        #   Crop the image
        #---------------#
        landmark = np.reshape(rectangle[5:15], (5, 2)) - np.array(
            [int(rectangle[0]), int(rectangle[1])])
        crop_img = img[int(rectangle[1]):int(rectangle[3]),
                       int(rectangle[0]):int(rectangle[2])]
        #-----------------------------------------------#
        #   Align the face using the facial landmarks
        #-----------------------------------------------#
        cv2.imshow("before", cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
        crop_img, _ = utils.Alignment_1(crop_img, landmark)
        cv2.imshow("two eyes", cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))

        crop_img = np.expand_dims(cv2.resize(crop_img, (160, 160)), 0)
        feature1 = utils.calc_128_vec(facenet_model, crop_img)
        print(feature1)

    cv2.waitKey(0)
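
Here feature1 is only printed. In a verification setting the typical follow-up is a distance check against a second vector; feature2 below is hypothetical, extracted from another image the same way, and 0.6 is the tolerance used in the recognize example above:

dist = np.linalg.norm(feature1 - feature2)  # feature2: hypothetical second face vector
print("same person" if dist < 0.6 else "different person")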