Example #1
    def recognize(self, draw):
        #-----------------------------------------------#
        #   Face recognition
        #   Locate the faces first, then classify glasses vs. no glasses
        #-----------------------------------------------#
        height, width, _ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

        # Detect faces
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)

        if len(rectangles) == 0:
            return

        # Convert boxes to squares
        rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))
        rectangles[:, 0] = np.clip(rectangles[:, 0], 0, width)
        rectangles[:, 1] = np.clip(rectangles[:, 1], 0, height)
        rectangles[:, 2] = np.clip(rectangles[:, 2], 0, width)
        rectangles[:, 3] = np.clip(rectangles[:, 3], 0, height)
        #-----------------------------------------------#
        #   Classify each detected face
        #-----------------------------------------------#
        face_encodings = []
        for rectangle in rectangles:
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]),
                                int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))

            new_img, _ = utils.Alignment_1(crop_img, landmark)

            new_img = (new_img.reshape(1, 160, 160, 3))
            new_img = np.array(new_img) / 255.0

            # NOTE: `model` (the glasses classifier) is assumed to be defined elsewhere
            pred = model.predict(new_img)
            face_encoding = np.argmax(pred, axis=1)
            face_encodings.append(face_encoding)

        rectangles = rectangles[:, 0:4]
        glasses_list = ('Glasses', 'No_glasses')

        face_names = []
        for face_encoding in face_encodings:
            # original code reused a stale variable and overwrote glasses_list;
            # look up the predicted class index instead
            name = glasses_list[int(face_encoding)]
            face_names.append(name)

        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)

            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left, bottom - 15), font, 0.75,
                        (255, 255, 255), 2)
        return draw
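The `model` used for the glasses prediction is not defined in this snippet. A minimal sketch of what it could be, assuming any two-class Keras CNN over 160x160 RGB crops (the architecture and weights file below are hypothetical):

from tensorflow.keras import layers, models

# Hypothetical glasses classifier; indices 0/1 correspond to ('Glasses', 'No_glasses')
model = models.Sequential([
    layers.Conv2D(32, 3, activation='relu', input_shape=(160, 160, 3)),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(2, activation='softmax'),
])
model.load_weights('glasses_classifier.h5')  # hypothetical weights file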
Example #2

    def __init__(self):
        # Create an mtcnn object to detect faces in images
        self.mtcnn_model = mtcnn()
        # Detection thresholds for the three MTCNN stages
        self.threshold = [0.5, 0.8, 0.9]

        # Load facenet, used to convert detected faces into 128-D vectors
        self.facenet_model = InceptionResNetV1()
        # model.summary()  # print the model summary
        model_path = './model_data/facenet_keras.h5'  # weights file
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces in the database
        #   known_face_encodings stores the encoded faces
        #   known_face_names holds the corresponding names
        #   The images in the face_dataset folder form the face database
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")  # 列出文件夹中的人脸图片
        self.known_face_encodings = []  # 存储编码后的人脸
        self.known_face_names = []  # 人脸的名字及图片的名字

        # Iterate over the faces
        for face in face_list:
            # image file name without the extension
            name = face.split(".")[0]
            # cv2.imread returns BGR data
            img = cv2.imread("./face_dataset/" + face)
            # Color-space conversion:
            # cv2.COLOR_BGR2RGB converts BGR to RGB
            # cv2.COLOR_BGR2GRAY converts BGR to grayscale
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces in the image
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert the rectangles to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 image
            rectangle = rectangles[0]
            # record the five facial landmarks
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160

            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            # Resize to 160x160
            crop_img = cv2.resize(crop_img, (160, 160))
            new_img, _ = utils.Alignment_1(crop_img, landmark)
            new_img = np.expand_dims(new_img, 0)

            # Feed the detected face into facenet to extract a 128-D feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)
            # print(face_encoding)
            # store the 128-D feature vector
            self.known_face_encodings.append(face_encoding)
            # store the file name
            self.known_face_names.append(name)
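`utils.calc_128_vec` is this repo's helper for turning a 160x160 crop into an embedding. A plausible sketch (per-image standardization, a facenet forward pass, then L2 normalization; an assumption, not the repo's exact code):

import numpy as np

def calc_128_vec(model, img):
    # img: batch of shape (1, 160, 160, 3); standardize per image
    img = (img - np.mean(img)) / max(np.std(img), 1e-6)
    embedding = model.predict(img)  # shape (1, 128)
    # return a unit-length 128-D vector
    return embedding[0] / np.linalg.norm(embedding[0])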
Example #3

    def __init__(self):
        # Create an mtcnn object (build an mtcnn model)
        # to detect faces in images
        self.mtcnn_model = mtcnn()
        # Detection thresholds for the three MTCNN stages
        self.threshold = [0.5, 0.8, 0.9]

        # Load the facenet model,
        # which converts detected faces into 128-D vectors
        self.facenet_model = InceptionResNetV1()
        # model.summary()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces in the database
        #   known_face_encodings stores the encoded faces
        #   known_face_names holds the corresponding names
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")

        self.known_face_encodings = []

        self.known_face_names = []

        # Iterate over all faces to collect the 128-D feature vector and name of every face in the database
        for face in face_list:
            # split e.g. obama.jpg on '.' and keep the first part
            name = face.split(".")[0]
            # read the face image
            img = cv2.imread("./face_dataset/" + face)
            # convert BGR -> RGB
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert boxes to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 image,
            # so take the first detection
            rectangle = rectangles[0]
            # record the five landmark points, used for face alignment
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160
            # crop the face
            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))
            # align the face
            new_img, _ = utils.Alignment_1(crop_img, landmark)
            # add a batch dimension
            new_img = np.expand_dims(new_img, 0)
            # Feed the detected face into facenet to extract a 128-D feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
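`utils.rect2square` pads each detection box into a square around the same center, which is what lets the crop be resized to 160x160 without distortion. A sketch consistent with how it is used here (assumed implementation):

import numpy as np

def rect2square(rectangles):
    # grow the shorter side of each box so width == height, keeping the center fixed
    w = rectangles[:, 2] - rectangles[:, 0]
    h = rectangles[:, 3] - rectangles[:, 1]
    l = np.maximum(w, h)
    rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
    rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
    rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([l], 2, axis=0).T
    return rectangles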
Example #4
    def recognize(self,draw):
        #-----------------------------------------------#
        #   Face recognition
        #   Locate the faces first, then match against the database
        #-----------------------------------------------#
        height,width,_ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw,cv2.COLOR_BGR2RGB)

        # Detect faces
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)

        if len(rectangles)==0:
            return

        # Convert boxes to squares
        rectangles = utils.rect2square(np.array(rectangles,dtype=np.int32))
        rectangles[:,0] = np.clip(rectangles[:,0],0,width)
        rectangles[:,1] = np.clip(rectangles[:,1],0,height)
        rectangles[:,2] = np.clip(rectangles[:,2],0,width)
        rectangles[:,3] = np.clip(rectangles[:,3],0,height)
        #-----------------------------------------------#
        #   Encode the detected faces
        #-----------------------------------------------#
        face_encodings = []
        for rectangle in rectangles:
            landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*160

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img,(160,160))

            new_img,_ = utils.Alignment_1(crop_img,landmark)
            new_img = np.expand_dims(new_img,0)

            face_encoding = utils.calc_128_vec(self.facenet_model,new_img)
            face_encodings.append(face_encoding)

        face_names = []
        for face_encoding in face_encodings:
            # Compare this face against every face in the database and compute scores
            matches = utils.compare_faces(self.known_face_encodings, face_encoding, tolerance = 0.6)
            name = "Unknown"
            # find the closest face
            face_distances = utils.face_distance(self.known_face_encodings, face_encoding)
            # take the index of that closest face
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = self.known_face_names[best_match_index]
            face_names.append(name)

        rectangles = rectangles[:,0:4]
        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)
            
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left , bottom - 15), font, 0.75, (255, 255, 255), 2) 
        return draw
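`utils.compare_faces` and `utils.face_distance` reduce matching to Euclidean distance in embedding space; a minimal sketch, assuming the repo's helpers follow the usual face_recognition-style definitions:

import numpy as np

def face_distance(known_encodings, encoding):
    # Euclidean distance between one encoding and every database encoding
    if len(known_encodings) == 0:
        return np.empty(0)
    return np.linalg.norm(np.array(known_encodings) - encoding, axis=1)

def compare_faces(known_encodings, encoding, tolerance=0.6):
    # a database face "matches" when its distance is within the tolerance
    return list(face_distance(known_encodings, encoding) <= tolerance)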
Example #5
    def __init__(self):
        # Create an mtcnn object
        # to detect faces in images
        self.mtcnn_model = mtcnn()
        # Detection thresholds for the three MTCNN stages
        self.threshold = [0.5, 0.8, 0.9]

        # Load facenet,
        # which converts detected faces into 128-D vectors
        self.facenet_model = InceptionResNetV1()
        # model.summary()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        # -----------------------------------------------#
        #   Encode the faces in the database
        #   known_face_encodings stores the encoded faces
        #   known_face_names holds the corresponding names
        # -----------------------------------------------#
        face_list = os.listdir("face_dataset")
        # encoded faces
        self.known_face_encodings = []
        # names of the encoded faces
        self.known_face_names = []

        for face in face_list:
            name = face.split(".")[0]

            img = cv2.imread("./face_dataset/" + face)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert boxes to squares
            rectangles = utils.rect2square(np.array(rectangles))
            # facenet expects a 160x160 image
            rectangle = rectangles[0]
            # record the five facial landmarks
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160

            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (160, 160))

            new_img, _ = utils.Alignment_1(crop_img, landmark)

            new_img = np.expand_dims(new_img, 0)
            # Feed the detected face into facenet to extract a 128-D feature vector
            face_encoding = utils.calc_128_vec(self.facenet_model, new_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
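`utils.Alignment_1` rotates the crop so the two eye landmarks (rows 0 and 1 in MTCNN's landmark order) lie on a horizontal line, and rotates the landmarks with it. A sketch of that operation (assumed implementation):

import math
import cv2
import numpy as np

def Alignment_1(img, landmark):
    # angle of the line through the two eyes
    x = landmark[0, 0] - landmark[1, 0]
    y = landmark[0, 1] - landmark[1, 1]
    angle = 0 if x == 0 else math.atan(y / x) * 180 / math.pi
    center = (img.shape[1] // 2, img.shape[0] // 2)
    RotationMatrix = cv2.getRotationMatrix2D(center, angle, 1)
    new_img = cv2.warpAffine(img, RotationMatrix, (img.shape[1], img.shape[0]))
    # apply the same rotation to the landmark points
    new_landmark = []
    for p in landmark:
        new_landmark.append([
            RotationMatrix[0, 0] * p[0] + RotationMatrix[0, 1] * p[1] + RotationMatrix[0, 2],
            RotationMatrix[1, 0] * p[0] + RotationMatrix[1, 1] * p[1] + RotationMatrix[1, 2],
        ])
    return new_img, np.array(new_landmark)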
Example #6
    def __init__(self):
        #-------------------------#
        #   Build the mtcnn model,
        #   used to detect faces
        #-------------------------#
        self.mtcnn_model = mtcnn()
        self.threshold = [0.5, 0.6, 0.8]

        #-----------------------------------#
        #   Load facenet,
        #   which converts detected faces into 128-D vectors
        #-----------------------------------#
        self.facenet_model = facenet()

        #-----------------------------------------------#
        #   Encode the faces in the database
        #   known_face_encodings stores the encoded faces
        #   known_face_names holds the corresponding names
        #-----------------------------------------------#
        face_list = os.listdir('face_dataset')
        # print(face_list)
        self.known_face_encodings = []
        self.known_face_names = []
        for face in face_list:
            name = face.split('.')[0]
            img = cv2.imread('./face_dataset/' + face)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #---------------------#
            #   Detect faces
            #---------------------#
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)
            #---------------------#
            #   Convert boxes to squares
            #---------------------#
            rectangles = utils.rect2square(np.array(rectangles))
            #-----------------------------------------------#
            #   facenet expects a 160x160 image;
            #   use the landmarks to align the face
            #-----------------------------------------------#
            rectangle = rectangles[0]
            landmark = np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])
            crop_img = img[int(rectangle[1]):int(rectangle[3]),
                           int(rectangle[0]):int(rectangle[2])]
            crop_img, _ = utils.Alignment_1(crop_img, landmark)
            # cv2.imwrite(name+'.jpg', crop_img)
            # print(crop_img.shape)
            crop_img = np.expand_dims(cv2.resize(crop_img, (160, 160)), 0)
            #--------------------------------------------------------------------#
            #   Feed the detected face into facenet to extract a 128-D feature vector
            #--------------------------------------------------------------------#
            face_encoding = self.facenet_model.calc_128_vec(crop_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
Example #7

    def recognize(self,draw):
        #-----------------------------------------------#
        #   Face recognition
        #   Locate the faces first, then classify each one
        #-----------------------------------------------#
        height,width,_ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw,cv2.COLOR_BGR2RGB)

        # Detect faces
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)
        if len(rectangles)==0:
            return

        rectangles = np.array(rectangles,dtype=np.int32)
        rectangles[:,0] = np.clip(rectangles[:,0],0,width)
        rectangles[:,1] = np.clip(rectangles[:,1],0,height)
        rectangles[:,2] = np.clip(rectangles[:,2],0,width)
        rectangles[:,3] = np.clip(rectangles[:,3],0,height)
        
        # Convert boxes to squares
        rectangles_temp = utils.rect2square(np.array(rectangles,dtype=np.int32))
        rectangles_temp[:,0] = np.clip(rectangles_temp[:,0],0,width)
        rectangles_temp[:,1] = np.clip(rectangles_temp[:,1],0,height)
        rectangles_temp[:,2] = np.clip(rectangles_temp[:,2],0,width)
        rectangles_temp[:,3] = np.clip(rectangles_temp[:,3],0,height)
        #-----------------------------------------------#
        #   Classify each detected face
        #-----------------------------------------------#
        classes_all = []
        for rectangle in rectangles_temp:
            
            # get the landmark coordinates within the cropped image
            landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*160
            # crop the face region
            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img,(self.Crop_HEIGHT,self.Crop_WIDTH))
            # align
            new_img,_ = utils.Alignment_1(crop_img,landmark)
            # normalize
            new_img = preprocess_input(np.reshape(np.array(new_img,np.float64),[1,self.Crop_HEIGHT,self.Crop_WIDTH,3]))
            
            classes = self.class_names[np.argmax(self.mask_model.predict(new_img)[0])]
            classes_all.append(classes)

        rectangles = rectangles[:,0:4]
        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        for (left, top, right, bottom), c in zip(rectangles,classes_all):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, c, (left , bottom - 15), font, 0.75, (255, 255, 255), 2)  
        return draw
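`preprocess_input` is used above without its definition; in this family of Keras repos it typically just rescales pixels to [-1, 1] (an assumption):

def preprocess_input(x):
    # map pixel values from [0, 255] to [-1, 1]
    x = x / 127.5 - 1.0
    return x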
Example #8
def make_database():
    # prepare models
    retinaface_model = insightface.model_zoo.get_model('retinaface_mnet025_v1')
    retinaface_model.prepare(ctx_id=-1, nms=0.4)
    arcface_model = insightface.model_zoo.get_model('arcface_r100_v1')
    arcface_model.prepare(ctx_id=-1)
    
    face_list = getFilePathList('./face_dataset/casia')
    face_list.sort()
    known_face_encodings = []
    known_face_names = []
    timea = datetime.datetime.now()
    for face in face_list:
        name = face.split(os.sep)[-2]
        # print(name)
        # print(type(name))
        image_path = os.path.join('./face_dataset/casia', name, name + '_0.bmp')  # use each person's first image for the database
        if face != image_path:
            continue
        print(image_path)
        img = cv2.imread(image_path)
        # print(type(img)) 
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        rectangle, landmark = retinaface_model.detect(img, threshold=0.5, scale=1.0)
        rectangle = utils.rect2square(np.array(rectangle))
        crop_img = img[int(rectangle[0, 1]):int(rectangle[0, 3]), int(rectangle[0, 0]):int(rectangle[0, 2])]
        crop_img = cv2.resize(crop_img, (112, 112))
        new_img, _ = utils.Alignment_1(crop_img, landmark[0])
        face_encoding = arcface_model.get_embedding(new_img)
        known_face_encodings.append(face_encoding)
        known_face_names.append(name)
    face_num = len(known_face_names)
    known_face_encodings = np.array(known_face_encodings).reshape(face_num, 512)
    np.save('faceEmbedding_casia', known_face_encodings)
    np.save('name_casia', known_face_names)
    timeb = datetime.datetime.now()
    diff = timeb - timea
    print("Building database:", diff.total_seconds(), 'seconds')    
Example #9
    def recognize(self, draw):
        #-----------------------------------------------#
        #   Face recognition
        #   Locate the faces first, then match against the database
        #-----------------------------------------------#
        height, width, _ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

        # Detect faces
        time_0 = datetime.datetime.now()
        rectangles, landmarks = self.retinaface_model.detect(draw_rgb,
                                                             threshold=0.5,
                                                             scale=1.0)

        if len(rectangles) == 0:
            return

        # Convert boxes to squares
        rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))
        # rectangles[:,0] = np.clip(rectangles[:,0],0,width)
        # rectangles[:,1] = np.clip(rectangles[:,1],0,height)
        # rectangles[:,2] = np.clip(rectangles[:,2],0,width)
        # rectangles[:,3] = np.clip(rectangles[:,3],0,height)
        time_now = datetime.datetime.now()
        detect_time = time_now - time_0
        print('Detection Time:', detect_time.total_seconds(), 'seconds')
        #-----------------------------------------------#
        #   Encode the detected faces
        #-----------------------------------------------#
        time_0 = datetime.datetime.now()
        face_encodings = []
        for rectangle, landmark in zip(rectangles, landmarks):
            # landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*112

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]),
                                int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (112, 112))

            new_img, _ = utils.Alignment_1(crop_img, landmark)
            # new_img = np.expand_dims(new_img,0)

            # face_encoding = utils.calc_128_vec(self.facenet_model,new_img)
            face_encoding = self.arcface_model.get_embedding(new_img)
            face_encodings.append(face_encoding)
        # print(np.linalg.norm(face_encodings, axis=1))

        face_names = []
        for face_encoding in face_encodings:
            # Compare this face against every face in the database and compute scores
            # print(type(face_encoding))
            matches = utils.compare_faces_1(self.known_face_encodings,
                                            face_encoding,
                                            tolerance=20)
            # print(matches)
            name = "Unknown"
            # find the closest face
            face_distances = utils.face_distance_1(self.known_face_encodings,
                                                   face_encoding)
            # print(face_distances)
            # take the index of that closest face
            best_match_index = np.argmin(face_distances)
            # print(best_match_index)
            if matches[best_match_index]:
                name = self.known_face_names[best_match_index]
            face_names.append(name)
        time_now = datetime.datetime.now()
        rec_time = time_now - time_0
        print('Recognition Time:', rec_time.total_seconds(), 'seconds')

        rectangles = rectangles[:, 0:4]
        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)
            # print(1)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left, bottom - 15), font, 0.5,
                        (255, 255, 255), 2)
        return draw
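`utils.compare_faces_1` and `utils.face_distance_1` mirror the facenet helpers but operate on the raw, unnormalized 512-D ArcFace embeddings, which is why the tolerance is 20 rather than 0.6. A sketch (assumed implementation):

import numpy as np

def face_distance_1(known_encodings, encoding):
    # Euclidean distance over the raw 512-D ArcFace embeddings
    return np.linalg.norm(known_encodings - encoding, axis=1)

def compare_faces_1(known_encodings, encoding, tolerance=20):
    return list(face_distance_1(known_encodings, encoding) <= tolerance)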
Example #10
    def __init__(self):
        # Create the RetinaFace detector
        # to find faces in images
        self.retinaface_model = insightface.model_zoo.get_model(
            'retinaface_mnet025_v1')
        self.retinaface_model.prepare(ctx_id=-1, nms=0.4)
        # Detection thresholds (only needed by the old MTCNN detector)
        # self.threshold = [0.5,0.8,0.9]

        # Load arcface,
        # which converts detected faces into 512-D vectors
        self.arcface_model = insightface.model_zoo.get_model('arcface_r100_v1')
        self.arcface_model.prepare(ctx_id=-1)

        #-----------------------------------------------#
        #   对数据库中的人脸进行编码
        #   known_face_encodings中存储的是编码后的人脸
        #   known_face_names为人脸的名字
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")
        # print(face_list)

        self.known_face_encodings = []

        self.known_face_names = []

        timea = datetime.datetime.now()
        for face in face_list:
            name = face.split(".")[0]

            img = cv2.imread("./face_dataset/" + face)
            # print(type(img))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Detect faces
            # rectangles = self.mtcnn_model.detectFace(img, self.threshold)
            rectangle, landmark = self.retinaface_model.detect(img,
                                                               threshold=0.5,
                                                               scale=1.0)

            # Convert boxes to squares
            rectangle = utils.rect2square(np.array(rectangle))
            # arcface expects a 112x112 image
            # rectangle = rectangles[0]
            # record the landmarks
            # landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*112

            # print(rectangle.shape)
            # print(landmark.shape)
            crop_img = img[int(rectangle[0, 1]):int(rectangle[0, 3]),
                           int(rectangle[0, 0]):int(rectangle[0, 2])]
            crop_img = cv2.resize(crop_img, (112, 112))
            # print(crop_img.shape[0:2])

            new_img, _ = utils.Alignment_1(crop_img, landmark[0])
            # print(new_img.shape[0:2])
            # new_img = np.expand_dims(new_img,0)
            # print(new_img.shape[0:2])
            # Feed the detected face into arcface to extract a 512-D feature vector
            # face_encoding = utils.calc_128_vec(self.facenet_model,new_img)
            face_encoding = self.arcface_model.get_embedding(new_img)
            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)
        self.known_face_encodings = np.array(
            self.known_face_encodings).reshape(-1, 512)  # one 512-D vector per database face
        timeb = datetime.datetime.now()
        diff = timeb - timea
        print("Building database:", diff.total_seconds(), 'seconds')
Example #11
# Standalone alignment demo. It assumes the repo's mtcnn, InceptionResNetV1 and
# utils modules are imported, and that `img` is an RGB image whose faces have
# already been detected into `rectangles` as in the examples above.
facenet_model = InceptionResNetV1()
model_path = './model_data/facenet_keras.h5'
facenet_model.load_weights(model_path)

for rectangle in rectangles:
    if rectangle is not None:
        landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
            [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                        rectangle[1]) * 160

        # crop in the y direction, then the x direction
        crop_img = img[int(rectangle[1]):int(rectangle[3]),
                       int(rectangle[0]):int(rectangle[2])]
        # after resizing, the crop can be fed into facenet
        crop_img = cv2.resize(crop_img, (160, 160))
        cv2.imshow("before", crop_img)
        new_img, _ = utils.Alignment_1(crop_img, landmark)
        cv2.imshow("two eyes", new_img)

        # std_landmark = np.array([[54.80897114,59.00365493],
        #                         [112.01078961,55.16622207],
        #                         [86.90572522,91.41657571],
        #                         [55.78746897,114.90062758],
        #                         [113.15320624,111.08135986]])
        # crop_img = img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
        # crop_img = cv2.resize(crop_img,(160,160))
        # new_img,_ = utils.Alignment_2(crop_img,std_landmark,landmark)
        # cv2.imshow("affine",new_img)
        new_img = np.expand_dims(new_img, 0)
        feature1 = utils.calc_128_vec(facenet_model, new_img)

cv2.waitKey(0)
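The commented-out `utils.Alignment_2` call warps the detected landmarks onto the standard template `std_landmark` with an affine transform. A sketch using OpenCV's similarity-transform estimator (an assumption; the repo may solve its own least-squares system):

import cv2
import numpy as np

def Alignment_2(img, std_landmark, landmark):
    # estimate rotation + uniform scale + translation mapping detected -> template points
    M, _ = cv2.estimateAffinePartial2D(np.float32(landmark), np.float32(std_landmark))
    new_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
    return new_img, M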
Example #12
def read__image(path_name, num=1000):
    # os.listdir() returns the names of the files/folders inside path_name
    for dir_image in os.listdir(path_name):
        full_path = os.path.abspath(os.path.join(path_name, dir_image))

        if os.path.isdir(full_path):  # recurse into sub-folders
            # thread num through the recursion so output files are not overwritten
            num = read__image(full_path, num)
        elif dir_image.endswith(('.jpg', '.jpeg')):
            img = cv2.imread(full_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            print(full_path)
            # detect faces
            rectangles = mtcnn_model.detectFace(img, threshold)

            if len(rectangles) > 0:
                # convert boxes to squares
                rectangles = utils.rect2square(np.array(rectangles))

                for rectangle in rectangles:
                    if rectangle is not None:
                        landmark = (np.reshape(rectangle[5:15],
                                               (5, 2)) - np.array([
                                                   int(rectangle[0]),
                                                   int(rectangle[1])
                                               ])) / (rectangle[3] -
                                                      rectangle[1]) * 160

                        crop_img = img[int(rectangle[1]):int(rectangle[3]),
                                       int(rectangle[0]):int(rectangle[2])]
                        crop_img = cv2.resize(crop_img, (160, 160))
                        # align on the eye landmarks
                        new_img, _ = utils.Alignment_1(crop_img, landmark)
                        # write the aligned crop with a running index
                        cv2.imwrite(
                            'C:\\Users\\yu guo long\\PycharmProjects\\face\\mtcnn\\mtcnn-keras-master\\data\\1\\'
                            + '%d.jpg' % num, new_img)
                        num += 1
    return num
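The function relies on module-level `mtcnn_model` and `threshold`; a usage sketch (the input folder is hypothetical):

mtcnn_model = mtcnn()
threshold = [0.5, 0.8, 0.9]
read__image('./raw_faces')  # walks the tree and writes aligned 160x160 crops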
Example #13
    def recognize(self, draw):
        height, width, _ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
        time_0 = datetime.datetime.now()
        rectangles, landmarks = self.retinaface_model.detect(draw_rgb,
                                                             threshold=0.5,
                                                             scale=1.0)
        if len(rectangles) == 0:
            return
        # Convert boxes to squares
        rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))

        time_now = datetime.datetime.now()
        detect_time = time_now - time_0
        print('Detection Time:', detect_time.total_seconds(), 'seconds')
        # -----------------------------------------------#
        #   Encode the detected faces
        # -----------------------------------------------#
        time_0 = datetime.datetime.now()
        face_encodings = []
        for rectangle, landmark in zip(rectangles, landmarks):
            # landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*112

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]),
                                int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img, (112, 112))

            new_img, _ = utils.Alignment_1(crop_img, landmark)
            # new_img = np.expand_dims(new_img,0)

            face_encoding = self.arcface_model.get_embedding(new_img)
            face_encodings.append(face_encoding)

        face_names = []
        for face_encoding in face_encodings:
            #print(face_encoding.shape)
            name = 'Unknown'
            # query the hnswlib index for the nearest database face
            names, distances = self.p.knn_query(face_encoding,
                                                k=1)  # the returned distance is 1 - cosine similarity
            # print(distances)
            matches = list(1 - distances >= 0.45)  # accept only if cosine similarity >= 0.45
            best_match_index = np.argmin(distances)  # the closest face has the smallest distance
            if matches[best_match_index]:
                name = str(names[best_match_index][0])  # names carries an extra batch dimension
            face_names.append(name)

        time_now = datetime.datetime.now()
        rec_time = time_now - time_0
        print('Recognition Time:', rec_time.total_seconds(), 'seconds')

        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        rectangles = rectangles[:, 0:4]
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 1)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left, bottom - 15), font, 0.5,
                        (0, 0, 255), 1)
        return draw
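`self.p` is an hnswlib index over the database embeddings. A sketch of how it could be built in `__init__` (assumes the hnswlib package; labels must be integers, which is why `name` is recovered with `str(...)` above):

import hnswlib
import numpy as np

p = hnswlib.Index(space='cosine', dim=512)  # knn_query then returns 1 - cosine similarity
p.init_index(max_elements=len(known_face_names), ef_construction=200, M=16)
p.add_items(known_face_encodings, np.arange(len(known_face_names)))
p.set_ef(50)  # query-time accuracy/speed trade-off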
Example #14
    def recognize(self, draw):
        #-----------------------------------------------#
        #   Face recognition
        #   Locate the faces first, then classify each one
        #-----------------------------------------------#
        height, width, _ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

        # detect faces
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)
        if len(rectangles) == 0:
            return

        rectangles = np.array(rectangles, dtype=np.int32)
        rectangles[:, 0] = np.clip(rectangles[:, 0], 0, width)
        rectangles[:, 1] = np.clip(rectangles[:, 1], 0, height)
        rectangles[:, 2] = np.clip(rectangles[:, 2], 0, width)
        rectangles[:, 3] = np.clip(rectangles[:, 3], 0, height)

        # Convert boxes to squares
        rectangles_temp = utils.rect2square(
            np.array(rectangles, dtype=np.int32))
        rectangles_temp[:, 0] = np.clip(rectangles_temp[:, 0], 0, width)
        rectangles_temp[:, 1] = np.clip(rectangles_temp[:, 1], 0, height)
        rectangles_temp[:, 2] = np.clip(rectangles_temp[:, 2], 0, width)
        rectangles_temp[:, 3] = np.clip(rectangles_temp[:, 3], 0, height)
        #-----------------------------------------------#
        #   Classify each detected face
        #-----------------------------------------------#
        classes_all = []
        for rectangle in rectangles_temp:

            # get the landmark coordinates within the cropped image
            landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                [int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] -
                                                            rectangle[1]) * 160
            # crop the face region
            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]),
                                int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img,
                                  (self.Crop_HEIGHT, self.Crop_WIDTH))
            # Align
            new_img, _ = utils.Alignment_1(crop_img, landmark)
            # Normalize
            new_img = preprocess_input(
                np.reshape(np.array(new_img, np.float64),
                           [1, self.Crop_HEIGHT, self.Crop_WIDTH, 3]))

            classes = self.class_names[np.argmax(
                self.mask_model.predict(new_img)[0])]
            classes_all.append(classes)

        rectangles = rectangles[:, 0:4]
        #-----------------------------------------------#
        #   Draw the boxes
        #-----------------------------------------------#
        for (left, top, right, bottom), c in zip(rectangles, classes_all):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, c, (left, bottom - 15), font, 0.75,
                        (255, 255, 255), 2)
        return draw
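A minimal driver loop for any of the `recognize` methods above (the class name `face_rec` is an assumption):

import cv2

recognizer = face_rec()
video = cv2.VideoCapture(0)  # default webcam
while True:
    ret, frame = video.read()
    if not ret:
        break
    result = recognizer.recognize(frame)  # returns None when no face is detected
    cv2.imshow('video', frame if result is None else result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()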