Example #1
def recFace():
    # Load the model
    counter = 0
    for dir_item in os.listdir(path_name):
        counter += 1
        if dir_item.endswith('.face.model.h5'):
            model = Model()
            model.load_model(file_path=path_name + dir_item)
            # Color of the rectangle drawn around detected faces
            color = (0, 255, 0)
            # Capture the live video stream from the specified camera
            cap = cv2.VideoCapture(0)
            # Load the Haar cascade face detector once, outside the frame loop
            cascade = cv2.CascadeClassifier(
                r"./openCv/opencv/data/haarcascades/" +
                "haarcascade_frontalface_default.xml")
            # Loop: detect and recognize faces
            while True:
                try:
                    ret, frame = cap.read()  # Read one frame of video
                    # Convert to grayscale to reduce computational cost
                    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    # Use the classifier to find face regions
                    faceRects = cascade.detectMultiScale(frame_gray,
                                                         scaleFactor=1.2,
                                                         minNeighbors=3,
                                                         minSize=(70, 70))
                    for (x, y, w, h) in faceRects:
                        # Crop the face region (with a small margin) and ask the model who it is
                        image = frame[max(0, y - 10):y + h + 10,
                                      max(0, x - 10):x + w + 10]
                        faceID = model.face_predict(image)
                        print(faceID)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        # If the face is recognized as "me"
                        if faceID == 0:
                            # Label who it is
                            cv2.putText(frame,
                                        dir_item.split('.')[0], (x, y + 1),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                        (115, 233, 86), 2)
                        else:
                            pass
                    cv2.imshow("Detect my face", frame)
                    # Wait 10 ms for a key press
                    k = cv2.waitKey(10)
                    # Exit the loop if 'q' is pressed
                    if k & 0xFF == ord('q'):
                        break
                except Exception:
                    # Skip frames that fail to process (e.g. an empty camera frame)
                    continue
            # Release the camera and destroy all windows
            cap.release()
            cv2.destroyAllWindows()
            break
        elif counter == len(os.listdir(path_name)):
            print('No model has been found, please train a model first!')
            break
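The Model class used throughout these examples comes from the authors' face_train module, which is not included in this excerpt. As a rough sketch of the interface the snippets rely on (a Keras-backed classifier whose face_predict returns the predicted class index), something like the following would fit; IMAGE_SIZE, the preprocessing, and the method bodies are assumptions rather than the original code, and note that Example #7 instead unpacks a (probability, class index) pair from face_predict.

# Minimal sketch of the assumed Model interface (illustrative only, not the original face_train code).
import cv2
import numpy as np
from tensorflow.keras.models import load_model as keras_load_model


class Model:
    IMAGE_SIZE = 64  # assumed square input size of the CNN

    def __init__(self):
        self.model = None

    def load_model(self, file_path):
        # Restore a previously trained Keras model from disk
        self.model = keras_load_model(file_path)

    def face_predict(self, image):
        # Resize the face crop to the network input and scale pixels to [0, 1]
        image = cv2.resize(image, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        image = image.astype('float32') / 255.0
        image = np.expand_dims(image, axis=0)
        # Return the index of the most probable class (the faceID used above)
        probabilities = self.model.predict(image)
        return int(np.argmax(probabilities, axis=-1)[0])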
Example #2
    def __init__(self):
        with open('contrast_table', 'r') as f:
            self.contrast_table = json.loads(f.read())
        self.model = Model()
        self.model.load_model(file_path='G:\\face\\renlian\\faces.model')
        # Color of the rectangle drawn around detected faces
        self.color = (0, 255, 0)

        # Capture the live video stream from the specified camera
        self.cap = cv2.VideoCapture(0)

        # Local path of the Haar cascade face classifier
        self.cascade_path = "haarcascade_frontalface_default.xml"
Example #3
    def __init__(self):
        with open('contrast_table', 'r') as f:
            self.contrast_table = json.loads(f.read())
        self.model = Model()
        # Load the trained model weights
        self.model.load_model(file_path='F:/bs_data/lfb.h5')
        # Color of the rectangle drawn around detected faces
        self.color = (0, 255, 0)

        # Capture the live video stream from the specified camera
        self.cap = cv2.VideoCapture(0)
        # Path of the Haar cascade face classifier
        self.cascade_path = "D:\\opencv\\build\\etc\\haarcascades\\haarcascade_frontalface_alt2.xml"
Example #4
    def __init__(self):
        with open('contrast_table', 'r') as f:
            self.contrast_table = json.loads(f.read())
        self.model = Model()
        self.model.load_model(file_path='./model/face.model')
        # Color of the rectangle drawn around detected faces
        self.color = (0, 255, 0)

        # Capture the live video stream from the specified camera
        self.cap = cv2.VideoCapture(0)

        # Local path of the Haar cascade face classifier
        self.cascade_path = "venv/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml"
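The contrast_table file read by these constructors is presumably a JSON mapping from class index (as a string) to a person's name, since Example #7 looks names up with contrast_table[str(name_number)]. A hypothetical helper to generate it from the ./data/ folder layout (one subfolder per person; whether this label order matches the training code is an assumption):

# Hypothetical helper that writes contrast_table as {"0": "alice", "1": "bob", ...}.
import json
import os


def write_contrast_table(data_dir='./data/', out_path='contrast_table'):
    names = sorted(os.listdir(data_dir))
    table = {str(i): name for i, name in enumerate(names)}
    with open(out_path, 'w') as f:
        json.dump(table, f, ensure_ascii=False)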
Example #5
    def run(self):
        user_num = len(os.listdir('./data/'))

        dataset = Dataset('./data/')
        #self.pushButton_2.setText('Training...')
        dataset.load()

        model1 = Model()
        model1.build_model(dataset, nb_classes=user_num)

        # Code previously added to test build_model()
        model1.build_model(dataset, nb_classes=user_num)
        # Code to test the train() function
        model1.train(dataset)

        model1.save_model(file_path='./model/aggregate.face.model.h5')
Example #6
    def train_click(self):
        #if __name__ == '__main__':

        user_num = len(os.listdir('./data/'))

        dataset = Dataset('./data/')
        self.btn2.setText('Training...')
        dataset.load()

        model = Model()
        model.build_model(dataset, nb_classes=user_num)

        # Code previously added to test build_model()
        model.build_model(dataset, nb_classes=user_num)
        # Code to test the train() function
        model.train(dataset)

        model.save_model(file_path='./model/aggregate.face.model.h5')
        self.btn2.setText('Training finished')
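Examples #5 and #6 assume a Dataset class whose load() method reads the face images under ./data/ (one subfolder per user) and exposes them to Model.build_model() and Model.train(). A rough sketch under that assumption follows; the attribute names and the 64x64 size are guesses, not the original implementation.

# Rough sketch of the assumed Dataset class (illustrative only).
import os
import cv2
import numpy as np


class Dataset:
    def __init__(self, path_name, image_size=64):
        self.path_name = path_name
        self.image_size = image_size
        self.images = None
        self.labels = None

    def load(self):
        images, labels = [], []
        # One subfolder per user; the folder index becomes the class label
        for label, user in enumerate(sorted(os.listdir(self.path_name))):
            user_dir = os.path.join(self.path_name, user)
            for file_name in os.listdir(user_dir):
                img = cv2.imread(os.path.join(user_dir, file_name))
                if img is None:
                    continue
                images.append(cv2.resize(img, (self.image_size, self.image_size)))
                labels.append(label)
        self.images = np.array(images)
        self.labels = np.array(labels)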
Example #7
def upload():
    model = Model()
    model.load_model(file_path='./model/face.model')
    file = request.files.get('file')
    f = io.imread(file)
    probability, name_number = model.face_predict(
        transform.resize(f, (160, 160, 3)))
    keyword_list = ['id', 'name', 'time']
    path = "log.csv"
    s = SaveCSV()
    with open('contrast_table', 'r') as f:
        contrast_table = json.loads(f.read())
    res = {}
    res['id'] = create_id()
    res['name'] = contrast_table[str(name_number)]
    res['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    s.save(keyword_list, path, res)

    # res = "Recognition result: " + contrast_table[str(name_number)]
    return json.dumps(res, ensure_ascii=False)
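SaveCSV and create_id are not defined in this excerpt. From the call s.save(keyword_list, path, res), SaveCSV.save apparently appends the dictionary res as one row of log.csv using the given field names; a minimal sketch under that assumption:

# Minimal sketch of the assumed SaveCSV helper: append one dict as a CSV row,
# writing the header only when the file is first created.
import csv
import os


class SaveCSV:
    def save(self, keyword_list, path, row):
        file_exists = os.path.exists(path)
        with open(path, 'a', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=keyword_list)
            if not file_exists:
                writer.writeheader()
            writer.writerow(row)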
Example #8
# -*- coding: utf-8 -*-

import cv2
import sys
import gc
from face_train import Model

if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)

    # Load the model
    model = Model()
    model.load_model(file_path='./model/manwanting.face.model.h5')

    # Color of the rectangle drawn around detected faces
    color = (0, 255, 0)

    # Capture the live video stream from the specified camera
    cap = cv2.VideoCapture(0)

    # Local path of the Haar cascade face classifier
    cascade_path = "D:\\openvc\\haarcascade_frontalface_default.xml"

    # Loop: detect and recognize faces
    while True:
        ret, frame = cap.read()  # Read one frame of video

        if ret is True:
Example #9
    def face_det(self):

        if len(sys.argv) != 1:
            print("Usage:%s camera_id\r\n" % (sys.argv[0]))
            sys.exit(0)

        # Load the model
        model = Model()
        model.load_model(file_path='./model/aggregate.face.model.h5')

        # Color of the rectangle drawn around detected faces
        color = (0, 255, 0)

        # Local path of the Haar cascade face classifier
        cascade_path = r"D:\user\Software\Anaconda\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml"

        # Loop: detect and recognize faces
        while self.cap.isOpened():
            ret, frame = self.cap.read()  # Read one frame of video
            #facecolor = frame

            if ret is True:

                # Convert to grayscale to reduce computational cost
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                continue
            # Load the Haar cascade face classifier
            cascade = cv2.CascadeClassifier(cascade_path)

            # Use the classifier to find face regions
            faceRects = cascade.detectMultiScale(frame_gray,
                                                 scaleFactor=1.2,
                                                 minNeighbors=2,
                                                 minSize=(32, 32))
            if len(faceRects) > 0:
                for faceRect in faceRects:
                    x, y, w, h = faceRect

                    # Crop the face region and ask the model who it is
                    image = frame[y:y + h, x:x + w]  # (modified)
                    faceID = model.face_predict(image)
                    #print(model.)

                    cv2.rectangle(frame, (x - 10, y - 10),
                                  (x + w + 10, y + h + 10),
                                  color,
                                  thickness=2)
                    # face_id check (modified)
                    for i in range(len(os.listdir('./data/'))):
                        if i == faceID:
                            # Label who it is
                            cv2.putText(
                                frame,
                                os.listdir('./data/')[faceID],
                                (x + 30, y + 30),  # position
                                cv2.FONT_HERSHEY_SIMPLEX,  # font
                                1,  # font scale
                                (255, 0, 255),  # color
                                2)  # line thickness
                        else:
                            # Label as a stranger; cv2.putText cannot render CJK text,
                            # so the PIL-based cv2ImgAddText below is used instead
                            #cv2.putText(frame, '陌生人',
                            #            (x + 30, y + 30),  # position
                            #            cv2.FONT_HERSHEY_SIMPLEX,  # font
                            #            1,  # font scale
                            #            (255, 0, 255),  # color
                            #            2)  # line thickness
                            frame = self.cv2ImgAddText(frame, "陌生人", x + 30,
                                                       y + 30, (255, 0, 255),
                                                       25)
                            #frame = Image.blend(frame, img, 0.3)
                            #cv2.imshow('show', img)

            show = cv2.cvtColor(frame,
                                cv2.COLOR_BGR2RGB)  # Convert back to RGB so the displayed colors are correct
            #show = cv2.resize(self.image,(840,680))
            showImage = QtGui.QImage(
                show.data, show.shape[1], show.shape[0],
                QtGui.QImage.Format_RGB888)  # Wrap the frame data in a QImage
            #hanziimg = QtGui.QImage(img.data,img.shape[1],img.shape[0],QtGui.QImage.Format_RGB888)
            self.cameraLabel.setPixmap(
                QtGui.QPixmap.fromImage(showImage))  # Display the QImage on the video label
            #self.cameraLabel.setPixmap(QtGui.QPixmap.fromImage(hanziimg))
            # Wait 10 ms for a key press; without this call the video does not refresh
            cv2.waitKey(10)

        # Release the camera and destroy all windows
        self.cap.release()
        cv2.destroyAllWindows()
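cv2ImgAddText in Example #9 is the authors' helper for drawing Chinese text such as "陌生人" ("stranger") onto a frame, which cv2.putText cannot do. A common way to implement it is to round-trip through Pillow; the sketch below is a standalone function rather than the original method, and the font path is an assumption that must point to a font containing CJK glyphs.

# Common implementation pattern for cv2ImgAddText: draw CJK text with Pillow,
# then convert back to an OpenCV BGR image. The font path is an assumption.
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont


def cv2ImgAddText(img, text, left, top, color=(255, 0, 255), size=25,
                  font_path='simhei.ttf'):
    # OpenCV frames are BGR; Pillow expects RGB
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_img)
    font = ImageFont.truetype(font_path, size, encoding='utf-8')
    draw.text((left, top), text, fill=color, font=font)
    # Back to BGR for the rest of the OpenCV pipeline
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)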