def recFace():
    """Scan ``path_name`` for a trained ``*.face.model.h5`` file, then run
    live face recognition from the default camera until 'q' is pressed.

    Uses module-level names: ``path_name`` (model directory), ``Model``,
    ``os`` and ``cv2``. If no model file exists, prints a hint and returns.
    """
    entries = os.listdir(path_name)
    for counter, dir_item in enumerate(entries, start=1):
        if dir_item.endswith('.face.model.h5'):
            # Load the trained recognition model once.
            model = Model()
            model.load_model(file_path=path_name + dir_item)
            # Color of the box drawn around detected faces (BGR green).
            color = (0, 255, 0)
            # PERF FIX: the Haar cascade is loop-invariant — load it once
            # here instead of once per frame as the original did.
            cascade = cv2.CascadeClassifier(
                r"./openCv/opencv/data/haarcascades/"
                + "haarcascade_frontalface_default.xml")
            # Capture the live stream from the default camera.
            cap = cv2.VideoCapture(0)
            while True:
                try:
                    ret, frame = cap.read()  # one video frame
                    # Grayscale reduces detection cost.
                    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    # Locate face regions in the frame.
                    faceRects = cascade.detectMultiScale(
                        frame_gray, scaleFactor=1.2, minNeighbors=3,
                        minSize=(70, 70))
                    for (x, y, w, h) in faceRects:
                        # Crop the face (10px margin) and classify it.
                        image = frame[y - 10:y + h + 10, x - 10:x + w + 10]
                        faceID = model.face_predict(image)
                        print(faceID)
                        # BUG FIX: the original passed (x + h, y + w),
                        # swapping width/height — wrong box for any
                        # non-square detection.
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        if faceID == 0:
                            # Label the recognized person with the model
                            # file's base name.
                            cv2.putText(frame, dir_item.split('.')[0],
                                        (x, y + 1),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                        (115, 233, 86), 2)
                    cv2.imshow("Detect my face", frame)
                    # Poll the keyboard for ~10 ms; 'q' quits.
                    k = cv2.waitKey(10)
                    if k & 0xFF == ord('q'):
                        break
                except Exception:
                    # BUG FIX: was ``except BaseException`` which also
                    # swallowed KeyboardInterrupt/SystemExit. Skip only
                    # the bad frame.
                    continue
            # Release the camera and close all windows.
            cap.release()
            cv2.destroyAllWindows()
            break
        elif counter == len(entries):
            print('No model has been found, please craft a model first!')
            break
class Face_recognition():
    """Live webcam face recognition using a pre-trained Keras model."""

    # Silence TensorFlow INFO/WARNING log noise.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    def __init__(self):
        # id -> name lookup table produced at training time.
        with open('contrast_table', 'r') as f:
            self.contrast_table = json.loads(f.read())
        self.model = Model()
        # Load the trained weights.
        self.model.load_model(file_path='F:/bs_data/lfb.h5')
        # Rectangle color for detected faces (BGR green).
        self.color = (0, 255, 0)
        # Live stream from the default camera.
        self.cap = cv2.VideoCapture(0)
        # Haar cascade used for face *detection* (not recognition).
        self.cascade_path = "D:\\opencv\\build\\etc\\haarcascades\\haarcascade_frontalface_alt2.xml"

    def recongition(self):
        """Loop over camera frames, drawing a box and a name on each face.

        Press 'q' to quit; the camera and windows are released on exit.
        """
        # PERF FIX: the cascade is loop-invariant — load it once instead of
        # once per frame as the original did.
        cascade = cv2.CascadeClassifier(self.cascade_path)
        while True:
            ret, frame = self.cap.read()  # one video frame
            if ret is True:
                # Grayscale reduces detection cost.
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                continue
            # Locate face regions in the frame.
            faceRects = cascade.detectMultiScale(
                frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
            if len(faceRects) > 0:
                for (x, y, w, h) in faceRects:
                    # Crop the face (10px margin) and classify it.
                    image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
                    probability, name_number = self.model.face_predict(image)
                    print("name_number:", name_number)
                    name = self.contrast_table[str(name_number)]
                    print('name:', name)
                    cv2.rectangle(frame, (x - 10, y - 10),
                                  (x + w + 10, y + h + 10),
                                  self.color, thickness=2)
                    # Only label confident matches; otherwise mark unknown.
                    if probability > 0.7:
                        cv2.putText(frame, name, (x + 30, y + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 0, 255), 2)
                    else:
                        # TYPO FIX: was 'unknow' in the on-screen label.
                        cv2.putText(frame, 'unknown', (x + 30, y + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 0, 255), 2)
            cv2.imshow("face_recognition", frame)
            # Poll the keyboard for ~10 ms; 'q' quits.
            k = cv2.waitKey(10)
            if k & 0xFF == ord('q'):
                break
        self.cap.release()
        cv2.destroyAllWindows()
def upload():
    """Flask handler: classify an uploaded face image, append the result to
    ``log.csv`` and return it as a JSON payload."""
    model = Model()
    model.load_model(file_path='./model/face.model')
    uploaded = request.files.get('file')
    img = io.imread(uploaded)
    # The model expects a fixed 160x160 RGB input.
    probability, name_number = model.face_predict(
        transform.resize(img, (160, 160, 3)))
    # Map the numeric class id back to a person's name.
    with open('contrast_table', 'r') as table_file:
        contrast_table = json.loads(table_file.read())
    record = {
        'id': create_id(),
        'name': contrast_table[str(name_number)],
        'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
    }
    # Persist the recognition event before responding.
    writer = SaveCSV()
    writer.save(['id', 'name', 'time'], "log.csv", record)
    return json.dumps(record, ensure_ascii=False)
continue # 使用人脸识别分类器,读入分类器 cascade = cv2.CascadeClassifier(cascade_path) # 利用分类器识别出哪个区域为人脸 faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32)) if len(faceRects) > 0: for faceRect in faceRects: x, y, w, h = faceRect # 截取脸部图像提交给模型识别这是谁 image = frame[y - 10:y + h + 10, x - 10:x + w + 10] faceID = model.face_predict(image) # 如果是“我” if faceID == 0: cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness=2) # 文字提示是谁 cv2.putText( frame, 'manwanting', (x + 30, y + 30), # 坐标 cv2.FONT_HERSHEY_SIMPLEX, # 字体 1, # 字号
class Face_recognition():
    """Webcam face-recognition front-end around a trained ``Model``."""

    def __init__(self):
        # id -> name lookup table produced at training time.
        with open('contrast_table', 'r') as f:
            self.contrast_table = json.loads(f.read())
        self.model = Model()
        # Load the trained recognizer.
        self.model.load_model(file_path='G:\\face\\renlian\\faces.model')
        # Rectangle color for detected faces (BGR green).
        self.color = (0, 255, 0)
        # Live stream from the default camera.
        self.cap = cv2.VideoCapture(0)
        # Local path of the Haar face-detection cascade.
        self.cascade_path = "haarcascade_frontalface_default.xml"

    def recongition(self):
        """Show the camera feed with a labelled box on each detected face.

        Exits on ESC; releases the camera and destroys all windows.
        """
        # PERF FIX: the cascade is loop-invariant — load it once instead of
        # once per frame as the original did.
        cascade = cv2.CascadeClassifier(self.cascade_path)
        while True:
            ret, frame = self.cap.read()  # one video frame
            if ret is True:
                # Grayscale reduces detection cost.
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                continue
            # Locate face regions in the frame.
            faceRects = cascade.detectMultiScale(
                frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
            if len(faceRects) > 0:
                for (x, y, w, h) in faceRects:
                    # Crop the face (10px margin) and classify it.
                    image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
                    probability, name_number = self.model.face_predict(image)
                    print(name_number)
                    name = self.contrast_table[str(name_number)]
                    cv2.rectangle(frame, (x - 10, y - 10),
                                  (x + w + 10, y + h + 10),
                                  self.color, thickness=2)
                    # Label the recognized person.
                    cv2.putText(frame, name, (x + 30, y + 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (255, 0, 255), 2)
            cv2.imshow("face_recognition", frame)
            # Poll the keyboard for ~10 ms.
            k = cv2.waitKey(10)
            # COMMENT FIX: 27 is ESC — the original comment wrongly said 'q'.
            if k & 0xFF == 27:
                break
        # Release the camera and destroy all windows.
        self.cap.release()
        cv2.destroyAllWindows()
def face_det(self):
    """Run live face recognition and render frames into ``self.cameraLabel``.

    Loads the aggregate model, detects faces with a Haar cascade, labels a
    known face with its directory name from ``./data/`` or draws "陌生人"
    (stranger) otherwise. Runs until ``self.cap`` closes.
    """
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)
    # Load the trained recognition model once.
    model = Model()
    model.load_model(file_path='./model/aggregate.face.model.h5')
    # Rectangle color for detected faces (BGR green).
    color = (0, 255, 0)
    # Local path of the Haar face-detection cascade.
    cascade_path = r"D:\user\Software\Anaconda\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml"
    # PERF FIX: the cascade and the label list are loop-invariant — build
    # them once instead of per frame / per face as the original did.
    cascade = cv2.CascadeClassifier(cascade_path)
    names = os.listdir('./data/')
    while self.cap.isOpened():
        ret, frame = self.cap.read()  # one video frame
        if ret is True:
            # Grayscale reduces detection cost.
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        else:
            continue
        # Locate face regions in the frame.
        faceRects = cascade.detectMultiScale(
            frame_gray, scaleFactor=1.2, minNeighbors=2, minSize=(32, 32))
        if len(faceRects) > 0:
            for (x, y, w, h) in faceRects:
                # Crop the face and classify it.
                image = frame[y:y + h, x:x + w]
                faceID = model.face_predict(image)
                cv2.rectangle(frame, (x - 10, y - 10),
                              (x + w + 10, y + h + 10), color, thickness=2)
                # BUG FIX: the original looped over every index in
                # ``names`` and drew the stranger label once per
                # non-matching index, overdrawing the frame N-1 times per
                # face. Decide once per face instead.
                if 0 <= faceID < len(names):
                    # Label the recognized person with the folder name.
                    cv2.putText(frame, names[faceID],
                                (x + 30, y + 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (255, 0, 255), 2)
                else:
                    # cv2.putText cannot render CJK glyphs; draw via the
                    # PIL-based helper instead.
                    frame = self.cv2ImgAddText(frame, "陌生人",
                                               x + 30, y + 30,
                                               (255, 0, 255), 25)
        # Convert BGR -> RGB so colors display correctly in Qt.
        show = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        # Push the frame into the video display label.
        self.cameraLabel.setPixmap(QtGui.QPixmap.fromImage(showImage))
        # Required: without this wait the video does not display.
        cv2.waitKey(10)
    # Release the camera and destroy all windows.
    self.cap.release()
    cv2.destroyAllWindows()
from face_train import Model
from skimage import io, transform
import json

if __name__ == '__main__':
    # Smoke-test: run the trained recognizer against one known sample image.
    recognizer = Model()
    recognizer.load_model(file_path='./model/face.model')
    sample = io.imread("D:/python/workspace/Face1/data/lp/3.jpg")
    print(sample.shape)
    probability, name_number = recognizer.face_predict(sample)
    # Translate the numeric class id back to a person's name.
    with open('contrast_table', 'r') as table_file:
        contrast_table = json.loads(table_file.read())
    print(probability)
    print(contrast_table[str(name_number)])
continue #使用人脸识别分类器,读入分类器 cascade = cv2.CascadeClassifier(cascade_path) #利用分类器识别出哪个区域为人脸 faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32)) if len(faceRects) > 0: for faceRect in faceRects: x, y, w, h = faceRect #截取脸部图像提交给模型识别这是谁 image = frame[y - 10:y + h + 10, x - 10:x + w + 10] result, result2 = model.face_predict(image) #图像灰化,降低计算复杂度 image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) print('result:', result) count = 0 for each in result: for each1 in each: if each1 <= 0.72: count += 1 else: pass if count == length: result2 = -1