def createClassifer(id_list: list):
    faces, labels = testGatherTrainingData(id_list)
    print("Total faces: ", len(faces))
    print("Total labels: ", len(labels), labels)
    face_recognizer = face.LBPHFaceRecognizer_create()
    face_recognizer.train(np.array(faces), np.array(labels))
    return face_recognizer
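# A minimal sketch (not part of the snippet above) of persisting the trained LBPH model so later
# scripts can reload it instead of retraining. Assumes opencv-contrib-python >= 3.3, where
# FaceRecognizer exposes write()/read(); the dummy faces/labels and the "trainner.yml" filename
# (mirroring the file read in showVideo() below) are illustrative assumptions.
import numpy as np
from cv2 import face

faces = [np.zeros((100, 100), dtype=np.uint8), np.full((100, 100), 255, dtype=np.uint8)]
labels = np.array([0, 1], dtype=np.int32)

recognizer = face.LBPHFaceRecognizer_create()
recognizer.train(faces, labels)
recognizer.write("trainner.yml")   # save the trained histograms to disk

loaded = face.LBPHFaceRecognizer_create()
loaded.read("trainner.yml")        # reload later without retraining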
def showVideo():
    url = "https://tianfeng-510finalproject.appspot.com/form"
    webbrowser.open_new_tab(url)
    haar_face_cascade = cv2.CascadeClassifier(
        'data/haarcascade_frontalface_alt.xml')
    recognizer = face.LBPHFaceRecognizer_create()
    recognizer.read("trainner.yml")
    lables = {"person_name": 1}
    with open('lables.pickle', 'rb') as f:
        og_lables = pickle.load(f)
        lables = {v: k for k, v in og_lables.items()}
    cap = cv2.VideoCapture(0)
    cap.set(3, 320)
    cap.set(4, 240)
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame', 600, 600)
    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = haar_face_cascade.detectMultiScale(gray, scaleFactor=1.1,
                                                   minNeighbors=5)
        for (x, y, w, h) in faces:
            # print(x, y, w, h)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            id_, conf = recognizer.predict(roi_gray)
            if conf >= 45 and conf <= 85:
                name = lables[id_]
                cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (255, 0, 0), 1, cv2.LINE_AA)
            img_item = "face/my-image.png"
            cv2.imwrite(img_item, roi_gray)
            color = (255, 0, 0)
            stroke = 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, stroke)
            new_data = {"Name": str(lables[id_]), "X": int(x), "Y": int(y)}
            print(new_data)
            db.update(new_data)
        text = "Faces found: " + str(len(faces))
        cv2.putText(frame, text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 1)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def face_rec():
    names = ['BAO BAO ', 'BA BA BA ', 'Bei Bei', 'MMMMMMM']  # names matching the labels: 0 -> baobao, 1 -> bbb, 2 -> beibei, ...
    [x, y] = read_img("D:")  # call the reading helper; returns the image list and the label list
    y = np.asarray(y, dtype=np.int32)  # convert the labels to a NumPy array
    # OpenCV ships three recognizer algorithms; LBPH is used here. One pitfall almost made me give up:
    # the factory used to be called createLBPHFaceRecognizer, but the version I downloaded names it differently.
    # model = fc.EigenFaceRecognizer_create()
    # model = fc.FisherFaceRecognizer_create()
    model = fc.LBPHFaceRecognizer_create()
    # Train. The trained result should really be saved and read back when needed, which is more
    # efficient - xml? json? pickle?
    model.train(np.asarray(x), np.asarray(y))
    # Below: read frames from the camera, mark detected faces with rectangles, compare them
    # against the trained model, and print the name for the matching label.
    camera = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(r'D:\pythonweb\face_test/haarcascade_frontalface_default.xml')
    while True:
        read, img = camera.read()
        faces = face_cascade.detectMultiScale(img, 1.3, 5)
        for (x, y, w, h) in faces:
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            try:
                roi = gray[y:y + h, x:x + w]  # face region fed to the recognizer
                roi = cv2.resize(roi, (200, 200), interpolation=cv2.INTER_LINEAR)
                params = model.predict(roi)
                # predict() does the comparison and returns a tuple (label, confidence). The confidence
                # scale depends on the algorithm: for the first two recognizers a value below 5000 is
                # unreliable; for LBPH below 50 is reliable, 80-90 is unreliable, and above 90 is pure
                # guesswork. There is room for improvement here: averaging the confidence measured over
                # a short time window gives a more accurate result.
                print(params)
                # Print the name for the label; once the grayscale issue in cvtopil is solved, the
                # cvtopil helper could be substituted here.
                cv2.putText(img, names[params[0]], (x, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
            except:
                continue
        cv2.imshow("abc", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
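# The comment above suggests averaging the confidence over a short window; the sketch below is an
# illustrative assumption (not part of the original script) of one way to do that with a rolling
# buffer per label. The 50/90 cut-offs follow the LBPH guidance stated in the comment.
from collections import defaultdict, deque

recent_conf = defaultdict(lambda: deque(maxlen=15))  # last few confidences seen per label


def smoothed_prediction(model, roi):
    """Predict a label and report how reliable its recent average confidence looks."""
    label, conf = model.predict(roi)
    recent_conf[label].append(conf)
    avg = sum(recent_conf[label]) / len(recent_conf[label])
    if avg < 50:
        return label, avg, "reliable"      # LBPH distance below ~50: trustworthy match
    if avg > 90:
        return None, avg, "guess"          # above ~90: effectively a guess, treat as unknown
    return label, avg, "uncertain"         # in between: display tentatively, keep collecting frames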
import cv2
import cv2.face as fc

recongnizer = fc.LBPHFaceRecognizer_create()
recongnizer.read('FaceTrainer/trainer.yml')
cascadePath = 'Cascade/haarcascade_frontalface_default.xml'
faceCascades = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

cam = cv2.VideoCapture(0)
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

id = 0
names = ['lhz', '哈哈哈']

while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascades.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,
                                          minSize=(int(minW), int(minH)))
    for (x, y, w, h) in faces:
        # compare the detected face against the trained data
        id, confidence = recongnizer.predict(gray[y:y + h, x:x + w])
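# A small illustrative helper (an assumption, not the original author's continuation) showing how
# such a prediction is usually mapped to a display name: accept the match only when the LBPH
# distance is low enough, otherwise report "unknown". The max_distance of 70 is an assumed threshold.
def label_for(recognizer, names, roi_gray, max_distance=70):
    """Return (display_name, distance) for a grayscale face ROI."""
    idx, distance = recognizer.predict(roi_gray)
    if distance < max_distance and 0 <= idx < len(names):
        return names[idx], distance
    return "unknown", distance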
import multiprocessing
import os
import time

import numpy as np
from cv2 import face  # OpenCV

# My imports.
import extraction_model as exmodel
from sort_database.utils import EMOTIONS_5, EMOTIONS_8


# Start the script.
script_name = os.path.basename(__file__)  # The name of this script
print("\n{}: Beginning face recogniser tests...\n".format(script_name))
start = time.clock()  # Start of the speed test. clock() is most accurate.

fisherface = face.FisherFaceRecognizer_create()  # Fisherface classifier
eigenface = face.EigenFaceRecognizer_create()  # Eigenface classifier
lbph = face.LBPHFaceRecognizer_create()  # Local Binary Patterns classifier


def run_fisher_recognizer(X_train, y_train, X_test, y_test):
    """Train the fisherface classifier."""
    print("\n***> Training fisherface classifier")
    print("Size of the training set is {} images.".format(len(y_train)))
    fisherface.train(X_train, np.array(y_train))

    print("Predicting classification set.")
    cnt = 0
    correct = 0
    incorrect = 0
    for image in X_test:
        pred, conf = fisherface.predict(image)
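# An illustrative tally helper (an assumption, not part of the original test script) showing how
# per-image predictions from a trained cv2.face recognizer are commonly scored against the
# expected labels.
def score_predictions(model, X_test, y_test):
    """Return the fraction of test images whose predicted label matches the expected one."""
    correct = 0
    for image, expected in zip(X_test, y_test):
        pred, conf = model.predict(image)
        if pred == expected:
            correct += 1
    accuracy = correct / len(y_test) if y_test else 0.0
    print("Accuracy: {:.2%} ({}/{})".format(accuracy, correct, len(y_test)))
    return accuracy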
import os

import cv2
from cv2 import face
import pickle
import numpy as np
from PIL import Image

# wherever the file is saved
# looking for the path of it
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# path of the images
image_dir = os.path.join(BASE_DIR, "images")

face_cascades = cv2.CascadeClassifier(
    "cascades/data/haarcascade_frontalface_alt2.xml")
# face recognizer:
recognizer = face.LBPHFaceRecognizer_create()

current_id = 0
label_ids = {}
y_labels = []
x_train = []

for root, dir, files in os.walk(image_dir):
    for file in files:
        if file.endswith("jpg") or file.endswith("png") or file.endswith(
                "jpeg"):
            path = os.path.join(root, file)
            label = os.path.basename(os.path.dirname(path)).replace(
                " ", "-").lower()
            # print(label, path)
            # add the label into label_ids dict if it's not there already
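# A minimal, self-contained sketch of how such a walk is commonly finished (an assumed
# continuation, not the original file's code): assign a numeric id per person folder, collect the
# grayscale face ROIs for training, then persist the label-to-id mapping and the trained model.
# The "lables.pickle" and "trainner.yml" filenames mirror the ones read in showVideo() above and
# are assumptions here.
import os
import pickle

import cv2
import numpy as np
from cv2 import face
from PIL import Image


def build_training_set(image_dir, cascade_path="cascades/data/haarcascade_frontalface_alt2.xml"):
    """Walk image_dir, detect faces, and return (label_ids, x_train, y_labels)."""
    detector = cv2.CascadeClassifier(cascade_path)
    label_ids, x_train, y_labels = {}, [], []
    current_id = 0
    for root, dirs, files in os.walk(image_dir):
        for file in files:
            if not file.lower().endswith((".jpg", ".png", ".jpeg")):
                continue
            path = os.path.join(root, file)
            label = os.path.basename(os.path.dirname(path)).replace(" ", "-").lower()
            if label not in label_ids:           # register a new numeric id for this person
                label_ids[label] = current_id
                current_id += 1
            image_array = np.array(Image.open(path).convert("L"), dtype=np.uint8)
            for (x, y, w, h) in detector.detectMultiScale(image_array, scaleFactor=1.5,
                                                          minNeighbors=5):
                x_train.append(image_array[y:y + h, x:x + w])
                y_labels.append(label_ids[label])
    return label_ids, x_train, y_labels


label_ids, x_train, y_labels = build_training_set("images")
with open("lables.pickle", "wb") as f:
    pickle.dump(label_ids, f)                    # name -> id mapping; the viewer script inverts it
recognizer = face.LBPHFaceRecognizer_create()
recognizer.train(x_train, np.array(y_labels, dtype=np.int32))
recognizer.write("trainner.yml")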