def face():
    """Run facial recognition on the saved snapshot and greet the recognized user.

    Relies on module-level names defined elsewhere in the file: speak, cv2, fr,
    always (snapshot id), users_list, welcome, and the label->person dict `name`.
    """
    speak('Processing Facial Recognition')
    test_img = cv2.imread('saved/{}.jpg'.format(always))
    faces_detected, gray_img = fr.faceDetection(test_img)
    speak("I found your data from our system")
    faces, faceID = fr.labels_for_training_data('test')
    face_recognizer = fr.train_classifier(faces, faceID)
    face_recognizer.write('trainingData.yml')  # persist the trained model for reuse
    users_list()
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width, not the height (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predict label of the face ROI
        print("Confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        # NOTE(review): `name` must exist at module scope — confirm.
        predicted_name = name[label]
        if confidence > 37:
            # LBPH: larger value = worse match, so skip labeling and greet generically.
            welcome()
            break
        fr.put_text(test_img, predicted_name, x, y)
    resized_img = cv2.resize(test_img, (1000, 1000))
    cv2.imshow("Face", resized_img)
    cv2.waitKey(0)  # wait indefinitely for a key press
    cv2.destroyAllWindows()  # BUGFIX: original referenced the function without calling it
def checkIn(name):
    """Capture a photo via the Pi camera, recognize the person, and record
    their check-in timestamp in the module-level `db` mapping."""
    global db
    print("About to take a Picture")
    path = "/home/pi/Downloads/" + name + ".jpg"
    with picamera.PiCamera() as camera:
        camera.resolution = (1280, 720)
        camera.capture(path)
    print("Picture taken.")
    # NOTE(review): the photo is captured into Downloads but read back from
    # TestImages — confirm a copy/move step exists elsewhere, otherwise this
    # reads a stale file.
    test_img = cv2.imread("/home/pi/Desktop/Face Recognition/TestImages/" + name + ".jpg")
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces detected: ", faces_detected)
    # BUGFIX: cv2.createLBPHFaceRecognizer()/.load() is the removed OpenCV 2.x
    # API; use the cv2.face module like every other block in this file.
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('/home/pi/Desktop/Project3/trainingData.yml')
    predicted_name = ""
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("confidence: ", confidence)
        print("label: ", label)
        fr.draw_rect(test_img, face)
        predicted_name = namesAndPath[label]
        fr.put_text(test_img, predicted_name, x, y)
        print(predicted_name + " You are checked-in successfully")
        db[predicted_name] = datetime.datetime.utcnow()  # record check-in time (UTC)
    return
def labeltrans(x1, x2):
    """Verify via webcam that the live face matches label `x1` and name `x2`.

    Returns the hit counter on success (10 confident matches), 0 after 10
    confident mismatches, or None if the loop exits without a verdict.
    """
    x1 = int(x1)
    ctr = 0   # confident frames matching (x1, x2)
    nctr = 0  # confident frames contradicting (x1, x2)
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')
    name = {
        0: "Aakarsh Anubhav", 1: "Soham Samanta", 2: "Kondala Snehasis Rao",
        3: "Apurva Shahabadi", 4: "Shivam Singh", 5: "Kunwar Pratyush",
        6: "Harsh Anand", 7: "Ch. Harika", 8: "Maasid Zafar"
    }
    cap = cv2.VideoCapture(0)
    while ctr <= 21:
        ret, test_img = cap.read()
        faces_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        for face in faces_detected:
            (x, y, w, h) = face
            # BUGFIX: rows are sliced by height and columns by width
            # (original had y:y+w, x:x+h — transposed)
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]
            if confidence < 72:  # LBPH distance: lower = better match
                print(x1, " ", ctr, " ", nctr, " ", label, " ", confidence,
                      " ", predicted_name, " ", x2)
                fr.put_text(test_img, predicted_name, x, y)
                if x1 == label and x2 == predicted_name:
                    ctr = ctr + 1
                    if ctr == 10:
                        messagebox.showinfo('AKKI', 'Operation Successful')
                        cap.release()
                        cv2.destroyAllWindows()
                        return ctr
                if x1 != label or x2 != predicted_name:
                    nctr = nctr + 1
                    if nctr == 10:
                        messagebox.showinfo('Akki', 'Operation unsucessful')
                        cap.release()
                        cv2.destroyAllWindows()
                        return 0
        cv2.imshow('Cyber Ninja System', test_img)
        k = cv2.waitKey(30) & 0xff
        if k == 27 or ctr == 20:  # ESC key or enough hits without a verdict
            break
    cap.release()
    cv2.destroyAllWindows()
    return
def attendance():
    """Mark students present via webcam recognition; session caps at 3 minutes
    or ends early once every student is marked. Redirects to the result page."""
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  # load saved training data
    students = Student.query.all()
    name = Counter()
    i = 0  # count of students still unmarked
    for ele in students:
        ele.status = 0  # reset attendance flag
        name[i] = ele
        i += 1
    db.session.commit()
    cap = cv2.VideoCapture(0)
    start = time.time()
    while i and name:  # stop once all students are marked present
        ret, test_img = cap.read()  # frame grab: (success_flag, image)
        faces_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow('face detection Tutorial ', resized_img)
        cv2.waitKey(10)
        for face in faces_detected:
            (x, y, w, h) = face
            # BUGFIX: slices were transposed (y:y+w, x:x+h)
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)  # predict label of ROI
            print("confidence:" + str(confidence))
            print("label:" + str(label))
            fr.draw_rect(test_img, face)
            a = Student.query.filter_by(roll=label).first()
            # BUGFIX: guard against an unknown roll (filter returns None)
            if a is not None and confidence < 39 and a.status == 0:
                a.status = 1  # mark present
                db.session.commit()
                duration = 1  # seconds
                freq = 440  # Hz
                # Audible confirmation beep (Linux, requires sox's `play`)
                os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
                i -= 1
            fr.put_text(test_img, str(a), x, y)
        end = time.time()
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
        if int(end - start) >= 180:  # 3-minute session timeout
            break
    cap.release()
    cv2.destroyAllWindows()
    return redirect(url_for('result'))
def train():
    """Train the recognizer from images on disk, then recognize all faces in a
    user-chosen test image and print the ids of students present."""
    # read time table and class names
    read_tt.read_tt_and_names()
    # making excel template
    excel_template.make_template()
    test_img_name = str(input("image name : "))
    test_img = cv2.imread('TestImages/' + test_img_name + '.jpg')  # test_img path
    # detect all the faces in the image
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces_detected:", faces_detected)
    face_count = len(faces_detected)
    print("face_count:", face_count)
    faces, faceID = fr.labels_for_training_data('trainingImages')
    face_recognizer = fr.train_classifier(faces, faceID)
    # Persist the trained model so future runs can skip the training step.
    face_recognizer.write('trainingData.yml')
    # label -> student-name dictionary saved earlier with np.save
    name = np.load("names.npy", allow_pickle=True, fix_imports=True)
    name = name.item()
    present = []  # ids of students present in class
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predict the label
        present.append(label)
        fr.draw_rect(test_img, face)  # draw rectangle on face
        predicted_name = name[label]
        fr.put_text(test_img, predicted_name, x, y)  # print the person's name
    # Sorted roll list is easier to read (restored from commented-out code).
    present.sort()
    print(present)
    cv2.waitKey(0)  # waits indefinitely until a key is pressed
    cv2.destroyAllWindows()
def test():
    """Webcam recognition loop; marks confidently recognized students present."""
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  # load saved training data
    students = Student.query.all()  # renamed from `list`: don't shadow the builtin
    name = {}
    i = 1
    for ele in students:
        ele.status = False
        name[i] = ele
        i = i + 1
    cap = cv2.VideoCapture(0)
    while True:
        ret, test_img = cap.read()  # frame grab: (success_flag, image)
        faces_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
        cv2.waitKey(10)
        for face in faces_detected:
            (x, y, w, h) = face
            # BUGFIX: slices were transposed (y:y+w, x:x+h)
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)  # predict ROI label
            print("confidence:", confidence)
            print("label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = str(name[label])
            if confidence < 37:  # only trust close LBPH matches
                # BUGFIX: filter_by() returns a Query object; setting .status on
                # it never touched the database. Fetch the row and guard None.
                detected = Student.query.filter_by(Roll=predicted_name).first()
                if detected is not None:
                    detected.status = True
                    db.session.commit()
            fr.put_text(test_img, predicted_name, x, y)
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow('face recognition tutorial ', resized_img)
        if cv2.waitKey(10) == ord('q'):  # wait until 'q' key is pressed
            break
    cap.release()
    cv2.destroyAllWindows()
def find_face(imageNames):
    """Recognize faces in dest_folder/<imageNames> and tag confident matches
    in the facetag_db MySQL database (skipping the uploading user)."""
    haar_file = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    test_img = cv2.imread("dest_folder/" + imageNames)
    test_img = cv2.resize(test_img, (2000, 2000))
    test_img = detect_faces(haar_file, test_img)
    name = {}  # recognizer label -> user name
    db = MySQLdb.connect('localhost', 'root', '', 'facetag_db')
    cur = db.cursor()
    cur.execute("SELECT * FROM login_tbl")
    for row in cur.fetchall():
        # DB ids are 1-based; recognizer labels are 0-based.
        uidd = int(row[0]) - 1
        name[uidd] = row[3]
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces_detected:", faces_detected)
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predict ROI label
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        uidds = label + 1
        # NOTE(review): `userId` and `image_ids` come from module scope — confirm.
        if userId != uidds:
            if confidence > 47:  # match too uncertain: skip tagging this face
                continue
            db = MySQLdb.connect('localhost', 'root', '', 'facetag_db')
            cur = db.cursor()
            # SECURITY: parameterized query instead of string concatenation.
            # BUGFIX: catch MySQLdb.Error — pymysql is not the driver in use.
            sql = ("INSERT INTO tagimg_tbl(id,img_id,uid,date,status)"
                   "VALUES (null,%s,%s,'2020-04-10','0')")
            try:
                cur.execute(sql, (str(image_ids), str(label + 1)))
                db.commit()
            except MySQLdb.Error:
                db.rollback()
        fr.put_text(test_img, predicted_name, x, y)
    resized_img = cv2.resize(test_img, (700, 700))
    cv2.imshow("face detection", resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def b():
    """Webcam recognition demo; pops an auth dialog when label 1 (Ashwini) is seen."""
    import os
    import cv2
    import numpy as np
    import faceRecognition as fr
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    # Raw string keeps the Windows path safe from accidental escape sequences.
    face_recognizer.read(r"D:\ML PROJECT\TrainingData.yml")
    name = {0: "Alex", 1: "Ashwini", 2: "Amanda"}
    cap = cv2.VideoCapture(0)
    while True:
        ret, test_img = cap.read()
        face_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in face_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0), thickness=5)
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face Detection", resized_img)
        cv2.waitKey(10)
        for face in face_detected:
            (x, y, w, h) = face
            # BUGFIX: column slice must use the width (was x:x+h)
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            print("Confidence:", confidence)
            print("Label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]
            if label == 1:
                # NOTE(review): `messagebox` must be imported at module scope — confirm.
                messagebox.showinfo("Authentication", "face detected")
            fr.put_text(test_img, predicted_name, x, y)
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face detection", resized_img)
        if cv2.waitKey(10) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def get_frame(self):
    """Grab one frame from self.vid, run recognition, and return a tuple
    (ret, rgb_frame_or_None, predicted_name, label).

    Relies on module-level `face_recognizer` and `name` (label -> person).
    """
    if self.vid.isOpened():
        ret, frame = self.vid.read()
        predicted_name = 'Take attandence'  # default when nobody is recognized
        label = 0
        if ret:
            faces_detected, gray_img = fr.faceDetection(frame)
            for (x, y, w, h) in faces_detected:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
            resized_img = cv2.resize(frame, (1000, 700))
            for face in faces_detected:
                (x, y, w, h) = face
                # BUGFIX: slices were transposed (y:y+w, x:x+h)
                roi_gray = gray_img[y:y + h, x:x + w]
                label, confidence = face_recognizer.predict(roi_gray)  # predict ROI label
                if confidence < 39:  # only accept close LBPH matches
                    print("confidence:", confidence)
                    print("label:", label)
                    fr.draw_rect(frame, face)
                    predicted_name = name[label]
                    print(predicted_name)
                    fr.put_text(frame, predicted_name, x, y)
                    break  # first confident match wins
            return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), predicted_name, label)
        else:
            return (ret, None, 'Take attandence', 0)
    else:
        # BUGFIX: `ret` was unbound on this path (capture device never read),
        # which raised NameError instead of returning a failure tuple.
        return (False, None, 'Take attandence', 0)
def a():
    """Recognize faces in a static test image and display the annotated result."""
    import cv2
    import os
    import numpy as np
    import faceRecognition as fr
    # Raw strings keep the Windows paths safe from accidental escape sequences.
    test_img = cv2.imread(r"D:\ML PROJECT\Testimg\Ma.jpg")
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("Face Detected:", faces_detected)
    # Training is done offline; reuse the stored model here.
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read(r"D:\ML PROJECT\TrainingData.yml")
    name = {0: "Alex", 1: "Ashwini", 2: "Amanda"}
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("Confidence:", confidence)
        print("Label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence > 37:  # match too uncertain: don't print the name
            continue
        fr.put_text(test_img, predicted_name, x, y)
    resized_img = cv2.resize(test_img, (800, 600))
    cv2.imshow("Face detection", resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def dete():
    """Train from trainingImages, recognize a known test photo, and announce
    a confident match via text-to-speech."""
    def audio():
        # Generate and play a "user found" voice prompt.
        my_text = "user found"
        language = 'en'
        myobj = gTTS(text=my_text, lang=language, slow=False)
        myobj.save("welcome.mp3")
        # NOTE(review): playing via os.system relies on a file-type association
        # (Windows behavior) — confirm the target platform.
        os.system("welcome.mp3")
    test_img = cv2.imread('TestImages/divyanshu.jpg')
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces_detected:", faces_detected)
    faces, faceID = fr.labels_for_training_data('trainingImages')
    face_recognizer = fr.train_classifier(faces, faceID)
    face_recognizer.write('trainingData.yml')  # persist the model for later runs
    name = {0: "chirag", 1: "divyanshu"}
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence > 37:  # reject uncertain matches
            continue
        fr.put_text(test_img, predicted_name, x, y)
        audio()
    resized_img = cv2.resize(test_img, (1000, 1000))
    cv2.imshow("face dtecetion ", resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()  # BUGFIX: original referenced the function without calling it
def poison(load_img):
    """Recognize faces in load_img; on a rejected match, recurse with a
    freshly poisoned image from imagePoisoning()."""
    test_img = imagePoisoning()
    faces_detected, gray_img = fr.faceDetection(load_img)
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')
    name = {
        0: "Priyanka",
        1: "Kangana",
    }  # dictionary containing names for each label
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: column slice must use the width (was x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predict ROI label
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(load_img, face)
        predicted_name = name[label]
        if confidence < 100:
            # Acceptable LBPH distance: annotate the face.
            fr.put_text(load_img, predicted_name, x, y)
        else:
            # NOTE(review): unbounded recursion if the poisoned image never
            # yields a confident match — consider adding a depth limit.
            poison(test_img)
# Mask-detection webcam loop. Relies on module-level `face_recognizer` and `fr`.
name = {0: "Mask on", 1: "Without mask"}
cap = cv2.VideoCapture(0)
while True:
    ret, test_img = cap.read()  # frame grab: (success_flag, image)
    faces_detected, gray_img = fr.faceDetection(test_img)
    for face in faces_detected:
        (x, y, w, h) = face
        # BUGFIX: slices were transposed (y:y+w, x:x+h)
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predict ROI label
        print("confidence:", confidence)
        print("label:", label)
        if label == 0:
            fr.draw_rect(test_img, face, 0)
        elif label == 1:
            fr.draw_rect(test_img, face, 1)
        predicted_name = name[label]
        fr.put_text(test_img, predicted_name, x, y)
    for (x, y, w, h) in faces_detected:
        # BUGFIX: predict() returns an int label; the original compared against
        # the strings '0'/'1', so neither branch could ever run.
        # NOTE(review): `label` here is from the last face predicted above —
        # per-face thickness was likely intended; confirm.
        if label == 0:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 0, 255), thickness=1)
        elif label == 1:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 0, 255), thickness=2)
def AuthorizationFunction1(self, Flag):
    """Webcam authentication loop for check-in/check-out.

    Flag == 1 authorizes an "in" event, otherwise an "out" event. Frames are
    scanned until the counter reaches 31; a Qt dialog then announces access
    granted (if any confident match was seen) or denied.

    NOTE(review): indentation reconstructed from a flattened source — confirm
    the nesting, especially which `if` the `elif flag == ...:` branches attach to.
    """
    self.WebCamValue = 0
    # Disable the tabs that don't belong to this direction while scanning.
    if Flag == 1:
        self.tabSetup.setEnabled(False)
        self.tabAuthOut.setEnabled(False)
    else:
        self.tabSetup.setEnabled(False)
        self.tabAuthIn.setEnabled(False)
    # This module captures images via webcam and performs face recognition
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  # Load saved training data
    session = session_factory()
    employee_dao = Employee_Dao()
    employees = employee_dao.query_records(session)
    name = {}  # employee id -> display name
    for employee in employees:
        name[employee.id] = employee.name
    # print('Names in DB : ', name)
    counter = 1  # frames examined so far (decision taken at 31)
    flag = 0     # becomes 1 once at least one confident match is seen
    global predicted_name, employeeId
    global test_img
    global cap
    cap = cv2.VideoCapture(0)
    while True:
        ret, test_img = cap.read()  # captures frame and returns boolean value and captured image
        self.displayImage_In(test_img, Flag)
        faces_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)
        cv2.waitKey(10)
        # An external request (WebCamValue flipped to 1) aborts the scan.
        if self.WebCamValue == 1:
            self.WebCamValue = 2
            cap.release()
            if Flag == 1:
                self.lblAuthWebcamScreen.clear()
            else:
                self.lblAuthWebcamScreenOut.clear()
            break
        for face in faces_detected:
            if counter < 31:
                (x, y, w, h) = face
                # NOTE(review): slices look transposed (rows by w, cols by h);
                # harmless only for square detections — confirm, and prefer
                # gray_img[y:y + h, x:x + w].
                roi_gray = gray_img[y:y + w, x:x + h]
                label, confidence = face_recognizer.predict(roi_gray)  # predicting the label of given image
                # print("confidence:", confidence)
                # print("label:", label)
                fr.draw_rect(test_img, face)
                predicted_name = name[label]
                employeeId = label
                if confidence < 39:  # If confidence less than 37 then don't print predicted face text on screen
                    # fr.put_text(test_img, predicted_name, x, y)
                    # fr.put_text(test_img, str(confidence), x, y + h + 50)
                    flag = 1
                    counter += 1
                elif confidence > 39:
                    # NOTE(review): confidence == 39 exactly increments nothing
                    # — confirm this boundary gap is intended.
                    counter += 1
            elif flag == 1:
                # 31 frames seen with at least one confident match: grant access.
                cap.release()
                flag = 0
                counter = 1
                if Flag == 1:
                    self.InInsertion(current_time, DateToday)
                else:
                    self.OutInsertion(current_time, DateToday)
                granted = QMessageBox()
                granted.setWindowTitle('Granted !')
                granted.setText('Authentication successful. Access granted.'
                                ' Welcome : ' + predicted_name)
                granted.setIcon(QMessageBox.Information)
                granted.setStandardButtons(QMessageBox.Ok)
                granted.setDefaultButton(QMessageBox.Ok)
                xy = granted.exec_()
                if xy == QMessageBox.Ok:
                    cap = cv2.VideoCapture(0)  # resume scanning after the dialog
            elif flag == 0:
                # 31 frames with no confident match: deny access.
                cap.release()
                counter = 1
                granted = QMessageBox()
                granted.setWindowTitle('Denied !')
                granted.setText('Authentication unsuccessful. Access denied.')
                granted.setIcon(QMessageBox.Question)
                granted.setStandardButtons(QMessageBox.Ok)
                granted.setDefaultButton(QMessageBox.Ok)
                xy = granted.exec_()
                if xy == QMessageBox.Ok:
                    cap = cv2.VideoCapture(0)
# NOTE(review): fragment — the enclosing function and loop header
# (`while ...: ret, test_img = cap.read(`) lie outside this view; this is the
# tail of a webcam recognition loop.
) # captures frame and returns boolean value and captured image
faces_detected, gray_img = fr.faceDetection(test_img)
for (x, y, w, h) in faces_detected:
    cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
#resized_img = cv2.resize(test_img, (1000, 700))
#cv2.imshow('face detection Tutorial ',resized_img)
#cv2.waitKey(10)
for face in faces_detected:
    (x, y, w, h) = face
    # NOTE(review): slices look transposed (rows by w, cols by h); harmless
    # only for square detections — prefer gray_img[y:y + h, x:x + w]. Confirm.
    roi_gray = gray_img[y:y + w, x:x + h]
    label, confidence = face_recognizer.predict(roi_gray)  # predicting the label of given image
    print("confidence:", confidence)
    print("label:", label)
    fr.draw_rect(test_img, face)
    predicted_name = name[label]
    # NOTE(review): comment and condition disagree — `> 55` labels only the
    # LOW-confidence (large LBPH distance) matches; likely should be `<`. Confirm.
    if confidence > 55:  # If confidence less than 50 then don't print predicted face text on screen
        fr.put_text(test_img, predicted_name, x, y)
resized_img = cv2.resize(test_img, (600, 570))
cv2.imshow('FR', resized_img)
if cv2.waitKey(10) == ord('q'):  # wait until 'q' key is pressed
    break
cap.release()
# NOTE(review): missing call parentheses — this statement is a no-op; should
# be cv2.destroyAllWindows().
cv2.destroyAllWindows
def main():
    """Live webcam recognition with per-person name/relation/profession
    overlays, icon badges, and on-demand ('s' key) voice clips."""
    # Training is done offline; reuse the stored model here.
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read("trainingData.yml")
    name = {0: "Fariha", 1: "Inara", 2: "Arowa", 3: "Ankon", 4: "Farhan",
            5: "Minhaz", 6: "Afifa", 7: "Karishma", 8: "Nafisa"}
    gender = {0: "Female", 1: "Male", 2: "Female", 3: "Male", 4: "Male",
              5: "Male", 6: "Female", 7: "Female", 8: "Female"}
    relation = {0: "Sister", 1: "Cousin", 2: "Aunt", 3: "Uncle", 4: "Nephew",
                5: "Child", 6: "Enemy", 7: "Mother", 8: "Daughter"}
    prof = {0: "Student", 1: "MUA", 2: "Student", 3: "Student", 4: "Student",
            5: "Student", 6: "Villain", 7: "Student", 8: "Student"}
    cap = cv2.VideoCapture(0)
    while True:
        ret, test_img = cap.read()
        faces_detected, gray_img = fr.faceDetection(test_img)
        for face in faces_detected:
            (x, y, w, h) = face
            # BUGFIX: column slice must use the width (was x:x+h)
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            print("confidence:", confidence)
            print("label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]
            predicted_relation = relation[label]
            predicted_prof = prof[label]
            if confidence < 50:  # only act on close LBPH matches
                if cv2.waitKey(1) == ord('s'):
                    # Each clip is named "<person>.wav", so the nine-branch
                    # if/elif chain collapses to a single dictionary lookup.
                    winsound.PlaySound(name[label] + ".wav", winsound.SND_ASYNC)
                fr.put_text(test_img, predicted_name, x, y)
                fr.put_text2(test_img, predicted_relation, x, y)
                fr.put_text2(test_img, predicted_prof, x, y + h - 50)
                if gender[label] == "Female":
                    s_img = cv2.imread("pin2.png")
                elif gender[label] == "Male":
                    s_img = cv2.imread("pin3.png")
                r_img = cv2.imread("relation.png")
                p_img = cv2.imread("prof.png")
                # Gender pin sits just above the face box.
                x_offset = x
                y_offset = y - 50
                test_img[y_offset:y_offset + s_img.shape[0],
                         x_offset:x_offset + s_img.shape[1]] = s_img
                # Relation icon inside the top of the box.
                x1 = x + 10
                y1 = y + 10
                test_img[y1:y1 + r_img.shape[0], x1:x1 + r_img.shape[1]] = r_img
                # Profession icon near the bottom of the box.
                x1 = x + 10
                y1 = y + h - 40
                test_img[y1:y1 + p_img.shape[0], x1:x1 + p_img.shape[1]] = p_img
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("face detection", resized_img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()  # BUGFIX: original referenced the function without calling it
# Recognize the faces found earlier. `faces_detected_in_image`, `gray_img`
# and `subjectImage` are produced by preceding code outside this block.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# we load TrainedModel.yml which has our trained model stored.
face_recognizer.read('TrainedModel.yml')
label = {
    0: "Priyanka",
    1: "Kangana"
}  # dictionary containing labels for each identity
for face in faces_detected_in_image:
    # x is the left edge, x+w the right; y is the TOP edge, y+h the bottom
    # (image coordinates grow downward — original comment had this inverted).
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]
    # predict() returns (identity, uncertainty); lower uncertainty means a
    # closer LBPH match, i.e. higher confidence and fewer false matches.
    identity, uncertainity = face_recognizer.predict(roi_gray)
    # print("uncertainity:",uncertainity)
    print("identity:", identity)
    fr.draw_rect(subjectImage, face)
    predicted_label = label[identity]
    # Skip labeling when the match is too uncertain so the model does not
    # wrongly identify a face.
    if uncertainity < 65:
        fr.label_face(subjectImage, predicted_label, x, y)
display_image = cv2.resize(subjectImage, (1000, 1000))
cv2.imshow("Face Recognition System", display_image)
cv2.waitKey(0)  # wait until some key is pressed.
cv2.destroyAllWindows()  # BUGFIX: original referenced the function without calling it