def convert(people_folder):
    try:
        people = [person for person in os.listdir(people_folder)]
    except OSError:
        print("Have you added at least one person to the system?")
        sys.exit()
    print("These are the people in the Recognition System:")
    for person in people:
        print("-" + person)

    save_folder = "./gray"
    # video = VideoCamera()
    print(save_folder)
    detector = FaceDetector()
    counter = 1
    images = []
    labels = []
    for i, person in enumerate(people):
        folder = save_folder + '/' + person
        os.mkdir(folder)
        for image in os.listdir(people_folder + '/' + person):
            frame = cv2.imread(people_folder + '/' + person + '/' + image, 0)
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = oo.get_images(frame, face_coord)
                cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])
                print('Images Saved: ' + str(counter))
                counter += 1
                cv2.imshow('Saved Face', face_img[0])
                cv2.waitKey(50)
def add_person(people_folder):
    person_name = input('What is the name of the new person: ').lower()
    folder = people_folder + '/' + person_name
    if not os.path.exists(folder):
        input("I will now take 20 pictures. Press ENTER when ready.")
        os.mkdir(folder)
        video = VideoCamera()
        detector = FaceDetector()
        counter = 1
        timer = 0
        cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)
        while counter < 21:
            frame = video.get_frame()
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = oo.get_images(frame, face_coord)
                if timer % 100 == 5:
                    cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])
                    print('Images Saved: ' + str(counter))
                    counter += 1
                    cv2.imshow('Saved Face', face_img[0])
            cv2.imshow('Video Feed', frame)
            cv2.waitKey(50)
            timer += 5
    else:
        print("This name already exists.")
        sys.exit()
def live_add_face(path, shape, num_to_add):
    # Parameters:
    #   path:       string, relative path of the directory where the training
    #               images are about to be stored
    #   shape:      string, either 'rectangle' or 'ellipse'; how a detected face
    #               is highlighted in a frame of the video stream
    #   num_to_add: int, number of training images to take for each new face

    # initialise the frontal face detector
    detector = FaceDetector('app/frontal_face.xml')
    # initialise the video camera object
    vid = video_capture.VideoCamera()
    # counter: keeps track of the number of training images taken so far
    counter = 1
    # timer: records how much time has passed since the adding-face procedure
    # began; a new training image is only added periodically, to avoid saving
    # too many images in a very short time
    timer = 0
    while counter <= num_to_add:
        frame = vid.get_frame()
        face_coord = detector.detect(frame)
        if len(face_coord):
            # a face was detected:
            # frame is the current frame of the video stream,
            # face_image is the image of the region in which the face was detected
            frame, face_image = get_image(frame, face_coord, shape)
            if timer % 10 == 5:
                # save the file to the directory named after the face to be added
                cv2.imwrite(path + '/' + str(counter) + '.jpg', face_image[0])
                print("Image saved: " + str(counter))
                counter = counter + 1
            # show the user the number of pictures that have been taken
            cv2.putText(frame, "Number of pictures taken: " + str(counter),
                        (5, frame.shape[0] - 5), cv2.FONT_HERSHEY_PLAIN, 1.2,
                        (206, 0, 209), 2, cv2.LINE_AA)
            cv2.imshow("Saved face", face_image[0])
        if counter >= num_to_add:
            cv2.putText(frame, "Adding new face complete. " + str(counter),
                        (5, frame.shape[0] - 5), cv2.FONT_HERSHEY_PLAIN, 1.2,
                        (206, 0, 209), 2, cv2.LINE_AA)
        # create the JPEG encoding of the frame
        encode = cv2.imencode('.jpg', frame)[1].tobytes()
        # yield the encoding: because of the yield, this function is a Python
        # generator; the generator is passed to flask's Response() to render
        # the video stream
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + encode + b'\r\n')
        timer = timer + 5
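# --- Example usage (not part of the original listing) -----------------------
# A minimal sketch of how a frame generator such as live_add_face() might be
# wired into a Flask streaming route, as hinted at by the comments above. The
# route path, the folder layout ('app/faces/<name>'), and the num_to_add value
# of 20 are assumptions made for illustration only.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/add_face/<name>')
def add_face(name):
    # each yielded multipart JPEG chunk replaces the previous frame in the browser
    return Response(live_add_face('app/faces/' + name, 'rectangle', 20),
                    mimetype='multipart/x-mixed-replace; boundary=frame')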
class FaceLayout(BaseLayout):
    def _init_custom_layout(self):
        # initialize data structure
        self.samples = []
        self.labels = []

        # call method to save data upon exiting
        self.Bind(wx.EVT_CLOSE, self._on_exit)

    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)
        self.head = None

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (_, y_test), V, m = homebrew.load_from_file(
                load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array([self.pca_V.shape[1],
                                        len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()

    def _create_custom_layout(self):
        # create horizontal layout with train/test buttons
        pnl1 = wx.Panel(self, -1)
        self.training = wx.RadioButton(pnl1, -1, 'Train', (10, 10),
                                       style=wx.RB_GROUP)
        self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training)
        self.testing = wx.RadioButton(pnl1, -1, 'Test')
        self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(self.training, 1)
        hbox1.Add(self.testing, 1)
        pnl1.SetSizer(hbox1)

        # create a horizontal layout with all buttons
        pnl2 = wx.Panel(self, -1)
        self.neutral = wx.RadioButton(pnl2, -1, 'neutral', (10, 10),
                                      style=wx.RB_GROUP)
        self.happy = wx.RadioButton(pnl2, -1, 'happy')
        self.sad = wx.RadioButton(pnl2, -1, 'sad')
        self.surprised = wx.RadioButton(pnl2, -1, 'surprised')
        self.angry = wx.RadioButton(pnl2, -1, 'angry')
        self.disgusted = wx.RadioButton(pnl2, -1, 'disgusted')
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(self.neutral, 1)
        hbox2.Add(self.happy, 1)
        hbox2.Add(self.sad, 1)
        hbox2.Add(self.surprised, 1)
        hbox2.Add(self.angry, 1)
        hbox2.Add(self.disgusted, 1)
        pnl2.SetSizer(hbox2)

        # create horizontal layout with single snapshot button
        pnl3 = wx.Panel(self, -1)
        self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot')
        self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3.Add(self.snapshot, 1)
        pnl3.SetSizer(hbox3)

        # arrange all horizontal layouts vertically
        self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1)
        self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1)
        self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1)

    def _process_frame(self, frame):
        # detect face
        success, frame, self.head, (x, y) = self.faces.detect(frame)

        if success and self.testing.GetValue():
            # if face found: preprocess (align)
            success, head = self.faces.align_head(self.head)
            if success:
                # extract features using PCA (loaded from file)
                X, _, _ = homebrew.extract_features([head.flatten()],
                                                    self.pca_V, self.pca_m)

                # predict label with pre-trained MLP
                label = self.MLP.predict(np.array(X))[0]

                # draw label above bounding box
                cv2.putText(frame, str(label), (x, y - 20),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        return frame

    def _on_training(self, evt):
        self.neutral.Enable()
        self.happy.Enable()
        self.sad.Enable()
        self.surprised.Enable()
        self.angry.Enable()
        self.disgusted.Enable()
        self.snapshot.Enable()

    def _on_testing(self, evt):
        self.neutral.Disable()
        self.happy.Disable()
        self.sad.Disable()
        self.surprised.Disable()
        self.angry.Disable()
        self.disgusted.Disable()
        self.snapshot.Disable()

    def _on_snapshot(self, evt):
        if self.neutral.GetValue():
            label = 'neutral'
        elif self.happy.GetValue():
            label = 'happy'
        elif self.sad.GetValue():
            label = 'sad'
        elif self.surprised.GetValue():
            label = 'surprised'
        elif self.angry.GetValue():
            label = 'angry'
        elif self.disgusted.GetValue():
            label = 'disgusted'

        if self.head is None:
            print "No face detected"
        else:
            success, head = self.faces.align_head(self.head)
            if success:
                print "Added sample to training set"
                self.samples.append(head.flatten())
                self.labels.append(label)
            else:
                print "Could not align head (eye detection failed?)"

    def _on_exit(self, evt):
        # if we have collected some samples, dump them to file
        if len(self.samples) > 0:
            # make sure we don't overwrite an existing file
            if path.isfile(self.data_file):
                # file already exists, construct new filename
                load_from_file, fileext = path.splitext(self.data_file)
                offset = 0
                while True:
                    file = load_from_file + "-" + str(offset) + fileext
                    if path.isfile(file):
                        offset += 1
                    else:
                        break
                self.data_file = file

            # dump samples and labels to file
            f = open(self.data_file, 'wb')
            pickle.dump(self.samples, f)
            pickle.dump(self.labels, f)
            f.close()

            # inform user that file was created
            print "Saved", len(self.samples), "samples to", self.data_file

        # deallocate
        self.Destroy()
class FaceLayout(BaseLayout): """A custom layout for face detection and facial expression recognition A GUI to both assemble a training set and to perform real-time classification on the live stream of a webcam using a pre-trained classifier. The GUI operates in two different modes: * Training Mode: In training mode, the app will collect image frames, detect a face therein, assignassign a label depending on the facial expression, and upon exiting save all collected data samples in a file, so that it can be parsed by datasets.homebrew. * Testing Mode: In testing mode, the app will detect a face in each video frame and predict the corresponding class label using a pre-trained MLP. """ def _init_custom_layout(self): """Initializes GUI""" # initialize data structure self.samples = [] self.labels = [] # call method to save data upon exiting self.Bind(wx.EVT_CLOSE, self._on_exit) def init_algorithm( self, save_training_file='datasets/faces_training.pkl', load_preprocessed_data='datasets/faces_preprocessed.pkl', load_mlp='params/mlp.xml', face_casc='params/haarcascade_frontalface_default.xml', left_eye_casc='params/haarcascade_lefteye_2splits.xml', right_eye_casc='params/haarcascade_righteye_2splits.xml'): """Initializes face detector and facial expression classifier This method initializes both the face detector and the facial expression classifier. :param save_training_file: filename for storing the assembled training set :param load_preprocessed_data: filename for loading a previously preprocessed dataset (for classification in Testing Mode) :param load_mlp: filename for loading a pre-trained MLP classifier (use the script train_test_mlp.py) :param face_casc: path to a face cascade :param left_eye_casc: path to a left-eye cascade :param right_eye_casc: path to a right-eye cascade """ self.data_file = save_training_file self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc) # load preprocessed dataset to access labels and PCA params if path.isfile(load_preprocessed_data): (_, y_train), (_, y_test), V, m = homebrew.load_from_file( load_preprocessed_data) self.pca_V = V self.pca_m = m self.all_labels = np.unique(np.hstack((y_train, y_test))) # load pre-trained multi-layer perceptron if path.isfile(load_mlp): layer_sizes = np.array([self.pca_V.shape[1], len(self.all_labels)]) self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels) self.MLP.load(load_mlp) else: print "Warning: Testing is disabled" print "Could not find pre-trained MLP file ", load_mlp self.testing.Disable() else: print "Warning: Testing is disabled" print "Could not find data file ", load_preprocessed_data self.testing.Disable() def _create_custom_layout(self): """Decorates the GUI with buttons for assigning class labels""" # create horizontal layout with train/test buttons pnl1 = wx.Panel(self, -1) self.training = wx.RadioButton(pnl1, -1, 'Train', (10, 10), style=wx.RB_GROUP) self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training) self.testing = wx.RadioButton(pnl1, -1, 'Test') self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing) hbox1 = wx.BoxSizer(wx.HORIZONTAL) hbox1.Add(self.training, 1) hbox1.Add(self.testing, 1) pnl1.SetSizer(hbox1) # create a horizontal layout with all buttons pnl2 = wx.Panel(self, -1) self.neutral = wx.RadioButton(pnl2, -1, 'neutral', (10, 10), style=wx.RB_GROUP) self.happy = wx.RadioButton(pnl2, -1, 'happy') self.sad = wx.RadioButton(pnl2, -1, 'sad') self.surprised = wx.RadioButton(pnl2, -1, 'surprised') self.angry = wx.RadioButton(pnl2, -1, 'angry') self.disgusted = 
wx.RadioButton(pnl2, -1, 'disgusted') hbox2 = wx.BoxSizer(wx.HORIZONTAL) hbox2.Add(self.neutral, 1) hbox2.Add(self.happy, 1) hbox2.Add(self.sad, 1) hbox2.Add(self.surprised, 1) hbox2.Add(self.angry, 1) hbox2.Add(self.disgusted, 1) pnl2.SetSizer(hbox2) # create horizontal layout with single snapshot button pnl3 = wx.Panel(self, -1) self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot') self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot) hbox3 = wx.BoxSizer(wx.HORIZONTAL) hbox3.Add(self.snapshot, 1) pnl3.SetSizer(hbox3) # arrange all horizontal layouts vertically self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1) self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1) self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1) def _process_frame(self, frame): """Processes each captured frame This method processes each captured frame. * Training mode: Performs face detection. * Testing mode: Performs face detection, and predicts the class label of the facial expression. """ # detect face success, frame, self.head = self.faces.detect(frame) if success and self.testing.GetValue(): # if face found: preprocess (align) success, head = self.faces.align_head(self.head) if success: # extract features using PCA (loaded from file) X, _, _ = homebrew.extract_features([head.flatten()], self.pca_V, self.pca_m) # predict label with pre-trained MLP label = self.MLP.predict(np.array(X))[0] # draw label above bounding box cv2.putText(frame, str(label), (x, y - 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2) return frame def _on_training(self, evt): """Enables all training-related buttons when Training Mode is on""" self.neutral.Enable() self.happy.Enable() self.sad.Enable() self.surprised.Enable() self.angry.Enable() self.disgusted.Enable() self.snapshot.Enable() def _on_testing(self, evt): """Disables all training-related buttons when Testing Mode is on""" self.neutral.Disable() self.happy.Disable() self.sad.Disable() self.surprised.Disable() self.angry.Disable() self.disgusted.Disable() self.snapshot.Disable() def _on_snapshot(self, evt): """Takes a snapshot of the current frame This method takes a snapshot of the current frame, preprocesses it to extract the head region, and upon success adds the data sample to the training set. 
""" if self.neutral.GetValue(): label = 'neutral' elif self.happy.GetValue(): label = 'happy' elif self.sad.GetValue(): label = 'sad' elif self.surprised.GetValue(): label = 'surprised' elif self.angry.GetValue(): label = 'angry' elif self.disgusted.GetValue(): label = 'disgusted' if self.head is None: print "No face detected" else: success, head = self.faces.align_head(self.head) if success: print "Added sample to training set" self.samples.append(head.flatten()) self.labels.append(label) else: print "Could not align head (eye detection failed?)" def _on_exit(self, evt): """Dumps the training data to file upon exiting""" # if we have collected some samples, dump them to file if len(self.samples) > 0: # make sure we don't overwrite an existing file if path.isfile(self.data_file): # file already exists, construct new load_from_file load_from_file, fileext = path.splitext(self.data_file) offset = 0 while True: file = load_from_file + "-" + str(offset) + fileext if path.isfile(file): offset += 1 else: break self.data_file = file # dump samples and labels to file f = open(self.data_file, 'wb') pickle.dump(self.samples, f) pickle.dump(self.labels, f) f.close() # inform user that file was created print "Saved", len(self.samples), "samples to", self.data_file # deallocate self.Destroy()
class FaceLayout(BaseLayout): """A custom layout for face detection and facial expression recognition A GUI to both assemble a training set and to perform real-time classification on the live stream of a webcam using a pre-trained classifier. The GUI operates in two different modes: * Training Mode: In training mode, the app will collect image frames, detect a face therein, assignassign a label depending on the facial expression, and upon exiting save all collected data samples in a file, so that it can be parsed by datasets.homebrew. * Testing Mode: In testing mode, the app will detect a face in each video frame and predict the corresponding class label using a pre-trained MLP. """ def _init_custom_layout(self): """Initializes GUI""" # initialize data structure self.samples = [] self.labels = [] # call method to save data upon exiting self.Bind(wx.EVT_CLOSE, self._on_exit) def init_algorithm( self, save_training_file='datasets/faces_training.pkl', load_preprocessed_data='datasets/faces_preprocessed.pkl', load_mlp='params/mlp.xml', face_casc='params/haarcascade_frontalface_default.xml', left_eye_casc='params/haarcascade_lefteye_2splits.xml', right_eye_casc='params/haarcascade_righteye_2splits.xml'): """Initializes face detector and facial expression classifier This method initializes both the face detector and the facial expression classifier. :param save_training_file: filename for storing the assembled training set :param load_preprocessed_data: filename for loading a previously preprocessed dataset (for classification in Testing Mode) :param load_mlp: filename for loading a pre-trained MLP classifier (use the script train_test_mlp.py) :param face_casc: path to a face cascade :param left_eye_casc: path to a left-eye cascade :param right_eye_casc: path to a right-eye cascade """ self.data_file = save_training_file self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc) # load preprocessed dataset to access labels and PCA params if path.isfile(load_preprocessed_data): (_, y_train), ( _, y_test), V, m = homebrew.load_from_file(load_preprocessed_data) self.pca_V = V self.pca_m = m self.all_labels = np.unique(np.hstack((y_train, y_test))) # load pre-trained multi-layer perceptron if path.isfile(load_mlp): layer_sizes = np.array( [self.pca_V.shape[1], len(self.all_labels)]) self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels) self.MLP.load(load_mlp) else: print "Warning: Testing is disabled" print "Could not find pre-trained MLP file ", load_mlp self.testing.Disable() else: print "Warning: Testing is disabled" print "Could not find data file ", load_preprocessed_data self.testing.Disable() def _create_custom_layout(self): """Decorates the GUI with buttons for assigning class labels""" # create horizontal layout with train/test buttons pnl1 = wx.Panel(self, -1) self.training = wx.RadioButton(pnl1, -1, 'Train', (10, 10), style=wx.RB_GROUP) self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training) self.testing = wx.RadioButton(pnl1, -1, 'Test') self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing) hbox1 = wx.BoxSizer(wx.HORIZONTAL) hbox1.Add(self.training, 1) hbox1.Add(self.testing, 1) pnl1.SetSizer(hbox1) # create a horizontal layout with all buttons pnl2 = wx.Panel(self, -1) self.neutral = wx.RadioButton(pnl2, -1, 'neutral', (10, 10), style=wx.RB_GROUP) self.happy = wx.RadioButton(pnl2, -1, 'happy') self.sad = wx.RadioButton(pnl2, -1, 'sad') self.surprised = wx.RadioButton(pnl2, -1, 'surprised') self.angry = wx.RadioButton(pnl2, -1, 'angry') self.disgusted = 
wx.RadioButton(pnl2, -1, 'disgusted') hbox2 = wx.BoxSizer(wx.HORIZONTAL) hbox2.Add(self.neutral, 1) hbox2.Add(self.happy, 1) hbox2.Add(self.sad, 1) hbox2.Add(self.surprised, 1) hbox2.Add(self.angry, 1) hbox2.Add(self.disgusted, 1) pnl2.SetSizer(hbox2) # create horizontal layout with single snapshot button pnl3 = wx.Panel(self, -1) self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot') self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot) hbox3 = wx.BoxSizer(wx.HORIZONTAL) hbox3.Add(self.snapshot, 1) pnl3.SetSizer(hbox3) # arrange all horizontal layouts vertically self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1) self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1) self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1) def _process_frame(self, frame): """Processes each captured frame This method processes each captured frame. * Training mode: Performs face detection. * Testing mode: Performs face detection, and predicts the class label of the facial expression. """ # detect face success, frame, self.head = self.faces.detect(frame) if success and self.testing.GetValue(): # if face found: preprocess (align) success, head = self.faces.align_head(self.head) if success: # extract features using PCA (loaded from file) X, _, _ = homebrew.extract_features([head.flatten()], self.pca_V, self.pca_m) # predict label with pre-trained MLP label = self.MLP.predict(np.array(X))[0] # draw label above bounding box cv2.putText(frame, str(label), (x, y - 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2) return frame def _on_training(self, evt): """Enables all training-related buttons when Training Mode is on""" self.neutral.Enable() self.happy.Enable() self.sad.Enable() self.surprised.Enable() self.angry.Enable() self.disgusted.Enable() self.snapshot.Enable() def _on_testing(self, evt): """Disables all training-related buttons when Testing Mode is on""" self.neutral.Disable() self.happy.Disable() self.sad.Disable() self.surprised.Disable() self.angry.Disable() self.disgusted.Disable() self.snapshot.Disable() def _on_snapshot(self, evt): """Takes a snapshot of the current frame This method takes a snapshot of the current frame, preprocesses it to extract the head region, and upon success adds the data sample to the training set. 
""" if self.neutral.GetValue(): label = 'neutral' elif self.happy.GetValue(): label = 'happy' elif self.sad.GetValue(): label = 'sad' elif self.surprised.GetValue(): label = 'surprised' elif self.angry.GetValue(): label = 'angry' elif self.disgusted.GetValue(): label = 'disgusted' if self.head is None: print "No face detected" else: success, head = self.faces.align_head(self.head) if success: print "Added sample to training set" self.samples.append(head.flatten()) self.labels.append(label) else: print "Could not align head (eye detection failed?)" def _on_exit(self, evt): """Dumps the training data to file upon exiting""" # if we have collected some samples, dump them to file if len(self.samples) > 0: # make sure we don't overwrite an existing file if path.isfile(self.data_file): # file already exists, construct new load_from_file load_from_file, fileext = path.splitext(self.data_file) offset = 0 while True: file = load_from_file + "-" + str(offset) + fileext if path.isfile(file): offset += 1 else: break self.data_file = file # dump samples and labels to file f = open(self.data_file, 'wb') pickle.dump(self.samples, f) pickle.dump(self.labels, f) f.close() # inform user that file was created print "Saved", len(self.samples), "samples to", self.data_file # deallocate self.Destroy()
def face_recognition_login(self, SHAPE='rectangle', DURATION=30, NUM_TO_TAKE=5):
    start = time.time()
    threshold = 95
    # user_label, string type, records the face path of the user who has been
    # detected and is being considered for login
    user_label = ""
    # get all the users for whom face recognition login has been enabled
    users = User.query.filter_by(login_enabled=True).all()
    detector = FaceDetector('app/frontal_face.xml')
    recogniser = cv2.face.LBPHFaceRecognizer_create()
    vid = video_capture.VideoCamera()
    recogniser.train(self.images, np.array(self.labels))
    count = 0
    timer = 0
    # keep checking for face recognition login while the required number of
    # matching face pictures has not been reached yet and the time elapsed
    # since the start is below the allowed duration
    while count <= NUM_TO_TAKE and time.time() - start < DURATION + 5:
        frame = vid.get_frame()
        face_coord = detector.detect(frame)
        if len(face_coord) and time.time() - start < DURATION:
            # a face was detected
            timer = timer + 1
            frame, face_image = get_image(frame, face_coord, SHAPE)
            # face_image[0], since we are only logging in one user
            image = face_image[0]
            pred, error = recogniser.predict(image)
            if timer % 3 == 2:
                # update every three frames in which a face is recognised
                if error < threshold:
                    if user_label == "":
                        # first match: record the label of the current user
                        user_label = self.face_label[pred]
                    if user_label != self.face_label[pred]:
                        # a different user was matched: restart the count
                        count = 0
                        user_label = self.face_label[pred]
                    else:
                        count = count + 1
                    print "detected: ", self.face_label[pred]
                    print self.face_label
            height, width, channel = frame.shape
            cv2.rectangle(frame, (22, 22), (22 + count * 30, 26),
                          (206, 0, 209), 4)
            cv2.rectangle(frame, (20, 20), (20 + NUM_TO_TAKE * 30, 30),
                          (206, 0, 209), 2)
            cv2.putText(frame,
                        "Login check complete: " + str(count) + "/" + str(NUM_TO_TAKE),
                        (5, 60), cv2.FONT_HERSHEY_PLAIN, 1.2,
                        (206, 0, 209), 2, cv2.LINE_AA)
        if count >= NUM_TO_TAKE:
            cv2.putText(frame, "Login criteria satisfied.", (5, 120),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                        cv2.LINE_AA)
            cv2.putText(frame,
                        "Please click the Login Face button above to log in the user",
                        (5, 140), cv2.FONT_HERSHEY_PLAIN, 1.2,
                        (206, 0, 209), 2, cv2.LINE_AA)
        if time.time() - start >= DURATION:
            cv2.putText(frame, "Login criteria not satisfied.", (5, 120),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                        cv2.LINE_AA)
            cv2.putText(frame, "Time ran out. Face recognition login failed.",
                        (5, 160), cv2.FONT_HERSHEY_PLAIN, 1.2,
                        (206, 0, 209), 2, cv2.LINE_AA)
        encode = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + encode + b'\r\n')

    if count > NUM_TO_TAKE:
        self.login = True
        # load the user to log in
        self.user = User.query.filter_by(login_path=user_label).first()
        print "Facelogin_enabled: ", self.login
        print "Login user path: ", self.user
def recognize_people(people_folder):
    try:
        people = [person for person in os.listdir(people_folder)]
    except OSError:
        print("Have you added at least one person to the system?")
        sys.exit()
    print("These are the people in the Recognition System:")
    for person in people:
        print("-" + person)

    start = time.time()
    # choice = check_choice()
    recognizer = None
    detector = FaceDetector()
    recognizer = cv2.face.createLBPHFaceRecognizer()
    threshold = 91  # 93
    images = []
    labels = []
    labels_people = {}
    for i, person in enumerate(people):
        labels_people[i] = person
        for image in os.listdir(people_folder + '/' + person):
            images.append(cv2.imread(people_folder + '/' + person + '/' + image, 0))
            labels.append(i)
    try:
        recognizer.train(images, np.array(labels))
        print("train model")
    except cv2.error:
        print("\nOpenCV Error: Do you have at least two people in the database?\n")
        sys.exit()
    end = time.time()
    print(end - start)

    video = VideoCamera()
    while True:
        frame = video.get_frame()
        faces_coord = detector.detect(frame)
        if len(faces_coord):
            frame, faces_img = oo.get_images(frame, faces_coord)
            for i, face_img in enumerate(faces_img):
                if __version__ == "3.1.0":
                    collector = cv2.face.MinDistancePredictCollector()
                    recognizer.predict(face_img, collector)
                    conf = collector.getDist()
                    pred = collector.getLabel()
                else:
                    pred, conf = recognizer.predict(face_img)
                print("Prediction: " + str(pred))
                print('Confidence: ' + str(round(conf)))
                print('Threshold: ' + str(threshold))
                if conf < threshold:
                    cv2.putText(frame, labels_people[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 2),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,
                                cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,
                                cv2.LINE_AA)
        cv2.putText(frame, "ESC to exit", (5, frame.shape[0] - 5),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2, cv2.LINE_AA)
        cv2.imshow('Video', frame)
        if cv2.waitKey(100) & 0xFF == 27:
            sys.exit()
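# --- Example usage (not part of the original listing) -----------------------
# A minimal command-line driver, assuming the convert(), add_person(), and
# recognize_people() functions above live in one module and that training
# images are kept under a local "people" folder (both assumptions, not taken
# from the original code).
import os
import sys

if __name__ == '__main__':
    PEOPLE_FOLDER = 'people'
    if not os.path.exists(PEOPLE_FOLDER):
        os.makedirs(PEOPLE_FOLDER)
    choice = input('(1) Add person  (2) Recognize people  (3) Convert to gray: ')
    if choice == '1':
        add_person(PEOPLE_FOLDER)
    elif choice == '2':
        recognize_people(PEOPLE_FOLDER)
    elif choice == '3':
        convert(PEOPLE_FOLDER)
    else:
        sys.exit('Unknown option')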