def convert(people_folder):
    try:
        people = os.listdir(people_folder)
    except OSError:
        print("Have you added at least one person to the system?")
        sys.exit()
    print("These are the people in the Recognition System:")
    for person in people:
        print("-" + person)
    save_folder = "./gray"

    print(save_folder)
    detector = FaceDetector()
    counter = 1
    images = []
    labels = []

    for i, person in enumerate(people):
        folder = save_folder + '/' + person
        os.makedirs(folder)  # also creates save_folder if it does not exist yet
        for image in os.listdir(people_folder + '/' + person):
            frame = cv2.imread(people_folder + '/' + person + '/' + image, 0)
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = oo.get_images(frame, face_coord)
                cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])
                print('Images Saved: ' + str(counter))
                counter += 1
                cv2.imshow('Saved Face', face_img[0])
            cv2.waitKey(50)
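The second argument to cv2.imread in the loop above is the read flag: 0 is the numeric value of cv2.IMREAD_GRAYSCALE, so each training image is loaded as single-channel grayscale. A minimal standalone sketch (the filename is hypothetical):

import cv2

# equivalent to cv2.imread(path, 0), but with the named constant
gray = cv2.imread('some_face.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical file
if gray is not None:
    assert gray.ndim == 2  # grayscale images carry no channel axis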
class FacialExpressionRecognizerLayout(BaseLayout):
    def __init__(self, *args, svm_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.clf = cv2.ml.SVM_load(svm_path)

        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')

    def augment_layout(self):
        pass

    def process_frame(self, frame_rgb: np.ndarray) -> np.ndarray:
        success, frame, self.head, (
            x, y) = self.face_detector.detect_face(frame_rgb)
        if not success:
            return frame

        success, head = self.face_detector.align_head(self.head)
        if not success:
            return frame

        # We have to pass a [1 x n] array to predict().
        features = featurize(head)[None]
        result = self.clf.predict(features)
        label = int(result[1][0])

        # Draw predicted label above the bounding box.
        cv2.putText(frame, decode(label), (x, y - 20),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        return frame
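featurize(head)[None] above uses NumPy's None (i.e. np.newaxis) indexing to prepend a batch axis, turning an (n,) feature vector into the [1 x n] array that predict expects. A quick illustration:

import numpy as np

v = np.arange(5)      # shape (5,)
batch = v[None]       # shape (1, 5), same as v[np.newaxis]
assert batch.shape == (1, 5)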
def add_person(people_folder):
    person_name = input('What is the name of the new person: ').lower()
    folder = people_folder +  '/' + person_name
    if not os.path.exists(folder):
        input("I will now take 20 pictures. Press ENTER when ready.")
        os.mkdir(folder)
        video = VideoCamera()
        detector = FaceDetector()
        counter = 1
        timer = 0
        cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)
        while counter < 21:
            frame = video.get_frame()
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = oo.get_images(frame, face_coord)
                
                if timer % 100 == 5:
                    cv2.imwrite(folder + '/' + str(counter) + '.jpg',
                                face_img[0])
                    print('Images Saved: ' + str(counter))
                    counter += 1
                    cv2.imshow('Saved Face', face_img[0])

            cv2.imshow('Video Feed', frame)
            cv2.waitKey(50)
            timer += 5
    else:
        print("This name already exists.")
        sys.exit()
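The timer % 100 == 5 test above, with timer advancing by 5 per loop pass, is true for timer values 5, 105, 205, ..., i.e. once every 20 frames (roughly once a second given the cv2.waitKey(50) delay). A one-liner to verify the cadence:

print([t for t in range(0, 500, 5) if t % 100 == 5])  # [5, 105, 205, 305, 405]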
Example #4
    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)
        self.head = None

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (_, y_test), V, m = homebrew.load_from_file(
                load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array([self.pca_V.shape[1],
                                        len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()
    def __init__(self, *args, svm_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.clf = cv2.ml.SVM_load(svm_path)

        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')
    def __init__(self,
                 *args,
                 training_data='data/cropped_faces.csv',
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')

        self.training_data = training_data
    def __init__(self, *args, clf_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.clf = cv2.ml.ANN_MLP_load(str(clf_path / 'mlp.xml'))

        self.index_to_label = pickle_load(clf_path / 'index_to_label')
        self.pca_args = pickle_load(clf_path / 'pca_args')

        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')
    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        """Initializes face detector and facial expression classifier

            This method initializes both the face detector and the facial
            expression classifier.

            :param save_training_file:     filename for storing the assembled
                                           training set
            :param load_preprocessed_data: filename for loading a previously
                                           preprocessed dataset (for
                                           classification in Testing Mode)
            :param load_mlp:               filename for loading a pre-trained
                                           MLP classifier (use the script
                                           train_test_mlp.py)
            :param face_casc:              path to a face cascade
            :param left_eye_casc:          path to a left-eye cascade
            :param right_eye_casc:         path to a right-eye cascade
        """
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)
        self.head = None

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (
                _,
                y_test), V, m = homebrew.load_from_file(load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array(
                    [self.pca_V.shape[1],
                     len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()
def live_add_face(path, shape, num_to_add):
    # Parameters: path, string, relative path of the directory where the training images are to be stored
    # shape, string, either 'rectangle' or 'ellipse': how a detected face is highlighted in a frame of the video stream
    # num_to_add, int, number of training images to take for each new face

    # initialise the frontal face detector
    detector = FaceDetector('app/frontal_face.xml')
    # initialise the video camera object
    vid = video_capture.VideoCamera()
    # counter: keeps track of the number of training images taken so far
    counter = 1
    # timer: counts ticks since the add-face procedure began;
    # only add a new training image periodically, to avoid taking too many images in a very short time
    timer = 0
    while counter <= num_to_add:
        frame = vid.get_frame()
        face_coord = detector.detect(frame)
        if len(face_coord):
            #If there is a face detected
            frame, face_image = get_image(frame, face_coord, shape)
            #frame is the current frame for the video stream
            #face_image is the image of the region in which the face is detected
            if timer % 10 == 5:
                cv2.imwrite(path + '/' + str(counter) + '.jpg', face_image[0])
                # save the file to the directory named after the face being added
                print("Image saved: " + str(counter))
                counter = counter + 1
                cv2.putText(frame, "Number of pictures taken: " + str(counter),
                            (5, frame.shape[0] - 5), cv2.FONT_HERSHEY_PLAIN,
                            1.2, (206, 0, 209), 2, cv2.LINE_AA)
                #show the user the number of pictures that have been taken
                cv2.imshow("Saved face", face_image[0])
            if counter >= num_to_add:
                cv2.putText(frame, "Adding new face complete. " + str(counter),
                            (5, frame.shape[0] - 5), cv2.FONT_HERSHEY_PLAIN,
                            1.2, (206, 0, 209), 2, cv2.LINE_AA)

        # create the JPEG encoding of the frame
        encode = cv2.imencode('.jpg', frame)[1].tobytes()
        # yield the encoding:
        # `yield` makes this function a Python generator;
        # the generator is passed to Flask's Response() to render the video stream
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + encode + b'\r\n')
        timer = timer + 5
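As the comments note, the generator above is meant to be handed to Flask's Response. A minimal sketch of the corresponding route, assuming an existing Flask app; the route path and argument values are illustrative:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/add_face_feed')
def add_face_feed():
    # the boundary name must match the b'--frame' marker yielded above
    return Response(live_add_face('app/faces/new_person', 'rectangle', 20),
                    mimetype='multipart/x-mixed-replace; boundary=frame')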
Example #10
def detect_faces_target(input_queue, output_queue, arguments):
    while True:
        frame = interruptible_get(input_queue)
        if frame == "":
            break
        try:
            for rectangle in FaceDetector.for_image(frame.image).detect_faces():
                frame = Frame(frame.id, frame.image.draw_rectangle(rectangle, Color(255, 0, 0)))
            
            output_queue.put(frame, block=True)
        except:
            traceback.print_exc(file=sys.stdout)
Example #11
    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        """Initializes face detector and facial expression classifier

            This method initializes both the face detector and the facial
            expression classifier.

            :param save_training_file:     filename for storing the assembled
                                           training set
            :param load_preprocessed_data: filename for loading a previously
                                           preprocessed dataset (for
                                           classification in Testing Mode)
            :param load_mlp:               filename for loading a pre-trained
                                           MLP classifier (use the script
                                           train_test_mlp.py)
            :param face_casc:              path to a face cascade
            :param left_eye_casc:          path to a left-eye cascade
            :param right_eye_casc:         path to a right-eye cascade
        """
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)
        self.head = None

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (_, y_test), V, m = homebrew.load_from_file(
                load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array([self.pca_V.shape[1],
                                        len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()
class FacialExpressionRecognizerLayout(BaseLayout):
    def __init__(self, *args, clf_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.clf = cv2.ml.ANN_MLP_load(str(clf_path / 'mlp.xml'))

        self.index_to_label = pickle_load(clf_path / 'index_to_label')
        self.pca_args = pickle_load(clf_path / 'pca_args')

        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')

    def featurize_head(self, head):
        return _pca_featurize(head[None], *self.pca_args)

    def augment_layout(self):
        pass

    def process_frame(self, frame_rgb: np.ndarray) -> np.ndarray:
        success, frame, self.head, (
            x, y) = self.face_detector.detect_face(frame_rgb)
        if not success:
            return frame

        success, head = self.face_detector.align_head(self.head)
        if not success:
            return frame

        # We have to pass a [1 x n] array to predict().
        _, output = self.clf.predict(self.featurize_head(head))
        label = self.index_to_label[np.argmax(output)]

        # Draw predicted label above the bounding box.
        cv2.putText(frame, label, (x, y - 20), cv2.FONT_HERSHEY_COMPLEX, 1,
                    (0, 255, 0), 2)

        return frame
Example #13
def detect_face(image):
    faces = FaceDetector.for_image(image).detect_faces()
    face = None if len(faces) == 0 else faces[0]
    return face
    def face_recognition_login(self,
                               SHAPE='rectangle',
                               DURATION=30,
                               NUM_TO_TAKE=5):
        start = time.time()
        threshold = 95
        user_label = ""
        # user_label, string, records the face path of the user who has been detected and is being considered for login

        users = User.query.filter_by(login_enabled=True).all()
        # get all the users for whom face-recognition login has been enabled

        detector = FaceDetector('app/frontal_face.xml')
        recogniser = cv2.face.LBPHFaceRecognizer_create()
        vid = video_capture.VideoCamera()
        recogniser.train(self.images, np.array(self.labels))

        count = 0
        timer = 0
        while count <= NUM_TO_TAKE and time.time() - start < DURATION + 5:
            # keep checking for face-recognition login while fewer than the required number of face pictures
            # have been matched and the time elapsed since the start is within the allowed duration

            frame = vid.get_frame()
            face_coord = detector.detect(frame)

            if len(face_coord) and time.time() - start < DURATION:
                #if there is a face detected

                timer = timer + 1
                frame, face_image = get_image(frame, face_coord, SHAPE)
                image = face_image[0]
                # face_image[0] since we are only logging in one user
                pred, error = recogniser.predict(image)

                if timer % 3 == 2:
                    #Updating every three frames in which a face is recognised.
                    if error < threshold:
                        if user_label == "":
                            user_label = self.face_label[pred]
                        if user_label != self.face_label[pred]:
                            count = 0
                            user_label = self.face_label[pred]
                        #add the label for the current user.
                        #If the length of the user label is 0 then add the first picture
                        else:
                            count = count + 1
                            print "detected: ", self.face_label[pred]
                            print self.face_label

            height, width, channel = frame.shape
            cv2.rectangle(frame, (22, 22), (22 + count * 30, 26),
                          (206, 0, 209), 4)
            cv2.rectangle(frame, (20, 20), (20 + NUM_TO_TAKE * 30, 30),
                          (206, 0, 209), 2)
            cv2.putText(
                frame,
                "Login check complete: " + str(count) + "/" + str(NUM_TO_TAKE),
                (5, 60), cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                cv2.LINE_AA)
            if count >= NUM_TO_TAKE:
                cv2.putText(frame, "Login criterions satisfied.", (5, 120),
                            cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                            cv2.LINE_AA)
                cv2.putText(
                    frame,
                    "Please click the Login Face button above to login the user",
                    (5, 140), cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                    cv2.LINE_AA)
            if time.time() - start >= DURATION:
                cv2.putText(frame, "Login criterions not satisfied.", (5, 120),
                            cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                            cv2.LINE_AA)
                cv2.putText(frame,
                            "Time run out. Face recognition login failed.",
                            (5, 160), cv2.FONT_HERSHEY_PLAIN, 1.2,
                            (206, 0, 209), 2, cv2.LINE_AA)

            encode = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + encode + b'\r\n')
        if count > NUM_TO_TAKE:
            self.login = True
            #loading the user to log in
            self.user = User.query.filter_by(login_path=user_label).first()
            print "Facelogin_enabled: ", self.login
            print "Login user path: ", self.user
Example #15
class FaceLayout(BaseLayout):
   
    def _init_custom_layout(self):
        # initialize data structure
        self.samples = []
        self.labels = []

        # call method to save data upon exiting
        self.Bind(wx.EVT_CLOSE, self._on_exit)

    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)
        self.head = None

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (_, y_test), V, m = homebrew.load_from_file(
                load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array([self.pca_V.shape[1],
                                        len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()

    def _create_custom_layout(self):
        # create horizontal layout with train/test buttons
        pnl1 = wx.Panel(self, -1)
        self.training = wx.RadioButton(pnl1, -1, 'Train', (10, 10),
                                       style=wx.RB_GROUP)
        self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training)
        self.testing = wx.RadioButton(pnl1, -1, 'Test')
        self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(self.training, 1)
        hbox1.Add(self.testing, 1)
        pnl1.SetSizer(hbox1)

        # create a horizontal layout with all buttons
        pnl2 = wx.Panel(self, -1)
        self.neutral = wx.RadioButton(pnl2, -1, 'neutral', (10, 10),
                                      style=wx.RB_GROUP)
        self.happy = wx.RadioButton(pnl2, -1, 'happy')
        self.sad = wx.RadioButton(pnl2, -1, 'sad')
        self.surprised = wx.RadioButton(pnl2, -1, 'surprised')
        self.angry = wx.RadioButton(pnl2, -1, 'angry')
        self.disgusted = wx.RadioButton(pnl2, -1, 'disgusted')
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(self.neutral, 1)
        hbox2.Add(self.happy, 1)
        hbox2.Add(self.sad, 1)
        hbox2.Add(self.surprised, 1)
        hbox2.Add(self.angry, 1)
        hbox2.Add(self.disgusted, 1)
        pnl2.SetSizer(hbox2)

        # create horizontal layout with single snapshot button
        pnl3 = wx.Panel(self, -1)
        self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot')
        self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3.Add(self.snapshot, 1)
        pnl3.SetSizer(hbox3)

        # arrange all horizontal layouts vertically
        self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1)
        self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1)
        self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1)

    def _process_frame(self, frame):
        # detect face
        success, frame, self.head, (x, y) = self.faces.detect(frame)

        if success and self.testing.GetValue():
            # if face found: preprocess (align)
            success, head = self.faces.align_head(self.head)
            if success:
                # extract features using PCA (loaded from file)
                X, _, _ = homebrew.extract_features([head.flatten()],
                                                    self.pca_V, self.pca_m)

                # predict label with pre-trained MLP
                label = self.MLP.predict(np.array(X))[0]

                # draw label above bounding box
                cv2.putText(frame, str(label), (x, y - 20),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        return frame

    def _on_training(self, evt):
        self.neutral.Enable()
        self.happy.Enable()
        self.sad.Enable()
        self.surprised.Enable()
        self.angry.Enable()
        self.disgusted.Enable()
        self.snapshot.Enable()

    def _on_testing(self, evt):
        self.neutral.Disable()
        self.happy.Disable()
        self.sad.Disable()
        self.surprised.Disable()
        self.angry.Disable()
        self.disgusted.Disable()
        self.snapshot.Disable()

    def _on_snapshot(self, evt):
        if self.neutral.GetValue():
            label = 'neutral'
        elif self.happy.GetValue():
            label = 'happy'
        elif self.sad.GetValue():
            label = 'sad'
        elif self.surprised.GetValue():
            label = 'surprised'
        elif self.angry.GetValue():
            label = 'angry'
        elif self.disgusted.GetValue():
            label = 'disgusted'

        if self.head is None:
            print "No face detected"
        else:
            success, head = self.faces.align_head(self.head)
            if success:
                print "Added sample to training set"
                self.samples.append(head.flatten())
                self.labels.append(label)
            else:
                print "Could not align head (eye detection failed?)"

    def _on_exit(self, evt):
        # if we have collected some samples, dump them to file
        if len(self.samples) > 0:
            # make sure we don't overwrite an existing file
            if path.isfile(self.data_file):
                # file already exists, construct a new filename
                load_from_file, fileext = path.splitext(self.data_file)
                offset = 0
                while True:
                    file = load_from_file + "-" + str(offset) + fileext
                    if path.isfile(file):
                        offset += 1
                    else:
                        break
                self.data_file = file

            # dump samples and labels to file
            with open(self.data_file, 'wb') as f:
                pickle.dump(self.samples, f)
                pickle.dump(self.labels, f)

            # inform user that file was created
            print "Saved", len(self.samples), "samples to", self.data_file

        # deallocate
        self.Destroy()
class FaceLayout(BaseLayout):
    """A custom layout for face detection and facial expression recognition

        A GUI to both assemble a training set and to perform real-time
        classification on the live stream of a webcam using a pre-trained
        classifier.

        The GUI operates in two different modes:
        * Training Mode: In training mode, the app will collect image frames,
                         detect a face therein, assign a label depending
                         on the facial expression, and upon exiting save all
                         collected data samples in a file, so that it can be
                         parsed by datasets.homebrew.
        * Testing Mode:  In testing mode, the app will detect a face in each
                         video frame and predict the corresponding class
                         label using a pre-trained MLP.
    """

    def _init_custom_layout(self):
        """Initializes GUI"""
        # initialize data structure
        self.samples = []
        self.labels = []

        # call method to save data upon exiting
        self.Bind(wx.EVT_CLOSE, self._on_exit)

    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        """Initializes face detector and facial expression classifier

            This method initializes both the face detector and the facial
            expression classifier.

            :param save_training_file:     filename for storing the assembled
                                           training set
            :param load_preprocessed_data: filename for loading a previously
                                           preprocessed dataset (for
                                           classification in Testing Mode)
            :param load_mlp:               filename for loading a pre-trained
                                           MLP classifier (use the script
                                           train_test_mlp.py)
            :param face_casc:              path to a face cascade
            :param left_eye_casc:          path to a left-eye cascade
            :param right_eye_casc:         path to a right-eye cascade
        """
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (_, y_test), V, m = homebrew.load_from_file(
                load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array([self.pca_V.shape[1],
                                        len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()

    def _create_custom_layout(self):
        """Decorates the GUI with buttons for assigning class labels"""
        # create horizontal layout with train/test buttons
        pnl1 = wx.Panel(self, -1)
        self.training = wx.RadioButton(pnl1, -1, 'Train', (10, 10),
                                       style=wx.RB_GROUP)
        self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training)
        self.testing = wx.RadioButton(pnl1, -1, 'Test')
        self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(self.training, 1)
        hbox1.Add(self.testing, 1)
        pnl1.SetSizer(hbox1)

        # create a horizontal layout with all buttons
        pnl2 = wx.Panel(self, -1)
        self.neutral = wx.RadioButton(pnl2, -1, 'neutral', (10, 10),
                                      style=wx.RB_GROUP)
        self.happy = wx.RadioButton(pnl2, -1, 'happy')
        self.sad = wx.RadioButton(pnl2, -1, 'sad')
        self.surprised = wx.RadioButton(pnl2, -1, 'surprised')
        self.angry = wx.RadioButton(pnl2, -1, 'angry')
        self.disgusted = wx.RadioButton(pnl2, -1, 'disgusted')
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(self.neutral, 1)
        hbox2.Add(self.happy, 1)
        hbox2.Add(self.sad, 1)
        hbox2.Add(self.surprised, 1)
        hbox2.Add(self.angry, 1)
        hbox2.Add(self.disgusted, 1)
        pnl2.SetSizer(hbox2)

        # create horizontal layout with single snapshot button
        pnl3 = wx.Panel(self, -1)
        self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot')
        self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3.Add(self.snapshot, 1)
        pnl3.SetSizer(hbox3)

        # arrange all horizontal layouts vertically
        self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1)
        self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1)
        self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1)

    def _process_frame(self, frame):
        """Processes each captured frame

            This method processes each captured frame.
            * Training mode:  Performs face detection.
            * Testing mode:   Performs face detection, and predicts the class
                              label of the facial expression.
        """
        # detect face
        success, frame, self.head, (x, y) = self.faces.detect(frame)

        if success and self.testing.GetValue():
            # if face found: preprocess (align)
            success, head = self.faces.align_head(self.head)
            if success:
                # extract features using PCA (loaded from file)
                X, _, _ = homebrew.extract_features([head.flatten()],
                                                    self.pca_V, self.pca_m)

                # predict label with pre-trained MLP
                label = self.MLP.predict(np.array(X))[0]

                # draw label above bounding box
                cv2.putText(frame, str(label), (x, y - 20),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        return frame

    def _on_training(self, evt):
        """Enables all training-related buttons when Training Mode is on"""
        self.neutral.Enable()
        self.happy.Enable()
        self.sad.Enable()
        self.surprised.Enable()
        self.angry.Enable()
        self.disgusted.Enable()
        self.snapshot.Enable()

    def _on_testing(self, evt):
        """Disables all training-related buttons when Testing Mode is on"""
        self.neutral.Disable()
        self.happy.Disable()
        self.sad.Disable()
        self.surprised.Disable()
        self.angry.Disable()
        self.disgusted.Disable()
        self.snapshot.Disable()

    def _on_snapshot(self, evt):
        """Takes a snapshot of the current frame

            This method takes a snapshot of the current frame, preprocesses
            it to extract the head region, and upon success adds the data
            sample to the training set.
        """
        if self.neutral.GetValue():
            label = 'neutral'
        elif self.happy.GetValue():
            label = 'happy'
        elif self.sad.GetValue():
            label = 'sad'
        elif self.surprised.GetValue():
            label = 'surprised'
        elif self.angry.GetValue():
            label = 'angry'
        elif self.disgusted.GetValue():
            label = 'disgusted'

        if self.head is None:
            print "No face detected"
        else:
            success, head = self.faces.align_head(self.head)
            if success:
                print "Added sample to training set"
                self.samples.append(head.flatten())
                self.labels.append(label)
            else:
                print "Could not align head (eye detection failed?)"

    def _on_exit(self, evt):
        """Dumps the training data to file upon exiting"""
        # if we have collected some samples, dump them to file
        if len(self.samples) > 0:
            # make sure we don't overwrite an existing file
            if path.isfile(self.data_file):
                # file already exists, construct a new filename
                load_from_file, fileext = path.splitext(self.data_file)
                offset = 0
                while True:
                    file = load_from_file + "-" + str(offset) + fileext
                    if path.isfile(file):
                        offset += 1
                    else:
                        break
                self.data_file = file

            # dump samples and labels to file
            with open(self.data_file, 'wb') as f:
                pickle.dump(self.samples, f)
                pickle.dump(self.labels, f)

            # inform user that file was created
            print "Saved", len(self.samples), "samples to", self.data_file

        # deallocate
        self.Destroy()
import argparse

import cv2
import matplotlib.pyplot as plt

from detectors import FaceDetector


DIMENSIONS = (512, 512)


def _imshow(img):
    return plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), vmin=0, vmax=255)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', default='media/B04521_07_01.png')
    args = parser.parse_args()

    face_detector = FaceDetector(eye_cascade='../.py3.7-cv-blueprints/lib/python3.7/site-packages/cv2/data/haarcascade_eye.xml')

    img = cv2.resize(cv2.imread(args.image), DIMENSIONS)

    fig = plt.figure(figsize=(19, 6))
    fig.add_subplot(1, 3, 1)

    success, frame, head, (x, y) = face_detector.detect_face(img)
    assert success, 'Face was not detected'
    _imshow(frame)
    plt.title('Detected Face')

    fig.add_subplot(1, 3, 2)
    head_copy = head.copy()
    eyes = face_detector.eye_centers(head_copy, outline=True)
    _imshow(head_copy)
Example #18
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    plugin = IEPlugin(device=ML_DEVICE)
    face_detector = FaceDetector(FACE_MODEL, log)
    face_detector.init(plugin)

    emotion_detector = EmotionDetector(EMOTION_MODEL, log)
    emotion_detector.init(plugin)

    gender_detector = GenderDetector(GENDER_MODEL, log)
    gender_detector.init(plugin)

    cap = cv2.VideoCapture(0)  # for the Pi camera this is done differently

    sio = socketio.Client()
    sio.connect('http://localhost:7777')

    while cap.isOpened():
        ret, next_frame = cap.read()

        if not ret:
            break

        initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        in_frame = face_detector.preprocess_frame(next_frame)
        face_detector.start(request_id=0, frame=in_frame)

        if face_detector.request(request_id=0):
            response = face_detector.response(request_id=0)
            faces = []
            face_id = 0
            for detected_face in response[0][0]:
                if face_detector.can_read(detected_face[2], 0.9):
                    face_id += 1
                    face = face_detector.extract_face(detected_face,
                                                      next_frame, initial_w,
                                                      initial_h)

                    in_emotion = emotion_detector.preprocess_frame(face)
                    emotion_detector.start(0, in_emotion)
                    emotion = None
                    if emotion_detector.request(0):
                        response = emotion_detector.response(0)
                        emotion = emotion_detector.extract_emotion(response[0])

                    in_gender = gender_detector.preprocess_frame(face)
                    gender_detector.start(0, in_gender)
                    gender = None
                    if gender_detector.request(0):
                        response = gender_detector.response(0)
                        gender = gender_detector.extract_gender(response[0])

                    if emotion and gender:
                        faces.append((face_id, gender, emotion))

            if len(faces) > 0:
                sio.emit('ai', faces)
                # log.info(faces)

    cv2.destroyAllWindows()
    del plugin
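The faces list above is emitted to a Socket.IO server on port 7777. A minimal sketch of a python-socketio server that could receive those 'ai' events; the eventlet wiring is one common choice, not something the snippet prescribes:

import eventlet
import socketio

sio = socketio.Server()
app = socketio.WSGIApp(sio)

@sio.on('ai')
def on_ai(sid, faces):
    # faces arrives as the list of (face_id, gender, emotion) entries emitted above
    print(sid, faces)

if __name__ == '__main__':
    eventlet.wsgi.server(eventlet.listen(('', 7777)), app)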
class DataCollectorLayout(BaseLayout):
    def __init__(self,
                 *args,
                 training_data='data/cropped_faces.csv',
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.face_detector = FaceDetector(
            face_cascade='params/haarcascade_frontalface_default.xml',
            eye_cascade='params/haarcascade_lefteye_2splits.xml')

        self.training_data = training_data

    def augment_layout(self):
        """Initializes GUI"""
        # initialize data structure
        self.samples = []
        self.labels = []

        # create a horizontal layout with all buttons
        pnl2 = wx.Panel(self, -1)
        self.neutral = wx.RadioButton(pnl2,
                                      -1,
                                      'neutral', (10, 10),
                                      style=wx.RB_GROUP)
        self.happy = wx.RadioButton(pnl2, -1, 'happy')
        self.sad = wx.RadioButton(pnl2, -1, 'sad')
        self.surprised = wx.RadioButton(pnl2, -1, 'surprised')
        self.angry = wx.RadioButton(pnl2, -1, 'angry')
        self.disgusted = wx.RadioButton(pnl2, -1, 'disgusted')
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(self.neutral, 1)
        hbox2.Add(self.happy, 1)
        hbox2.Add(self.sad, 1)
        hbox2.Add(self.surprised, 1)
        hbox2.Add(self.angry, 1)
        hbox2.Add(self.disgusted, 1)
        pnl2.SetSizer(hbox2)

        # create horizontal layout with single snapshot button
        pnl3 = wx.Panel(self, -1)
        self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot')
        self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3.Add(self.snapshot, 1)
        pnl3.SetSizer(hbox3)

        # arrange all horizontal layouts vertically
        self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1)
        self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1)

    def process_frame(self, frame_rgb: np.ndarray) -> np.ndarray:
        """
        Add a bounding box around the face if a face is detected.
        """
        _, frame, self.head, _ = self.face_detector.detect_face(frame_rgb)
        return frame

    def _on_snapshot(self, evt):
        """Takes a snapshot of the current frame

            This method takes a snapshot of the current frame, preprocesses
            it to extract the head region, and upon success adds the data
            sample to the training set.
        """
        if self.neutral.GetValue():
            label = 'neutral'
        elif self.happy.GetValue():
            label = 'happy'
        elif self.sad.GetValue():
            label = 'sad'
        elif self.surprised.GetValue():
            label = 'surprised'
        elif self.angry.GetValue():
            label = 'angry'
        elif self.disgusted.GetValue():
            label = 'disgusted'

        if self.head is None:
            print("No face detected")
        else:
            success, aligned_head = self.face_detector.align_head(self.head)
            if success:
                save_datum(self.training_data, label, aligned_head)
                print(f"Saved {label} training datum.")
            else:
                print("Could not align head (eye detection failed?)")
def classifier_training(PEOPLE_PATH, shape):
    # trains an OpenCV face recognition classifier
    # Parameters: PEOPLE_PATH, string, path of the directory for the current user that stores all the face folders, each of which contains training images for a face
    # shape: same as in the functions above
    # Return values: face_detector, OpenCV object, a detector that detects faces in an image
    # recogniser, OpenCV object, a recogniser that recognises detected faces
    # face_label, list, the labels of all the faces the recogniser can recognise

    try:
        # create a list of all the entries in the PEOPLE_PATH directory;
        # PEOPLE_PATH is the path of the directory where the training images for a user are stored.
        # On macOS the system creates a .DS_Store file to store folder metadata; it is not a
        # directory we can access, so filter it out rather than removing items while iterating
        people = [person for person in os.listdir(PEOPLE_PATH)
                  if person != '.DS_Store']
    except OSError:
        # if the directory cannot be read, there is nothing to train on
        print("There seems to be no person in the database")
        sys.exit()

    # initialise the frontal face detector
    # frontal_face.xml is an XML cascade file provided by OpenCV for frontal face detection
    # detect() returns the coordinates of the face if one is found
    face_detector = FaceDetector('app/frontal_face.xml')

    # initialise the face recogniser
    # Type: OpenCV object that compares detected faces to the known people in the dataset
    # Return: prediction (label) and a match distance
    # LBPH (Local Binary Patterns Histograms) is a face recognition algorithm provided by OpenCV
    # threshold: maximum match distance accepted when recognising a face

    recogniser = cv2.face.LBPHFaceRecognizer_create()
    threshold = 105

    # sample images from the dataset for the classifier training

    # images: list containing all the training images to be used for face recognition classifier training
    # each image element has a corresponding label in the labels list
    images = []

    # labels: list containing all the labels for the images
    # each label indicates which user the image belongs to
    labels = []
    person_label = []

    # face_label: list containing the paths of the people whose faces have been added for the current user
    face_label = []
    for i, person in enumerate(people):
        face_label.append(person)
        for image in os.listdir(PEOPLE_PATH + person):
            images.append(cv2.imread(PEOPLE_PATH + person + '/' + image, 0))
            labels.append(i)
        person_label.append(person)

    try:
        # OpenCV function to train the recogniser
        # parameters: images, list of numpy-array training images
        # labels, list, the label of the face in each training image, i.e. whose face it belongs to

        recogniser.train(images, np.array(labels))
        print("classifier trained on:", person_label)
    except cv2.error:
        # OpenCV training requirement
        print("At least two people are needed")
        sys.exit()

    return (face_detector, recogniser, face_label)
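A hedged usage sketch of classifier_training; the directory path and shape value are illustrative. Note the trailing slash: the function concatenates PEOPLE_PATH + person without adding a separator.

face_detector, recogniser, face_label = classifier_training('app/faces/alice/', 'rectangle')
print("known faces:", face_label)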
def recognize_people(people_folder):
    try:
        people = os.listdir(people_folder)
    except OSError:
        print("Have you added at least one person to the system?")
        sys.exit()
    print("These are the people in the Recognition System:")
    for person in people:
        print("-" + person)

    start = time.time()
    detector = FaceDetector()
    recognizer = cv2.face.createLBPHFaceRecognizer()
    threshold = 91  # 93
    images = []
    labels = []
    labels_people = {}
    for i, person in enumerate(people):
        labels_people[i] = person
        for image in os.listdir(people_folder + '/' + person):
            images.append(cv2.imread(people_folder +'/'+ person + '/' + image, 0))
            labels.append(i)
    try:
        recognizer.train(images, np.array(labels))
        print("Model trained")
    except cv2.error:
        print("\nOpenCV Error: Do you have at least two people in the database?\n")
        sys.exit()
    end = time.time()
    print(end - start)
    video = VideoCamera()
    while True:
        frame = video.get_frame()
        faces_coord = detector.detect(frame)
        if len(faces_coord):
            frame, faces_img = oo.get_images(frame, faces_coord)
            for i, face_img in enumerate(faces_img):
                if __version__ == "3.1.0":
                    collector = cv2.face.MinDistancePredictCollector()
                    recognizer.predict(face_img, collector)
                    conf = collector.getDist()
                    pred = collector.getLabel()
                else:
                    pred, conf = recognizer.predict(face_img)
                print("Prediction: " + str(pred))
                print('Confidence: ' + str(round(conf)))
                print('Threshold: ' + str(threshold))
                if conf < threshold:
                    cv2.putText(frame, labels_people[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 2),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,
                                cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,
                                cv2.LINE_AA)

        cv2.putText(frame, "ESC to exit", (5, frame.shape[0] - 5),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2, cv2.LINE_AA)
        cv2.imshow('Video', frame)
        if cv2.waitKey(100) & 0xFF == 27:
            sys.exit()
Example #22
class FaceLayout(BaseLayout):
    """A custom layout for face detection and facial expression recognition

        A GUI to both assemble a training set and to perform real-time
        classification on the live stream of a webcam using a pre-trained
        classifier.

        The GUI operates in two different modes:
        * Training Mode: In training mode, the app will collect image frames,
                         detect a face therein, assign a label depending
                         on the facial expression, and upon exiting save all
                         collected data samples in a file, so that it can be
                         parsed by datasets.homebrew.
        * Testing Mode:  In testing mode, the app will detect a face in each
                         video frame and predict the corresponding class
                         label using a pre-trained MLP.
    """
    def _init_custom_layout(self):
        """Initializes GUI"""
        # initialize data structure
        self.samples = []
        self.labels = []

        # call method to save data upon exiting
        self.Bind(wx.EVT_CLOSE, self._on_exit)

    def init_algorithm(
            self,
            save_training_file='datasets/faces_training.pkl',
            load_preprocessed_data='datasets/faces_preprocessed.pkl',
            load_mlp='params/mlp.xml',
            face_casc='params/haarcascade_frontalface_default.xml',
            left_eye_casc='params/haarcascade_lefteye_2splits.xml',
            right_eye_casc='params/haarcascade_righteye_2splits.xml'):
        """Initializes face detector and facial expression classifier

            This method initializes both the face detector and the facial
            expression classifier.

            :param save_training_file:     filename for storing the assembled
                                           training set
            :param load_preprocessed_data: filename for loading a previously
                                           preprocessed dataset (for
                                           classification in Testing Mode)
            :param load_mlp:               filename for loading a pre-trained
                                           MLP classifier (use the script
                                           train_test_mlp.py)
            :param face_casc:              path to a face cascade
            :param left_eye_casc:          path to a left-eye cascade
            :param right_eye_casc:         path to a right-eye cascade
        """
        self.data_file = save_training_file
        self.faces = FaceDetector(face_casc, left_eye_casc, right_eye_casc)

        # load preprocessed dataset to access labels and PCA params
        if path.isfile(load_preprocessed_data):
            (_, y_train), (
                _,
                y_test), V, m = homebrew.load_from_file(load_preprocessed_data)
            self.pca_V = V
            self.pca_m = m
            self.all_labels = np.unique(np.hstack((y_train, y_test)))

            # load pre-trained multi-layer perceptron
            if path.isfile(load_mlp):
                layer_sizes = np.array(
                    [self.pca_V.shape[1],
                     len(self.all_labels)])
                self.MLP = MultiLayerPerceptron(layer_sizes, self.all_labels)
                self.MLP.load(load_mlp)
            else:
                print "Warning: Testing is disabled"
                print "Could not find pre-trained MLP file ", load_mlp
                self.testing.Disable()
        else:
            print "Warning: Testing is disabled"
            print "Could not find data file ", load_preprocessed_data
            self.testing.Disable()

    def _create_custom_layout(self):
        """Decorates the GUI with buttons for assigning class labels"""
        # create horizontal layout with train/test buttons
        pnl1 = wx.Panel(self, -1)
        self.training = wx.RadioButton(pnl1,
                                       -1,
                                       'Train', (10, 10),
                                       style=wx.RB_GROUP)
        self.Bind(wx.EVT_RADIOBUTTON, self._on_training, self.training)
        self.testing = wx.RadioButton(pnl1, -1, 'Test')
        self.Bind(wx.EVT_RADIOBUTTON, self._on_testing, self.testing)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(self.training, 1)
        hbox1.Add(self.testing, 1)
        pnl1.SetSizer(hbox1)

        # create a horizontal layout with all buttons
        pnl2 = wx.Panel(self, -1)
        self.neutral = wx.RadioButton(pnl2,
                                      -1,
                                      'neutral', (10, 10),
                                      style=wx.RB_GROUP)
        self.happy = wx.RadioButton(pnl2, -1, 'happy')
        self.sad = wx.RadioButton(pnl2, -1, 'sad')
        self.surprised = wx.RadioButton(pnl2, -1, 'surprised')
        self.angry = wx.RadioButton(pnl2, -1, 'angry')
        self.disgusted = wx.RadioButton(pnl2, -1, 'disgusted')
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(self.neutral, 1)
        hbox2.Add(self.happy, 1)
        hbox2.Add(self.sad, 1)
        hbox2.Add(self.surprised, 1)
        hbox2.Add(self.angry, 1)
        hbox2.Add(self.disgusted, 1)
        pnl2.SetSizer(hbox2)

        # create horizontal layout with single snapshot button
        pnl3 = wx.Panel(self, -1)
        self.snapshot = wx.Button(pnl3, -1, 'Take Snapshot')
        self.Bind(wx.EVT_BUTTON, self._on_snapshot, self.snapshot)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3.Add(self.snapshot, 1)
        pnl3.SetSizer(hbox3)

        # arrange all horizontal layouts vertically
        self.panels_vertical.Add(pnl1, flag=wx.EXPAND | wx.TOP, border=1)
        self.panels_vertical.Add(pnl2, flag=wx.EXPAND | wx.BOTTOM, border=1)
        self.panels_vertical.Add(pnl3, flag=wx.EXPAND | wx.BOTTOM, border=1)

    def _process_frame(self, frame):
        """Processes each captured frame

            This method processes each captured frame.
            * Training mode:  Performs face detection.
            * Testing mode:   Performs face detection, and predicts the class
                              label of the facial expression.
        """
        # detect face
        success, frame, self.head, (x, y) = self.faces.detect(frame)

        if success and self.testing.GetValue():
            # if face found: preprocess (align)
            success, head = self.faces.align_head(self.head)
            if success:
                # extract features using PCA (loaded from file)
                X, _, _ = homebrew.extract_features([head.flatten()],
                                                    self.pca_V, self.pca_m)

                # predict label with pre-trained MLP
                label = self.MLP.predict(np.array(X))[0]

                # draw label above bounding box
                cv2.putText(frame, str(label), (x, y - 20),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        return frame

    def _on_training(self, evt):
        """Enables all training-related buttons when Training Mode is on"""
        self.neutral.Enable()
        self.happy.Enable()
        self.sad.Enable()
        self.surprised.Enable()
        self.angry.Enable()
        self.disgusted.Enable()
        self.snapshot.Enable()

    def _on_testing(self, evt):
        """Disables all training-related buttons when Testing Mode is on"""
        self.neutral.Disable()
        self.happy.Disable()
        self.sad.Disable()
        self.surprised.Disable()
        self.angry.Disable()
        self.disgusted.Disable()
        self.snapshot.Disable()

    def _on_snapshot(self, evt):
        """Takes a snapshot of the current frame

            This method takes a snapshot of the current frame, preprocesses
            it to extract the head region, and upon success adds the data
            sample to the training set.
        """
        if self.neutral.GetValue():
            label = 'neutral'
        elif self.happy.GetValue():
            label = 'happy'
        elif self.sad.GetValue():
            label = 'sad'
        elif self.surprised.GetValue():
            label = 'surprised'
        elif self.angry.GetValue():
            label = 'angry'
        elif self.disgusted.GetValue():
            label = 'disgusted'

        if self.head is None:
            print "No face detected"
        else:
            success, head = self.faces.align_head(self.head)
            if success:
                print "Added sample to training set"
                self.samples.append(head.flatten())
                self.labels.append(label)
            else:
                print "Could not align head (eye detection failed?)"

    def _on_exit(self, evt):
        """Dumps the training data to file upon exiting"""
        # if we have collected some samples, dump them to file
        if len(self.samples) > 0:
            # make sure we don't overwrite an existing file
            if path.isfile(self.data_file):
                # file already exists, construct a new filename
                load_from_file, fileext = path.splitext(self.data_file)
                offset = 0
                while True:
                    file = load_from_file + "-" + str(offset) + fileext
                    if path.isfile(file):
                        offset += 1
                    else:
                        break
                self.data_file = file

            # dump samples and labels to file
            with open(self.data_file, 'wb') as f:
                pickle.dump(self.samples, f)
                pickle.dump(self.labels, f)

            # inform user that file was created
            print "Saved", len(self.samples), "samples to", self.data_file

        # deallocate
        self.Destroy()
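_on_exit above writes the samples and then the labels with two sequential pickle.dump calls on one file handle, so reading the training set back takes two pickle.load calls in the same order. A minimal sketch, assuming the default 'datasets/faces_training.pkl' path:

import pickle

with open('datasets/faces_training.pkl', 'rb') as f:
    samples = pickle.load(f)  # first object written: the flattened head images
    labels = pickle.load(f)   # second object written: the expression labels

print(len(samples), "samples,", len(labels), "labels")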