Example #1
    def get_frame(self):

        # Make sure the LBPH recognizer has been trained before
        # opening the database or doing any recognition
        fname = "recognizer/trainingData.yml"
        if not os.path.isfile(fname):
            print("\nPlease train the data first\n")
            return None

        conn = sqlite3.connect('database.db')
        c = conn.cursor()

        # Model for facial expression recognition (loading the models
        # on every frame is expensive; in practice they would be
        # created once and reused across calls)
        model = FacialExpressionModel("model.json", "model_weights.h5")

        # Face detector and LBPH face recognizer
        face_detector = mtcnn.MTCNN()
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read(fname)

        ok, img = self.video.read()
        if not ok:
            conn.close()
            return None
        faces = face_detector.detect_faces(img)

        for res in faces:
            x, y, w, h = res["box"]
            # MTCNN can return slightly negative box coordinates;
            # clamp them to the image instead of mirroring with abs()
            x, y = max(x, 0), max(y, 0)
            x1, y1 = x+w, y+h
            image = img[y:y1, x:x1]
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Recognize the face. LBPH's "confidence" is a distance,
            # so lower values mean a closer match.
            ids, conf = recognizer.predict(image)
            c.execute("select roll_no from users where id = (?);", (ids,))
            result = c.fetchall()
            try:
                roll_no = result[0][0]
            except IndexError:
                roll_no = 'Error'

            if conf > 50:
                roll_no = "No Match"

            image2 = cv2.resize(image, (48, 48))

            # Predicting Expression
            pred = model.predict_emotion(image2[np.newaxis, :, :, np.newaxis])

            msg = pred + " " + roll_no

            # Mark attendance only for a recognized face
            if roll_no != "Error" and roll_no != "No Match":
                marked = count(roll_no, pred)
                if marked:
                    Attendance(roll_no)
                    msg = "MARKED"

            cv2.putText(img, msg, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (255, 255, 0), 2)
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)

        conn.close()
        _, jpeg = cv2.imencode('.jpg', img)
        return jpeg.tobytes()
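
All three examples build FacialExpressionModel("model.json", "model_weights.h5") and feed predict_emotion a (1, 48, 48, 1) grayscale crop, but the class itself is never shown. A minimal sketch of what it plausibly looks like, assuming a Keras model serialized with model.to_json() plus saved weights and the usual seven FER-2013 labels (the label order is an assumption and must match training):

import numpy as np
from tensorflow.keras.models import model_from_json

class FacialExpressionModel:
    # Assumed label order; must match how the model was trained
    EMOTIONS = ["Angry", "Disgust", "Fear", "Happy",
                "Sad", "Surprise", "Neutral"]

    def __init__(self, model_json_file, model_weights_file):
        with open(model_json_file, "r") as f:
            self.model = model_from_json(f.read())
        self.model.load_weights(model_weights_file)

    def predict_emotion(self, img):
        # img: batch of shape (1, 48, 48, 1), grayscale
        preds = self.model.predict(img)
        return self.EMOTIONS[int(np.argmax(preds))]

Note that Example #2 unpacks two values from predict_emotion, so its variant of the class presumably returns the probability vector alongside the label (e.g. return label, preds).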
Example #2
# `facec` is assumed to be a Haar cascade loaded once at module level, e.g.
# facec = cv2.CascadeClassifier(
#     cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
def get_frame(our_image):
    model = FacialExpressionModel("model.json", "model_weights.h5")
    # PIL gives RGB; convert to BGR so the array matches OpenCV's
    # channel order before drawing and grayscale conversion
    image = np.array(our_image.convert("RGB"))
    new_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    gray_fr = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)
    faces = facec.detectMultiScale(gray_fr, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        fc = gray_fr[y:y + h, x:x + w]

        roi = cv2.resize(fc, (48, 48))
        pred, probs = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

        cv2.putText(new_image, pred, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (255, 255, 0), 2)
        cv2.rectangle(new_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # No faces found: return empty probabilities
    if len(faces) == 0:
        probs = []
    return new_image, faces, probs
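
This variant takes a PIL image rather than a video capture, the pattern used by Streamlit-style demo apps. A hypothetical call site (the file name is made up) could look like:

from PIL import Image

img = Image.open("group_photo.jpg")
annotated, faces, probs = get_frame(img)
print("%d face(s) detected" % len(faces))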
Example #3
    # loop over the face detections
    for rect in rectangles:

        # face detection with emotion label
        cv2.rectangle(frame, (rect.left(), rect.top()),
                      (rect.right(), rect.bottom()), (255, 0, 0), 2)
        roi_gray = gray[rect.top():rect.bottom(), rect.left():rect.right()]

        # rescale to the 48x48 input size the model expects
        try:
            roi = cv2.resize(roi_gray, (48, 48))
        except cv2.error as e:
            # skip this face if the ROI is empty or invalid;
            # otherwise `roi` would be undefined below
            print(str(e))
            continue

        # our prediction
        pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

        # facial expression label
        cv2.putText(frame, pred, (rect.left(), rect.top()),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

        # determine the facial landmarks for the face region
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
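
The snippet calls eye_aspect_ratio but does not define it. A common implementation, following the standard EAR formula used with dlib's 68-point landmarks (lStart:lEnd and rStart:rEnd would come from face_utils.FACIAL_LANDMARKS_IDXS), is:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # eye: six (x, y) landmark points around one eye, in dlib order
    A = dist.euclidean(eye[1], eye[5])  # vertical distance
    B = dist.euclidean(eye[2], eye[4])  # vertical distance
    C = dist.euclidean(eye[0], eye[3])  # horizontal distance
    return (A + B) / (2.0 * C)

The ratio drops sharply when the eye closes, which is why EARs are typically averaged across both eyes and thresholded for blink or drowsiness detection.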