def process_facedetection(model_detector, model_poseestimator,
                          model_ageestimator, model_genderestimator,
                          cam_resolution, cam_index):  #model_emotionestimator

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    # try:
    # Initialize face detection
    face_detector = FaceDetector(model=model_detector,
                                 path=INPUT_DIR_MODEL_DETECTION,
                                 minfacesize=120)
    # Initialize face pose/age/gender estimation
    face_pose_estimator = FacePoseEstimator(model=model_poseestimator,
                                            path=INPUT_DIR_MODEL_ESTIMATION)
    face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                          path=INPUT_DIR_MODEL_ESTIMATION)
    face_gender_estimator = FaceGenderEstimator(
        model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    # face_emotion_estimator = FaceEmotionEstimator(model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    # except:
    #     print("Warning, check if models and trained dataset models exists!")
    # (age, gender, emotion) = (None, None, None)

    while True:

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            # emotion = face_emotion_estimator.estimate(frame, face_image)
            # label_face(frame, face)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            # face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            if True:  # set to False to disable the text overlay
                cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255),
                              1)
                # cv2.putText(frame, "Emotion: {}".format(emotion),
                #     (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 13:  # Enter
            cv2.imwrite(
                WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg",
                frame)

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
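A minimal driver sketch for this entry point, assuming the model enums and the RESOLUTION_QVGA constant shown in the later examples are available in the calling module (the chosen values are illustrative):

# Hypothetical invocation; the enum values mirror the defaults used in Example #2.
process_facedetection(model_detector=FaceDetectorModels.HAARCASCADE,
                      model_poseestimator=FacePoseEstimatorModels.DEFAULT,
                      model_ageestimator=FaceAgeEstimatorModels.DEFAULT,
                      model_genderestimator=FaceGenderEstimatorModels.DEFAULT,
                      cam_resolution=RESOLUTION_QVGA,
                      cam_index=0)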
Example #2
def process_facedetection():

    cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    model_detector = FaceDetectorModels.HAARCASCADE
    #    model_detector=FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    #    model_detector=FaceDetectorModels.MTCNN
    #    model_detector=FaceDetectorModels.FACENET

    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(
            model=model_detector,
            path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except Exception:
        print("Warning, check if the models and trained datasets exist!")
    (age, gender, emotion) = (None, None, None)

    while True:

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
               cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
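Since this variant yields multipart JPEG chunks instead of calling cv2.imshow, it is intended to back an MJPEG stream. A minimal sketch of serving it, assuming Flask is the web framework (the route path is hypothetical):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Each yielded chunk already carries its own --frame boundary and JPEG header,
    # so the response only needs the matching multipart mimetype.
    return Response(process_facedetection(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')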
Example #3
def process_facedetection(cam_resolution,
                          out_resolution,
                          framecount,
                          model_detector=0):

    from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
    from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
    from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
    from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator
    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(
        model=model_detector,
        path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)

    ###############################################################################
    # FACE POSE/AGE/GENDER/EMOTION ESTIMATION
    ###############################################################################
    # Initialize face pose/age/gender/emotion estimation
    if model_poseestimator is not None:
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_ageestimator is not None:
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
    if model_genderestimator is not None:
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_emotionestimator is not None:
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    (age, gender, emotion) = (None, None, None)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()

    while True:

        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Resize to QVGA so the RPi can sustain an acceptable fps
        if out_resolution is not None:
            frame = cv2.resize(frame, out_resolution)

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        # Detect faces and set bounding boxes
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE AGE/GENDER/EMOTION ESTIMATION
            ###############################################################################
            face_image = frame[y:y + h, x:x + w]
            if model_ageestimator is not None:
                age = face_age_estimator.estimate(frame, face_image)
            if model_genderestimator is not None:
                gender = face_gender_estimator.estimate(frame, face_image)
            if model_emotionestimator is not None:
                emotion = face_emotion_estimator.estimate(frame, face_image)

            ###############################################################################
            # FACE POSE ESTIMATION
            ###############################################################################
            # Detect and draw face pose locations
            if model_poseestimator is not None:
                shape = face_pose_estimator.detect(frame, face)
                face_pose_estimator.add_overlay(frame, shape)
            else:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255),
                              1)

            # Display age, gender, emotion
            if age is not None and gender is not None and emotion is not None:
                cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Update frame count
        fps_frames += 1
        if (framecount != 0 and fps_frames >= framecount):
            break

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:
            break

    # Set the fps
    fps = fps_frames / (time() - fps_start)

    # Release the camera
    cam_release(cap)

    return fps
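Because this variant stops after framecount frames and returns the measured fps, it doubles as a small benchmark harness. A sketch comparing detectors, assuming RESOLUTION_QVGA and the FaceDetectorModels enum from the other examples:

# Hypothetical benchmark: 100 frames per detector, no output resize.
for detector in (FaceDetectorModels.HAARCASCADE, FaceDetectorModels.SSDRESNET):
    fps = process_facedetection(RESOLUTION_QVGA, None, 100,
                                model_detector=detector)
    print("{}: {:.2f} fps".format(detector, fps))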
Example #4
def process_facerecognition_livenessdetection_poseagegenderemotion(
        cam_resolution,
        out_resolution,
        framecount,
        image=None,
        model_detector=0,
        model_recognizer=0):

    from libfaceid.liveness import FaceLivenessDetectorModels, FaceLiveness
    from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
    from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
    from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
    from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator
    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    if image is not None:
        cap = cv2.VideoCapture(image)
    else:
        cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(
        model=model_detector,
        path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)

    ###############################################################################
    # FACE RECOGNITION
    ###############################################################################
    # Initialize face recognizer
    face_encoder = FaceEncoder(model=model_recognizer,
                               path=INPUT_DIR_MODEL_ENCODING,
                               path_training=INPUT_DIR_MODEL_TRAINING,
                               training=False)

    ###############################################################################
    # EYE BLINKING DETECTOR
    ###############################################################################
    # Initialize detector for blinking eyes
    face_liveness = FaceLiveness(model=FaceLivenessDetectorModels.EYEBLINKING,
                                 path=INPUT_DIR_MODEL_ESTIMATION)
    face_liveness.initialize()
    (eye_counter, total_eye_blinks) = (0, 0)

    ###############################################################################
    # FACE POSE/AGE/GENDER/EMOTION ESTIMATION
    ###############################################################################
    # Initialize pose/age/gender/emotion estimation
    if model_poseestimator is not None:
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_ageestimator is not None:
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
    if model_genderestimator is not None:
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_emotionestimator is not None:
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    (age, gender, emotion) = (None, None, None)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()
    fps = 0
    saveVideo = False
    out = None

    while True:

        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            print("Unexpected error! {}".format(image))
            break

        ###############################################################################
        # FACE DETECTION and FACE RECOGNITION
        ###############################################################################
        # Detect and recognize each face in the frame

        # Resize to QVGA so the RPi can sustain an acceptable fps
        if out_resolution is not None:
            #frame = imutils.resize(frame, width=out_resolution[0])
            (h, w) = frame.shape[:2]
            frame = cv2.resize(
                frame,
                (out_resolution[0], int(h * out_resolution[0] / float(w))))

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE AGE/GENDER/EMOTION ESTIMATION
            ###############################################################################
            face_image = frame[y:y + h, x:x + w]
            if model_ageestimator is not None:
                age = face_age_estimator.estimate(frame, face_image)
            if model_genderestimator is not None:
                gender = face_gender_estimator.estimate(frame, face_image)
            if model_emotionestimator is not None:
                emotion = face_emotion_estimator.estimate(frame, face_image)

            ###############################################################################
            # FACE RECOGNITION
            ###############################################################################
            face_id, confidence = face_encoder.identify(frame, (x, y, w, h))

            ###############################################################################
            # EYE BLINKING DETECTION
            ###############################################################################
            total_eye_blinks, eye_counter = face_liveness.detect(
                frame, (x, y, w, h), total_eye_blinks, eye_counter)

            ###############################################################################
            # FACE POSE ESTIMATION
            ###############################################################################
            # Detect and draw face pose locations
            if model_poseestimator is not None:
                shape = face_pose_estimator.detect(frame, face)
                face_pose_estimator.add_overlay(frame, shape)

            # Display name, age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (20, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (20, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (20, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame,
                        "Name: {} [{:.2f}%]".format(face_id, confidence),
                        (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1, cv2.LINE_AA)

        ###############################################################################
        # EYE BLINKING DETECTION
        ###############################################################################
        cv2.putText(frame, "Blinks: {}".format(total_eye_blinks), (20, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Update frame count
        fps_frames += 1
        if (framecount != 0 and fps_frames >= framecount):
            break
        if (fps_frames % 30 == 29):
            fps = fps_frames / (time() - fps_start)
            fps_frames = 0
            fps_start = time()
        cv2.putText(frame, "FPS {:.2f}".format(fps), (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Save the frame to a video
        if saveVideo:
            out.write(frame)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 32:  # Space
            saveVideo, out = save_video(saveVideo, out, frame.shape[:2],
                                        WINDOW_NAME + ".avi")
        elif keyPressed == 13:  # Enter
            save_photo(
                frame, WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg")

    # Set the fps
    time_diff = time() - fps_start
    if time_diff:
        fps = fps_frames / time_diff

    if image is not None:
        cv2.waitKey(3000)

    if saveVideo:
        out.release()

    # Release the camera
    cam_release(cap)

    return fps
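Note that the image argument, when supplied, is passed straight to cv2.VideoCapture, so it can be a video file path rather than a live camera. A hedged invocation sketch (the file name is hypothetical):

# Hypothetical run against a recorded clip; framecount=0 means run to end of file.
fps = process_facerecognition_livenessdetection_poseagegenderemotion(
    RESOLUTION_QVGA, (320, 240), 0, image="sample.avi")
print("Processed at {:.2f} fps".format(fps))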
Example #5
def process_facedetection(model_detector, model_poseestimator,
                          model_ageestimator, model_genderestimator,
                          model_emotionestimator, cam_resolution, cam_index):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(
            model=model_detector,
            path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except Exception:
        print("Warning, check if the models and trained datasets exist!")
    (age, gender, emotion) = (None, None, None)

    while True:

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example #6
def process_facedetection():

    cam_index = int(args.webcam)
#    cam_index = 'tiv.mp4'

    cam_resolution = RESOLUTION_VGA
#    model_detector=FaceDetectorModels.HAARCASCADE
#    model_detector=FaceDetectorModels.DLIBHOG
    model_detector=FaceDetectorModels.DLIBCNN
#    model_detector=FaceDetectorModels.SSDRESNET
#    model_detector=FaceDetectorModels.MTCNN
#    model_detector=FaceDetectorModels.FACENET

    model_poseestimator=FacePoseEstimatorModels.DEFAULT
    model_ageestimator=FaceAgeEstimatorModels.DEFAULT
    model_genderestimator=FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator=FaceEmotionEstimatorModels.DEFAULT


    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector, path=INPUT_DIR_MODEL_DETECTION)#, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except:
        print("Warning, check if the models and trained datasets exist!")
    (age, gender, emotion) = (None, None, None)
    df = pd.DataFrame(columns=['Gender'])


    t0 = time.time()  # start of the 30-second capture window enforced below

    while True:

        # Capture frame from webcam
        ret, frame = camera.read()
        t1 = time.time() # current time
        num_seconds = t1 - t0 # diff

        if num_seconds > 30:  # e.g. break after 30 seconds
            break

        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y+h, x:x+w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), 
                (x, y-45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), 
                (x, y-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), 
                (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

            df = pd.concat([df, pd.DataFrame([{'Gender': gender}])],
                           ignore_index=True)


        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    
    df1 = df.groupby('Gender').size()
    data = df1.values.tolist()
    print(data)
    
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
    return data
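For reference, the list returned above holds the per-gender frame counts in alphabetical label order, since groupby sorts its keys by default; a small standalone illustration:

import pandas as pd

df = pd.DataFrame({'Gender': ['Male', 'Female', 'Male', 'Male']})
print(df.groupby('Gender').size().values.tolist())  # [1, 3]: Female=1, Male=3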