Example #1
def process_livenessdetection():

    cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    #    model_detector=FaceDetectorModels.HAARCASCADE
    model_detector = FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    #    model_detector=FaceDetectorModels.MTCNN
    #    model_detector=FaceDetectorModels.FACENET

    #    model_recognizer=FaceEncoderModels.LBPH
    #    model_recognizer=FaceEncoderModels.OPENFACE
    model_recognizer = FaceEncoderModels.DLIBRESNET
    #    model_recognizer=FaceEncoderModels.FACENET
    #liveness=FaceLivenessModels.EYESBLINK_MOUTHOPEN
    liveness = FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])
    check = int(
        input("Enter 1 to show live video, any other number to hide it: "))
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
        # Initialize face liveness detection
        face_liveness = FaceLiveness(
            model=FaceLivenessModels.EYESBLINK_MOUTHOPEN,
            path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        #face_encoder = None
        print("Warning: check if the models and trained dataset exist!")
        return
    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 1000  # monitor for this many seconds only
    is_fake_count_print = 0
    is_fake_count_replay = 0
    face_count = 0
    time_recognition = 5
    checkface = False

    print("Note: this will run for {} seconds only".format(runtime))

    while (time_elapsed < runtime):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        ## Detect and identify faces in the frame
        #faces = face_detector.detect(frame)
        #for (index, face) in enumerate(faces):
        #(x, y, w, h) = face
        ## Identify face based on trained dataset (note: should run facial_recognition_training.py first)
        #if face_encoder is not None:
        #face_id, confidence = face_encoder.identify(frame, (x, y, w, h))
        ## Set text and bounding box on face
        #label_face(frame, (x, y, w, h), face_id, confidence)

        # Process 1 face only
        #break
        # Detect and identify faces in the frame
        # Identify face based on trained dataset (note: should run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are closed and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            #print("eyes_close={}, eyes_ratio ={:.2f}".format(mouth_open, mouth_ratio))
            #print("mouth_open={}, mouth_ratio={:.2f}".format(mouth_open, mouth_ratio))

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify face only if it is not fake, eyes are open, and mouth is closed
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif is_fake_replay:
                is_fake_count_replay += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if (face_id not in identified_unique_faces) and (confidence > 50):
                    identified_unique_faces[face_id] = 1
                elif (face_id in identified_unique_faces) and (confidence > 50):
                    identified_unique_faces[face_id] += 1

            if (face_count > 100) or (face_id == "Fake"):
                face_count = 0
            elif (face_id != "Fake") and (confidence > 50):
                face_count += 1

            print("Identifying: {:.2f} %".format((face_count / 100) * 100))

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            break  # Process 1 face only

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
        time_elapsed = time() - time_start

        #cv2.imshow(WINDOW_NAME, frame)
        if face_count > 99:
            checkface = True
            break
        else:
            checkface = False

        # Display updated frame to web app
        if check == 1:
            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
                   cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')
    if checkface:
        print("Hello {}!!!".format(
            max(identified_unique_faces, key=identified_unique_faces.get)))
    else:
        print("Cannot identify your face, please try again!")
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
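
Since this generator yields multipart JPEG chunks, it is presumably consumed by a Flask streaming route. A minimal sketch, assuming a Flask app object and a route path that do not appear in the source:

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")  # hypothetical route path, not from the source
def video_feed():
    # Stream the frames yielded by the generator above as MJPEG
    return Response(process_livenessdetection(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")
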
Example #2
def process_livenessdetection(model_detector, model_recognizer, model_liveness,
                              cam_index, cam_resolution):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])
    check = int(
        input("Enter 1 to show live video, any other number to hide it: "))
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(
            model=FaceLivenessModels.EYESBLINK_MOUTHOPEN,
            path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error: check if the models and trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 60  # monitor for 60 seconds only
    is_fake_count_print = 0
    is_fake_count_replay = 0
    face_count = 0
    time_recognition = 5
    checkface = False

    print("Note: this will run for {} seconds only".format(runtime))
    while (time_elapsed < runtime):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        # Identify face based on trained dataset (note: should run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are closed and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            #print("eyes_close={}, eyes_ratio ={:.2f}".format(mouth_open, mouth_ratio))
            #print("mouth_open={}, mouth_ratio={:.2f}".format(mouth_open, mouth_ratio))

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify face only if it is not fake, eyes are open, and mouth is closed
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif is_fake_replay:
                is_fake_count_replay += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if (face_id not in identified_unique_faces) and (confidence > 50):
                    identified_unique_faces[face_id] = 1
                elif (face_id in identified_unique_faces) and (confidence > 50):
                    identified_unique_faces[face_id] += 1

            if (face_count > 100) or (face_id == "Fake"):
                face_count = 0
            elif (face_id != "Fake") and (confidence > 50):
                face_count += 1

            print("Identifying: {:.2f} %".format((face_count / 31) * 100))

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            break  # Process 1 face only

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
        time_elapsed = time() - time_start

        # Display updated frame
        if check == 1:
            cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break
        if face_count > 30:
            checkface = True
            break
        else:
            checkface = False

    print("Note: this will run for {} seconds only".format(runtime))

    # Determining if face is alive can depend on the following factors and more:
    time_elapsed = int(time() - time_start)
    print("\n")
    print("Face Liveness Data:")
    print("time_elapsed            = {}".format(time_elapsed)
          )  # recognition will run for specific time (ex. 3 seconds)
    print("frame_count             = {}".format(
        frame_count))  # can be used for averaging
    print("total_eye_blinks        = {}".format(
        total_eye_blinks))  # fake face if 0
    print("total_mouth_opens       = {}".format(
        total_mouth_opens))  # fake face if 0
    print("is_fake_count_print     = {}".format(
        is_fake_count_print))  # fake face if not 0
    print("is_fake_count_replay     = {}".format(
        is_fake_count_replay))  # fake face if not 0
    print("identified_unique_faces = {}".format(
        identified_unique_faces))  # fake face if recognized more than 1 face
    print("Todo: determine if face is alive using this data.")
    print("\n")
    if checkface:
        print("Hello {}!!!".format(
            max(identified_unique_faces, key=identified_unique_faces.get)))
    else:
        print("Cannot identify your face, please try again!")
    #print("{}".format(face_id))
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
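
A minimal invocation sketch for the parameterized variant above; the enum choices and QVGA resolution mirror Example #1 and are otherwise assumptions. Note that model_liveness is accepted but unused, since the body hardcodes both liveness models:

# Hedged usage sketch: the argument values below are assumptions.
process_livenessdetection(FaceDetectorModels.DLIBHOG,
                          FaceEncoderModels.DLIBRESNET,
                          FaceLivenessModels.COLORSPACE_YCRCBLUV,
                          cam_index=0,
                          cam_resolution=(320, 240))
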
Example #3
def login():
    windowname = "Result"
    INPUT_DIR_DATASET = "datasets"
    INPUT_DIR_MODEL_DETECTION = "models/detection/"
    INPUT_DIR_MODEL_ENCODING = "models/encoding/"
    INPUT_DIR_MODEL_TRAINING = "models/training/"
    INPUT_DIR_MODEL_ESTIMATION = "models/estimation/"
    INPUT_DIR_MODEL_LIVENESS = "models/liveness/"

    # Set width and height
    RESOLUTION_QVGA = (320, 240)

    #cap = cv2.VideoCapture(0)
    # cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    # detector = FaceDetectorModels.HAARCASCADE
    #    detector=FaceDetectorModels.DLIBHOG
    #    detector=FaceDetectorModels.DLIBCNN
    #    detector=FaceDetectorModels.SSDRESNET
    #    detector=FaceDetectorModels.MTCNN
    detector = FaceDetectorModels.FACENET

    # encoder = FaceEncoderModels.LBPH
    #    encoder=FaceEncoderModels.OPENFACE
    #    encoder=FaceEncoderModels.DLIBRESNET
    encoder = FaceEncoderModels.FACENET

    liveness = FaceLivenessModels.EYESBLINK_MOUTHOPEN
    # liveness=FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    #camera = cam_init(cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=encoder,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(model=liveness,
                                     path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error: check if the models and trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 10  # monitor for 10 seconds only
    is_fake_count_print = 0
    # print("Note: this will run for {} seconds only".format(runtime))
    while True:
        # Capture frame from webcam
        if flask.request.method == "POST":
            # image = request.get("image")
            # read the image in PIL format
            image = request.files["image"]
            print("image :", type(image))
            npimg = np.fromfile(image, np.uint8)
            print("npimg :", type(npimg))
            file = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
            print("file :", type(file))
            # pil_image = Image.open(image)
            #img = np.array(Image.open(io.BytesIO(image)))
            # save the image on server side
            # cv2.imwrite('saved_image/new.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

            frame = file
            if frame is None:
                print("Error, check if the uploaded image is valid!")
                break

            # Detect and identify faces in the frame
            # Identify face based on trained dataset (note: should run facial_recognition_training.py first)
            faces = face_detector.detect(frame)
            for (index, face) in enumerate(faces):

                # Check if eyes are closed and if mouth is open
                eyes_close, eyes_ratio = face_liveness.is_eyes_close(
                    frame, face)
                mouth_open, mouth_ratio = face_liveness.is_mouth_open(
                    frame, face)
                print("eyes_close={}, eyes_ratio ={:.2f}".format(
                    mouth_open, mouth_ratio))
                print("mouth_open={}, mouth_ratio={:.2f}".format(
                    mouth_open, mouth_ratio))
                # print("confidence: " , confidence)

                # Detect if frame is a print attack or replay attack based on colorspace
                is_fake_print = face_liveness2.is_fake(frame, face)
                # is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

                # Identify face only if it is not fake, eyes are open, and mouth is closed
                if is_fake_print:
                    is_fake_count_print += 1
                    face_id, confidence = ("Fake", None)
                elif not eyes_close and not mouth_open:
                    face_id, confidence = face_encoder.identify(frame, face)
                    if face_id not in identified_unique_faces:
                        identified_unique_faces[face_id] = 1
                    else:
                        identified_unique_faces[face_id] += 1

                label_face(frame, face, face_id,
                           confidence)  # Set text and bounding box on face
                #cv2.imshow(windowname,frame)
                #cv2.waitKey(1)
                conf = confidence
                id = face_id
                print("confidence :", confidence)
                print("faceid :", face_id)
                '''if face_id in identified_unique_faces:
                    return render_template('success.html')
                else:
                    return render_template('result_F.html')'''
                '''POST_USERNAME = str(request.form['name'])
                if POST_USERNAME==id:
                    return render_template('success.html')
                else:
                    return render_template('result_F.html')'''
                #yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
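
Since login() reads the upload via request.files["image"], a client would POST a multipart form. A minimal client sketch with the requests library; the endpoint URL and image path are assumptions, as the route registration is not shown:

import requests

# Hypothetical endpoint URL and test image path.
with open("face.jpg", "rb") as f:
    response = requests.post("http://localhost:5000/login",
                             files={"image": f})
print(response.status_code)
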
Example #4
def process_facerecognition_livenessdetection_poseagegenderemotion(
        cam_resolution,
        out_resolution,
        framecount,
        image=None,
        model_detector=0,
        model_recognizer=0):

    from libfaceid.liveness import FaceLivenessDetectorModels, FaceLiveness
    from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
    from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
    from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
    from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator
    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera, or open the given image/video file
    if image is not None:
        cap = cv2.VideoCapture(image)
    else:
        cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(
        model=model_detector,
        path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)

    ###############################################################################
    # FACE RECOGNITION
    ###############################################################################
    # Initialize face recognizer
    face_encoder = FaceEncoder(model=model_recognizer,
                               path=INPUT_DIR_MODEL_ENCODING,
                               path_training=INPUT_DIR_MODEL_TRAINING,
                               training=False)

    ###############################################################################
    # EYE BLINKING DETECTOR
    ###############################################################################
    # Initialize detector for blinking eyes
    face_liveness = FaceLiveness(model=FaceLivenessDetectorModels.EYEBLINKING,
                                 path=INPUT_DIR_MODEL_ESTIMATION)
    face_liveness.initialize()
    (eye_counter, total_eye_blinks) = (0, 0)

    ###############################################################################
    # FACE POSE/AGE/GENDER/EMOTION ESTIMATION
    ###############################################################################
    # Initialize pose/age/gender/emotion estimation
    if model_poseestimator is not None:
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_ageestimator is not None:
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
    if model_genderestimator is not None:
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_emotionestimator is not None:
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    (age, gender, emotion) = (None, None, None)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()
    fps = 0
    saveVideo = False
    out = None

    while True:

        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            print("Unexpected error! {}".format(image))
            break

        ###############################################################################
        # FACE DETECTION and FACE RECOGNITION
        ###############################################################################
        # Detect and recognize each face in the images

        # Resize to QVGA so that we can get an acceptable fps on an RPI
        if out_resolution is not None:
            #frame = imutils.resize(frame, width=out_resolution[0])
            (h, w) = frame.shape[:2]
            frame = cv2.resize(
                frame,
                (out_resolution[0], int(h * out_resolution[0] / float(w))))

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE AGE/GENDER/EMOTION ESTIMATION
            ###############################################################################
            face_image = frame[y:y + h, x:x + w]
            if model_ageestimator is not None:
                age = face_age_estimator.estimate(frame, face_image)
            if model_genderestimator is not None:
                gender = face_gender_estimator.estimate(frame, face_image)
            if model_emotionestimator is not None:
                emotion = face_emotion_estimator.estimate(frame, face_image)

            ###############################################################################
            # FACE RECOGNITION
            ###############################################################################
            face_id, confidence = face_encoder.identify(frame, (x, y, w, h))

            ###############################################################################
            # EYE BLINKING DETECTION
            ###############################################################################
            total_eye_blinks, eye_counter = face_liveness.detect(
                frame, (x, y, w, h), total_eye_blinks, eye_counter)

            ###############################################################################
            # FACE POSE ESTIMATION
            ###############################################################################
            # Detect and draw face pose locations
            if model_poseestimator is not None:
                shape = face_pose_estimator.detect(frame, face)
                face_pose_estimator.add_overlay(frame, shape)

            # Display name, age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (20, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (20, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (20, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame,
                        "Name: {} [{:.2f}%]".format(face_id, confidence),
                        (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1, cv2.LINE_AA)

        ###############################################################################
        # EYE BLINKING DETECTION
        ###############################################################################
        cv2.putText(frame, "Blinks: {}".format(total_eye_blinks), (20, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Update frame count
        fps_frames += 1
        if (framecount != 0 and fps_frames >= framecount):
            break
        if (fps_frames % 30 == 29):
            fps = fps_frames / (time() - fps_start)
            fps_frames = 0
            fps_start = time()
        cv2.putText(frame, "FPS {:.2f}".format(fps), (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Save the frame to a video
        if saveVideo:
            out.write(frame)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 32:  # Space
            saveVideo, out = save_video(saveVideo, out, frame.shape[:2],
                                        WINDOW_NAME + ".avi")
        elif keyPressed == 13:  # Enter
            save_photo(
                frame, WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg")

    # Set the fps
    time_diff = time() - fps_start
    if time_diff:
        fps = fps_frames / time_diff

    if image is not None:
        cv2.waitKey(3000)

    if saveVideo:
        out.release()

    # Release the camera
    cam_release(cap)

    return fps
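
A minimal call sketch for the function above; all argument values are assumptions. Passing framecount=0 runs until ESC is pressed, per the loop's exit condition:

# Hedged call sketch: the values below are assumptions, not from the source.
fps = process_facerecognition_livenessdetection_poseagegenderemotion(
    cam_resolution=(320, 240),  # capture at QVGA
    out_resolution=(320, 240),  # resize frames to QVGA width
    framecount=0,               # 0 = no frame limit; exit with ESC
    model_detector=FaceDetectorModels.HAARCASCADE,
    model_recognizer=FaceEncoderModels.LBPH)
print("Average FPS: {:.2f}".format(fps))
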
Example #5
def login():
    INPUT_DIR_DATASET = "datasets"
    INPUT_DIR_MODEL_DETECTION = "models/detection/"
    INPUT_DIR_MODEL_ENCODING = "models/encoding/"
    INPUT_DIR_MODEL_TRAINING = "models/training/"
    INPUT_DIR_MODEL_ESTIMATION = "models/estimation/"
    INPUT_DIR_MODEL_LIVENESS = "models/liveness/"

    # Set width and height
    RESOLUTION_QVGA = (320, 240)

    cap = cv2.VideoCapture(0)
    # cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    # detector = FaceDetectorModels.HAARCASCADE
    #    detector=FaceDetectorModels.DLIBHOG
    #    detector=FaceDetectorModels.DLIBCNN
    #    detector=FaceDetectorModels.SSDRESNET
    #    detector=FaceDetectorModels.MTCNN
    detector = FaceDetectorModels.FACENET

    # encoder = FaceEncoderModels.LBPH
    #    encoder=FaceEncoderModels.OPENFACE
    #    encoder=FaceEncoderModels.DLIBRESNET
    encoder = FaceEncoderModels.FACENET

    liveness = FaceLivenessModels.EYESBLINK_MOUTHOPEN
    # liveness=FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    #camera = cam_init(cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=encoder,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(model=liveness,
                                     path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error: check if the models and trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 10  # monitor for 10 seconds only
    is_fake_count_print = 0
    # print("Note: this will run for {} seconds only".format(runtime))
    while True:
        # Capture frame from webcam
        ret, frame = cap.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        # Identify face based on trained dataset (note: should run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are closed and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            print("eyes_close={}, eyes_ratio ={:.2f}".format(
                mouth_open, mouth_ratio))
            print("mouth_open={}, mouth_ratio={:.2f}".format(
                mouth_open, mouth_ratio))
            # print("confidence: " , confidence)

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            # is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify face only if it is not fake, eyes are open, and mouth is closed
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if face_id not in identified_unique_faces:
                    identified_unique_faces[face_id] = 1
                else:
                    identified_unique_faces[face_id] += 1

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            conf = confidence
            print("confidence :", confidence)
            print("faceid :", face_id)
            if conf is not None and conf >= 90:
                #return 'Recognized'
                return render_template('success.html')
            else:
                #return 'Not recognized'
                return render_template('dashboard.html')

        #yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)
        # Update frame count
        frame_count += 1

    # Release the camera
    cap.release()
    cv2.destroyAllWindows()
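
The login() view above returns render_template results, so it is presumably registered on a Flask app. A minimal registration sketch; the app object, route path, and template files are assumptions:

from flask import Flask

app = Flask(__name__)
# Hypothetical route path; success.html and dashboard.html must exist
# in the templates/ folder for render_template to succeed.
app.add_url_rule("/login", view_func=login, methods=["GET", "POST"])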