Example no. 1
def process_facerecognition():

    cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    model_detector = FaceDetectorModels.HAARCASCADE
    #    model_detector=FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    #    model_detector=FaceDetectorModels.MTCNN
    model_recognizer = FaceEncoderModels.LBPH
    #    model_recognizer=FaceEncoderModels.OPENFACE
    #    model_recognizer=FaceEncoderModels.DLIBRESNET

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
    except Exception:
        face_encoder = None
        print("Warning, check if the models and the trained dataset exist!")
    face_id, confidence = (None, 0)

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: run facial_recognition_training.py first)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(
                    frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)

        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
               cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example no. 2
def process_facerecognition(model_detector, model_recognizer, cam_index,
                            cam_resolution):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
    except Exception:
        face_encoder = None
        print("Warning, check if the models and the trained dataset exist!")
    face_id, confidence = (None, 0)

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: run facial_recognition_training.py first)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(
                    frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)

            # Process 1 face only
            break

        # Display updated frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
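The examples above call a cam_init() helper that is not shown on this page. A minimal sketch, assuming it simply wraps OpenCV's VideoCapture and requests the given resolution (the real helper may differ; Example no. 19 below calls a two-argument variant):

import cv2

def cam_init(cam_index, width, height):
    # Open the capture device and request the desired frame size
    camera = cv2.VideoCapture(cam_index)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return camera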
Example no. 3
def train_recognition(model_detector, model_encoder, model_classifier,
                      face_embeddings_path, verify):

    ensure_directory(config.INPUT_DIR_DATASET)

    print("")
    names = get_dataset_names(config.INPUT_DIR_DATASET)
    if names is not None:
        print("Names " + str(names))
        for name in names:
            for (_d, _n,
                 files) in os.walk(config.INPUT_DIR_DATASET + "/" + name):
                print(name + ": " + str(files))
    print("")

    ensure_directory(config.INPUT_DIR_MODEL_TRAINING)
    face_detector = FaceDetector(model=model_detector,
                                 path=config.INPUT_DIR_MODEL_DETECTION)
    face_encoder = FaceEncoder(model=model_encoder,
                               path=config.INPUT_DIR_MODEL_ENCODING,
                               path_training=config.INPUT_DIR_MODEL_TRAINING,
                               training=True)
    face_encoder.train(face_detector,
                       path_dataset=face_embeddings_path,
                       verify=verify,
                       classifier=model_classifier)
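train_recognition() also relies on ensure_directory() and get_dataset_names(), which are not shown here. Plausible sketches, assuming the dataset folder holds one subdirectory per person (the actual implementations may differ):

import os

def ensure_directory(path):
    # Create the directory if it does not exist yet
    if not os.path.exists(path):
        os.makedirs(path)

def get_dataset_names(path):
    # Each subdirectory of the dataset folder is one person's name
    if not os.path.exists(path):
        return None
    return [name for name in os.listdir(path)
            if os.path.isdir(os.path.join(path, name))]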
Example no. 4
def process_facerecognition(model_detector, model_recognizer, image):

    # Open the input image (VideoCapture also handles single image files)
    capture = cv2.VideoCapture(image)

    # Initialize face detection
    face_detector = FaceDetector(model=model_detector,
                                 path=INPUT_DIR_MODEL_DETECTION)

    # Initialize face recognizer
    try:
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
    except Exception as ex:
        face_encoder = None
        print("Warning, check if models and trained dataset models exists!")
        print(ex)
    face_id, confidence = (None, 0)

    # Capture the frame
    ret, frame = capture.read()
    if not ret:
        print("Unexpected error! " + image)
        return
    # frame = cv2.imread(image)

    # Detect faces in the image
    faces = face_detector.detect(frame)
    for (index, face) in enumerate(faces):
        (x, y, w, h) = face
        # Identify face based on trained dataset (note: run facial_recognition_training.py first)
        if face_encoder is not None:
            face_id, confidence = face_encoder.identify(frame, (x, y, w, h))
            print(face_id)
        # Set text and bounding box on face
        label_face(frame, (x, y, w, h), face_id, confidence)

    # Display the resulting frame
    cv2.imshow(WINDOW_NAME, frame)
    cv2.waitKey(1)

    # Release the capture
    capture.release()
    cv2.destroyAllWindows()
Example no. 5
def train_recognition(model_detector, model_encoder, model_classifier, verify):

    ensure_directory(INPUT_DIR_DATASET)
    ensure_directory(INPUT_DIR_MODEL_TRAINING)
    face_detector = FaceDetector(model=model_detector,
                                 path=INPUT_DIR_MODEL_DETECTION)
    face_encoder = FaceEncoder(model=model_encoder,
                               path=INPUT_DIR_MODEL_ENCODING,
                               path_training=INPUT_DIR_MODEL_TRAINING,
                               training=True)
    face_encoder.train(face_detector,
                       path_dataset=INPUT_DIR_DATASET,
                       verify=verify,
                       classifier=model_classifier)
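A hypothetical invocation of this training routine, reusing the enum members from Example no. 1 (passing None as the classifier is only an assumption for encoders that ignore it):

train_recognition(FaceDetectorModels.HAARCASCADE,
                  FaceEncoderModels.LBPH,
                  None,  # model_classifier: assumed unused by LBPH
                  verify=False)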
Example no. 6
def video_to_images(model_detector, dir, name, one_image_only=False):

    ensure_directory(dir + "/" + name + "/")

    video = cv2.VideoCapture(WINDOW_NAME + ".avi")
    if not video.isOpened():
        return

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
    except Exception:
        print("Warning, check if the models and the trained dataset exist!")
        return

    i = 1
    while (True):

        ret, frame = video.read()
        if frame is None:
            break

        faces = face_detector.detect(frame)
        if len(faces) == 1:
            cv2.imwrite("{}/{}/{}.jpg".format(dir, name, i), frame)
            i += 1
            if one_image_only:
                break

        #cv2.imshow(WINDOW_NAME, frame)
        #cv2.waitKey(1)

    video.release()
    cv2.destroyAllWindows()
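A hypothetical call that pairs this helper with the enrollment example (Example no. 17 below records WINDOW_NAME + ".avi"); the dataset layout follows the training examples:

video_to_images(FaceDetectorModels.HAARCASCADE, "datasets", "John")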
Example no. 7
def init_model(int_detect=1, int_encode=3):
    # Only for debugging while developing
    try:
        # Initialize face detection
        global face_recognizer, face_detector, face_encoder
        face_detector = FaceDetector(model=FaceDetectorModels(int_detect),
                                     path=join(ROOT_DIR,
                                               INPUT_DIR_MODEL_DETECTION))
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=FaceEncoderModels(int_encode),
                                   path=join(ROOT_DIR,
                                             INPUT_DIR_MODEL_ENCODING),
                                   path_training=join(
                                       ROOT_DIR, INPUT_DIR_MODEL_TRAINING),
                                   training=False)
        face_recognizer = FaceRecognizer(face_embeddings_path=join(
            ROOT_DIR, EMBEDDINGS_DIR + '/3_face_encodings_83.pickle'))
    except Exception as ex:
        face_encoder = None
        print(ex)
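Nearly every example calls label_face() to draw the detection result. A minimal sketch of what it presumably does (the library's actual drawing code may differ):

import cv2

def label_face(frame, rect, face_id=None, confidence=None):
    (x, y, w, h) = rect
    # Draw the bounding box around the detected face
    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 1)
    # Draw the name and, when available, the confidence above the box
    if face_id is not None:
        if confidence is not None:
            text = "{} {:.0f}%".format(face_id, confidence)
        else:
            text = str(face_id)
        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (255, 255, 255), 1, cv2.LINE_AA)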
Example no. 8
def process_facedetection():

    cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    model_detector = FaceDetectorModels.HAARCASCADE
    #    model_detector=FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    #    model_detector=FaceDetectorModels.MTCNN
    #    model_detector=FaceDetectorModels.FACENET

    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(
            model=model_detector,
            path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except Exception:
        print("Warning, check if the models and the trained dataset exist!")
        return
    (age, gender, emotion) = (None, None, None)

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
               cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
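Generators like process_facedetection() that yield multipart JPEG chunks are typically served as an MJPEG stream. A hypothetical Flask route showing the usual wiring (the route name is an assumption):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_viewer')
def video_viewer():
    # Each yielded chunk becomes one frame of the MJPEG stream
    return Response(process_facedetection(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')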
Example no. 9
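This example waits on a module-level flag that a callback flips when SpeechRecognizer hears one of TRIGGER_WORDS. The wiring below is an assumption that matches the names used in the function (the callback signature is a guess):

trigger_word_detected = False

def speech_recognizer_callback(word):
    # Invoked by SpeechRecognizer when a trigger word is detected
    global trigger_word_detected
    trigger_word_detected = True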
def process_facerecognition(model_detector, model_recognizer,
                            model_speech_synthesizer, model_speech_recognizer,
                            cam_index, cam_resolution):

    # Initialize speech-to-text (speech recognizer) for voice-activated capability
    # (wake-word/hot-word/trigger-word detection), then wait for a trigger word
    # before starting face recognition
    speech_recognizer = SpeechRecognizer(model=model_speech_recognizer,
                                         path=None)
    print("\nWaiting for a trigger word: {}".format(TRIGGER_WORDS))
    speech_recognizer.start(TRIGGER_WORDS, speech_recognizer_callback)
    global trigger_word_detected
    try:
        while not trigger_word_detected:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    speech_recognizer.stop()
    speech_recognizer = None

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize text-to-speech (speech synthesizer) for voice-enabled capability
        speech_synthesizer = SpeechSynthesizer(model=model_speech_synthesizer,
                                               path=None,
                                               path_output=None,
                                               training=False)

    except Exception:
        face_encoder = None
        speech_synthesizer = None
        print("Warning, check if the models and the trained dataset exist!")
    face_id, confidence = (None, 0)

    # Start face recognition
    frame_count = 0
    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: run facial_recognition_training.py first)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(
                    frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)

            # Play the audio file corresponding to the recognized name
            if speech_synthesizer is not None and (frame_count % 30 == 0):
                if len(faces) == 1 and (face_id is not None) and (face_id !=
                                                                  "Unknown"):
                    speech_synthesizer.playaudio(INPUT_DIR_AUDIOSET,
                                                 face_id,
                                                 block=False)

            # Process 1 face only
            break

        # Display updated frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

        frame_count += 1

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example no. 10
def process_facerecognition():

    cam_index = 0
    #    cam_index = "http://192.168.0.105:8000/video"
    #    cam_index = int(args.webcam)

    cam_resolution = RESOLUTION_VGA
    #    model_detector=FaceDetectorModels.HAARCASCADE
    #    model_detector=FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    model_detector = FaceDetectorModels.MTCNN
    #    model_detector=FaceDetectorModels.FACENET

    #    model_recognizer=FaceEncoderModels.LBPH
    #    model_recognizer=FaceEncoderModels.OPENFACE
    #    model_recognizer=FaceEncoderModels.DLIBRESNET
    model_recognizer = FaceEncoderModels.FACENET

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
    except Exception:
        face_encoder = None
        print("Warning, check if the models and the trained dataset exist!")
    face_id, confidence = (None, 0)
    df = pd.DataFrame(columns=['Employee'])
    g = []

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: should run facial_recognition_training.py)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(
                    frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)

            # Process all faces (the early break is intentionally disabled)

            # pandas>=2.0 removed DataFrame.append; use pd.concat instead
            df = pd.concat([df, pd.DataFrame([{'Employee': face_id}])],
                           ignore_index=True)
            df1 = df.groupby('Employee').size()
            g = df1.values.tolist()
            #print(g)

        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
               cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
    return g
Example no. 11
def process_livenessdetection(model_detector, model_recognizer, model_liveness,
                              cam_index, cam_resolution):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])
    # Ask whether to display the live video
    check = int(input("Enter 1 to show the live video, 0 to hide it: "))
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(
            model=FaceLivenessModels.EYESBLINK_MOUTHOPEN,
            path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error, check if the models and the trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 60  # monitor for 60 seconds only
    is_fake_count_print = 0
    is_fake_count_replay = 0
    face_count = 0
    time_recognition = 5
    checkface = False

    print("Note: this will run for {} seconds only".format(runtime))
    while (time_elapsed < runtime):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        # (identification uses the trained dataset; run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are close and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            #print("eyes_close={}, eyes_ratio ={:.2f}".format(mouth_open, mouth_ratio))
            #print("mouth_open={}, mouth_ratio={:.2f}".format(mouth_open, mouth_ratio))

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify face only if it is not fake and eyes are open and mouth is close
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif is_fake_replay:
                is_fake_count_replay += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if confidence > 50:
                    if face_id not in identified_unique_faces:
                        identified_unique_faces[face_id] = 1
                    else:
                        identified_unique_faces[face_id] += 1

            if (face_count > 100) or (face_id == "Fake"):
                face_count = 0
            elif (face_id != "Fake") and (confidence > 50):
                face_count += 1

            print("Identifying: {:.2f} %".format((face_count / 31) * 100))

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            break  # Process 1 face only

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
        time_elapsed = time() - time_start

        # Display updated frame
        if check == 1:
            cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break
        if face_count > 30:
            checkface = True
            break
        else:
            checkface = False

    print("Note: this will run for {} seconds only".format(runtime))

    # Determining if face is alive can depend on the following factors and more:
    time_elapsed = int(time() - time_start)
    print("\n")
    print("Face Liveness Data:")
    print("time_elapsed            = {}".format(time_elapsed)
          )  # recognition will run for specific time (ex. 3 seconds)
    print("frame_count             = {}".format(
        frame_count))  # can be used for averaging
    print("total_eye_blinks        = {}".format(
        total_eye_blinks))  # fake face if 0
    print("total_mouth_opens       = {}".format(
        total_mouth_opens))  # fake face if 0
    print("is_fake_count_print     = {}".format(
        is_fake_count_print))  # fake face if not 0
    print("is_fake_count_replay     = {}".format(
        is_fake_count_replay))  # fake face if not 0
    print("identified_unique_faces = {}".format(
        identified_unique_faces))  # fake face if recognized more than 1 face
    print("Todo: determine if face is alive using this data.")
    print("\n")
    if checkface:
        print("Hello {}!!!".format(
            max(identified_unique_faces, key=identified_unique_faces.get)))
    else:
        print("Cannot identify your face, please try again!")
    #print("{}".format(face_id))
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example no. 12
def process_facedetection(model_detector, model_poseestimator,
                          model_ageestimator, model_genderestimator,
                          model_emotionestimator, cam_resolution, cam_index):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(
            model=model_detector,
            path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except Exception:
        print("Warning, check if the models and the trained dataset exist!")
        return
    (age, gender, emotion) = (None, None, None)

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example no. 13
def login():
    windowname = "Result"
    INPUT_DIR_DATASET = "datasets"
    INPUT_DIR_MODEL_DETECTION = "models/detection/"
    INPUT_DIR_MODEL_ENCODING = "models/encoding/"
    INPUT_DIR_MODEL_TRAINING = "models/training/"
    INPUT_DIR_MODEL_ESTIMATION = "models/estimation/"
    INPUT_DIR_MODEL_LIVENESS = "models/liveness/"

    # Set width and height
    RESOLUTION_QVGA = (320, 240)

    #cap = cv2.VideoCapture(0)
    # cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    # detector = FaceDetectorModels.HAARCASCADE
    #    detector=FaceDetectorModels.DLIBHOG
    #    detector=FaceDetectorModels.DLIBCNN
    #    detector=FaceDetectorModels.SSDRESNET
    #    detector=FaceDetectorModels.MTCNN
    detector = FaceDetectorModels.FACENET

    # encoder = FaceEncoderModels.LBPH
    #    encoder=FaceEncoderModels.OPENFACE
    #    encoder=FaceEncoderModels.DLIBRESNET
    encoder = FaceEncoderModels.FACENET

    liveness = FaceLivenessModels.EYESBLINK_MOUTHOPEN
    # liveness=FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    #camera = cam_init(cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=encoder,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(model=liveness,
                                     path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error, check if the models and the trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 10  # monitor for 10 seconds only
    is_fake_count_print = 0
    # print("Note: this will run for {} seconds only".format(runtime))
    while (True):
        # Capture frame from webcam
        if flask.request.method == "POST":
            # image = request.get("image")
            # read the image in PIL format
            image = request.files["image"]
            print("image :", type(image))
            npimg = np.fromfile(image, np.uint8)
            print("npimg :", type(npimg))
            file = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
            print("file :", type(file))
            # pil_image = Image.open(image)
            #img = np.array(Image.open(io.BytesIO(image)))
            # save the image on server side
            # cv2.imwrite('saved_image/new.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

            frame = file
            if frame is None:
                print("Error, check if camera is connected!")
                break

            # Detect and identify faces in the frame
            # (identification uses the trained dataset; run facial_recognition_training.py first)
            faces = face_detector.detect(frame)
            for (index, face) in enumerate(faces):

                # Check if eyes are close and if mouth is open
                eyes_close, eyes_ratio = face_liveness.is_eyes_close(
                    frame, face)
                mouth_open, mouth_ratio = face_liveness.is_mouth_open(
                    frame, face)
                print("eyes_close={}, eyes_ratio ={:.2f}".format(
                    mouth_open, mouth_ratio))
                print("mouth_open={}, mouth_ratio={:.2f}".format(
                    mouth_open, mouth_ratio))
                # print("confidence: " , confidence)

                # Detect if frame is a print attack or replay attack based on colorspace
                is_fake_print = face_liveness2.is_fake(frame, face)
                # is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

                # Identify face only if it is not fake and eyes are open and mouth is close
                if is_fake_print:
                    is_fake_count_print += 1
                    face_id, confidence = ("Fake", None)
                elif not eyes_close and not mouth_open:
                    face_id, confidence = face_encoder.identify(frame, face)
                    if face_id not in identified_unique_faces:
                        identified_unique_faces[face_id] = 1
                    else:
                        identified_unique_faces[face_id] += 1

                label_face(frame, face, face_id,
                           confidence)  # Set text and bounding box on face
                #cv2.imshow(windowname,frame)
                #cv2.waitKey(1)
                conf = confidence
                id = face_id
                print("confidence :", confidence)
                print("faceid :", face_id)
                '''if face_id in identified_unique_faces:
                    return render_template('success.html')
                else:
                    return render_template('result_F.html')'''
                '''POST_USERNAME = str(request.form['name'])
                if POST_USERNAME==id:
                    return render_template('success.html')
                else:
                    return render_template('result_F.html')'''
                #yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
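The liveness examples (no. 11, 13 and 15) call monitor_eye_blinking() and monitor_mouth_opening(), which are not shown on this page. Plausible sketches, assuming an event is counted after N consecutive closed/open frames (the ratio arguments are unused in this guess):

def monitor_eye_blinking(eyes_close, eyes_ratio, total_eye_blinks,
                         eye_counter, eye_continuous_close):
    if eyes_close:
        eye_counter += 1
    else:
        # Count one blink once the eyes stayed closed long enough
        if eye_counter >= eye_continuous_close:
            total_eye_blinks += 1
        eye_counter = 0
    return total_eye_blinks, eye_counter

def monitor_mouth_opening(mouth_open, mouth_ratio, total_mouth_opens,
                          mouth_counter, mouth_continuous_open):
    if mouth_open:
        mouth_counter += 1
    else:
        # Count one mouth-open event after enough consecutive open frames
        if mouth_counter >= mouth_continuous_open:
            total_mouth_opens += 1
        mouth_counter = 0
    return total_mouth_opens, mouth_counter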
Example no. 14
def process_facerecognition(model_detector, model_recognizer, redis_cam, cam_resolution, embeddings_path, image_folder):
    # Initialize the camera
    # camera = cam_init(redis_cam, cam_resolution[0], cam_resolution[1])
    
    if redis_cam is None:
        print("cannot connect to redis streaming...")
        return
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector, path=INPUT_DIR_MODEL_DETECTION)
        # face_detector_fn = FaceDetector(model=FaceDetectorModels(5), path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer, path=INPUT_DIR_MODEL_ENCODING, path_training=INPUT_DIR_MODEL_TRAINING, training=False)
        face_recognizer = FaceRecognizer(face_embeddings_path = embeddings_path)
    except Exception as ex:
        print("Warning, check if the models and the trained dataset exist!")
        print(ex)
        return
    face_id, confidence = (None, 0)

    if image_folder != '':
        output_dir = './predict_results'
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        lst_image = glob.glob(image_folder+'/*')
        for img_path in lst_image:
            try:
                if img_path:
                    image = cv2.VideoCapture(img_path)
                    ret, frame = image.read()        
                if frame is None:
                    print("Error, check if camera is connected!")
                    continue

                frame, _, _ = face_recognizer.recognize(frame, face_detector, face_encoder, num_times_recognize=1, check_type='face_distance')

                cv2.imwrite(join(output_dir, basename(img_path)), frame)
            except Exception as ex:
                print(ex)    

    else:
        # input as webcam
        while (True):
            # Capture frame from webcam
            img_path = None
            # Debug override: read a fixed test image instead of the redis stream
            # img_path = "/home/500/anh_lbt/IMAGE_TASK/FaceRecognition_2019/faceidsys/datasets/test/me2.jpg"
            img_path = "/home/500/anh_lbt/IMAGE_TASK/test/test.jpg"
            if img_path:
                image = cv2.VideoCapture(img_path)
                ret, frame = image.read()
            else:  
                key = '{0}|uint8_{1}_{2}_3'.format((redis_cam.store_name), \
                    redis_cam.y1 - redis_cam.y0, redis_cam.x1 - redis_cam.x0)
                frame = redis_cam.decoding_image(key)
            
            if frame is None:
                print("Error, check if camera is connected!")
                break

            # TEST === Detect and identify faces in the frame
            # faces = face_detector.detect(frame)
            # label_face(frame, faces[0], None)
            # plt.imshow(frame)
            # plt.show()
            # embs = face_encoder.encode(frame, faces, -1)
            frame, _, _ = face_recognizer.recognize(frame, face_detector, face_encoder, check_type='face_distance')

            # Display updated frame
            frame = imutils.resize(frame, 1800)
            cv2.imshow(WINDOW_NAME, frame)

            # Check for user actions
            if cv2.waitKey(1) & 0xFF == 27: # ESC
                break

        # Release the camera
        # camera.release()
        cv2.destroyAllWindows()
    while (True):
        # Capture frame from webcam
        # ret, frame = camera.read()
        key = '{0}|uint8_{1}_{2}_3'.format((redis_cam.store_name), \
            redis_cam.y1 - redis_cam.y0, redis_cam.x1 - redis_cam.x0)
        frame = redis_cam.decoding_image(key)
        
        if frame is None:
            print("Error, check if camera is connected!")
            break
        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: run facial_recognition_training.py first)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)
            # Process 1 face only
            break
        # Display updated frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27: # ESC
            break

    # Clean up (the camera init above is commented out, so nothing to release)
    cv2.destroyAllWindows()
Example no. 15
def login():
    INPUT_DIR_DATASET = "datasets"
    INPUT_DIR_MODEL_DETECTION = "models/detection/"
    INPUT_DIR_MODEL_ENCODING = "models/encoding/"
    INPUT_DIR_MODEL_TRAINING = "models/training/"
    INPUT_DIR_MODEL_ESTIMATION = "models/estimation/"
    INPUT_DIR_MODEL_LIVENESS = "models/liveness/"

    # Set width and height
    RESOLUTION_QVGA = (320, 240)

    cap = cv2.VideoCapture(0)
    # cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    # detector = FaceDetectorModels.HAARCASCADE
    #    detector=FaceDetectorModels.DLIBHOG
    #    detector=FaceDetectorModels.DLIBCNN
    #    detector=FaceDetectorModels.SSDRESNET
    #    detector=FaceDetectorModels.MTCNN
    detector = FaceDetectorModels.FACENET

    # encoder = FaceEncoderModels.LBPH
    #    encoder=FaceEncoderModels.OPENFACE
    #    encoder=FaceEncoderModels.DLIBRESNET
    encoder = FaceEncoderModels.FACENET

    liveness = FaceLivenessModels.EYESBLINK_MOUTHOPEN
    # liveness=FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    #camera = cam_init(cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=encoder,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize face liveness detection
        face_liveness = FaceLiveness(model=liveness,
                                     path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        print("Error, check if the models and the trained dataset exist!")
        return

    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # dictionary
    runtime = 10  # monitor for 10 seconds only
    is_fake_count_print = 0
    # print("Note: this will run for {} seconds only".format(runtime))
    while (True):
        # Capture frame from webcam
        ret, frame = cap.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        # (identification uses the trained dataset; run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are close and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            print("eyes_close={}, eyes_ratio ={:.2f}".format(
                mouth_open, mouth_ratio))
            print("mouth_open={}, mouth_ratio={:.2f}".format(
                mouth_open, mouth_ratio))
            # print("confidence: " , confidence)

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            # is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify face only if it is not fake and eyes are open and mouth is close
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if face_id not in identified_unique_faces:
                    identified_unique_faces[face_id] = 1
                else:
                    identified_unique_faces[face_id] += 1

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            conf = confidence
            print("confidence :", confidence)
            print("faceid :", face_id)
            if conf is not None and conf >= 90:
                #return 'Recognized'
                return render_template('success.html')
            else:
                #return 'Not recognized'
                return render_template('dashboard.html')

        #yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)
        # Update frame count
        frame_count += 1

    # Release the camera
    cap.release()
    cv2.destroyAllWindows()
Example no. 16
def process_facedetection():

    cam_index = int(args.webcam)
#    cam_index = 'tiv.mp4'

    cam_resolution = RESOLUTION_VGA
#    model_detector=FaceDetectorModels.HAARCASCADE
#    model_detector=FaceDetectorModels.DLIBHOG
    model_detector = FaceDetectorModels.DLIBCNN
#    model_detector=FaceDetectorModels.SSDRESNET
#    model_detector=FaceDetectorModels.MTCNN
#    model_detector=FaceDetectorModels.FACENET

    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT


    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector, path=INPUT_DIR_MODEL_DETECTION)#, optimize=True)
        # Initialize face pose/age/gender estimation
        face_pose_estimator = FacePoseEstimator(model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except Exception:
        print("Warning, check if the models and the trained dataset exist!")
        return
    (age, gender, emotion) = (None, None, None)
    df = pd.DataFrame(columns=['Gender'])


    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        t1 = time.time()  # current time
        num_seconds = t1 - t0  # seconds since t0 (assumed set at module level)

        if num_seconds > 30:  # e.g. break after 30 seconds
            break

        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y+h, x:x+w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), 
                (x, y-45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), 
                (x, y-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), 
                (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

            # pandas>=2.0 removed DataFrame.append; use pd.concat instead
            df = pd.concat([df, pd.DataFrame([{'Gender': gender}])], ignore_index=True)


        #print(df)
        # Display updated frame to web app
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')

    
    df1 = df.groupby('Gender').size()
    data = df1.values.tolist()
    print(data)
    
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
    return data
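This example reads args.webcam and a start time t0 that are defined outside the function. A minimal sketch of the assumed module-level setup:

import argparse
import time

parser = argparse.ArgumentParser()
parser.add_argument('--webcam', default=0, help='webcam index')
args = parser.parse_args()
t0 = time.time()  # stream start time used by the 30-second cutoff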
Example no. 17
def process_faceenrollment(model_detector, cam_index, cam_resolution):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
    except Exception:
        print("Warning, check if the models and the trained dataset exist!")
        return

    print("")
    print("Press SPACEBAR to record video or ENTER to capture picture!")
    print("Make sure that your face is inside the circular region!")
    print("")

    saveVideo = False
    out = None
    color_recording = (255, 255, 255)
    is_windows = (os.name == 'nt')

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            #print("{} {} {} {}".format(x,y,w,h))

            if saveVideo and len(faces) == 1:
                out.write(frame)

            # Set text and bounding box on face
            label_face(frame, (x, y, w, h))

            # Process 1 face only
            break

        # Build a black mask and reveal only the circular enrollment region
        mask = np.full((frame.shape[0], frame.shape[1]), 0, dtype=np.uint8)
        cv2.circle(mask,
                   (int(cam_resolution[0] / 2), int(cam_resolution[1] / 2)),
                   110, (255, 255, 255), -1, cv2.LINE_AA)
        fg = cv2.bitwise_or(frame, frame, mask=mask)
        cv2.circle(fg,
                   (int(cam_resolution[0] / 2), int(cam_resolution[1] / 2)),
                   110, color_recording, 15, cv2.LINE_AA)
        # Draw tick marks; s1/s2 are precomputed dash endpoints (module-level)
        for i in range(120):
            cv2.line(fg, (s1[i, 0], s1[i, 1]), (s2[i, 0], s2[i, 1]), (0, 0, 0),
                     2, cv2.LINE_AA)

        # Display updated frame
        if is_windows:
            fg = imutils.resize(fg, height=480)
        cv2.imshow(WINDOW_NAME, fg)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC to exit
            break
        elif keyPressed == 32:  # Space to save video
            saveVideo, out = save_video(saveVideo, out, frame.shape[:2],
                                        WINDOW_NAME + ".avi")
            if out is not None:
                color_recording = (0, 255, 0)
            else:
                color_recording = (0, 0, 0)
                break
        elif keyPressed == 13:  # Enter to capture picture
            cv2.imwrite(
                WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg",
                frame)

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
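The dashed circle drawn above indexes two precomputed endpoint arrays, s1 and s2, that are defined elsewhere. One plausible way to build them for a QVGA frame (120 dashes matches the loop above; the radii are assumptions):

import numpy as np

theta = np.linspace(0, 2 * np.pi, 120, endpoint=False)
cx, cy, radius = 160, 120, 110  # QVGA center and guide-circle radius
s1 = np.stack([cx + (radius - 8) * np.cos(theta),
               cy + (radius - 8) * np.sin(theta)], axis=1).astype(int)
s2 = np.stack([cx + (radius + 8) * np.cos(theta),
               cy + (radius + 8) * np.sin(theta)], axis=1).astype(int)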
Example no. 18
def process_facerecognition(model_detector, model_recognizer,
                            model_speech_synthesizer, cam_index,
                            cam_resolution):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)

        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)

        # Initialize text-to-speech synthesizer
        speech_synthesizer = SpeechSynthesizer(model=model_speech_synthesizer,
                                               path=None,
                                               path_output=None,
                                               training=False)
    except Exception:
        face_encoder = None
        speech_synthesizer = None
        print("Warning, check if the models and the trained dataset exist!")
    face_id, confidence = (None, 0)

    frame_count = 0
    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face
            # Identify face based on trained dataset (note: run facial_recognition_training.py first)
            if face_encoder is not None:
                face_id, confidence = face_encoder.identify(
                    frame, (x, y, w, h))
            # Set text and bounding box on face
            label_face(frame, (x, y, w, h), face_id, confidence)

            # Play the audio file corresponding to the recognized name
            if speech_synthesizer is not None and (frame_count % 30 == 0):
                if len(faces) == 1 and (face_id is not None) and (face_id !=
                                                                  "Unknown"):
                    speech_synthesizer.playaudio(INPUT_DIR_AUDIOSET,
                                                 face_id,
                                                 block=False)

            # Process 1 face only
            break

        # Display updated frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break

        frame_count += 1

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
Example no. 19
def process_facerecognition(cam_resolution,
                            out_resolution,
                            framecount,
                            image=None,
                            model_detector=0,
                            model_recognizer=0):

    # Initialize the camera
    if image is not None:
        cap = cv2.VideoCapture(image)
    else:
        cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(model=model_detector,
                                 path=INPUT_DIR_MODEL_DETECTION,
                                 optimize=True)

    ###############################################################################
    # FACE RECOGNITION
    ###############################################################################
    # Initialize face recognizer
    face_encoder = FaceEncoder(model=model_recognizer,
                               path=INPUT_DIR_MODEL_ENCODING,
                               path_training=INPUT_DIR_MODEL_TRAINING,
                               training=False)
    face_id, confidence = ("Unknown", 0)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()
    fps = 0
    saveVideo = False
    out = None

    # Optimization
    skip_frames = True
    skip_frames_count = 0
    skip_frames_set = 2

    while (True):

        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            print("Unexpected error! {}".format(image))
            break

        ###############################################################################
        # FACE DETECTION and FACE RECOGNITION
        ###############################################################################
        # Detect and recognize each face in the images

        # Resize to QVGA so that an RPi can reach an acceptable fps
        if out_resolution is not None:
            #frame = imutils.resize(frame, width=out_resolution[0])
            (h, w) = frame.shape[:2]
            frame = cv2.resize(
                frame,
                (out_resolution[0], int(h * out_resolution[0] / float(w))))

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE RECOGNITION
            ###############################################################################
            face_id, confidence = face_encoder.identify(frame, (x, y, w, h))

            # Set bounding box and text
            label_face(frame, (x, y, w, h), face_id, confidence)

        # Update frame counters (total_frames drives the framecount limit;
        # fps_frames is reset every 30 frames for a rolling fps estimate)
        total_frames += 1
        fps_frames += 1
        if framecount != 0 and total_frames >= framecount:
            break
        if fps_frames % 30 == 29:
            fps = fps_frames / (time() - fps_start)
            fps_frames = 0
            fps_start = time()
        cv2.putText(frame, "FPS {:.2f}".format(fps), (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Save the frame to a video
        if saveVideo:
            out.write(frame)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 32:  # Space
            saveVideo, out = save_video(saveVideo, out, frame.shape[:2],
                                        "facial_recognition_rpi3.avi")

    # Set the fps
    time_diff = time() - fps_start
    if time_diff:
        fps = fps_frames / time_diff

    if image is not None:
        cv2.waitKey(3000)

    if saveVideo:
        out.release()

    # Release the camera
    cam_release(cap)

    return fps
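A minimal driver for the function above, sketched for context: the resolutions and the 300-frame budget are illustrative, and the model arguments are left at the signature's defaults (0).

if __name__ == "__main__":
    # Run on the default webcam at VGA, downscale to QVGA width, stop after
    # 300 frames (0 would run until ESC), and report the measured fps.
    measured_fps = process_facerecognition(cam_resolution=(640, 480),
                                           out_resolution=(320, 240),
                                           framecount=300)
    print("Average FPS: {:.2f}".format(measured_fps))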
Example n. 20
0
def process_facerecognition_livenessdetection_poseagegenderemotion(
        cam_resolution,
        out_resolution,
        framecount,
        image=None,
        model_detector=0,
        model_recognizer=0):

    from libfaceid.liveness import FaceLivenessDetectorModels, FaceLiveness
    from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
    from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
    from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
    from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator
    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    if image is not None:
        cap = cv2.VideoCapture(image)
    else:
        cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(
        model=model_detector,
        path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)

    ###############################################################################
    # FACE RECOGNITION
    ###############################################################################
    # Initialize face recognizer
    face_encoder = FaceEncoder(model=model_recognizer,
                               path=INPUT_DIR_MODEL_ENCODING,
                               path_training=INPUT_DIR_MODEL_TRAINING,
                               training=False)

    ###############################################################################
    # EYE BLINKING DETECTOR
    ###############################################################################
    # Initialize detector for blinking eyes
    face_liveness = FaceLiveness(model=FaceLivenessDetectorModels.EYEBLINKING,
                                 path=INPUT_DIR_MODEL_ESTIMATION)
    face_liveness.initialize()
    (eye_counter, total_eye_blinks) = (0, 0)

    ###############################################################################
    # FACE POSE/AGE/GENDER/EMOTION ESTIMATION
    ###############################################################################
    # Initialize pose/age/gender/emotion estimation
    if model_poseestimator is not None:
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_ageestimator is not None:
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
    if model_genderestimator is not None:
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_emotionestimator is not None:
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    (age, gender, emotion) = (None, None, None)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()
    fps = 0
    total_frames = 0
    saveVideo = False
    out = None

    while (True):

        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            print("Unexpected error!" if image is None else
                  "Unexpected error! " + image)
            break

        ###############################################################################
        # FACE DETECTION and FACE RECOGNITION
        ###############################################################################
        # Detect and recognize each face in the frame

        # Resize the frame so that the RPi can sustain an acceptable fps
        if out_resolution is not None:
            #frame = imutils.resize(frame, width=out_resolution[0])
            (h, w) = frame.shape[:2]
            frame = cv2.resize(
                frame,
                (out_resolution[0], int(h * out_resolution[0] / float(w))))

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE AGE/GENDER/EMOTION ESTIMATION
            ###############################################################################
            face_image = frame[y:y + h, x:x + w]
            if model_ageestimator is not None:
                age = face_age_estimator.estimate(frame, face_image)
            if model_genderestimator is not None:
                gender = face_gender_estimator.estimate(frame, face_image)
            if model_emotionestimator is not None:
                emotion = face_emotion_estimator.estimate(frame, face_image)

            ###############################################################################
            # FACE RECOGNITION
            ###############################################################################
            face_id, confidence = face_encoder.identify(frame, (x, y, w, h))

            ###############################################################################
            # EYE BLINKING DETECTION
            ###############################################################################
            total_eye_blinks, eye_counter = face_liveness.detect(
                frame, (x, y, w, h), total_eye_blinks, eye_counter)

            ###############################################################################
            # FACE POSE ESTIMATION
            ###############################################################################
            # Detect and draw face pose locations
            if model_poseestimator is not None:
                shape = face_pose_estimator.detect(frame, face)
                face_pose_estimator.add_overlay(frame, shape)

            # Display name, age, gender, emotion
            cv2.putText(frame, "Age: {}".format(age), (20, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Gender: {}".format(gender), (20, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, "Emotion: {}".format(emotion), (20, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame,
                        "Name: {} [{:.2f}%]".format(face_id, confidence),
                        (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1, cv2.LINE_AA)

        ###############################################################################
        # EYE BLINKING DETECTION
        ###############################################################################
        cv2.putText(frame, "Blinks: {}".format(total_eye_blinks), (20, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Update frame counters (total_frames drives the framecount limit;
        # fps_frames is reset every 30 frames for a rolling fps estimate)
        total_frames += 1
        fps_frames += 1
        if framecount != 0 and total_frames >= framecount:
            break
        if fps_frames % 30 == 29:
            fps = fps_frames / (time() - fps_start)
            fps_frames = 0
            fps_start = time()
        cv2.putText(frame, "FPS {:.2f}".format(fps), (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Save the frame to a video
        if saveVideo:
            out.write(frame)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 32:  # Space
            saveVideo, out = save_video(saveVideo, out, frame.shape[:2],
                                        WINDOW_NAME + ".avi")
        elif keyPressed == 13:  # Enter
            save_photo(
                frame, WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg")

    # Set the fps
    time_diff = time() - fps_start
    if time_diff:
        fps = fps_frames / time_diff

    if image is not None:
        cv2.waitKey(3000)

    if saveVideo:
        out.release()

    # Release the camera
    cam_release(cap)

    return fps
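face_liveness.detect above folds eye-blink counting into a single call. The underlying state machine is simple enough to isolate; the sketch below is our illustration of that logic, not libfaceid's implementation:

def update_blink_counter(eyes_close, eye_counter, total_eye_blinks,
                         consecutive_frames=1):
    # Count a blink when a run of >= consecutive_frames closed-eye frames ends.
    if eyes_close:
        eye_counter += 1
    else:
        if eye_counter >= consecutive_frames:
            total_eye_blinks += 1
        eye_counter = 0
    return total_eye_blinks, eye_counter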
Example n. 21
0
def process_livenessdetection():

    cam_index = 0
    cam_resolution = RESOLUTION_QVGA
    #    model_detector=FaceDetectorModels.HAARCASCADE
    model_detector = FaceDetectorModels.DLIBHOG
    #    model_detector=FaceDetectorModels.DLIBCNN
    #    model_detector=FaceDetectorModels.SSDRESNET
    #    model_detector=FaceDetectorModels.MTCNN
    #    model_detector=FaceDetectorModels.FACENET

    #    model_recognizer=FaceEncoderModels.LBPH
    #    model_recognizer=FaceEncoderModels.OPENFACE
    model_recognizer = FaceEncoderModels.DLIBRESNET
    #    model_recognizer=FaceEncoderModels.FACENET
    #liveness=FaceLivenessModels.EYESBLINK_MOUTHOPEN
    liveness = FaceLivenessModels.COLORSPACE_YCRCBLUV

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])
    check = int(
        input("Enter 1 to stream live video to the web app, "
              "anything else to disable: "))
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
        # Initialize face liveness detection
        face_liveness = FaceLiveness(
            model=FaceLivenessModels.EYESBLINK_MOUTHOPEN,
            path=INPUT_DIR_MODEL_LIVENESS)
        face_liveness2 = FaceLiveness(
            model=FaceLivenessModels.COLORSPACE_YCRCBLUV,
            path=INPUT_DIR_MODEL_LIVENESS)

    except Exception:
        #face_encoder = None
        print("Warning, check if models and trained dataset models exist!")
        return
    face_id, confidence = (None, 0)

    eyes_close, eyes_ratio = (False, 0)
    total_eye_blinks, eye_counter, eye_continuous_close = (
        0, 0, 1)  # eye_continuous_close should depend on frame rate
    mouth_open, mouth_ratio = (False, 0)
    total_mouth_opens, mouth_counter, mouth_continuous_open = (
        0, 0, 1)  # mouth_continuous_open should depend on frame rate
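    # A rough way to size these thresholds (our note, not in the original):
    # frames_for_event ~= event_duration_seconds * fps, so a ~150 ms blink
    # observed at 20 fps gives eye_continuous_close ~= 3.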

    time_start = time()
    time_elapsed = 0
    frame_count = 0
    identified_unique_faces = {}  # maps face_id -> confident-frame count
    runtime = 1000  # monitor for at most this many seconds
    is_fake_count_print = 0
    is_fake_count_replay = 0
    face_count = 0
    time_recognition = 5
    checkface = False

    print("Note: this will run for {} seconds only".format(runtime))

    while (time_elapsed < runtime):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        # Identify each face based on the trained dataset (note: run facial_recognition_training.py first)
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):

            # Check if eyes are close and if mouth is open
            eyes_close, eyes_ratio = face_liveness.is_eyes_close(frame, face)
            mouth_open, mouth_ratio = face_liveness.is_mouth_open(frame, face)
            #print("eyes_close={}, eyes_ratio ={:.2f}".format(mouth_open, mouth_ratio))
            #print("mouth_open={}, mouth_ratio={:.2f}".format(mouth_open, mouth_ratio))

            # Detect if frame is a print attack or replay attack based on colorspace
            is_fake_print = face_liveness2.is_fake(frame, face)
            is_fake_replay = face_liveness2.is_fake(frame, face, flag=1)

            # Identify the face only if it is not fake, eyes are open, and mouth is closed
            if is_fake_print:
                is_fake_count_print += 1
                face_id, confidence = ("Fake", None)
            elif is_fake_replay:
                is_fake_count_replay += 1
                face_id, confidence = ("Fake", None)
            elif not eyes_close and not mouth_open:
                face_id, confidence = face_encoder.identify(frame, face)
                if confidence > 50:
                    identified_unique_faces[face_id] = (
                        identified_unique_faces.get(face_id, 0) + 1)

            if (face_count > 100) | (face_id == "Fake"):
                face_count = 0
            elif (face_id != "Fake") & (confidence > 50):
                face_count += 1

            print("Identifying: {:.2f} %".format((face_count / 100) * 100))

            label_face(frame, face, face_id,
                       confidence)  # Set text and bounding box on face
            break  # Process 1 face only

        # Monitor eye blinking and mouth opening for liveness detection
        total_eye_blinks, eye_counter = monitor_eye_blinking(
            eyes_close, eyes_ratio, total_eye_blinks, eye_counter,
            eye_continuous_close)
        total_mouth_opens, mouth_counter = monitor_mouth_opening(
            mouth_open, mouth_ratio, total_mouth_opens, mouth_counter,
            mouth_continuous_open)

        # Update frame count
        frame_count += 1
        time_elapsed = time() - time_start

        #cv2.imshow(WINDOW_NAME, frame)
        if face_count > 99:
            checkface = True
            break
        else:
            checkface = False

        # Display updated frame to web app
        if check == 1:
            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
                   cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')
    if checkface:
        print("Hello {}!!!".format(
            max(identified_unique_faces, key=identified_unique_faces.get)))
    else:
        print("Cannot identify your face, please try again!")
    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
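Example n. 21 accepts an identity only after roughly 100 confident, non-fake frames and then greets the most frequently identified name. That voting step, isolated as a sketch (the helper is ours, not libfaceid's):

from collections import Counter

def majority_identity(votes):
    # votes: iterable of face_id strings collected from confident frames.
    counts = Counter(votes)
    return counts.most_common(1)[0][0] if counts else None

# majority_identity(["alice", "alice", "bob"]) -> "alice"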
Example n. 22
0
def process_facedetection(cam_resolution,
                          out_resolution,
                          framecount,
                          model_detector=0):

    from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
    from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
    from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
    from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator
    model_poseestimator = FacePoseEstimatorModels.DEFAULT
    model_ageestimator = FaceAgeEstimatorModels.DEFAULT
    model_genderestimator = FaceGenderEstimatorModels.DEFAULT
    model_emotionestimator = FaceEmotionEstimatorModels.DEFAULT

    # Initialize the camera
    cap = cam_init(cam_resolution[0], cam_resolution[1])

    ###############################################################################
    # FACE DETECTION
    ###############################################################################
    # Initialize face detection
    face_detector = FaceDetector(
        model=model_detector,
        path=INPUT_DIR_MODEL_DETECTION)  #, optimize=True)

    ###############################################################################
    # FACE POSE/AGE/GENDER/EMOTION ESTIMATION
    ###############################################################################
    # Initialize face pose/age/gender/emotion estimation
    if model_poseestimator is not None:
        face_pose_estimator = FacePoseEstimator(
            model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_ageestimator is not None:
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                              path=INPUT_DIR_MODEL_ESTIMATION)
    if model_genderestimator is not None:
        face_gender_estimator = FaceGenderEstimator(
            model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    if model_emotionestimator is not None:
        face_emotion_estimator = FaceEmotionEstimator(
            model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    (age, gender, emotion) = (None, None, None)

    # Initialize fps counter
    fps_frames = 0
    fps_start = time()

    while (True):

        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret == 0:
            break

        # Resize the frame so that the RPi can sustain an acceptable fps
        if out_resolution is not None:
            frame = cv2.resize(frame, out_resolution)

        ###############################################################################
        # FACE DETECTION
        ###############################################################################
        # Detect faces and set bounding boxes
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            ###############################################################################
            # FACE AGE/GENDER/EMOTION ESTIMATION
            ###############################################################################
            face_image = frame[y:y + h, x:x + w]
            if model_ageestimator is not None:
                age = face_age_estimator.estimate(frame, face_image)
            if model_genderestimator is not None:
                gender = face_gender_estimator.estimate(frame, face_image)
            if model_emotionestimator is not None:
                emotion = face_emotion_estimator.estimate(frame, face_image)

            ###############################################################################
            # FACE POSE ESTIMATION
            ###############################################################################
            # Detect and draw face pose locations
            if model_poseestimator is not None:
                shape = face_pose_estimator.detect(frame, face)
                face_pose_estimator.add_overlay(frame, shape)
            else:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255),
                              1)

            # Display age, gender, emotion
            if age is not None and gender is not None and emotion is not None:
                cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Emotion: {}".format(emotion), (x, y - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Update frame count
        fps_frames += 1
        if (framecount != 0 and fps_frames >= framecount):
            break

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:
            break

    # Set the fps
    fps = fps_frames / (time() - fps_start)

    # Release the camera
    cam_release(cap)

    return fps
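The function above stretches the frame to out_resolution exactly, while Example n. 19 and n. 20 scale to a target width and preserve the aspect ratio. The aspect-preserving variant, isolated as a standalone sketch:

import cv2

def resize_keep_aspect(frame, target_width):
    # Scale to target_width, deriving the height from the original ratio.
    (h, w) = frame.shape[:2]
    return cv2.resize(frame,
                      (target_width, int(h * target_width / float(w))))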
def process_facedetection(model_detector, model_poseestimator,
                          model_ageestimator, model_genderestimator,
                          cam_resolution, cam_index):  #model_emotionestimator

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    # try:
    # Initialize face detection
    face_detector = FaceDetector(model=model_detector,
                                 path=INPUT_DIR_MODEL_DETECTION,
                                 minfacesize=120)
    # Initialize face pose/age/gender estimation
    face_pose_estimator = FacePoseEstimator(model=model_poseestimator,
                                            path=INPUT_DIR_MODEL_ESTIMATION)
    face_age_estimator = FaceAgeEstimator(model=model_ageestimator,
                                          path=INPUT_DIR_MODEL_ESTIMATION)
    face_gender_estimator = FaceGenderEstimator(
        model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    # face_emotion_estimator = FaceEmotionEstimator(model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    # except:
    #     print("Warning, check if models and trained dataset models exists!")
    # (age, gender, emotion) = (None, None, None)

    while (True):

        # Capture frame from webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if camera is connected!")
            break

        # Detect and identify faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Detect age, gender, emotion
            face_image = frame[y:y + h, x:x + w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            # emotion = face_emotion_estimator.estimate(frame, face_image)
            # label_face(frame, face, )

            # Detect and draw face pose locations
            shape = face_pose_estimator.detect(frame, face)
            # face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            if True:  # set to False to disable the text overlay
                cv2.putText(frame, "Age: {}".format(age), (x, y - 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frame, "Gender: {}".format(gender), (x, y - 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255),
                              1)
                # cv2.putText(frame, "Emotion: {}".format(emotion),
                #     (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 13:  # Enter
            cv2.imwrite(
                WINDOW_NAME + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg",
                frame)

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()
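Several of these examples crop the face region with frame[y:y + h, x:x + w] before age/gender estimation. When a detector returns a box touching the frame border, clamping keeps the slice valid; the helper below is our defensive sketch, not part of the originals:

def crop_face(frame, box):
    # Clamp the (x, y, w, h) box to the frame bounds before slicing.
    (x, y, w, h) = box
    (frame_h, frame_w) = frame.shape[:2]
    x0, y0 = max(0, x), max(0, y)
    x1, y1 = min(frame_w, x + w), min(frame_h, y + h)
    return frame[y0:y1, x0:x1]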
Example n. 24
0
def cumucount():
    input_video = "tiv.mp4"
    #	input_video = "http://*****:*****@192.168.0.105:80/1"

    # By default I use an "SSD with Mobilenet" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
    detection_graph, category_index = backbone.set_model(
        'ssd_mobilenet_v1_coco_2017_11_17')

    targeted_objects = "person"
    fps = 24  # change it with your input video fps
    width = 640  # change it with your input video width
    height = 480  # change it with your input vide height
    is_color_recognition_enabled = 0  # set it to 1 for enabling the color prediction for the detected objects
    roi = 200  # roi line position
    deviation = 5  # the constant that represents the object counting area
    total_passed_vehicle = 0
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_movie = cv2.VideoWriter('the_output.avi', fourcc, fps,
                                   (width, height))
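    # NOTE (our addition): cv2.VideoWriter may silently drop frames whose size
    # does not match (width, height) above, so these constants must match the
    # frames actually written to output_movie below.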
    # input video
    cap = cv2.VideoCapture(input_video)
    cam_resolution = RESOLUTION_VGA
    model_detector = FaceDetectorModels.HAARCASCADE
    model_recognizer = FaceEncoderModels.LBPH
    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector,
                                     path=INPUT_DIR_MODEL_DETECTION)
        # Initialize face recognizer
        face_encoder = FaceEncoder(model=model_recognizer,
                                   path=INPUT_DIR_MODEL_ENCODING,
                                   path_training=INPUT_DIR_MODEL_TRAINING,
                                   training=False)
    except Exception:
        face_encoder = None
        print("Warning, check if models and trained dataset models exist!")
    face_id, confidence = (None, 0)
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # for all the frames that are extracted from input video
            while (cap.isOpened()):
                ret, frame = cap.read()

                if not ret:
                    print("end of the video file...")
                    break
                input_frame = frame
                # Detect and identify faces in the frame
                faces = face_detector.detect(input_frame)
                for (index, face) in enumerate(faces):
                    (x, y, w, h) = face
                    # Identify face based on trained dataset (note: should run facial_recognition_training.py)
                    if face_encoder is not None:
                        face_id, confidence = face_encoder.identify(
                            input_frame, (x, y, w, h))
                    # Set text and bounding box on face
                    label_face(input_frame, (x, y, w, h), face_id, confidence)
                    # Process 1 face only
                    #break

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)
                # Actual detection
                (boxes, scores, classes,
                 num) = sess.run([
                     detection_boxes, detection_scores, detection_classes,
                     num_detections
                 ],
                                 feed_dict={image_tensor: image_np_expanded})
                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection.
                counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_y_axis(
                    cap.get(1),
                    input_frame,
                    2,
                    is_color_recognition_enabled,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    targeted_objects="person",
                    y_reference=roi,
                    deviation=deviation,
                    use_normalized_coordinates=True,
                    line_thickness=4)
                # when an object passes over the line and is counted, make the ROI line green
                if counter == 1:
                    cv2.line(input_frame, (0, roi), (width, roi), (0, 0xFF, 0),
                             5)
                else:
                    cv2.line(input_frame, (0, roi), (width, roi), (0, 0, 0xFF),
                             5)

                total_passed_vehicle = total_passed_vehicle + counter
                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    input_frame,
                    'Detected: ' + str(total_passed_vehicle),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,
                )
                cv2.putText(
                    input_frame,
                    'ROI Line',
                    (545, roi - 10),
                    font,
                    0.6,
                    (0, 0, 0xFF),
                    2,
                    cv2.LINE_AA,
                )
                output_movie.write(input_frame)
                #print ("writing frame")
                #cv2.imshow('object counting',input_frame)
                #if cv2.waitKey(1) & 0xFF == ord('q'):
                #break
                # Display updated frame to web app
                yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
                       cv2.imencode('.jpg', frame)[1].tobytes() + b'\r\n\r\n')
            cap.release()
            cv2.destroyAllWindows()
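cumucount, like several generators in this collection, yields multipart-JPEG chunks intended for a web app. The original snippets only show the generator side; a minimal Flask route that would consume one (Flask here is our assumption, not shown in the originals) looks like:

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")
def video_feed():
    # Stream the annotated frames as an MJPEG response.
    return Response(cumucount(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")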