Example #1
def mouth_opening_detection(img):
    # Relies on the module-level setup shown in Example #4: face_model,
    # landmark_model, outer_points, inner_points, d_outer and d_inner.
    rects = find_faces(img, face_model)
    isOpen = 0
    try:
        for rect in rects:
            shape = detect_marks(img, landmark_model, rect)
            cnt_outer = 0
            cnt_inner = 0
            draw_marks(img, shape[48:])  # draw only the mouth landmarks (points 48-67)
            # Count lip pairs whose vertical distance exceeds the calibrated
            # baseline by a small slack.
            for i, (p1, p2) in enumerate(outer_points):
                if d_outer[i] + 3 < shape[p2][1] - shape[p1][1]:
                    cnt_outer += 1
            for i, (p1, p2) in enumerate(inner_points):
                if d_inner[i] + 2 < shape[p2][1] - shape[p1][1]:
                    cnt_inner += 1
            if cnt_outer > 3 and cnt_inner > 2:
                isOpen = 1
                # print('Mouth open')
                # cv2.putText(img, 'Mouth open', (30, 30), font,
                #             1, (0, 255, 255), 2)
            else:
                isOpen = 0
        # show the output image with the face detections + facial landmarks
        # cv2.imshow("Output", img)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
        return isOpen
    except Exception:
        print('Exception caught')
        return isOpen
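
A minimal usage sketch (an assumption, not part of the original): it presupposes the module-level setup from Example #4 and fills the d_outer / d_inner baselines via find_distance from Example #3 before classifying a frame.

import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    find_distance(frame)     # record the closed-mouth baselines (Example #3)
    ret, frame = cap.read()  # grab a fresh frame to classify
    if ret:
        print(mouth_opening_detection(frame))  # 1 = mouth open, 0 = closed
cap.release()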
Example #2
def initialize_mouth_model(img, ret, face_model, landmark_model):
    # Bail out early if the frame grab failed; return a pair so callers
    # that unpack the result do not break.
    if not ret:
        return None, None
    rects = find_faces(img, face_model)
    shape = None
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
    return img, shape
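
A brief usage sketch (assumed, not in the original): the function returns an (image, landmarks) pair, with (None, None) when the frame grab failed.

ret, frame = cap.read()
annotated, shape = initialize_mouth_model(frame, ret, face_model, landmark_model)
if shape is not None:
    cv2.imshow("Output", annotated)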
Example #3
def find_distance(img):
    # Records the baseline (closed-mouth) lip distances into the
    # module-level d_outer / d_inner lists from Example #4.
    rects = find_faces(img, face_model)
    try:
        shape = None
        for rect in rects:
            shape = detect_marks(img, landmark_model, rect)
            draw_marks(img, shape)
            # cv2.putText(img, 'Press r to record Mouth distances', (30, 30), font,
            #             1, (0, 255, 255), 2)
            # cv2.imshow("Output", img)
        # if cv2.waitKey(1) & 0xFF == ord('r'):
        if shape is None:
            return
        # Accumulate 100 identical samples of the current distances; the
        # division by 100 below restores the single-frame baseline.
        for _ in range(100):
            for i, (p1, p2) in enumerate(outer_points):
                d_outer[i] += shape[p2][1] - shape[p1][1]
            for i, (p1, p2) in enumerate(inner_points):
                d_inner[i] += shape[p2][1] - shape[p1][1]
        d_outer[:] = [x / 100 for x in d_outer]
        d_inner[:] = [x / 100 for x in d_inner]
    except Exception:
        print('Exception caught')
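
Example #1 compares each live distance against these baselines with a slack of 3 px for the outer lip pairs and 2 px for the inner ones: with a calibrated outer distance of, say, 10 px, that pair only counts once the live distance exceeds 13 px, and the mouth is flagged open when more than 3 outer and more than 2 inner pairs exceed their thresholds.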
Example #4
# Imports assumed from the helper modules referenced in Example #5.
import cv2
from face_detector import get_face_detector, find_faces
from face_landmarks import get_landmark_model, detect_marks, draw_marks

face_model = get_face_detector()
landmark_model = get_landmark_model()
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
d_outer = [0] * 5
inner_points = [[61, 67], [62, 66], [63, 65]]
d_inner = [0] * 3
font = cv2.FONT_HERSHEY_SIMPLEX
cap = cv2.VideoCapture(0)

shape = None
while True:
    ret, img = cap.read()
    if not ret:
        continue
    rects = find_faces(img, face_model)
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
        cv2.putText(img, 'Press r to record Mouth distances', (30, 30), font,
                    1, (0, 255, 255), 2)
        cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('r') and shape is not None:
        # Accumulate 100 identical samples of the most recent landmarks;
        # the division by 100 below restores the single-frame baseline.
        for _ in range(100):
            for i, (p1, p2) in enumerate(outer_points):
                d_outer[i] += shape[p2][1] - shape[p1][1]
            for i, (p1, p2) in enumerate(inner_points):
                d_inner[i] += shape[p2][1] - shape[p1][1]
        break
cv2.destroyAllWindows()
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
mouth_open = 0
speaking_count = 0
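
The counters mouth_open and speaking_count are initialized but unused in this snippet. A minimal sketch of how the loop might continue (an assumption, not part of the original): feed each frame to mouth_opening_detection from Example #1 and count closed-to-open transitions as a rough speaking indicator.

while True:
    ret, img = cap.read()
    if not ret:
        break
    is_open = mouth_opening_detection(img)
    if is_open and not mouth_open:
        speaking_count += 1  # closed -> open transition (assumed semantics)
    mouth_open = is_open
    cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()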
Example #5
import platform
from datetime import datetime

import cv2
import numpy as np

import face_detector
import face_landmarks


def main(debug=False):
    # Load face detection model & face landmark model
    face_model = face_detector.get_face_detector()
    landmark_model = face_landmarks.get_landmark_model()

    cv2.namedWindow('debug_window')

    # Create a camera instance for capturing frames; on Windows,
    # cv2.CAP_DSHOW speeds up processing.
    if platform.system().lower() == 'windows':
        camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    else:
        camera = cv2.VideoCapture(0)

    _, sample_img = camera.read()  # grab one frame to get the resolution
    height, width, channel = sample_img.shape  # resolution & color channels (3 = BGR)

    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),             # Nose tip
        (0.0, -330.0, -65.0),        # Chin
        (-225.0, 170.0, -135.0),     # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corner
        (-150.0, -150.0, -125.0),    # Left Mouth corner
        (150.0, -150.0, -125.0)      # Right mouth corner
    ])

    # Camera internals
    focal_length = width
    center = (width/2, height/2)
    camera_matrix = np.array([
        [focal_length, 0, center[0]],
        [0, focal_length, center[1]],
        [0, 0, 1]
    ], dtype='double')

    # Lens distortion coefficients: assuming no lens distortion
    dist_coefficient = np.zeros((4, 1))
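    # Note: approximating the focal length by the image width and the
    # principal point by the image center is a common shortcut for an
    # uncalibrated camera; for precise pose, use values obtained from
    # cv2.calibrateCamera instead.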

    while True:
        # start time stamp this frame
        time_start = datetime.now()

        # 1. capture image (1 frame)
        ret, frame = camera.read()
        if ret:
            # 2. detect faces
            faces = face_detector.find_faces(frame, face_model)
            if faces:
                if debug:
                    face_detector.draw_faces(frame, faces)

                for face in faces:

                    # 3. detect face landmarks
                    try:
                        landmarks = face_landmarks.detect_marks(frame, landmark_model, face)
                    except cv2.error:
                        # Skip the rest of this frame if landmark detection fails.
                        break

                    if debug:
                        face_landmarks.draw_marks(frame, landmarks, color=(0, 255, 0))

                    frame_points = np.array([
                        landmarks[30],  # Nose tip
                        landmarks[8],   # Chin
                        landmarks[36],  # Left eye left corner
                        landmarks[45],  # Right eye right corner
                        landmarks[48],  # Left Mouth corner
                        landmarks[54]   # Right mouth corner
                    ], dtype='double')

                    # 4. get rotation & translation vector
                    success, rotation_vector, translation_vector = cv2.solvePnP(
                        model_points, frame_points, camera_matrix, dist_coefficient, flags=cv2.SOLVEPNP_UPNP
                    )

                    if debug:
                        print(' ' * 13, 'solvePnP:',
                              success, rotation_vector.tolist(), translation_vector.tolist(), end='\r')
                    # TODO: calculate iris, mouth and head's roll pitch yaw
                    # TODO: create socket client, send calculated data to unity
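                    # One way to approach the first TODO (an assumption, not
                    # part of the original): get the rotation matrix with
                    # cv2.Rodrigues and read the Euler angles with
                    # cv2.RQDecomp3x3:
                    #   rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
                    #   angles = cv2.RQDecomp3x3(rotation_matrix)[0]  # (x, y, z) in degrees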

        if ret:
            cv2.imshow('debug_window', frame)

        # end time stamp of this frame
        # end time stamp of this frame
        time_delta = datetime.now() - time_start
        # calculate frames per second; total_seconds() also counts whole
        # seconds, unlike the microseconds field alone
        fps = 1 / max(time_delta.total_seconds(), 1e-6)
        print(' FPS:', fps, end='\r')

        if cv2.waitKey(1) & 0xFF in (27, ord('q')):
            break

    camera.release()
    cv2.destroyAllWindows()
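
A standard entry point (assumed; the original snippet does not show one):

if __name__ == '__main__':
    main(debug=True)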