Example #1
def mouth_opening_detection(img):
    rects = find_faces(img, face_model)
    isOpen = 0
    try:
        for rect in rects:
            shape = detect_marks(img, landmark_model, rect)
            cnt_outer = 0
            cnt_inner = 0
            draw_marks(img, shape[48:])
            for i, (p1, p2) in enumerate(outer_points):
                if d_outer[i] + 3 < shape[p2][1] - shape[p1][1]:
                    cnt_outer += 1
            for i, (p1, p2) in enumerate(inner_points):
                if d_inner[i] + 2 < shape[p2][1] - shape[p1][1]:
                    cnt_inner += 1
            if cnt_outer > 3 and cnt_inner > 2:
                isOpen = 1
                # print('Mouth open')
                # cv2.putText(img, 'Mouth open', (30, 30), font,
                #             1, (0, 255, 255), 2)
            else:
                isOpen = 0
        # show the output image with the face detections + facial landmarks
        # cv2.imshow("Output", img)
        return isOpen
    except Exception:
        print('Exception caught')
        return isOpen
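A minimal driver loop for this detector might look like the sketch below (assumptions: face_model, landmark_model, the lip-point lists, and the d_outer/d_inner baselines are already initialized as in Example #6):

import cv2

cap = cv2.VideoCapture(0)  # default webcam
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if mouth_opening_detection(frame):  # returns 1 when the mouth is judged open
        cv2.putText(frame, 'Mouth open', (30, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
    cv2.imshow('Output', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()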
Example #2
def initialize_mouth_model(img, ret, face_model, landmark_model):
    if not ret:  # frame grab failed
        return None, None
    rects = find_faces(img, face_model)
    shape = None
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
    return img, shape
Example #3
def run_mouth_open(img, ret, face_model, landmark_model, font, outer_points,
                   d_outer, inner_points, d_inner):

    if not ret:
        return False

    rects = find_faces(img, face_model)

    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        cnt_outer = 0
        cnt_inner = 0
        for i, (p1, p2) in enumerate(outer_points):
            if d_outer[i] + 3 < shape[p2][1] - shape[p1][1]:
                cnt_outer += 1
        for i, (p1, p2) in enumerate(inner_points):
            if d_inner[i] + 2 < shape[p2][1] - shape[p1][1]:
                cnt_inner += 1
        if cnt_outer > 3 and cnt_inner > 2:
            return True
    return False
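run_mouth_open compares each lip-point gap against a recorded closed-mouth baseline plus a fixed margin (3 px for the outer lip pairs, 2 px for the inner ones). A minimal calibration sketch that fills those baselines from a single closed-mouth frame (assuming the same find_faces/detect_marks helpers; the function name is illustrative):

def calibrate_mouth_baseline(img, face_model, landmark_model,
                             outer_points, d_outer, inner_points, d_inner):
    # Record the closed-mouth lip gaps (in pixels) as the baseline.
    for rect in find_faces(img, face_model):
        shape = detect_marks(img, landmark_model, rect)
        for i, (p1, p2) in enumerate(outer_points):
            d_outer[i] = shape[p2][1] - shape[p1][1]
        for i, (p1, p2) in enumerate(inner_points):
            d_inner[i] = shape[p2][1] - shape[p1][1]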
Example #4
def find_distance(img):
    rects = find_faces(img, face_model)
    shape = None
    try:
        for rect in rects:
            shape = detect_marks(img, landmark_model, rect)
            draw_marks(img, shape)
        if shape is None:
            return  # no face in this frame; keep the existing baselines
        # Accumulate 100 identical samples and average them; this reduces
        # to a single closed-mouth baseline measurement per lip-point pair.
        for _ in range(100):
            for i, (p1, p2) in enumerate(outer_points):
                d_outer[i] += shape[p2][1] - shape[p1][1]
            for i, (p1, p2) in enumerate(inner_points):
                d_inner[i] += shape[p2][1] - shape[p1][1]
        d_outer[:] = [x / 100 for x in d_outer]
        d_inner[:] = [x / 100 for x in d_inner]
    except Exception:
        print('Exception caught')
Example #5

# 3D model points.
model_points = np.array([
    (0.0, 0.0, 0.0),           # Nose tip
    (0.0, -330.0, -65.0),      # Chin
    (-225.0, 170.0, -135.0),   # Left eye left corner
    (225.0, 170.0, -135.0),    # Right eye right corner
    (-150.0, -150.0, -125.0),  # Left Mouth corner
    (150.0, -150.0, -125.0)    # Right mouth corner
])

# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
    dtype="double")
while True:
    ret, img = cap.read()
    if ret:
        faces = find_faces(img, face_model)
        for face in faces:
            marks = detect_marks(img, landmark_model, face)
            # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)
Example #6
import cv2
from face_detector import get_face_detector, find_faces
from face_landmarks import get_landmark_model, detect_marks, draw_marks

face_model = get_face_detector()
landmark_model = get_landmark_model()
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
d_outer = [0] * 5
inner_points = [[61, 67], [62, 66], [63, 65]]
d_inner = [0] * 3
font = cv2.FONT_HERSHEY_SIMPLEX
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    if not ret:
        continue
    rects = find_faces(img, face_model)
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
        cv2.putText(img, 'Press r to record Mouth distances', (30, 30), font,
                    1, (0, 255, 255), 2)
        cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('r'):
        for _ in range(100):
            for i, (p1, p2) in enumerate(outer_points):
                d_outer[i] += shape[p2][1] - shape[p1][1]
            for i, (p1, p2) in enumerate(inner_points):
                d_inner[i] += shape[p2][1] - shape[p1][1]
        break
cv2.destroyAllWindows()
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
mouth_open = 0
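Note that the recording loop above adds the same frame's lip measurements 100 times and then divides by 100, so it reduces to a single-frame baseline. A variant that actually averages 100 distinct frames (same globals and helpers as above) would be:

n = 0
while n < 100:
    ret, img = cap.read()
    if not ret:
        continue
    rects = find_faces(img, face_model)
    if not rects:
        continue
    shape = detect_marks(img, landmark_model, rects[0])
    for i, (p1, p2) in enumerate(outer_points):
        d_outer[i] += shape[p2][1] - shape[p1][1]
    for i, (p1, p2) in enumerate(inner_points):
        d_inner[i] += shape[p2][1] - shape[p1][1]
    n += 1
d_outer[:] = [x / n for x in d_outer]
d_inner[:] = [x / n for x in d_inner]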
Example #7
def main(debug=False):
    # Load face detect model & face landmark model
    face_model = face_detector.get_face_detector()
    landmark_model = face_landmarks.get_landmark_model()

    cv2.namedWindow('debug_window')

    # Create a camera instance for frame capture; on Windows, cv2.CAP_DSHOW speeds up camera initialization.
    if platform.system().lower() == 'windows':
        camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    else:
        camera = cv2.VideoCapture(0)

    _, sample_img = camera.read()  # grab one frame to read the resolution
    height, width, channel = sample_img.shape  # rows, columns, color channels (3 = BGR)

    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),             # Nose tip
        (0.0, -330.0, -65.0),        # Chin
        (-225.0, 170.0, -135.0),     # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corner
        (-150.0, -150.0, -125.0),    # Left Mouth corner
        (150.0, -150.0, -125.0)      # Right mouth corner
    ])

    # Camera internals
    focal_length = width
    center = (width/2, height/2)
    camera_matrix = np.array([
        [focal_length, 0, center[0]],
        [0, focal_length, center[1]],
        [0, 0, 1]
    ], dtype='double')

    # Lens distance coefficient: assuming no lens distortion
    dist_coefficient = np.zeros((4, 1))

    while True:
        # start time stamp this frame
        time_start = datetime.now()

        # 1. capture image (1 frame)
        ret, frame = camera.read()
        if ret:
            # 2. detect faces
            faces = face_detector.find_faces(frame, face_model)
            if faces:
                if debug:
                    face_detector.draw_faces(frame, faces)

                for face in faces:

                    # 3. detect face landmarks
                    try:
                        landmarks = face_landmarks.detect_marks(frame, landmark_model, face)
                    except cv2.error:
                        # Skip this round if failed to detect landmarks.
                        break

                    if debug:
                        face_landmarks.draw_marks(frame, landmarks, color=(0, 255, 0))

                    frame_points = np.array([
                        landmarks[30],  # Nose tip
                        landmarks[8],   # Chin
                        landmarks[36],  # Left eye left corner
                        landmarks[45],  # Right eye right corner
                        landmarks[48],  # Left Mouth corner
                        landmarks[54]   # Right mouth corner
                    ], dtype='double')

                    # 4. get rotation & translation vector
                    success, rotation_vector, translation_vector = cv2.solvePnP(
                        model_points, frame_points, camera_matrix, dist_coefficient, flags=cv2.SOLVEPNP_UPNP
                    )

                    if debug:
                        print(' ' * 13, 'solvePnP:',
                              success, rotation_vector.tolist(), translation_vector.tolist(), end='\r')
                    # TODO: calculate iris, mouth and head's roll pitch yaw
                    # TODO: create socket client, send calculated data to unity

        cv2.imshow('debug_window', frame)

        # end time stamp of this frame
        time_delta = datetime.now() - time_start
        # calculate frames per second from the full elapsed time
        # (time_delta.microseconds alone is wrong for frames slower than 1 s)
        fps = 1.0 / time_delta.total_seconds()
        print(' FPS:', fps, end='\r')

        if cv2.waitKey(1) & 0xFF in (27, ord('q')):
            break

    camera.release()
    cv2.destroyAllWindows()  # the debug window was created unconditionally
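The TODO in main still needs the head's roll, pitch, and yaw. One common way to obtain them (a sketch, not part of the original code) is to convert the solvePnP rotation vector into a rotation matrix with cv2.Rodrigues and decompose it into Euler angles:

import math
import cv2

def rotation_vector_to_euler(rotation_vector):
    # Convert a solvePnP rotation vector to (pitch, yaw, roll) in degrees.
    rmat, _ = cv2.Rodrigues(rotation_vector)
    sy = math.sqrt(rmat[0, 0] ** 2 + rmat[1, 0] ** 2)
    if sy > 1e-6:  # regular case
        pitch = math.atan2(rmat[2, 1], rmat[2, 2])
        yaw = math.atan2(-rmat[2, 0], sy)
        roll = math.atan2(rmat[1, 0], rmat[0, 0])
    else:          # gimbal lock
        pitch = math.atan2(-rmat[1, 2], rmat[1, 1])
        yaw = math.atan2(-rmat[2, 0], sy)
        roll = 0.0
    return tuple(math.degrees(a) for a in (pitch, yaw, roll))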
Example #8
def head_pose(img):
    faces = find_faces(img, face_model)
    size = img.shape
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")
    direction = 0
    for face in faces:
        marks = detect_marks(img, landmark_model, face)
        # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
        image_points = np.array(
            [
                marks[30],  # Nose tip
                marks[8],  # Chin
                marks[36],  # Left eye left corner
                marks[45],  # Right eye right corner
                marks[48],  # Left Mouth corner
                marks[54]  # Right mouth corner
            ],
            dtype="double")
        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector,
         translation_vector) = cv2.solvePnP(model_points,
                                            image_points,
                                            camera_matrix,
                                            dist_coeffs,
                                            flags=cv2.SOLVEPNP_UPNP)

        # Project a 3D point (0, 0, 1000.0) onto the image plane.
        # We use this to draw a line sticking out of the nose

        (nose_end_point2D,
         jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                       rotation_vector, translation_vector,
                                       camera_matrix, dist_coeffs)

        for p in image_points:
            cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        x1, x2 = head_pose_points(img, rotation_vector, translation_vector,
                                  camera_matrix)

        cv2.line(img, p1, p2, (0, 255, 255), 2)
        cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)
        # for (x, y) in marks:
        #     cv2.circle(img, (x, y), 4, (255, 255, 0), -1)
        # cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)
        try:
            m = (p2[1] - p1[1]) / (p2[0] - p1[0])
            ang1 = int(math.degrees(math.atan(m)))
        except ZeroDivisionError:  # vertical nose line
            ang1 = 90

        try:
            m = (x2[1] - x1[1]) / (x2[0] - x1[0])
            ang2 = int(math.degrees(math.atan(-1 / m)))
        except ZeroDivisionError:  # vertical or horizontal line
            ang2 = 90

        if ang1 >= 48:
            direction = 1
            # print('Head down')
            # cv2.putText(img, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
        elif ang1 <= -48:
            direction = 2
            # print('Head up')
            # cv2.putText(img, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)

        if ang2 >= 48:
            direction = 3
            # print('Head right')
            # cv2.putText(img, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)
        elif ang2 <= -48:
            direction = 4
            # print('Head left')
            # cv2.putText(img, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)

    return direction
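To see the thresholds in action: ang1 is the slope angle of the projected nose line, and ang2 uses the perpendicular slope -1/m of the head_pose_points line. A worked example with illustrative coordinates:

import math

p1, p2 = (100, 100), (110, 140)         # illustrative nose-line endpoints
m = (p2[1] - p1[1]) / (p2[0] - p1[0])   # slope m = 4.0
ang1 = int(math.degrees(math.atan(m)))  # atan(4) = 75.96 deg, truncated to 75
# 75 >= 48, so this frame would be classified as direction 1 ("head down").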
Example #9
# 3D model points.
model_points = np.array([
    (0.0, 0.0, 0.0),           # Nose tip
    (0.0, -330.0, -65.0),      # Chin
    (-225.0, 170.0, -135.0),   # Left eye left corner
    (225.0, 170.0, -135.0),    # Right eye right corner
    (-150.0, -150.0, -125.0),  # Left Mouth corner
    (150.0, -150.0, -125.0)    # Right mouth corner
])

# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
    dtype="double")
faces = find_faces(imagereal, face_model)
for face in faces:
    marks = detect_marks(imagereal, landmark_model, face)
    for _ in range(100):
        for i, (p1, p2) in enumerate(outer_points):
            d_outer[i] += marks[p2][1] - marks[p1][1]
        for i, (p1, p2) in enumerate(inner_points):
            d_inner[i] += marks[p2][1] - marks[p1][1]
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
tc = 0  # face spoof count
mouth_open = 0
speeking_count = 0
speech_checking = 0
BUFFER_SIZE = 2048
CHANNELS = 1
FORMAT = pyaudio.paFloat32
METHOD = "default"
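The audio constants above suggest a microphone stream for the speaking check. A minimal sketch of opening such a stream with PyAudio (the 16 kHz sample rate is an assumption; the original rate is not shown in this snippet):

import pyaudio

RATE = 16000  # assumed sample rate, not specified above
pa = pyaudio.PyAudio()
stream = pa.open(format=FORMAT,           # pyaudio.paFloat32
                 channels=CHANNELS,       # mono
                 rate=RATE,
                 input=True,
                 frames_per_buffer=BUFFER_SIZE)
data = stream.read(BUFFER_SIZE)           # one buffer of raw float32 samples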
Example #10
def get_head_position(file_name):
    face_model = get_face_detector()
    landmark_model = get_landmark_model()
    cap = cv2.VideoCapture(file_name)
    ret, img = cap.read()
    size = img.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    # Camera internals
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    head_directions = [0] * 4  # down, up, right, left
    while True:
        ret, img = cap.read()
        if ret:
            faces = find_faces(img, face_model)
            for face in faces:
                marks = detect_marks(img, landmark_model, face)
                # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
                image_points = np.array(
                    [
                        marks[30],  # Nose tip
                        marks[8],  # Chin
                        marks[36],  # Left eye left corner
                        marks[45],  # Right eye right corner
                        marks[48],  # Left Mouth corner
                        marks[54]  # Right mouth corner
                    ],
                    dtype="double")
                dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
                (success, rotation_vector,
                 translation_vector) = cv2.solvePnP(model_points,
                                                    image_points,
                                                    camera_matrix,
                                                    dist_coeffs,
                                                    flags=cv2.SOLVEPNP_UPNP)

                # Project a 3D point (0, 0, 1000.0) onto the image plane.
                # We use this to draw a line sticking out of the nose

                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([
                     (0.0, 0.0, 1000.0)
                 ]), rotation_vector, translation_vector, camera_matrix,
                                               dist_coeffs)

                for p in image_points:
                    cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

                p1 = (int(image_points[0][0]), int(image_points[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]),
                      int(nose_end_point2D[0][0][1]))
                x1, x2 = head_pose_points(img, rotation_vector,
                                          translation_vector, camera_matrix)

                cv2.line(img, p1, p2, (0, 255, 255), 2)
                cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)
                # for (x, y) in marks:
                #     cv2.circle(img, (x, y), 4, (255, 255, 0), -1)
                # cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)
                try:
                    m = (p2[1] - p1[1]) / (p2[0] - p1[0])
                    ang1 = int(math.degrees(math.atan(m)))
                except ZeroDivisionError:  # vertical nose line
                    ang1 = 90

                try:
                    m = (x2[1] - x1[1]) / (x2[0] - x1[0])
                    ang2 = int(math.degrees(math.atan(-1 / m)))
                except ZeroDivisionError:  # vertical or horizontal line
                    ang2 = 90

                if ang1 >= 48:
                    head_directions[0] += 1
                    # print('Head down')
                    # cv2.putText(img, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
                elif ang1 <= -48:
                    head_directions[1] += 1
                    # print('Head up')
                    # cv2.putText(img, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)

                if ang2 >= 48:
                    head_directions[2] += 1
                    # print('Head right')
                    # cv2.putText(img, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)
                elif ang2 <= -48:
                    head_directions[3] += 1
                    # print('Head left')
                    # cv2.putText(img, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)

            #     cv2.putText(img, str(ang1), tuple(p1), font, 2, (128, 255, 255), 3)
            #     cv2.putText(img, str(ang2), tuple(x1), font, 2, (255, 255, 128), 3)
            # cv2.imshow('img', img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cv2.destroyAllWindows()
    cap.release()
    return head_directions
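# Usage sketch for get_head_position (the video file name is hypothetical):
# the returned list counts frames classified as [down, up, right, left].
down, up, right, left = get_head_position('interview_recording.mp4')
print('head-direction frame counts:', down, up, right, left)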
def get_frame(imgData):
    nparr = np.frombuffer(base64.b64decode(imgData), np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)  # IMREAD flag, not a cvtColor code
    ret = True

    size = image.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (320, 320))
    img = img.astype(np.float32)
    img = np.expand_dims(img, 0)
    img = img / 255
    class_names = [c.strip() for c in open("models/classes.TXT").readlines()]
    boxes, scores, classes, nums = yolo(img)
    count = 0
    mob_status = 0
    person_status = 0
    for i in range(nums[0]):
        if int(classes[0][i]) == 0:   # COCO class 0 = person
            count += 1
        if int(classes[0][i]) == 67:  # COCO class 67 = cell phone
            print('Mobile phone detected')
            mob_status = 1            # latch: any phone detection sets the flag
    print(mob_status)

    if count == 0:
        print('No person detected')
        person_status = 1
    elif count > 1:
        print('More than one person detected')
        person_status = 2
    else:
        print('Normal')
        person_status = 0

    image = draw_outputs(image, (boxes, scores, classes, nums), class_names)

    user_move1 = ""
    user_move2 = ""
    if ret:
        faces = find_faces(image, face_model)
        for face in faces:
            marks = detect_marks(image, landmark_model, face)
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)

            (nose_end_point2D,
             jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                           rotation_vector, translation_vector,
                                           camera_matrix, dist_coeffs)

            for p in image_points:
                cv2.circle(image, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            p1 = (int(image_points[0][0]), int(image_points[0][1]))
            p2 = (int(nose_end_point2D[0][0][0]),
                  int(nose_end_point2D[0][0][1]))
            x1, x2 = head_pose_points(image, rotation_vector,
                                      translation_vector, camera_matrix)

            try:
                m = (p2[1] - p1[1]) / (p2[0] - p1[0])
                ang1 = int(math.degrees(math.atan(m)))
            except ZeroDivisionError:
                ang1 = 90

            try:
                m = (x2[1] - x1[1]) / (x2[0] - x1[0])
                ang2 = int(math.degrees(math.atan(-1 / m)))
            except ZeroDivisionError:
                ang2 = 90

            if ang1 >= 48:
                user_move1 = 2
                print('Head down')
            elif ang1 <= -48:
                user_move1 = 1
                print('Head up')
            else:
                user_move1 = 0

            if ang2 >= 48:
                print('Head right')
                user_move2 = 4
            elif ang2 <= -48:
                print('Head left')
                user_move2 = 3
            else:
                user_move2 = 0

    ret, jpeg = cv2.imencode('.jpg', image)
    jpg_as_text = base64.b64encode(jpeg)

    gaze.refresh(image)

    frame = gaze.annotated_frame()
    eye_movements = ""

    if gaze.is_blinking():
        eye_movements = 1
        print("Blinking")
    elif gaze.is_right():
        eye_movements = 4
        print("Looking right")
    elif gaze.is_left():
        eye_movements = 3
        print("Looking left")
    elif gaze.is_center():
        eye_movements = 2
        print("Looking center")
    else:
        eye_movements = 0
        print("Not found!")
    print(eye_movements)

    proctorDict = dict()
    proctorDict['jpg_as_text'] = jpg_as_text
    proctorDict['mob_status'] = mob_status
    proctorDict['person_status'] = person_status
    proctorDict['user_move1'] = user_move1
    proctorDict['user_move2'] = user_move2
    proctorDict['eye_movements'] = eye_movements

    return proctorDict
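# Usage sketch for get_frame (the image file name is hypothetical): it takes a
# base64-encoded frame and returns a dict of proctoring signals; it relies on
# the module-level yolo, gaze, face_model, and landmark_model objects.
import base64

with open('frame.jpg', 'rb') as f:
    result = get_frame(base64.b64encode(f.read()))
print(result['person_status'], result['user_move1'], result['eye_movements'])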

def nothing(x):
    pass


cv2.namedWindow('image')  # the trackbar needs an existing window to attach to
cv2.createTrackbar('threshold', 'image', 75, 255, nothing)

j = 0
jc = 0
while True:
    ret, img2 = cap.read()
    if ret:
        faces = find_faces(img2, face_model)
        for face in faces:
            marks = detect_marks(img2, landmark_model, face)
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)