Example #1
def mouth_opening_detection(img):
    rects = find_faces(img, face_model)
    isOpen = 0
    try:
        for rect in rects:
            shape = detect_marks(img, landmark_model, rect)
            cnt_outer = 0
            cnt_inner = 0
            draw_marks(img, shape[48:])
            for i, (p1, p2) in enumerate(outer_points):
                if d_outer[i] + 3 < shape[p2][1] - shape[p1][1]:
                    cnt_outer += 1
            for i, (p1, p2) in enumerate(inner_points):
                if d_inner[i] + 2 < shape[p2][1] - shape[p1][1]:
                    cnt_inner += 1
            if cnt_outer > 3 and cnt_inner > 2:
                isOpen = 1
                # print('Mouth open')
                # cv2.putText(img, 'Mouth open', (30, 30), font,
                #         1, (0, 255, 255), 2)
            else:
                isOpen = 0
            # show the output image with the face detections + facial landmarks
        # cv2.imshow("Output", img)
        return isOpen
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
    except Exception:
        print('Exception caught')
Example #2
def analyze_picture(model_gender, path, window_size, window_name='static'):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    image = cv2.imread(path, 1)
    for normalized_face, (x, y, w, h) in find_faces(image):
        gender_prediction = model_gender.predict(normalized_face)
        if gender_prediction[0] == 0:
            label, color = "Female", (0, 0, 255)
        else:
            label, color = "Male", (255, 0, 0)
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        cv2.putText(image, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow(window_name, image)
    key = cv2.waitKey(0)
    if key == ESC:
        cv2.destroyWindow(window_name)
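
A hypothetical invocation of analyze_picture, loading the gender model the same way Examples #5 and #6 do (the image path is an assumption):

model_gender = cv2.face.FisherFaceRecognizer_create()
model_gender.read('models/gender_classifier_model.xml')
analyze_picture(model_gender, 'test.jpg', (1280, 720))  # path is hypothetical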
Example #3
def initialize_mouth_model(img, ret, face_model, landmark_model):
    if not ret:
        return
    rects = find_faces(img, face_model)
    shape = None
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
    return img, shape
Example #4
def predict():
    if request.method == "POST":
        flag = 0
        r = request.data.decode('utf-8')
        _, encoded = r.split(",", 1)  # drop the data-URL prefix (e.g. "data:image/png;base64,")

        imgdata = base64.b64decode(encoded)
        im_arr = np.frombuffer(imgdata, dtype=np.uint8)  # im_arr is one-dim Numpy array
        img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)

        print(img.shape)

        # detector = dlib.get_frontal_face_detector()
        face_model = get_face_detector()
        predictor = dlib.shape_predictor('shape_68.dat')

        left = [36, 37, 38, 39, 40, 41]
        right = [42, 43, 44, 45, 46, 47]

        kernel = np.ones((9, 9), np.uint8)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        rects = find_faces(img, face_model)
        # rects = detector(gray, 1)

        for rect in rects:
            # print(type(rect[0]))
            rec = dlib.rectangle(int(rect[0]), int(rect[1]), int(rect[2]), int(rect[3]))
            shape = predictor(gray, rec)
            shape = shape_to_np(shape)
            mask = np.zeros(img.shape[:2], dtype=np.uint8)
            mask, end_points_left = eye_on_mask(mask, left, shape)
            mask, end_points_right = eye_on_mask(mask, right, shape)
            mask = cv2.dilate(mask, kernel, iterations=5)  # enlarge the eye regions
            eyes = cv2.bitwise_and(img, img, mask=mask)
            mask = (eyes == [0, 0, 0]).all(axis=2)
            eyes[mask] = [255, 255, 255]
            mid = (shape[42][0] + shape[39][0]) // 2
            eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
            # threshold = cv2.getTrackbarPos('threshold', 'image')
            _, thresh = cv2.threshold(eyes_gray, 90, 255, cv2.THRESH_BINARY)
            thresh = process_thresh(thresh)
            eyeball_pos_left = contouring(thresh[:, 0:mid], mid, end_points_left)
            eyeball_pos_right = contouring(thresh[:, mid:], mid, end_points_right, True)

            flag = print_eye_pos(eyeball_pos_left, eyeball_pos_right)
            op = dict()
            op['flag'] = json.dumps(flag)

            return jsonify(op)
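
predict() above pulls a base64-encoded frame out of a Flask POST body and returns the eye-position flag as JSON. A minimal sketch of the wiring it assumes (the app object, route path, and port are assumptions, not shown in the original):

from flask import Flask, request, jsonify

app = Flask(__name__)
app.add_url_rule('/predict', 'predict', predict, methods=['POST'])  # route path is hypothetical

if __name__ == '__main__':
    app.run(port=5000)  # port is an assumption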
Example #5
    def web(self):
        model_gender = cv2.face.FisherFaceRecognizer_create()
        model_gender.read('models/gender_classifier_model.xml')
        window_size = (1280, 720)
        window_name = 'live'
        update_time = 1
        cv2.namedWindow(window_name, WINDOW_NORMAL)
        if window_size:
            width, height = window_size
            cv2.resizeWindow(window_name, width, height)

        video_feed = cv2.VideoCapture(0)
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        read_value, webcam_image = video_feed.read()

        delay = 0
        init = True
        while read_value:
            read_value, webcam_image = video_feed.read()
            for normalized_face, (x, y, w, h) in find_faces(webcam_image):
                if init or delay == 0:
                    init = False
                    gender_prediction = model_gender.predict(normalized_face)
                if gender_prediction[0] == 0:
                    label, color = "Female", (0, 0, 255)
                else:
                    label, color = "Male", (255, 0, 0)
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h), color, 2)
                cv2.putText(webcam_image, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                            cv2.LINE_AA)
            delay += 1
            delay %= 20
            cv2.imshow(window_name, webcam_image)
            key = cv2.waitKey(update_time)
            if key == ESC:
                break
        cv2.destroyWindow(window_name)
Example #6
    def open(self):
        root = Tk()
        root.withdraw()
        root.filename = filedialog.askopenfilename(
            initialdir=
            "C:\\Users\\Salman\\Documents\\gender_rec_opencv\\test_sample",
            title="Select file",
            filetypes=(("Image File", "*.jpg"), ("all files", "*.*")))
        name = root.filename

        fisher_face_gender = cv2.face.FisherFaceRecognizer_create()
        fisher_face_gender.read('models/gender_classifier_model.xml')
        run_loop = True
        if os.path.isfile(name):
            print('true')
            image = cv2.imread(name, 1)

            image1 = cv2.imread(name, 1)
            image1 = cv2.resize(image1, (361, 301))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            height2, width2, channel2 = image1.shape
            step2 = channel2 * width2
            qImg2 = QImage(image1.data, width2, height2, step2,
                           QImage.Format_RGB888)
            self.label.setPixmap(QPixmap.fromImage(qImg2))

            for normalized_face, (x, y, w, h) in find_faces(image):
                gender_prediction = fisher_face_gender.predict(normalized_face)
                if gender_prediction[0] == 0:
                    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255),
                                  2)
                    self.textBrowser.setText("Female")
                else:
                    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)
                    self.textBrowser.setText("Male")

            key = cv2.waitKey(0)
            if key == ESC:
                cv2.destroyAllWindows()

        else:
            print('false')
Example #7
def find_distance(img):
    while True:
        rects = find_faces(img, face_model)
        try:
            for rect in rects:
                shape = detect_marks(img, landmark_model, rect)
                draw_marks(img, shape)
                # cv2.putText(img, 'Press r to record Mouth distances', (30, 30), font,
                #             1, (0, 255, 255), 2)
                # cv2.imshow("Output", img)
            # if cv2.waitKey(1) & 0xFF == ord('r'):
            for _ in range(100):
                for i, (p1, p2) in enumerate(outer_points):
                    d_outer[i] += shape[p2][1] - shape[p1][1]
                for i, (p1, p2) in enumerate(inner_points):
                    d_inner[i] += shape[p2][1] - shape[p1][1]
            d_outer[:] = [x / 100 for x in d_outer]
            d_inner[:] = [x / 100 for x in d_inner]
            break
        except Exception:
            print('Exception caught')
Example #8
def run_mouth_open(img, ret, face_model, landmark_model, font, outer_points,
                   d_outer, inner_points, d_inner):

    if not ret:
        return

    rects = find_faces(img, face_model)

    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        cnt_outer = 0
        cnt_inner = 0
        for i, (p1, p2) in enumerate(outer_points):
            if d_outer[i] + 3 < shape[p2][1] - shape[p1][1]:
                cnt_outer += 1
        for i, (p1, p2) in enumerate(inner_points):
            if d_inner[i] + 2 < shape[p2][1] - shape[p1][1]:
                cnt_inner += 1
        if cnt_outer > 3 and cnt_inner > 2:
            return True
    return False
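
run_mouth_open() consumes the calibrated baselines that Example #13 produces. A minimal sketch tying the two together on a webcam stream (all names come from the surrounding examples; the calibration loop itself is Example #13):

cap = cv2.VideoCapture(0)
# ... run Example #13's calibration loop first to fill d_outer / d_inner ...
while True:
    ret, img = cap.read()
    if not ret:
        break
    if run_mouth_open(img, ret, face_model, landmark_model, font,
                      outer_points, d_outer, inner_points, d_inner):
        print('Mouth open')
    cv2.imshow('Output', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()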
Example #9
def extract_faces(genders):
    print("Extracting faces")
    if not os.path.exists('../data'):
        os.makedirs('../data')
    if not os.path.exists('../data/gender'):
        os.makedirs('../data/gender')
    for gender in genders:
        images = glob.glob('../data/raw_gender/%s/*.jpg' % gender)

        if not os.path.exists('../data/gender/%s' % gender):
            os.makedirs('../data/gender/%s' % gender)
        for file_number, image in enumerate(images):
            frame = cv2.imread(image)
            faces = find_faces(frame)

            for face in faces:
                try:
                    # multiple faces in one image share the same output file name
                    cv2.imwrite("../data/gender/%s/%s.jpg" % (gender, (file_number + 1)), face[0])
                except Exception:
                    print("Error in processing %s" % image)

    print("Face extraction finished")
Example #10
def head_pose(img):
    faces = find_faces(img, face_model)
    size = img.shape
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")
    direction = 0
    for face in faces:
        marks = detect_marks(img, landmark_model, face)
        # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
        image_points = np.array(
            [
                marks[30],  # Nose tip
                marks[8],  # Chin
                marks[36],  # Left eye left corner
                marks[45],  # Right eye right corner
                marks[48],  # Left Mouth corner
                marks[54]  # Right mouth corner
            ],
            dtype="double")
        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector,
         translation_vector) = cv2.solvePnP(model_points,
                                            image_points,
                                            camera_matrix,
                                            dist_coeffs,
                                            flags=cv2.SOLVEPNP_UPNP)

        # Project a 3D point (0, 0, 1000.0) onto the image plane.
        # We use this to draw a line sticking out of the nose

        (nose_end_point2D,
         jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                       rotation_vector, translation_vector,
                                       camera_matrix, dist_coeffs)

        for p in image_points:
            cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        x1, x2 = head_pose_points(img, rotation_vector, translation_vector,
                                  camera_matrix)

        cv2.line(img, p1, p2, (0, 255, 255), 2)
        cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)
        # for (x, y) in marks:
        #     cv2.circle(img, (x, y), 4, (255, 255, 0), -1)
        # cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)
        try:
            m = (p2[1] - p1[1]) / (p2[0] - p1[0])
            ang1 = int(math.degrees(math.atan(m)))
        except ZeroDivisionError:
            ang1 = 90  # vertical line

        try:
            m = (x2[1] - x1[1]) / (x2[0] - x1[0])
            ang2 = int(math.degrees(math.atan(-1 / m)))
        except ZeroDivisionError:
            ang2 = 90  # division by zero

        if ang1 >= 48:
            direction = 1
            # print('Head down')
            # cv2.putText(img, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
        elif ang1 <= -48:
            direction = 2
            # print('Head up')
            # cv2.putText(img, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)

        if ang2 >= 48:
            direction = 3
            # print('Head right')
            # cv2.putText(img, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)
        elif ang2 <= -48:
            direction = 4
            # print('Head left')
            # cv2.putText(img, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)

    return direction
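
The try/except blocks above turn the slope of a 2D line into an angle in degrees, treating a division by zero as 90. Since the same pattern recurs in Examples #10, #12, and #14, one way to factor it out is a small helper (a refactoring sketch, not part of the original code):

import math

def line_angle(p1, p2, perpendicular=False):
    """Angle of the line p1 -> p2 in degrees; 90 on division by zero.
    With perpendicular=True, return the angle of the normal instead,
    mirroring the ang2 computation above."""
    try:
        m = (p2[1] - p1[1]) / (p2[0] - p1[0])
        if perpendicular:
            return int(math.degrees(math.atan(-1 / m)))
        return int(math.degrees(math.atan(m)))
    except ZeroDivisionError:
        return 90

# e.g. ang1 = line_angle(p1, p2); ang2 = line_angle(x1, x2, perpendicular=True)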
Example #11
model_points = np.array([
    (0.0, 0.0, 0.0),  # Nose tip
    (0.0, -330.0, -65.0),  # Chin
    (-225.0, 170.0, -135.0),  # Left eye left corner
    (225.0, 170.0, -135.0),  # Right eye right corner
    (-150.0, -150.0, -125.0),  # Left Mouth corner
    (150.0, -150.0, -125.0)  # Right mouth corner
])

# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
    dtype="double")
while True:
    ret, img = cap.read()
    if ret:
        faces = find_faces(img, face_model)
        for face in faces:
            marks = detect_marks(img, landmark_model, face)
            # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)
Example #12
def get_frame(imgData):
    nparr = np.frombuffer(base64.b64decode(imgData), np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    ret = True

    size = image.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (320, 320))
    img = img.astype(np.float32)
    img = np.expand_dims(img, 0)
    img = img / 255
    class_names = [c.strip() for c in open("models/classes.TXT").readlines()]
    boxes, scores, classes, nums = yolo(img)
    count = 0
    mob_status = ""
    person_status = ""
    for i in range(nums[0]):
        if int(classes[0][i]) == 0:  # COCO class 0: person
            count += 1
        if int(classes[0][i]) == 67:  # COCO class 67: cell phone
            print('Mobile phone detected')
            mob_status = 1
        else:
            print('No mobile phone detected')
            mob_status = 0
        print(mob_status)

    if count == 0:
        print('No person detected')
        person_status = 1
    elif count > 1:
        print('More than one person detected')
        person_status = 2
    else:
        print('Normal')
        person_status = 0

    image = draw_outputs(image, (boxes, scores, classes, nums), class_names)

    user_move1 = ""
    user_move2 = ""
    if ret:
        faces = find_faces(image, face_model)
        for face in faces:
            marks = detect_marks(image, landmark_model, face)
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)

            (nose_end_point2D,
             jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                           rotation_vector, translation_vector,
                                           camera_matrix, dist_coeffs)

            for p in image_points:
                cv2.circle(image, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            p1 = (int(image_points[0][0]), int(image_points[0][1]))
            p2 = (int(nose_end_point2D[0][0][0]),
                  int(nose_end_point2D[0][0][1]))
            x1, x2 = head_pose_points(image, rotation_vector,
                                      translation_vector, camera_matrix)

            try:
                m = (p2[1] - p1[1]) / (p2[0] - p1[0])
                ang1 = int(math.degrees(math.atan(m)))
            except ZeroDivisionError:
                ang1 = 90  # vertical line

            try:
                m = (x2[1] - x1[1]) / (x2[0] - x1[0])
                ang2 = int(math.degrees(math.atan(-1 / m)))
            except ZeroDivisionError:
                ang2 = 90  # division by zero

            if ang1 >= 48:
                user_move1 = 2
                print('Head down')
            elif ang1 <= -48:
                user_move1 = 1
                print('Head up')
            else:
                user_move1 = 0

            if ang2 >= 48:
                print('Head right')
                user_move2 = 4
            elif ang2 <= -48:
                print('Head left')
                user_move2 = 3
            else:
                user_move2 = 0

    ret, jpeg = cv2.imencode('.jpg', image)
    jpg_as_text = base64.b64encode(jpeg).decode('utf-8')  # str keeps the dict JSON-serializable

    gaze.refresh(image)

    frame = gaze.annotated_frame()
    eye_movements = ""

    if gaze.is_blinking():
        eye_movements = 1
        print("Blinking")
    elif gaze.is_right():
        eye_movements = 4
        print("Looking right")
    elif gaze.is_left():
        eye_movements = 3
        print("Looking left")
    elif gaze.is_center():
        eye_movements = 2
        print("Looking center")
    else:
        eye_movements = 0
        print("Not found!")
    print(eye_movements)

    proctorDict = dict()
    proctorDict['jpg_as_text'] = jpg_as_text
    proctorDict['mob_status'] = mob_status
    proctorDict['person_status'] = person_status
    proctorDict['user_move1'] = user_move1
    proctorDict['user_move2'] = user_move2
    proctorDict['eye_movements'] = eye_movements

    return proctorDict
Example #13
import cv2
from face_detector import get_face_detector, find_faces
from face_landmarks import get_landmark_model, detect_marks, draw_marks

face_model = get_face_detector()
landmark_model = get_landmark_model()
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
d_outer = [0] * 5
inner_points = [[61, 67], [62, 66], [63, 65]]
d_inner = [0] * 3
font = cv2.FONT_HERSHEY_SIMPLEX
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    rects = find_faces(img, face_model)
    for rect in rects:
        shape = detect_marks(img, landmark_model, rect)
        draw_marks(img, shape)
        cv2.putText(img, 'Press r to record Mouth distances', (30, 30), font,
                    1, (0, 255, 255), 2)
        cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('r'):
        for _ in range(100):
            for i, (p1, p2) in enumerate(outer_points):
                d_outer[i] += shape[p2][1] - shape[p1][1]
            for i, (p1, p2) in enumerate(inner_points):
                d_inner[i] += shape[p2][1] - shape[p1][1]
        break
cv2.destroyAllWindows()
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
Example #14
def get_head_position(File_name):
    face_model = get_face_detector()
    landmark_model = get_landmark_model()
    cap = cv2.VideoCapture(File_name)
    ret, img = cap.read()
    size = img.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    # Camera internals
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    head_directions = [0] * 4  # down, up, right, left
    while True:
        ret, img = cap.read()
        if ret:
            faces = find_faces(img, face_model)
            for face in faces:
                marks = detect_marks(img, landmark_model, face)
                # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
                image_points = np.array(
                    [
                        marks[30],  # Nose tip
                        marks[8],  # Chin
                        marks[36],  # Left eye left corner
                        marks[45],  # Right eye right corner
                        marks[48],  # Left Mouth corner
                        marks[54]  # Right mouth corner
                    ],
                    dtype="double")
                dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
                (success, rotation_vector,
                 translation_vector) = cv2.solvePnP(model_points,
                                                    image_points,
                                                    camera_matrix,
                                                    dist_coeffs,
                                                    flags=cv2.SOLVEPNP_UPNP)

                # Project a 3D point (0, 0, 1000.0) onto the image plane.
                # We use this to draw a line sticking out of the nose

                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([
                     (0.0, 0.0, 1000.0)
                 ]), rotation_vector, translation_vector, camera_matrix,
                                               dist_coeffs)

                for p in image_points:
                    cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

                p1 = (int(image_points[0][0]), int(image_points[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]),
                      int(nose_end_point2D[0][0][1]))
                x1, x2 = head_pose_points(img, rotation_vector,
                                          translation_vector, camera_matrix)

                cv2.line(img, p1, p2, (0, 255, 255), 2)
                cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)
                # for (x, y) in marks:
                #     cv2.circle(img, (x, y), 4, (255, 255, 0), -1)
                # cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)
                try:
                    m = (p2[1] - p1[1]) / (p2[0] - p1[0])
                    ang1 = int(math.degrees(math.atan(m)))
                except ZeroDivisionError:
                    ang1 = 90  # vertical line

                try:
                    m = (x2[1] - x1[1]) / (x2[0] - x1[0])
                    ang2 = int(math.degrees(math.atan(-1 / m)))
                except ZeroDivisionError:
                    ang2 = 90  # division by zero

                if ang1 >= 48:
                    head_directions[0] += 1
                    # print('Head down')
                    # cv2.putText(img, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
                elif ang1 <= -48:
                    head_directions[1] += 1
                    # print('Head up')
                    # cv2.putText(img, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)

                if ang2 >= 48:
                    head_directions[2] += 1
                    # print('Head right')
                    # cv2.putText(img, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)
                elif ang2 <= -48:
                    head_directions[3] += 1
                    # print('Head left')
                    # cv2.putText(img, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)

            #     cv2.putText(img, str(ang1), tuple(p1), font, 2, (128, 255, 255), 3)
            #     cv2.putText(img, str(ang2), tuple(x1), font, 2, (255, 255, 128), 3)
            # cv2.imshow('img', img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cv2.destroyAllWindows()
    cap.release()
    return head_directions
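
A hypothetical invocation; the video file name is an assumption:

down, up, right, left = get_head_position('exam_recording.mp4')  # file name is hypothetical
print('frames looking away:', right + left)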
Example #15
def main(debug=False):
    # Load face detect model & face landmark model
    face_model = face_detector.get_face_detector()
    landmark_model = face_landmarks.get_landmark_model()

    cv2.namedWindow('debug_window')

    # Create camera instance for capture picture,  speed up processing on windows platform by using cv2.CAP_DSHOW.
    if platform.system().lower() == 'windows':
        camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    else:
        camera = cv2.VideoCapture(0)

    _, sample_img = camera.read()  # grab one frame, for getting resolution
    height, width, channel = sample_img.shape  # resolution & color channel (1 or None = GrayScale, 3 = BGR colors)

    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),             # Nose tip
        (0.0, -330.0, -65.0),        # Chin
        (-225.0, 170.0, -135.0),     # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corner
        (-150.0, -150.0, -125.0),    # Left Mouth corner
        (150.0, -150.0, -125.0)      # Right mouth corner
    ])

    # Camera internals
    focal_length = width
    center = (width/2, height/2)
    camera_matrix = np.array([
        [focal_length, 0, center[0]],
        [0, focal_length, center[1]],
        [0, 0, 1]
    ], dtype='double')

    # Lens distance coefficient: assuming no lens distortion
    dist_coefficient = np.zeros((4, 1))

    while True:
        # start time stamp this frame
        time_start = datetime.now()

        # 1. capture image (1 frame)
        ret, frame = camera.read()
        if ret:
            # 2. detect faces
            faces = face_detector.find_faces(frame, face_model)
            if faces:
                if debug:
                    face_detector.draw_faces(frame, faces)

                for face in faces:

                    # 3. detect face landmarks
                    try:
                        landmarks = face_landmarks.detect_marks(frame, landmark_model, face)
                    except cv2.error:
                        # Skip this round if failed to detect landmarks.
                        break

                    if debug:
                        face_landmarks.draw_marks(frame, landmarks, color=(0, 255, 0))

                    frame_points = np.array([
                        landmarks[30],  # Nose tip
                        landmarks[8],   # Chin
                        landmarks[36],  # Left eye left corner
                        landmarks[45],  # Right eye right corner
                        landmarks[48],  # Left Mouth corner
                        landmarks[54]   # Right mouth corner
                    ], dtype='double')

                    # 4. get rotation & translation vector
                    success, rotation_vector, translation_vector = cv2.solvePnP(
                        model_points, frame_points, camera_matrix, dist_coefficient, flags=cv2.SOLVEPNP_UPNP
                    )

                    if debug:
                        print(' ' * 13, 'solvePnP:',
                              success, rotation_vector.tolist(), translation_vector.tolist(), end='\r')
                    # TODO: calculate iris, mouth and head's roll pitch yaw
                    # TODO: create socket client, send calculated data to unity

        cv2.imshow('debug_window', frame)

        # end time stamp of this frame
        time_delta = datetime.now() - time_start
        # calculate frames per second (guard against a zero-length delta)
        fps = 1.0 / max(time_delta.total_seconds(), 1e-6)
        print(' FPS:', fps, end='\r')

        if cv2.waitKey(1) & 0xFF in (27, ord('q')):
            break

    cv2.destroyAllWindows()
    camera.release()
Example #16
ret, img2 = cap.read()
thresh = img2.copy()

cv2.namedWindow('image')
kernel = np.ones((9, 9), np.uint8)


def nothing(x):
    pass


cv2.createTrackbar('threshold', 'image', 75, 255, nothing)

while True:
    ret, img2 = cap.read()
    rects = find_faces(img2, face_model)

    for rect in rects:
        shape = detect_marks(img2, landmark_model, rect)
        mask = np.zeros(img2.shape[:2], dtype=np.uint8)
        mask, end_points_left = eye_on_mask(mask, left, shape)
        mask, end_points_right = eye_on_mask(mask, right, shape)
        mask = cv2.dilate(mask, kernel, iterations=5)  # enlarge the eye regions

        eyes = cv2.bitwise_and(img2, img2, mask=mask)
        mask = (eyes == [0, 0, 0]).all(axis=2)
        eyes[mask] = [255, 255, 255]
        mid = (shape[42][0] + shape[39][0]) // 2
        eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
        threshold = cv2.getTrackbarPos('threshold', 'image')
        _, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
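
        # (Sketch, not in the original fragment: Example #16 stops after
        # thresholding. Mirroring Example #4, the mask is typically cleaned
        # up and handed to the contouring helpers from the same codebase.)
        thresh = process_thresh(thresh)
        eyeball_pos_left = contouring(thresh[:, 0:mid], mid, end_points_left)
        eyeball_pos_right = contouring(thresh[:, mid:], mid, end_points_right, True)

    cv2.imshow('image', thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()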
Example #17
model_points = np.array([
    (0.0, 0.0, 0.0),  # Nose tip
    (0.0, -330.0, -65.0),  # Chin
    (-225.0, 170.0, -135.0),  # Left eye left corner
    (225.0, 170.0, -135.0),  # Right eye right corner
    (-150.0, -150.0, -125.0),  # Left Mouth corner
    (150.0, -150.0, -125.0)  # Right mouth corner
])

# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
    dtype="double")
faces = find_faces(imagereal, face_model)
for face in faces:
    marks = detect_marks(imagereal, landmark_model, face)
    for _ in range(100):
        for i, (p1, p2) in enumerate(outer_points):
            d_outer[i] += marks[p2][1] - marks[p1][1]
        for i, (p1, p2) in enumerate(inner_points):
            d_inner[i] += marks[p2][1] - marks[p1][1]
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
tc = 0  # face spoof count
mouth_open = 0
speaking_count = 0
speech_checking = 0
BUFFER_SIZE = 2048
CHANNELS = 1
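
Example #17 trails off into audio constants; a sketch of the capture loop they suggest, assuming the PyAudio library (sample rate and format are assumptions):

import pyaudio

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=CHANNELS, rate=44100,
                input=True, frames_per_buffer=BUFFER_SIZE)
data = stream.read(BUFFER_SIZE)  # one chunk of microphone samples
stream.stop_stream()
stream.close()
p.terminate()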