def main():

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # grab the indexes of the facial landmarks for the left eye
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]

    # start the video stream thread

    vs = VideoStream(src=0).start()
    fileStream = False

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            # extract the left eye coordinates
            leftEye = shape[lStart:lEnd]

            # compute the convex hull for the left eye, then visualize it
            leftEyeHull = cv2.convexHull(leftEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
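
These snippets all assume the same handful of module-level imports plus an eye_aspect_ratio() helper that none of them define. A minimal sketch of both, following the usual dlib/imutils blink-detection setup (the exact import list in each original project may differ), looks like this:

# Sketch of the imports and EAR helper the examples in this collection assume.
from scipy.spatial import distance as dist
from imutils.video import VideoStream, FileVideoStream
from imutils import face_utils
import imutils
import time
import dlib
import cv2

def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the ratio drops toward zero while the eye is closed
    return (A + B) / (2.0 * C)
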
def main():
    print("[INFO] starting video stream thread...")
    fileStream = False
    vs = VideoStream(src=0).start()
    time.sleep(1.0)

    while True:
        if fileStream and not vs.more():
            break

        frame = preprocessed_frame(vs)
        rects = detector(frame, 0)
        frame = blink_detector(frame, rects)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # break from the loop if the 'q' key was pressed
        if key == ord("q"):
            break
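
The main() above leans on a module-level detector plus preprocessed_frame() and blink_detector() helpers that are not shown. The following is a hypothetical reconstruction based on the other examples; the helper bodies (and the module-level predictor, lStart/lEnd, rStart/rEnd they reference) are assumptions, not the original project's code:

def preprocessed_frame(vs, width=450):
    # assumed helper: read a frame from the threaded stream and shrink it
    frame = vs.read()
    return imutils.resize(frame, width=width)

def blink_detector(frame, rects):
    # assumed helper: draw eye contours and the EAR for every detected face;
    # predictor, lStart, lEnd, rStart, rEnd, and eye_aspect_ratio() are taken
    # to live at module level, as in the other examples
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for rect in rects:
        shape = face_utils.shape_to_np(predictor(gray, rect))
        leftEye, rightEye = shape[lStart:lEnd], shape[rStart:rEnd]
        ear = (eye_aspect_ratio(leftEye) + eye_aspect_ratio(rightEye)) / 2.0
        cv2.drawContours(frame, [cv2.convexHull(leftEye)], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [cv2.convexHull(rightEye)], -1, (0, 255, 0), 1)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    return frame
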
Example #3
def main():
    detector = dlib.get_frontal_face_detector()
    # predictor = dlib.shape_predictor(
    #     './shape_predictor_68_face_landmarks.dat')

    vs = VideoStream(src=0, resolution=(1280, 960)).start()
    fileStream = False

    cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Frame', 1000, 800)

    prev_face = None
    prev_idx = 0
    PREV_MAX = 100

    mask = cv2.imread('./mask.png')
    mask_h, mask_w, _ = mask.shape
    mask_x, mask_y = mask_w / 2, mask_h / 2

    while True:
        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=960)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        try:
            rects = detector(gray, 0)
            rects = sorted(rects,
                           key=lambda rect: rect.width() * rect.height(),
                           reverse=True)
            # take the rectangle (face) with the largest area (detected region)
            rect = rects[0]

        except IndexError:
            rect = None

        if rect:
            prev_idx = 0

        if not rect:
            if prev_face is not None and prev_idx < PREV_MAX:
                rect = prev_face  # if nothing was detected, reuse a previous result up to PREV_MAX frames old
                prev_idx += 1

        if rect:  # a face was found (including when prev_face is reused)
            prev_face = rect  # save it for later frames

            # shape = get_shape(predictor, gray, rect)

            draw_dlib_rect(frame, rect)
            frame_x = int((rect.right() + rect.left()) / 2)
            frame_y = int((rect.top() + rect.bottom()) / 2)
            cv2.circle(frame, (frame_x, frame_y), 5, (0, 255, 0), -1)
            dx = (frame_x - mask_x)
            dy = (frame_y - mask_y)

            frame[int(dy):int(dy + mask_h), int(dx):int(dx + mask_w)] = mask

        cv2.imshow("Frame", frame)  # display the frame

        # press the q key to quit
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
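
This example calls a draw_dlib_rect() helper that is not reproduced. A plausible sketch (the name comes from the example; the body is an assumption) simply maps the dlib rectangle onto cv2.rectangle:

def draw_dlib_rect(frame, rect, color=(0, 255, 0), thickness=2):
    # assumed helper: outline a dlib rectangle on an OpenCV frame
    cv2.rectangle(frame, (rect.left(), rect.top()),
                  (rect.right(), rect.bottom()), color, thickness)
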
# instantiate the centroid tracker, then initialize a list to store each of the
# dlib correlation trackers, followed by a dictionary mapping each unique
# object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while vs.more():
	# grab the next frame and handle if we are reading from either
	# VideoCapture or VideoStream
	frame = vs.read()

	if vs.Q.qsize() == 0:
		time.sleep(0.01)
		break

	# resize the frame to have a maximum width of 500 pixels (the
	# less data we have, the faster we can process it), then convert
	# the frame from BGR to RGB for dlib
	frame = imutils.resize(frame, width=500)
	if args["camera"] == 'f':
		frame = cv2.flip(frame, -1)
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def count_blink(min=3):
    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--shape-predictor", required=True,
    #     help="path to facial landmark predictor")
    # ap.add_argument("-v", "--video", type=str, default="",
    #     help="path to input video file")
    # args = vars(ap.parse_args())

    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("\n[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("\n[INFO] starting video stream thread...")
    # vs = FileVideoStream(args["video"]).start()
    # fileStream = True
    # vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start() #Use for Raspberry Pi
    vs = VideoStream().start()
    fileStream = False
    time.sleep(1.0)

    j = 0
    result = False
    compareFrame = []

    # loop over frames from the video stream
    while not result:
        j += 1
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                # reset the eye frame counter
                COUNTER = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        # cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # print(j)
        compareFrame = frame
        if TOTAL >= min:
            result = True
        if j == 100:  # roughly 5 seconds of frames
            result = False
            break

        # once enough blinks were counted, verify the face and yield the frame
        if result:
            check_extract_faces(compareFrame)
            yield compareFrame
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    # cv2.destroyAllWindows()
    vs.stop()

    return result
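
Because count_blink() contains a yield, calling it returns a generator rather than running immediately. A usage sketch, assuming the caller simply wants the frame captured once the requested number of blinks has been seen (the file name is illustrative):

# iterate the generator; it yields at most one frame once enough blinks occur
for captured in count_blink(3):
    cv2.imwrite("blink_capture.png", captured)
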
Example #6
def start_detector():
    global blinked
    global blinkstart
    print('Detector started')
    # construct the argument parse and parse the arguments
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.24
    EYE_AR_CONSEC_FRAMES = 10

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "python_backend/shape_predictor_68_face_landmarks.dat")

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    # vs = FileVideoStream(args["video"]).start()
    # fileStream = True
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        time.sleep(0.03)
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        # flip the video
        frame = cv2.flip(frame, 1)
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if not blinked and ear < EYE_AR_THRESH:
                COUNTER += 1
                if COUNTER == 2:
                    blinkstart = True
                    print('Blink started')
                else:
                    blinkstart = False
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    blinked = True
                    blinkstart = False
                    print('Blinked')
            else:
                blinkstart = False
                blinked = False
                COUNTER = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
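
start_detector() reads and writes the globals blinked and blinkstart, so both must already exist at module scope before the loop first reads them. A minimal initialization, using the flag names from the example, is:

# module-level flags required by start_detector()
blinked = False
blinkstart = False
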
Example #7
def main():
    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear <= EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER > EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    cv2.imwrite('teste.png', frame)
                    img = mpimg.imread('teste.png')
                    imgplot = plt.imshow(img)
                    plt.show()

                # reset the eye frame counter
                COUNTER = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
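
Several of these main() functions read args['threshold'], args['frames'], args['video'], and args['shape_predictor'] from a module-level argument parser ap that is never shown. A sketch of what that parser might look like (the flag names follow the commented-out parser in the earlier count_blink example; the defaults here are assumptions):

import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="camera",
                help="path to input video file, or 'camera' for the webcam")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
                help="EAR value below which a frame counts toward a blink")
ap.add_argument("-f", "--frames", type=int, default=3,
                help="consecutive frames the EAR must stay below the threshold")
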
Example #8
def registerBlFreq(newEAR):
    BlinkThresh = newEAR
    BlinkFrames = 3

    count = 0

    total = 0

    detector = dl.get_frontal_face_detector()
    predict = dl.shape_predictor("shape_predictor_68_face_landmarks.dat")

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    vs = VideoStream(src=0).start()
    fileStream = False
    time.sleep(1.0)

    # sample the camera for ten seconds
    end_time = time.time() + 10

    while time.time() < end_time:
        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        pixels = detector(gray, 0)

        for pixel in pixels:
            shape = predict(gray, pixel)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            leftEAR = EAR(leftEye)
            rightEAR = EAR(rightEye)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)

            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < BlinkThresh:
                count += 1
                # print(count)

            else:
                if count >= BlinkFrames:
                    total += 1

                count = 0

            # draw the running totals; the frame itself is shown once per loop below
            cv2.putText(frame, "Blinks: {}".format(total), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
    return total
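
registerBlFreq() samples the webcam for ten seconds and returns how many blinks were seen at the supplied EAR threshold. A one-line usage sketch (the 0.25 threshold is illustrative):

blinks_in_ten_seconds = registerBlFreq(0.25)
print("blink frequency over 10 s:", blinks_in_ten_seconds)
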
Example #9
# grab the indexes of the facial landmarks for the eyes
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start the video stream
print("[INFO] starting video stream thread...")
video = VideoStream(src=0).start()
# video = cv2.VideoCapture(0)
fileStream = False
time.sleep(1.0)

# loop over frames from the video stream
while True:
    # if this is a file video stream, then we need to check if
    # there any more frames left in the buffer to process
    if fileStream and not video.more():
        break

    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    frame = video.read()
    frame = cv2.flip(frame, 1)
    frame = imutils.resize(frame, width=1120)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
Example #10
def main():
    # FINAL list of images to send to next step
    worstPhotos = []

    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize queues that hold the frames and EAR before and after the blink
    beforeBlink = queue.Queue()
    afterBlink = queue.Queue()

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)

    # loop over frames from the video stream
    while len(worstPhotos) < args["numCapturedPhotos"]:
        try:
            # if this is a file video stream, then we need to check if
            # there any more frames left in the buffer to process
            if fileStream and not vs.more():
                break

            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale
            frame = vs.read()
            frame = imutils.resize(frame, width=1000)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the grayscale frame
            rects = detector(gray, 0)

            key = 0

            # loop over the face detections
            for rect in rects:
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy
                # array
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # extract the left and right eye coordinates, then use the
                # coordinates to compute the eye aspect ratio for both eyes
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                leftEAR = eye_aspect_ratio(leftEye)
                rightEAR = eye_aspect_ratio(rightEye)

                # average the eye aspect ratio together for both eyes
                ear = (leftEAR + rightEAR) / 2.0

                # compute the convex hull for the left and right eye, then
                # visualize each of the eyes
                if args["displayInfo"]:
                    leftEyeHull = cv2.convexHull(leftEye)
                    rightEyeHull = cv2.convexHull(rightEye)
                    cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                    cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

                # check to see if the eye aspect ratio is below the blink
                # threshold, and if so, increment the blink frame counter
                if ear < EYE_AR_THRESH:
                    # adds a delay after detecting the blink before taking the photo
                    # for i in range(args["pictureDelay"]):
                    # frame = vs.read()
                    # frame = imutils.resize(frame, width=450)
                    # cv2.imwrite("bad_photo.jpg", frame)

                    # drain any frames left over from a previous blink
                    # (Queue.empty() only tests emptiness, so pop items explicitly)
                    while not afterBlink.empty():
                        afterBlink.get()

                    # saves the blink frame
                    afterBlink.put((ear, frame))

                    # saves the next frames
                    for i in range(10):
                        if len(detector(gray, 0)) > 0:
                            try:
                                afterBlink.put(
                                    runFrames(vs, detector, predictor, TOTAL,
                                              ear))
                            except:
                                pass

                    # frames from the derivative method
                    derFrames = (earDerivative(beforeBlink, afterBlink))

                    # fig = plt.figure(figsize=(4, 8))
                    # columns = 1
                    # rows = 5
                    # for i in range(1, columns * rows + 1):
                    #     img = derFrames[0][i-1]
                    #     fig.add_subplot(rows, columns, i)
                    #     plt.imshow(img)
                    # plt.show()

                    # worstPhotos.append(derFrames[0][2])
                    # worstPhotos.append(derFrames[0][3])
                    # worstPhotos.append(derFrames[0][4])
                    #
                    # derFrames[0].pop(2)
                    # derFrames[0].pop(2)
                    # derFrames[0].pop(2)

                    if derFrames[1][args["pictureDelay"]] < args[
                            "upperEAR"] + 0.01 and derFrames[1][args[
                                "pictureDelay"]] > args["lowerEAR"] - 0.01:
                        worstPhotos.append(derFrames[0][args["pictureDelay"]])
                        derFrames[0].pop(args["pictureDelay"])
                        derFrames[1].pop(args["pictureDelay"])
                        TOTAL += 1
                        print(TOTAL)

                    # discard frames whose EAR falls outside the accepted range
                    i = 0
                    while (i < len(derFrames[1])):
                        if derFrames[1][i] > args["upperEAR"] or derFrames[1][
                                i] < args["lowerEAR"]:
                            derFrames[0].pop(i)
                            derFrames[1].pop(i)
                        else:
                            i += 1

                    for photo in derFrames[0]:
                        worstPhotos.append(photo)
                        TOTAL += 1
                        print(TOTAL)

                    # fig = plt.figure(figsize=(4, 8))
                    # columns = 1
                    # rows = len(derFrames[0])
                    # for i in range(1, columns * rows + 1):
                    #     img = derFrames[0][i - 1]
                    #     fig.add_subplot(rows, columns, i)
                    #     plt.imshow(img)
                    # plt.show()

                # elif ear > 0.45:
                #     worstPhotos.append(frame)

                # otherwise, the eye aspect ratio is not below the blink
                # threshold
                else:
                    # remove the oldest queue item if 20 frames have already been saved
                    if beforeBlink.qsize() >= 20:
                        beforeBlink.get()

                    # adds to the queue of frames before the blink
                    beforeBlink.put((ear, frame))

                    # if the eyes were closed for a sufficient number of frames,
                    # then increment the total number of blinks
                    if COUNTER >= EYE_AR_CONSEC_FRAMES:
                        TOTAL += 1

                    # reset the eye frame counter
                    COUNTER = 0

                    # draw the total number of blinks on the frame along with
                    # the computed eye aspect ratio for the frame
                    if args["displayInfo"]:
                        cv2.putText(frame, "Photos: {}".format(TOTAL),
                                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                    (0, 0, 255), 2)
                        cv2.putText(frame, "EAR: {:.2f}".format(ear),
                                    (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                    (0, 0, 255), 2)

                    # show the frame
                    cv2.imshow("Frame", frame)
                    key = cv2.waitKey(1) & 0xFF
        except Exception:
            # skip frames that fail to process (e.g., an empty frame from the stream)
            pass

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # fig = plt.figure(figsize=(4, 8))
    # columns = 3
    # rows = int(len(worstPhotos)/3)
    # for i in range(1, columns * rows + 1):
    #     img = worstPhotos[i - 1]
    #     fig.add_subplot(rows, columns, i)
    #     plt.imshow(img)
    # plt.show()

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()

    dicts = []
    for i in range(len(worstPhotos)):
        result = google_results(worstPhotos[i])
        result['id'] = i
        dicts.append(result)
    for meme in imgops.getMemeBuffer(dicts, worstPhotos):
        cv2.imshow("Meme", meme)
        cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #11
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

print("[INFO] starting video stream thread...")
#vs = FileVideoStream(args["video"]).start()
#fileStream = True
vs = VideoStream(src=0).start()
fileStream = False
time.sleep(1.0)

while True:
    if fileStream and not vs.more():  #check eof
        break

    frame = vs.read()
    frame = imutils.resize(frame, width=700)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    rects = detector(gray, 0)

    # loop over detections
    for rect in rects:

        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        leftEye = shape[lStart:lEnd]
def main():
    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']
    selected_cam = 0
    cameras = []
    ears = []

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video_dir'] is None:
        if args['video'] == "camera":
            vs = VideoStream(src=0).start()
            fileStream = False
    else:
        # vs = FileVideoStream(args["video"]).start()
        fileStream = True
        video_names = glob.glob(args['video_dir'] + '/*.mp4')
        print(video_names)
        video_names.sort()
        for i in range(len(video_names)):
            camera = FileVideoStream(video_names[i])
            cameras.append(camera)
        vs = cameras[selected_cam].start()

    time.sleep(1.0)

    subject = args['subject']
    if not os.path.isdir(os.path.join('./data_blink', subject)):
        os.mkdir(os.path.join('./data_blink', subject))
    # loop over frames from the video stream
    while True:
        # print(video_names[selected_cam], vs.more())
        if fileStream and not vs.more():
            if args['video_dir'] is None:
                break

            else:
                vs.stop()

                pos = video_names[selected_cam].find("_q")
                question_number = video_names[selected_cam][pos + 2:pos + 4]
                fn = os.path.join('./data_blink', subject,
                                  subject + '_' + question_number)
                data = np.array(ears)
                df = pd.DataFrame(data=data, columns=['EAR'])
                df.to_csv(fn + ".csv")

                if selected_cam == 0:
                    EYE_AR_THRESH = np.average(ears)
                    print('threshold: ', EYE_AR_THRESH)
                ears = []

                # Change to the next video (in the next camera)
                selected_cam += 1

                if selected_cam >= len(video_names): break

                vs = cameras[selected_cam].start()
                print('Changing to next video...{}'.format(
                    video_names[selected_cam]))

        # print(vs)
        try:
            frame = vs.read()
            frame = imutils.resize(frame, width=450)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            rects = detector(gray, 0)

            for rect in rects:
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # extract the left and right eye coordinates, then use the
                # coordinates to compute the eye aspect ratio for both eyes
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                leftEAR = eye_aspect_ratio(leftEye)
                rightEAR = eye_aspect_ratio(rightEye)

                # average the eye aspect ratio together for both eyes
                ear = (leftEAR + rightEAR) / 2.0
                ears.append(ear)

                # compute the convex hull for the left and right eye, then
                # visualize each of the eyes
                leftEyeHull = cv2.convexHull(leftEye)
                rightEyeHull = cv2.convexHull(rightEye)
                cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

                # check to see if the eye aspect ratio is below the blink
                # threshold, and if so, increment the blink frame counter
                if ear < EYE_AR_THRESH:
                    COUNTER += 1

                # otherwise, the eye aspect ratio is not below the blink
                # threshold
                else:
                    # if the eyes were closed for a sufficient number of frames,
                    # then increment the total number of blinks
                    if COUNTER >= EYE_AR_CONSEC_FRAMES:
                        TOTAL += 1

                    # reset the eye frame counter
                    COUNTER = 0

                # draw the total number of blinks on the frame along with
                # the computed eye aspect ratio for the frame
                cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # show the frame
            cv2.imshow("Frame", frame)

        except Exception:
            # skip frames that fail to process (e.g., when a video has ended)
            pass
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #13
def main():
    #PDF LOAD
    text = loadPDF('')  # insert here the name of the PDF you want to view
    start = 0
    stop = 25
    step = 25
    turtle_writer = conf_tela()

    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']
    
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    COUNTER_LEFT_EYE = 0
    COUNTER_RIGHT_EYE = 0
    TOTAL_LEFT = 0
    TOTAL_RIGHT = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])
    
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    
    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True
    
    time.sleep(1.0)
    
    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break
        
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
    
        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # if both eyes are above the blink threshold, check whether one of
            # them has dipped close enough to it to count as a single-eye wink
            if leftEAR > EYE_AR_THRESH and rightEAR > EYE_AR_THRESH:
                # COUNTER += 1
                if leftEAR - 0.05 < EYE_AR_THRESH and rightEAR > EYE_AR_THRESH - 3.0:
                    # print("Left Eye Blinked...")
                    COUNTER_LEFT_EYE += 1

                elif rightEAR - 0.05 < EYE_AR_THRESH and leftEAR > EYE_AR_THRESH - 3.0:
                    # print("Right Eye Blinked...")
                    COUNTER_RIGHT_EYE += 1

                else:
                    COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER_LEFT_EYE > EYE_AR_CONSEC_FRAMES:
                    TOTAL_LEFT += 1
                    print("{} - left eye :D".format(COUNTER_LEFT_EYE))
                    time.sleep(0.1)

                elif COUNTER_RIGHT_EYE > EYE_AR_CONSEC_FRAMES:
                    TOTAL_RIGHT += 1
                    print("{} - right eye :D".format(COUNTER_RIGHT_EYE))
                    time.sleep(0.1)
                    # COUNTER_RIGHT_EYE = 0
                elif COUNTER > EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    print("{} - both eyes blinked :D".format(TOTAL))
                    avancar_pagina(turtle_writer, text, start, stop, step)
                    time.sleep(0.1)
                    turtle_writer.screen.setworldcoordinates(-10, -90, 450, 7.5)

                # reset the eye frame counters
                COUNTER = 0
                COUNTER_LEFT_EYE = 0
                COUNTER_RIGHT_EYE = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "Right: {}".format(TOTAL_RIGHT), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "Left: {}".format(TOTAL_LEFT), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "Right: {:.2f}".format(rightEAR), (300, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "Left: {:.2f}".format(leftEAR), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #14
    def browSetup(self):
        '''Calibrate and return BROW_EAR_THRESH.'''
        print("[INFO] starting video stream thread...")
        vs = VideoStream(src=0).start()
        # vs = VideoStream(usePiCamera=True).start()
        fileStream = False
        time.sleep(1.0)
        browThreshs = []
        BROW_EAR_THRESH = None
        print('\n\n\tMOVE BACK AND FORTH')
        while not BROW_EAR_THRESH:
            # if this is a file video stream, then we need to check if
            # there any more frames left in the buffer to process
            if fileStream and not vs.more():
                break
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale
            frame = vs.read()
            frame = imutils.resize(frame, width=450)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the grayscale frame
            rects = detector(gray, 0)

            # loop over the face detections
            for rect in rects:
                # determine the facial landmarks
                shape = self.predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # values got from face_utils.FACIAL_LANDMARKS_IDXS
                # print(distLeft,distRight)
                rightBrowEye = np.array(
                    [shape[i] for i in [17, 18, 20, 21, 36, 39]])
                leftBrowEye = np.array(
                    [shape[i] for i in [22, 23, 25, 26, 45, 42]])

                leftBrowEar = eye_aspect_ratio(leftBrowEye)
                rightBrowEar = eye_aspect_ratio(rightBrowEye)

                browEar = (leftBrowEar + rightBrowEar) / 2.0
                browThreshs.append(browEar)

                leftBrowEyeHull = cv2.convexHull(leftBrowEye)
                rightBrowEyeHull = cv2.convexHull(rightBrowEye)

                # cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                # cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
                # cv2.drawContours(frame, [leftBrowHull], -1, (0, 255, 0), 1)
                # cv2.drawContours(frame, [rightBrowHull], -1, (0, 255, 0), 1)
                cv2.drawContours(frame, [leftBrowEyeHull], -1, (14, 237, 255),
                                 1)
                cv2.drawContours(frame, [rightBrowEyeHull], -1, (14, 237, 255),
                                 1)
                # cv2.drawContours(frame, [mouthHull], -1, (14,237,255), 1)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if len(browThreshs) == 300:
                BROW_EAR_THRESH = avg(browThreshs)
                print('brow raise setup complete!!')
                cv2.destroyAllWindows()
                vs.stop()
                return BROW_EAR_THRESH

            # if pattern_list and pattern_list[-1]=='/':
            #     message = "".join(pattern_list)[:-1] # to exclude the backslash
            #     print(message)
            #     if message in morse_code.inverseMorseAlphabet.keys():
            #         message = morse_code.decrypt(message)
            #         print(message)
            #     pattern_list=[] # clear pattern memory if mouth is opened and morse decrypted

            # cv2.putText(frame, message, (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
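
browSetup() averages the collected brow/eye ratios with an avg() helper that is not shown; a minimal sketch (the name comes from the example, the body is assumed):

def avg(values):
    # assumed helper: arithmetic mean of the collected ratios
    return sum(values) / len(values)
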
Example #15
def customization():
    audio = Audio('./assets/beep.wav')

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        './models/shape_predictor_68_face_landmarks.dat')

    (left_start, left_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (right_start, right_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    vs = VideoStream(src=0, resolution=(1280, 960)).start()
    fileStream = False

    cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Frame', 1000, 800)

    prev_face = None
    prev_idx = 0
    PREV_MAX = 100

    detect_iters = 0
    in_custom = False  # whether EAR collection is currently in progress
    closed_iters = 0
    closed_ears = []

    while True:
        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=960)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        try:
            rects = detector(gray, 0)
            rects = sorted(rects,
                           key=lambda rect: rect.width() * rect.height(),
                           reverse=True)
            # take the rectangle (face) with the largest area (detected region)
            rect = rects[0]
        except IndexError:
            rect = None

        if rect:
            prev_idx = 0

        if not rect:
            if prev_face is not None and prev_idx < PREV_MAX:
                rect = prev_face  # if nothing was detected, reuse a previous result up to PREV_MAX frames old
                prev_idx += 1

        if rect:  # a face was found (including when prev_face is reused)
            prev_face = rect  # save it for later frames

            shape = get_shape(predictor, gray, rect)

            left_eye_shape = get_eye_shape(shape, left_start, left_end)
            leftEAR = get_ear(left_eye_shape)

            right_eye_shape = get_eye_shape(shape, right_start, right_end)
            rightEAR = get_ear(right_eye_shape)

            draw_dlib_rect(frame, rect)
            draw_contours(frame, left_eye_shape)
            draw_contours(frame, right_eye_shape)

            (frame_h, frame_w, _) = frame.shape
            frame = put_korean(frame,
                               '얼굴이 인식되었습니다.', (frame_w / 2 - 100, 10),
                               color='GREEN')

            ear = round((leftEAR + rightEAR) / 2.0, 3)
            frame = put_korean(frame,
                               '현재 EAR: %s' % ear, (frame_w / 2 - 100, 52),
                               fontSacle=30,
                               color='WHITE')

            if not in_custom:
                detect_iters += 1
                if detect_iters > 50:  # the face has been detected stably for enough frames
                    frame = put_korean(frame,
                                       '눈을 감아 주세요.', (frame_w / 2 - 100, 90),
                                       fontSacle=30,
                                       color='RED')
                    audio.play()
                    in_custom = True
                elif detect_iters > 10:
                    frame = put_korean(
                        frame,
                        '잠시 뒤 삐 소리가 들리면 다시 소리가 날 때까지 눈을 감아 주세요.', (180, 90),
                        fontSacle=20,
                        color='WHITE')

            if in_custom:
                closed_iters += 1
                if closed_iters > 100:
                    audio.play()
                    print(closed_ears)
                    ear_thresh = round(sum(closed_ears) / 100, 3)
                    print(ear_thresh)
                    frame = put_korean(frame,
                                       '[측정 완료] 평균 EAR: %s' % ear_thresh,
                                       (180, 90),
                                       fontSacle=30,
                                       color='GREEN')
                    save_ear_thresh(ear_thresh)
                    cv2.imshow("Frame", frame)  # display the frame
                    key = cv2.waitKey(5000) & 0xFF
                    cv2.destroyAllWindows()
                    vs.stop()
                    exit(0)
                else:
                    frame = put_korean(frame,
                                       '눈을 감아 주세요.', (frame_w / 2 - 100, 90),
                                       fontSacle=30,
                                       color='RED')
                    closed_ears.append(ear)

        else:  # no face was detected
            detect_iters = 0  # reset the stable-detection counter

            (frame_h, frame_w, _) = frame.shape
            frame = put_korean(frame,
                               '얼굴을 찾을 수 없습니다.', (frame_w / 2 - 100, 10),
                               color='RED')

        cv2.imshow("Frame", frame)  # display the frame

        # press the q key to quit
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
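
This example relies on small project helpers (get_shape, get_eye_shape, get_ear, draw_contours) that are not reproduced here. Judging from the other examples they are thin wrappers, roughly as sketched below; these bodies are assumptions, not the project's actual code:

def get_shape(predictor, gray, rect):
    # run the landmark predictor and convert the result to a NumPy array
    return face_utils.shape_to_np(predictor(gray, rect))

def get_eye_shape(shape, start, end):
    # slice out the (x, y) landmark coordinates for one eye
    return shape[start:end]

def get_ear(eye_shape):
    # same eye-aspect-ratio computation as eye_aspect_ratio() above
    return eye_aspect_ratio(eye_shape)

def draw_contours(frame, eye_shape, color=(0, 255, 0)):
    # outline the eye with its convex hull
    cv2.drawContours(frame, [cv2.convexHull(eye_shape)], -1, color, 1)
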
Example #16
def main():
    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']
    INTERVAL = args['interval']
    ip_addr = args['send_address']
    ip_port = args['send_port']
    ip = (ip_addr, ip_port)
    udp_cli = socket(AF_INET, SOCK_DGRAM)
    df = pd.DataFrame(columns=["bid", "time"])
    # initialize the frame counters and the total number of blinks

    COUNTER = 0
    TOTAL = 0
    UDP_TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)
    ear = 0
    # loop over frames from the video stream
    start = datetime.now()
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        #frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    UDP_TOTAL += 1
                    TOTAL += 1
                    df.loc[df.shape[0] + 1] = {
                        "bid": TOTAL,
                        "time": datetime.now()
                    }
                    time.sleep(0.50)

                # reset the eye frame counter
                COUNTER = 0

        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Are you tired ?", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q") or key == 27:
            filepath = str(int(time.time()))
            df.to_csv(filepath + "data")
            df.to_excel(filepath + "data.xlsx")
            break
        if (datetime.now() - start).seconds >= INTERVAL * 60:
            udp_cli.sendto(str(UDP_TOTAL).encode(), ip)
            UDP_TOTAL = 0
            start = datetime.now()
            filepath = str(int(time.time()))
            df.to_csv(filepath + "data")
            df.to_excel(filepath + "data.xlsx")

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
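
Example #16 pushes the per-interval blink count over UDP with udp_cli.sendto(str(UDP_TOTAL).encode(), ip). For completeness, a minimal receiving end could look like the sketch below; the bind address and port are placeholders and must match whatever was passed as the sender's send_address/send_port:

from socket import socket, AF_INET, SOCK_DGRAM

srv = socket(AF_INET, SOCK_DGRAM)
srv.bind(("0.0.0.0", 9999))  # placeholder port
while True:
    data, addr = srv.recvfrom(64)
    print("blinks received from {}: {}".format(addr, int(data.decode())))
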
Example #17
def mainfunc(condition):
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    #vs = FileVideoStream(args["video"]).start()
    #fileStream = True
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False
    time.sleep(1.0)

    i = 0
    min_ear = 100
    max_ear = 0
    ear = 0
    # loop over frames from the video stream
    while condition:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # print(shape)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                # reset the eye frame counter
                COUNTER = 0

            print(COUNTER)
            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        #if key == ord("q"):
        #	break

        if i < 50:
            if ear < min_ear:
                min_ear = ear
            elif ear > max_ear:
                max_ear = ear
        elif i == 50 or key == ord("r"):
            EYE_AR_THRESH = (min_ear + max_ear) / 2
        # print(ear)

        i += 1
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
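The mainfunc example above calibrates EYE_AR_THRESH automatically: over the first 50 frames it records the smallest and largest EAR seen and then takes their midpoint as the blink threshold. A tiny worked example of that rule (the numbers are illustrative, not measured):

# Illustrative midpoint calibration, as used in mainfunc() above.
min_ear = 0.14  # assumed smallest EAR observed (eyes closed)
max_ear = 0.32  # assumed largest EAR observed (eyes open)
EYE_AR_THRESH = (min_ear + max_ear) / 2
print(EYE_AR_THRESH)  # 0.23 -- frames with EAR below this value count toward a blink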
Example #18
def mask_video_simple(input_video_file,
                      output_video_file,
                      MASK_NAME,
                      add_corona_mask=False,
                      show_frames=False,
                      rotate=True,
                      video_stream=False):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        'drive/My Drive/Computer Vision/Project/models/shape_predictor_68_face_landmarks.dat'
    )
    # predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    result_frames = []

    if video_stream:  # real-time
        vs = VideoStream(src=0).start()
        out = cv2.VideoWriter(output_video_file,
                              cv2.VideoWriter_fourcc(*'mp4v'), 3, (253, 450),
                              True)

        # real-time: loop until the `q` key is pressed below
        while True:

            frame = vs.read()
            frame = resize(frame, width=450)
            frame_c = frame.copy()
            if rotate:
                frame_c = cv2.rotate(frame_c, cv2.ROTATE_90_COUNTERCLOCKWISE)

            frame_c = mask_image(frame_c,
                                 MASK_NAME,
                                 add_corona_mask=add_corona_mask)
            result_frames.append(frame_c)
            frame_c = cv2.cvtColor(frame_c, cv2.COLOR_BGR2RGB)

            if show_frames:
                plt.figure(figsize=(8, 8))
                plt.imshow(frame_c)
                plt.show()

            # out.write(frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break

        # clean up once the real-time loop ends
        cv2.destroyAllWindows()
        vs.stop()
        out.release()

    else:  # from video
        vidcap = VidCap(input_video_file)
        frames = vidcap.extract_frames(frame_rate=0.5)  # list of frames

        result_frames = []

        for frame in frames:
            frame_c = frame.copy()
            if rotate:
                frame_c = cv2.rotate(frame_c, cv2.ROTATE_90_COUNTERCLOCKWISE)

            frame_c = mask_image(frame_c,
                                 MASK_NAME,
                                 add_corona_mask=add_corona_mask)
            result_frames.append(frame_c)
            frame_c = cv2.cvtColor(frame_c, cv2.COLOR_BGR2RGB)

            if show_frames:
                plt.figure(figsize=(8, 8))
                plt.imshow(frame_c)
                plt.show()

    # save frames to video
    height, width, _ = result_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_video_file, fourcc, 3,
                            (width, height))  # the 3rd param (FPS) can be changed

    for img in result_frames:
        video.write(img)

    cv2.destroyAllWindows()
    video.release()
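A possible call to mask_video_simple above, using the file-based branch; the file paths and mask name are placeholders, and VidCap/mask_image come from the surrounding project.

# Hypothetical invocation of mask_video_simple; paths and MASK_NAME are placeholders.
mask_video_simple(input_video_file='input.mp4',
                  output_video_file='output_masked.mp4',
                  MASK_NAME='surgical_mask',
                  add_corona_mask=True,
                  show_frames=False,
                  rotate=True,
                  video_stream=False)  # use the VidCap/file branch, not the webcam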
def main():
    args = vars(ap.parse_args())

    # create frame counter
    fps_counter = FPSCounter()

    # total number of blinks
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        vs.stream.set(cv2.CAP_PROP_FPS, 15)
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True
        fps = vs.stream.get(cv2.CAP_PROP_FPS)

    # create dataloggers
    datalogger = DataLogger(columns=['ear', 'adr'])

    # blink detector
    blink_detector = BlinkDetector(time_window=5,
                                   plot=args['graph'],
                                   frame_delay=10)

    # loop over frames from the video stream
    frame_cnt = 0
    INIT_TIME = None
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # get timestamp
        if fileStream:
            timestamp = frame_cnt / fps
        else:
            if INIT_TIME is None:
                INIT_TIME = time.time()
            timestamp = time.time() - INIT_TIME
            fps = fps_counter.tick()

        # get the new frame
        frame = vs.read()
        frame_cnt += 1
        if frame is None:
            break

        frame = imutils.resize(frame, width=450)
        # convert it to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # compute the area-over-distance metric
            adr = AreaDistanceRatio.compute(leftEye, rightEye)
            # log ADR
            datalogger.log(adr, 'adr', timestamp)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # log EAR
            datalogger.log(ear, 'ear', timestamp)

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # send new data to blink detector and check if it detected new blinks
            blink_detector.send(adr, timestamp)
            blink = blink_detector.get_blink()
            if blink is not None:
                blink_time, blink_dur = blink
                TOTAL += 1
                print(f"[BLINK] time: {blink_time:.2f}  dur: {blink_dur:.2f}")

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "ADR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "FPS: {:.2f}".format(fps), (300, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # save datafile
    output_file = args['output_file']
    if output_file == 'ask':
        output_file = input("Enter filename to save: ")
    if output_file is not None:
        datalogger.save(output_file)

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
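AreaDistanceRatio.compute is imported from the surrounding project and is not shown here. Below is a guess at what an area-over-distance ratio could look like for the 6-point dlib eye landmarks, offered only as a sketch of the idea, not the project's actual implementation.

# Assumed sketch of an area-over-distance ratio for 6-point dlib eye landmarks:
# total eye-contour area normalized by the distance between the eye centers.
import cv2
import numpy as np

def area_distance_ratio(left_eye, right_eye):
    area = cv2.contourArea(left_eye.astype(np.float32)) + \
           cv2.contourArea(right_eye.astype(np.float32))
    distance = np.linalg.norm(left_eye.mean(axis=0) - right_eye.mean(axis=0))
    return area / distance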
Example #20
fps = FPS().start()

while True:
    frame = vs.read()
    # frame = imutils.resize(frame, width=400)

    cv2.imshow("Frame", frame)

    if args.get("video", None) is not None:
        time.sleep(0.05)

    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

    if args.get("video", None) is not None:
        key = cv2.waitKey(1)
        if vs.more() is False:
            break

    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
Example #21
print("Starting live video stream...")

vs = VideoStream(src=0).start()

fileStream = False
time.sleep(1.0)
currentCount = 0

mouse = Mouse()
face_cascade = cv2.CascadeClassifier('res/haarcascade_frontalface_default.xml')

while True:

    # if this is a file video stream, then we need to check if
    # there any more frames left in the buffer to process
    if fileStream and not vs.more():
        break

    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)

    height, width, c = frame.shape

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.circle(frame, (width // 2, height // 2), 4, (0, 0, 255), 2)
    cv2.circle(frame, (width // 2, height // 2), 20, (128, 0, 128), 2)
    face = face_cascade.detectMultiScale(gray, 1.15)
Example #22
def start_blink_detection():
    global COUNTER
    global TOTAL
    rest_timer = InfiniteTimer(float(30), send_rest_notification)
    rest_timer.start()
    blink_timer = InfiniteTimer(
        float(get_timeout()) * 60, send_blink_notification,
        "Blink!! {}".format(TOTAL))
    blink_timer.start()
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        r"data/shape_predictor_68_face_landmarks.dat")
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    # start the video stream thread
    print("[INFO] starting video stream thread...")
    # vs = FileVideoStream(args["video"]).start()
    # fileStream = True
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False
    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            if rest_timer.is_running:
                rest_timer.cancel()
            rest_timer.start()
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1
            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    if blink_timer.is_running:
                        blink_timer.cancel()
                    blink_timer.start()

                # reset the eye frame counter
                COUNTER = 0

                # draw the total number of blinks on the frame along with
                # the computed eye aspect ratio for the frame
                cv2.putText(frame, f"EAR: {ear:.2f}", (300, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("AI_Svasthya", frame)
        key = cv2.waitKey(1)
        if key == 27:
            break
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
    # do a bit of cleanup
    if rest_timer.is_running:
        rest_timer.cancel()
    if blink_timer.is_running:
        blink_timer.cancel()
    cv2.destroyAllWindows()
    vs.stop()
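InfiniteTimer is not defined in this example; it is used as a repeating timer with start(), cancel(), and an is_running flag. A minimal sketch that matches that interface (an assumption about the original helper, built on threading.Timer):

# Assumed repeating-timer helper matching the interface used above.
from threading import Timer

class InfiniteTimer:
    def __init__(self, seconds, target, *args):
        self.seconds = seconds
        self.target = target
        self.args = args
        self.is_running = False
        self._timer = None

    def _run(self):
        self.is_running = False
        self.start()              # reschedule before running the callback
        self.target(*self.args)

    def start(self):
        if not self.is_running:
            self._timer = Timer(self.seconds, self._run)
            self._timer.daemon = True
            self._timer.start()
            self.is_running = True

    def cancel(self):
        if self._timer is not None:
            self._timer.cancel()
        self.is_running = False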
Example #23
def main():
    with open(os.path.join(thresh_disk_dir, "threshold.txt"), "r") as f:
        threshold = float(f.read())
        print("Set the threshold parameter to: " + str(threshold))
    global status
    global stopButtonPressed
    args = vars(ap.parse_args())
    EYE_AR_THRESH = threshold
    EYE_AR_CONSEC_FRAMES = args['frames']

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    start_time = time.time()

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    status = "Loading facial landmark predictor..."
    print(status)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    status = "Starting video stream thread..."
    print(status)
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)
    temp = []
    ear_temp = []
    status = "Data Collection Running!"
    print(status)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                    temp_time = time.time() - start_time
                    temp.append(temp_time)
                    ear_temp.append(ear)

                # reset the eye frame counter
                COUNTER = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        if showCamera:
            # show the frame
            cv2.imshow("Frame", frame)
            # print("eye thresh = " + str(EYE_AR_THRESH))
            key = cv2.waitKey(1) & 0xFF

        if stopButtonPressed:
            break

    # end of while loop

    end_time = time.time()
    print(TOTAL)
    print('time cost', end_time - start_time, 's')
    print(temp)
    currentDateTime = datetime.datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
    with open(os.path.join(disk_dir,
                           str(currentDateTime) + '.csv'),
              'w',
              newline='') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=',',
                                quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(temp)
        spamwriter.writerow(ear_temp)
    DropboxSerializer(dbx).upload_file(
        os.path.join(disk_dir,
                     str(currentDateTime) + '.csv'),
        '/Collected Data/' + str(currentDateTime) + '.csv')
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
    status = "Data Collection Stopped."
    print(status)
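The CSV written above contains two rows: the blink timestamps (seconds since the start of the run) and the EAR recorded at each blink. A short sketch for reading such a file back, with a placeholder filename:

# Read back the two-row CSV produced above (row 0: blink times in seconds,
# row 1: EAR at each blink). The filename is a placeholder.
import csv

with open("01_01_2024-12_00_00.csv", newline="") as csvfile:
    rows = list(csv.reader(csvfile))
blink_times = [float(t) for t in rows[0] if t]
blink_ears = [float(e) for e in rows[1] if e]
print("{} blinks recorded".format(len(blink_times)))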
def recognise():
    def eye_aspect_ratio(eye):
        # compute the euclidean distances between the two sets of
        # vertical eye landmarks (x, y)-coordinates
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])

        # compute the euclidean distance between the horizontal
        # eye landmark (x, y)-coordinates
        C = dist.euclidean(eye[0], eye[3])

        # compute the eye aspect ratio
        ear = (A + B) / (2.0 * C)

        # return the eye aspect ratio
        return ear

    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.25
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # flag used to indicate whether the person on camera is live
    FLAG = False

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor_path = "shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(predictor_path)

    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer/trainer.yml')

    # load the JSON file mapping recognizer ids to usernames
    names = ['None']

    with open('clients.txt') as json_file:
        data = json.load(json_file)
        for p in data['people']:
            id = p['id']
            name = p['username']
            names.insert(id, name)

    font = cv2.FONT_HERSHEY_SIMPLEX

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")

    #in case we need to test a video file for blink
    # vs = FileVideoStream(videofile).start()
    # fileStream = True
    vs = VideoStream(src=0).start()
    fileStream = False
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        faces = detector(gray, 0)

        # loop over the face detections
        for face in faces:

            # compute the bounding box of the face and draw it on the
            # frame
            (x, y, w, h) = face_utils.rect_to_bb(face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            id, confidence = recognizer.predict(
                gray[y:y + h, x:x + w])  # gray portion of the face

            # the confidence value is 0 for a perfect match (lower is better)

            if (confidence < 100):
                name = names[id]
                confidence = "  {0}%".format(round(100 - confidence))
            else:
                name = "unknown"
                confidence = "  {0}%".format(abs(round(100 - confidence)))

            cv2.putText(frame, str(name), (x + 5, y - 5), font, 1,
                        (255, 255, 255), 2)

            cv2.putText(frame, str(confidence), (x + 5, y + h - 5), font, 1,
                        (255, 255, 0), 1)
            cv2.putText(frame, "PRESS 'ESC' TO EXIT", (10, 320), font, 1,
                        (0, 255, 0), 1)

            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, face)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                # reset the eye frame counter
                COUNTER = 0

            # label the face as real or fake; while it is still unverified,
            # also draw the blink count and EAR on the frame
            if FLAG:
                cv2.putText(frame, "Real", (x + 7, y + h + 20), font, 1,
                            (255, 255, 255), 1)
            else:
                cv2.putText(frame, "Fake", (x + 7, y + h + 20), font, 1,
                            (255, 255, 255), 1)
                cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(10) & 0xFF

        # once more than 3 blinks have been counted, treat the face as live
        if TOTAL > 3:
            FLAG = True

        # if the `ESC` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
    return FLAG


# recognise()
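recognise() above reads clients.txt as JSON and expects a top-level "people" list whose entries carry an id and a username. This layout is inferred from the parsing loop; the values below are examples only.

# Example clients.txt layout implied by the parsing loop in recognise();
# the ids must match the labels used to train trainer/trainer.yml.
import json

clients = {
    "people": [
        {"id": 1, "username": "alice"},
        {"id": 2, "username": "bob"},
    ]
}

with open("clients.txt", "w") as json_file:
    json.dump(clients, json_file, indent=2)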
Example #25
def threaded_function2():
    m.FAILSAFE = False
    #taking size of screen
    (scrx,scry)=m.size()

    mLocOld = np.array([0,0])
    mouseLoc = np.array([0,0])
    DampingFactor = 15
    def calculateView(x,y):
        xvMax, yvMax = m.size()
        xvMin, yvMin = 0, 0
        xwMax, xwMin = 370, 270
        ywMax, ywMin = 290, 200
        sx = (xvMax - 0) // (xwMax - xwMin)
        sy = (yvMax - 0) // (ywMax - ywMin)
        xv = xvMin + (x - xwMin) * sx
        yv = yvMin + (y - ywMin) * sy
        return xv,yv
    def eye_aspect_ratio(eye):
       # compute the euclidean distances between the two sets of
       # vertical eye landmarks (x, y)-coordinates
       A = dist.euclidean(eye[1], eye[5])
       B = dist.euclidean(eye[2], eye[4])

       # compute the euclidean distance between the horizontal
       # eye landmark (x, y)-coordinates
       C = dist.euclidean(eye[0], eye[3])

       # compute the eye aspect ratio
       ear = (A + B) / (2.0 * C)

       # return the eye aspect ratio
       return ear
     
    # construct the argument parse and parse the arguments
    PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
    EYE_AR_THRESH = .15
    EYE_AR_CONSEC_FRAMES = 7
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(PREDICTOR_PATH)


    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]

    # start the video stream thread
    print("[INFO] camera sensor warming up...")
    ##vs = FileVideoStream(args["video"]).start()
    fileStream = True
    ### the line below was uncommented
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
       # if this is a file video stream, then we need to check if
       # there any more frames left in the buffer to process
       if fileStream and not vs.more():
          break

       # grab the frame from the threaded video file stream, resize
       # it, and convert it to grayscale
       frame = vs.read()
       frame = imutils.resize(frame, width=450)
       gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

       # detect faces in the grayscale frame
       rects = detector(gray, 0)

       # loop over the face detections
       for rect in rects:
          # determine the facial landmarks for the face region, then
          # convert the facial landmark (x, y)-coordinates to a NumPy
          # array
          shape = predictor(gray, rect)
          shape = face_utils.shape_to_np(shape)

          # extract the left and right eye coordinates, then use the
          # coordinates to compute the eye aspect ratio for both eyes
          global rightEAR
          leftEye = shape[lStart:lEnd]
          rightEye = shape[rStart:rEnd]
          nose = shape[nStart:nEnd]
          leftEAR = eye_aspect_ratio(leftEye)
          rightEAR = eye_aspect_ratio(rightEye)
          

          # average the eye aspect ratio together for both eyes
          ear = (leftEAR + rightEAR) / 2.0
          xv, yv = nose[0]
        
          xw = int(xv)  # np.int was removed in newer NumPy; built-in int works
          yw = int(yv)
          # print(type(xv))
          xv, yv = calculateView(xw, yw)
          # compute the convex hull for the left and right eye, then
          # visualize each of the eyes
          leftEyeHull = cv2.convexHull(leftEye)
          rightEyeHull = cv2.convexHull(rightEye)
          cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
          cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            #cv2.drawContours(frame, [noseHull], -1, (0, 255, 0), 1)
          if rightEAR < .15:
             # mouse.click(Button.left, 1)
             m.click(clicks=1, button='left', pause=1)

             # if leftEAR < .15:
             #     m.click(mouseLoc[0], mouseLoc[1], clicks=1, button='right', pause=0)
             mLocOld = mouseLoc

          # check to see if the eye aspect ratio is below the blink
          # threshold, and if so, increment the blink frame counter
          if ear < EYE_AR_THRESH:
             COUNTER += 1

          # otherwise, the eye aspect ratio is not below the blink
          # threshold
          else:
             # if the eyes were closed for a sufficient number of frames,
             # then increment the total number of blinks
             if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1

             # reset the eye frame counter
             COUNTER = 0

          # draw the total number of blinks on the frame along with
          # the computed eye aspect ratio for the frame
          cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
          cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

       cv2.imshow("Frame", frame)
       key = cv2.waitKey(1) & 0xFF
     
        # if the `q` or 'esc' key was pressed, break from the loop
       if key == ord("q"):
           break
       elif key == 27:
           break
     
    cv2.destroyAllWindows()
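calculateView above maps the nose-tip position from a fixed camera-space window (x in [270, 370], y in [200, 290]) onto the whole screen with integer scale factors. A standalone version with example numbers, assuming a 1920x1080 screen in place of m.size():

# Standalone version of the window-to-viewport mapping used in calculateView,
# with an assumed 1920x1080 screen instead of m.size().
def calculate_view(x, y, screen_w=1920, screen_h=1080):
    xw_max, xw_min = 370, 270            # camera-space window for the nose tip (x)
    yw_max, yw_min = 290, 200            # camera-space window for the nose tip (y)
    sx = screen_w // (xw_max - xw_min)   # 1920 // 100 = 19
    sy = screen_h // (yw_max - yw_min)   # 1080 // 90 = 12
    return (x - xw_min) * sx, (y - yw_min) * sy

print(calculate_view(320, 245))  # (950, 540): mid-window lands near the screen center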
def main():
    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']

    # initialize the blink counters to zero
    COUNTER = 0
    TOTAL = 0

    # Download the file if it does not exist
    filename = 'shape_predictor_68_face_landmarks.dat'

    if not os.path.isfile(filename):

        url = 'https://github.com/davisking/dlib-models/blob/master/shape_predictor_68_face_landmarks.dat.bz2?raw=true'

        # Download the file from `url` and save it locally under `file_name`:
        with urllib.request.urlopen(url) as response, open(
                filename + '.bz2', 'wb') as out_file:
            shutil.copyfileobj(response, out_file)

        zipfile = bz2.BZ2File(filename + '.bz2')  # open the compressed file
        data = zipfile.read()  # get the decompressed data
        with open(filename, 'wb') as out_f:
            out_f.write(data)  # write an uncompressed copy of the predictor

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and right eye
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream
    print("[INFO] starting video stream...")
    print("[INFO] press s to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)

    # main processing loop
    # if this is a file video stream, then we need to check if
    # there are any more frames left in the buffer to process

    # Part 2 of the code:
    while True:
        if fileStream and not vs.more():
            break

        # grab the frame from the video file stream, resize it,
        # and convert it to grayscale
        frame = vs.read()
        frame = imutils.resize(frame, width=550)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        # Part 3 of the code:
        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then compute
            # the eye aspect ratio (AR) for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio (AR) over both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for both eyes
            # and draw each of them
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                # reset the eye frame counter
                COUNTER = 0
            # Part 4 of the code:
            # draw the total number of blinks on the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # Part 5 of the code:
        # if the `s` key was pressed, break from the loop and exit the app
        if key == ord("s"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #27
class LieDetector:
    def __init__(self, algorithm):
        self.algorithm = algorithm
        self.initialize()
        self.frame_counter = 0
        self.blink_detector = BlinkDetector.BlinkDetector()
        self.pursed_lips_detector = PursedLipsDetector.PursedLipsDetector()
        self.blushing_detector = BlushingDetector.BlushingDetector()
        self.person = Person.Person()
        self.questions_counter = 1
        self.seconds = 0

    def initialize(self):
        # initialize dlib's face detector (HOG-based) and then create
        # the facial landmark predictor
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)

        # self.video_stream = FileVideoStream(FILE_VIDEO_STREAM_PATH).start()
        # self.file_stream = True
        self.video_stream = VideoStream(src=0).start()
        self.file_stream = False
        time.sleep(1.0)

    def process(self):

        timeBefore = time.time()
        # loop over frames from the video stream
        while True:

            # if this is a file video stream, check if there are any more frames left in the buffer to process
            if self.file_stream and not self.video_stream.more():
                break

            # get the frame from the threaded video file stream, resize it, and convert it to grayscale
            frame = self.video_stream.read()
            frame = imutils.resize(frame, width=800)
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            self.frame_counter += 1

            # detect faces in the grayscale frame
            faces = self.detector(gray_frame, 0)

            if faces and faces[0]:
                face = faces[0]
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy array
                face_region = self.predictor(gray_frame, face)
                face_region = face_utils.shape_to_np(face_region)

                # inspect face and calculate average values of interest
                if self.frame_counter < NUMBER_OF_FRAMES_TO_INSPECT:

                    if self.frame_counter < NUMBER_OF_FRAMES_TO_INSPECT_EYES:
                        # calculate average eye and lips aspect ratio through the first couple of frames
                        self.calculate_eye_aspect_ratio(face_region)
                        self.calculate_lips_aspect_ratio(face_region)

                    elif self.frame_counter < NUMBER_OF_FRAMES_TO_INSPECT_EYES + 3:
                        # calculate eye and lips aspect ratio threshold value
                        # depending on which blink detector will detect blinks
                        self.blink_detector.calculate_eye_aspect_ratio_threshold(
                            self.person.eye_aspect_ratio)
                        self.pursed_lips_detector.calculate_lips_aspect_ratio_threshold(
                            self.person.lips_aspect_ratio)
                    else:
                        # calculate average number of blinks and lip pursing
                        self.blink_detector.detect(frame, face_region)
                        self.pursed_lips_detector.detect(frame, face_region)

                    # calculate average cheek color
                    self.calculate_average_cheek_color(frame, gray_frame,
                                                       face_region)

                elif self.frame_counter == NUMBER_OF_FRAMES_TO_INSPECT:
                    print("SET AVERAGE VALUES")
                    # set values of interest to the respective detectors
                    self.blushing_detector.set_average_cheek_color(
                        self.person.average_cheek_color)

                    now = time.time()
                    # set average number of blinks and lip pursing to the person
                    self.person.set_average_number_of_blinks(
                        self.blink_detector.get_and_reset_number_of_blinks(),
                        now - timeBefore)
                    self.person.set_average_number_of_lip_pursing(
                        self.pursed_lips_detector.
                        get_and_reset_number_of_lip_pursing())
                    print(self.person.average_cheek_color)
                    print(self.person.average_number_of_blinks)
                    print(self.person.average_number_of_lip_pursing)

                # detect blinks, lip pursing and blushing
                else:
                    self.blink_detector.detect(frame, face_region)
                    self.pursed_lips_detector.detect(frame, face_region)
                    self.blushing_detector.detect(frame, gray_frame,
                                                  face_region)

                cv2.putText(
                    frame,
                    "A_EAR: {:.4f}".format(self.person.eye_aspect_ratio),
                    (200, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                cv2.putText(
                    frame,
                    "A_LAR: {:.4f}".format(self.person.lips_aspect_ratio),
                    (500, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the frame
            cv2.imshow("Lie detector", frame)

            key = cv2.waitKey(1) & 0xFF

            if key == ord("x"):
                break

            elif key == ord("n"):

                # calculate number of seconds
                now = time.time()
                self.seconds = now - timeBefore
                self.detect_if_lie()

                self.questions_counter += 1
                timeBefore = time.time()

        now = time.time()
        self.seconds = now - timeBefore
        self.detect_if_lie()

    def calculate_average_cheek_color(self, frame, gray_frame, face_region):
        left_cheek = face_region[self.blushing_detector.left_cheek_idx]
        right_cheek = face_region[self.blushing_detector.right_cheek_idx]
        calculated_cheek_color = self.blushing_detector.calculate_cheeks_color(
            frame, gray_frame, right_cheek, left_cheek)
        self.person.calculate_average_color(calculated_cheek_color)

    def calculate_eye_aspect_ratio(self, face_region):
        EAR, left_eye, right_eye = self.blink_detector.calculate_eye_aspect_ratio(
            face_region)
        self.person.calculate_average_eye_aspect_ratio(EAR)

    def calculate_lips_aspect_ratio(self, face_region):
        LAR, mouth = self.pursed_lips_detector.lips_aspect_ratio(
            face_region, consider_smile=False)
        self.person.calculate_average_lips_aspect_ratio(LAR)

    def detect_if_lie(self):
        # get detected features
        number_of_blinks = self.blink_detector.get_and_reset_number_of_blinks()
        number_of_blushing_occurred = self.blushing_detector.get_number_of_blushing_occurred_and_reset(
        )
        number_of_lip_pursing_occurred = self.pursed_lips_detector.get_and_reset_number_of_lip_pursing(
        )

        if self.seconds > 0:
            number_of_blinks_per_second = number_of_blinks / self.seconds
        else:
            number_of_blinks_per_second = number_of_blinks

        to_predict = [
            self.person.average_number_of_blinks, number_of_blinks_per_second,
            number_of_lip_pursing_occurred, number_of_blushing_occurred
        ]

        if self.algorithm == "knn":
            prediction = kNN.predict([to_predict], DATASET_PATH)

        else:
            prediction = feedforward_nn.predict([to_predict])
            print(prediction)

        prediction = np.round(prediction[0])
        if prediction == 1:
            prediction = "truth"
        else:
            prediction = "lie"

        self.write_to_file(number_of_blinks, number_of_blushing_occurred,
                           number_of_lip_pursing_occurred,
                           number_of_blinks_per_second, prediction)

        # reset seconds counter
        self.seconds = 0

    def write_to_file(self, number_of_blinks, number_of_blushing_occurred,
                      number_of_lip_pursing_occurred,
                      number_of_blinks_per_second, prediction):
        # write report
        file = open("test.txt", "a")
        if self.questions_counter == 1:
            file.write(
                "\n\n******************************************************\n")
            file.write("Person averaged")
            file.write("\n\tblinks: " +
                       str(self.person.average_number_of_blinks))
            file.write("\n\tnumber of blinks per second: " +
                       str(number_of_blinks_per_second))
            file.write("\n\tEAR: " + str(self.person.eye_aspect_ratio))
            file.write("\n\tLAR: " + str(self.person.lips_aspect_ratio))
            file.write("\n\tlip pursing: " +
                       str(number_of_lip_pursing_occurred))
            file.write("\n\tcheek color: " +
                       "{:0.0f}".format(self.person.average_cheek_color[2]) +
                       ", " +
                       "{:0.0f}".format(self.person.average_cheek_color[1]) +
                       ", " +
                       "{:0.0f}".format(self.person.average_cheek_color[0]))

        file.write("\n\n" + str(self.questions_counter) + ". Detected:")
        file.write("\n\tblinks detected: " + str(number_of_blinks))
        file.write("\n\tnumber of blinks per second: " +
                   str(number_of_blinks_per_second))
        file.write("\n\tnumber of blushing occurred:  " +
                   str(number_of_blushing_occurred))
        file.write("\n\tnumber of pursing occurred:  " +
                   str(number_of_lip_pursing_occurred))
        file.write("\n\n\tPredicted:  " + prediction)

        file.close()

    def destroy(self):
        cv2.destroyAllWindows()
        self.video_stream.stop()
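A minimal driver for the LieDetector class above, assuming the supporting modules (BlinkDetector, PursedLipsDetector, BlushingDetector, Person, kNN, feedforward_nn) and the constants it references are importable from the surrounding project:

# Hypothetical driver for LieDetector; "knn" selects the kNN prediction path,
# any other value falls through to the feed-forward network.
if __name__ == "__main__":
    lie_detector = LieDetector(algorithm="knn")
    try:
        lie_detector.process()   # press 'n' after each question, 'x' to stop
    finally:
        lie_detector.destroy()   # close the window and release the video stream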
Example #28
def main():
    args = {
        'shape_predictor': "shape_predictor_68_face_landmarks.dat",
        'video': 'camera',
        'threshold': 0.27,
        'frames': 2
    }

    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']

    COUNTER = 0
    TOTAL = 0

    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    print("[INFO] starting video stream thread...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)
    fps = FPS().start()
    closedStateFps = FPS().start()
    while True:
        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=550)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        rects = detector(gray, 0)

        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            else:
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                # reset the eye frame counter even if the closure was too
                # short to count as a blink
                COUNTER = 0

            f = FPS().start()

            if ear < EYE_AR_THRESH:
                closedStateFps.update()
            else:
                closedStateFps = FPS().start()

            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        closedStateFps.stop()
        if closedStateFps.elapsed() > 2.0:
            cv2.putText(frame, "ALARM: Eyes are closed", (200, 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        fps.update()

        if key == ord("q"):
            break
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
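For a sense of scale, the blink and alarm constants in the example above can be converted to time once a frame rate is assumed (15 FPS here, purely illustrative):

# Illustrative arithmetic only: relating EYE_AR_CONSEC_FRAMES and the 2-second
# closed-eye alarm above to an assumed 15 FPS camera.
fps = 15
EYE_AR_CONSEC_FRAMES = 2
min_blink_duration = EYE_AR_CONSEC_FRAMES / fps   # ~0.13 s of closure registers a blink
frames_for_alarm = int(2.0 * fps)                 # ~30 consecutive closed-eye frames
print(min_blink_duration, frames_for_alarm)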
Example #29
def main():
	# import the necessary packages
	from scipy.spatial import distance as dist
	from imutils.video import FileVideoStream
	from imutils.video import VideoStream
	from imutils import face_utils
	from threading import Thread
	import playsound
	import numpy as np
	import argparse
	import imutils
	import time
	import dlib
	import cv2

	def sound_alarm(path):
		# play an alarm sound
		playsound.playsound(path)

	def eye_aspect_ratio(eye):
		# compute the euclidean distances between the two sets of
		# vertical eye landmarks (x, y)-coordinates
		A = dist.euclidean(eye[1], eye[5])
		B = dist.euclidean(eye[2], eye[4])

		# compute the euclidean distance between the horizontal
		# eye landmark (x, y)-coordinates
		C = dist.euclidean(eye[0], eye[3])

		# compute the eye aspect ratio
		ear = (A + B) / (2.0 * C)

		# return the eye aspect ratio
		return ear

	# construct the argument parse and parse the arguments
	# ap = argparse.ArgumentParser()
	# ap.add_argument("-p", "--shape-predictor", required=True,
	# 	help="path to facial landmark predictor")
	# ap.add_argument("-a", "--alarm", type=str, default="",
	# 	help="path alarm .WAV file")
	# ap.add_argument("-w", "--webcam", type=int, default=0,
	# 	help="index of webcam on system")
	# args = vars(ap.parse_args())
	# print (args)

	# define two constants, one for the eye aspect ratio to indicate
	# blink and then a second constant for the number of consecutive
	# frames the eye must be below the threshold
	EYE_AR_THRESH = 0.3
	EYE_AR_CONSEC_FRAMES = 3

	# initialize the frame counters and the total number of blinks
	COUNTER = 0
	ALARM_ON = False

	# initialize dlib's face detector (HOG-based) and then create
	# the facial landmark predictor
	print("[INFO] loading facial landmark predictor...")
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

	# grab the indexes of the facial landmarks for the left and
	# right eye, respectively
	(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
	(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

	# start the video stream thread
	print("[INFO] starting video stream thread...")
	# vs = FileVideoStream(args["video"]).start()
	fileStream = True
	vs = VideoStream(src=0).start()
	# vs = VideoStream(usePiCamera=True).start()
	fileStream = False
	time.sleep(1.0)

	# loop over frames from the video stream
	while True:
		# if this is a file video stream, then we need to check if
		# there any more frames left in the buffer to process
		if fileStream and not vs.more():
			break

		# grab the frame from the threaded video file stream, resize
		# it, and convert it to grayscale
		frame = vs.read()
		frame = imutils.resize(frame, width=450)
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		# detect faces in the grayscale frame
		rects = detector(gray, 0)

		# loop over the face detections
		for rect in rects:
			# determine the facial landmarks for the face region, then
			# convert the facial landmark (x, y)-coordinates to a NumPy
			# array
			shape = predictor(gray, rect)
			shape = face_utils.shape_to_np(shape)

			# extract the left and right eye coordinates, then use the
			# coordinates to compute the eye aspect ratio for both eyes
			leftEye = shape[lStart:lEnd]
			rightEye = shape[rStart:rEnd]
			leftEAR = eye_aspect_ratio(leftEye)
			rightEAR = eye_aspect_ratio(rightEye)

			# average the eye aspect ratio together for both eyes
			ear = (leftEAR + rightEAR) / 2.0

			# compute the convex hull for the left and right eye, then
			# visualize each of the eyes
			leftEyeHull = cv2.convexHull(leftEye)
			rightEyeHull = cv2.convexHull(rightEye)
			cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
			cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

			# check to see if the eye aspect ratio is below the blink
			# threshold, and if so, increment the blink frame counter
			if ear < EYE_AR_THRESH:
				COUNTER += 1

				# if the eyes were closed for a sufficient number of frames,
				# then sound the alarm
				if COUNTER >= EYE_AR_CONSEC_FRAMES:
					# if the alarm is not on, turn it on
					if not ALARM_ON:
						ALARM_ON = True

						# the alarm path is hard-coded here (the argparse
						# option above is commented out), so start a daemon
						# thread to play the sound in the background
						# without blocking the main loop
						t = Thread(target=sound_alarm,
							args=("alarm.wav",))
						t.daemon = True
						t.start()

					# draw an alarm on the frame
					cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
						cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


			# otherwise, the eye aspect ratio is not below the blink
			# threshold
			else:

				# reset the eye frame counter
				COUNTER = 0
				ALARM_ON = False

			# draw the computed eye aspect ratio on the frame to help
			# with debugging and setting the correct eye aspect ratio
			# thresholds and frame counters
			cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
				cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


		# show the frame
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF

		# if the `q` key was pressed, break from the loop
		if key == ord("q"):
			break

	# do a bit of cleanup
	cv2.destroyAllWindows()
	vs.stop()
print("[INFO] starting video stream thread...")

#vs = FileVideoStream(args["video"]).start()
fileStream = True
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
fileStream = False
time.sleep(1.0)

# loop over frames from the video stream
startTime = datetime.datetime.now()

while True:
    # if this is a file video stream, then we need to check if
    # there any more frames left in the buffer to process
    if fileStream and not vs.more():
        break

    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
Example #31
 def run(self):
     ap = argparse.ArgumentParser()
     ap.add_argument("-p", "--shape-predictor", default='shape_predictor_68_face_landmarks.dat')
     args = vars(ap.parse_args())
     EYE_AR_THRESH = 0.2
     EYE_AR_CONSEC_FRAMES = 2
     COUNTER = 0
     TOTAL = 0

     print("[INFO] loading facial landmark predictor...")
     detector = dlib.get_frontal_face_detector()
     predictor = dlib.shape_predictor(args["shape_predictor"])

     (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
     (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
     (uStart, dEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]  # nose landmark indexes (not used below)
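     # (the 68-point model maps "left_eye" to indexes 42-47 and "right_eye"
     #  to 36-41, which is why the slices points[42:48] and points[36:42]
     #  below pick out the two eyes)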

     vs = VideoStream(src=0).start()
     # vs = VideoStream(usePiCamera=True).start()
     fileStream = False

     #fourcc = cv2.VideoWriter_fourcc(*'DIVX')
     #out = cv2.VideoWriter('output.avi', fourcc, 2.6, (1600, 1000))
     time.sleep(1.0)
     fps = FPS().start()
     # loop over frames from the video stream
     while True:
         if fileStream and not vs.more():
             break

         frame = vs.read()
         frame = imutils.resize(frame, width=1800, height=850)
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
         rects = detector(gray, 0)
         for rect in rects:
             shape = predictor(gray, rect)
             points = face_utils.shape_to_np(shape)
             leftEye = points[42:48]  # take the landmark points for the left eye
             rightEye = points[36:42]  # take the landmark points for the right eye
             leftEAR = self.eye_aspect_ratio(leftEye)
             rightEAR = self.eye_aspect_ratio(rightEye)
             ear = (leftEAR + rightEAR) / 2.0
             x = leftEye[0]
             x1 = 1800 - x[0]  # mirror the x-coordinate across the resized frame width
             pyautogui.moveTo(x1, x[1])  # move the mouse cursor to follow the (mirrored) left eye
             leftEyeHull = cv2.convexHull(leftEye)
             rightEyeHull = cv2.convexHull(rightEye)
             cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
             cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
             if ear < EYE_AR_THRESH:
                 COUNTER += 1
                 # simulate a left mouse click (button down, then up) while the eyes are closed
                 win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
                 win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)

             if COUNTER >= EYE_AR_CONSEC_FRAMES:
                 TOTAL += 1
                 COUNTER = 0

             cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
             cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
             #frame1 = cv2.resize(frame, (1600, 1000))
             #out.write(frame1)
         cv2.imshow("Frame", frame)
         key = cv2.waitKey(1) & 0xFF

         if key == ord("q"):
             break

     fps.stop()
     print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
     print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
     vs.stream.release()
     #out.release()
     cv2.destroyAllWindows()
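
The run() method above references self.eye_aspect_ratio, so it presumably lives on a class that defines that helper. A minimal hypothetical sketch of such a wrapper (class and helper names are assumed, not taken from the original listing):

from scipy.spatial import distance as dist

class BlinkMouseController:
    def eye_aspect_ratio(self, eye):
        # vertical distances between the two pairs of vertical eye landmarks
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])
        # horizontal distance between the eye corners
        C = dist.euclidean(eye[0], eye[3])
        # large when the eye is open, near zero when it is closed
        return (A + B) / (2.0 * C)

    # the run() method from Example #31 would be attached here, after which
    # BlinkMouseController().run() starts the eye-controlled mouse loop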