def get_all_landmarks(self, frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = self.detector(gray, 0)
    landmarks = []
    for rect in rects:
        shape = self.predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        landmarks.append(shape)
    return landmarks
def main():
    camera = PiCamera()
    camera.resolution = (1024, 768)
    camera.start_preview()
    # Camera warm-up time
    sleep(1)
    camera.capture('foo.jpg', resize=(1024, 768))
    camera.stop_preview()
    camera.close()

    start = time.time()
    img = cv.imread('foo.jpg')
    h, w = img.shape[:2]
    # rotate 270 degrees; shift the matrix so the rotated content fits the swapped (h, w) canvas
    M = cv.getRotationMatrix2D((w / 2, h / 2), 270, 1.0)
    M[0, 2] += (h - w) / 2
    M[1, 2] += (w - h) / 2
    img = cv.warpAffine(img, M, (h, w))

    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('/home/pi/Desktop/shape_predictor_68_face_landmarks.dat')
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    rects = detector(gray, 0)
    if (len(rects) == 0):
        return "Eyes: Failed Detection. Try Repositioning the Camera"
    print(rects)
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        clone = img.copy()
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        for (x, y) in leftEye:
            cv.circle(clone, (x, y), 1, (0, 0, 255), -1)
        for (x, y) in rightEye:
            cv.circle(clone, (x, y), 1, (0, 0, 255), -1)
        leftEAR = ear_fn(leftEye)
        rightEAR = ear_fn(rightEye)
        ear = (leftEAR + rightEAR) / 2.0
        # decide the eye state first; returning here immediately would skip drawing and display
        if ear < EAR:
            eyeOpenClose = "Eyes: Closed"
        else:
            eyeOpenClose = "Eyes: Open"
        cv.putText(clone, eyeOpenClose, (100, 200), cv.FONT_ITALIC,
                   0.7, (0, 0, 255), 2)
        end = time.time()
        cv.imshow("Image", clone)
        cv.waitKey(0)
        print(end - start)
        # report the state of the first detected face once the image has been shown
        return "Closed" if ear < EAR else "Open"
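# The example above calls an ear_fn helper and compares against an EAR threshold constant,
# neither of which is shown. A minimal sketch of the standard eye-aspect-ratio computation,
# assuming SciPy is available; the threshold value is an assumption to tune per camera:
from scipy.spatial import distance as dist

EAR = 0.25  # assumed closed-eye threshold

def ear_fn(eye):
    # vertical distances between the upper and lower eyelid landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)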
def detect_parts(image, filename):
	distances = []
	# resize the image, and convert it to grayscale
	image = imutils.resize(image, width=200, height=200)
	
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	# detect faces in the grayscale image
	rects = detector(gray, 1)
	
	# loop over the face detections
	for (i, rect) in enumerate(rects):
		shape = predictor(gray, rect)
		shape = face_utils.shape_to_np(shape)
		distances = euclidean_all(shape)
		# output = face_utils.visualize_facial_landmarks(image, shape)
		# visualize all facial landmarks with a transparent overlay
		# cv2.imshow("Image", output)
		# cv2.waitKey(0)	
	return distances
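# detect_parts relies on a euclidean_all helper that is not shown here. A plausible sketch,
# assuming it flattens the pairwise distances between all 68 landmarks into one feature vector:
import numpy as np
from scipy.spatial import distance as dist

def euclidean_all(shape):
    # distance from every landmark to every other landmark, flattened into a single vector
    distances = []
    for i in range(len(shape)):
        for j in range(i + 1, len(shape)):
            distances.append(dist.euclidean(shape[i], shape[j]))
    return np.asarray(distances)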
def main():
    # return
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Unable to connect to camera.")
        return
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(face_landmark_path)

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            face_rects = detector(frame, 0)

            if len(face_rects) > 0:
                shape = predictor(frame, face_rects[0])
                shape = face_utils.shape_to_np(shape)

                reprojectdst, euler_angle = get_head_pose(shape)

                for (x, y) in shape:
                    cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

                for start, end in line_pairs:
                    cv2.line(frame, reprojectdst[start], reprojectdst[end], (0, 0, 255))

                cv2.putText(frame, "X: " + "{:7.2f}".format(euler_angle[0, 0]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0), thickness=2)
                cv2.putText(frame, "Y: " + "{:7.2f}".format(euler_angle[1, 0]), (20, 50), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0), thickness=2)
                cv2.putText(frame, "Z: " + "{:7.2f}".format(euler_angle[2, 0]), (20, 80), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0), thickness=2)

            cv2.imshow("demo", frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cap.release()
    cv2.destroyAllWindows()
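# This head-pose demo depends on get_head_pose and line_pairs, which are not included in the
# snippet. A minimal sketch under stated assumptions: a generic 6-point 3D face model, camera
# intrinsics approximated from the frame size, cv2.solvePnP for the pose, and a small cube
# reprojected for visualisation (not the original implementation).
import numpy as np

model_points_3d = np.float32([
    [0.0, 0.0, 0.0],           # nose tip               (landmark 30)
    [0.0, -330.0, -65.0],      # chin                   (landmark 8)
    [-225.0, 170.0, -135.0],   # left eye outer corner  (landmark 36)
    [225.0, 170.0, -135.0],    # right eye outer corner (landmark 45)
    [-150.0, -150.0, -125.0],  # left mouth corner      (landmark 48)
    [150.0, -150.0, -125.0]])  # right mouth corner     (landmark 54)

cube_3d = np.float32([[sx, sy, sz] for sx in (-100, 100)
                      for sy in (-100, 100) for sz in (0, 200)])
line_pairs = [(0, 1), (1, 3), (3, 2), (2, 0),
              (4, 5), (5, 7), (7, 6), (6, 4),
              (0, 4), (1, 5), (2, 6), (3, 7)]

def get_head_pose(shape, size=(480, 640)):
    image_points = np.float32([shape[30], shape[8], shape[36],
                               shape[45], shape[48], shape[54]])
    focal = size[1]
    camera_matrix = np.float32([[focal, 0, size[1] / 2],
                                [0, focal, size[0] / 2],
                                [0, 0, 1]])
    dist_coeffs = np.zeros((4, 1))
    _, rvec, tvec = cv2.solvePnP(model_points_3d, image_points,
                                 camera_matrix, dist_coeffs)
    # reproject a cube around the head so the caller can draw its edges via line_pairs
    reprojected, _ = cv2.projectPoints(cube_3d, rvec, tvec, camera_matrix, dist_coeffs)
    reprojectdst = [tuple(map(int, p.ravel())) for p in reprojected]
    # recover Euler angles from the rotation
    rotation_mat, _ = cv2.Rodrigues(rvec)
    pose_mat = cv2.hconcat((rotation_mat, tvec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
    return reprojectdst, euler_angle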
Example #5
def calculate_distance(reference_clip, compare_clip):
    # construct the argument parser and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--shape-predictor", required=True, default = "shape_predictor_68_face_landmarks.dat",
    # 	help="path to facial landmark predictor")
    # ap.add_argument("-r", "--picamera", type=int, default=-1,
    # 	help="whether or not the Raspberry Pi camera should be used")
    # args = vars(ap.parse_args())

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    # detects the face region itself
    detector = dlib.get_frontal_face_detector()
    # locates the facial landmarks inside the detected face region

    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # predictor = dlib.shape_predictor(args["shape_predictor"])

    # vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    # video display reference: https://076923.github.io/posts/Python-opencv-4/
    # https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html
    # capture = cv2.VideoCapture("cut_2.mp4")
    clips = [reference_clip, compare_clip]

    time.sleep(2.0)

    clips_frame_info = []
    for clip in clips:
        i = 0
        every_frame_info = []
        # loop over the frames from the video stream
        while True:
            # grab the frame from the threaded video stream, resize it to
            # have a maximum width of 400 pixels, and convert it to
            # ret, frame = capture.read()  # frame is a numpy array
            frame = clip.get_frame(i * 1.0 / clip.fps)
            i += skip_frame_rate  # at 60 fps it is fine to skip a few frames
            if (i * 1.0 / clip.fps) > clip.duration:
                break
            # if not ret:
            # 	print("Error")
            # 	break

            # a larger width improves detection accuracy but costs more computational power
            # increasing the resolution of the input image prior to face detection may allow us to detect more faces in the image
            frame = imutils.resize(frame, width=800)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the grayscale frame
            # locate the face regions themselves
            rects = detector(gray, 0)

            if len(rects) > 0:
                # loop over the detected faces and keep only the largest face box
                max_width = 0
                max_rect = None
                for rect in rects:
                    if int(rect.width()) > max_width:
                        max_width = int(rect.width())
                        max_rect = rect
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy array
                shape = predictor(gray, max_rect)
                shape = face_utils.shape_to_np(shape)
                every_frame_info.append(shape)
            else:
                every_frame_info.append([])

        clips_frame_info.append(np.array(every_frame_info))

    cv2.destroyAllWindows()

    min_size = min(len(clips_frame_info[0]), len(clips_frame_info[1]))
    min_diff = float("inf")
    min_idx = 0
    for i in range(min_size):
        if len(clips_frame_info[0][i]) > 0 and len(
                clips_frame_info[1][i]) > 0:  # both frames contain a face
            # distance between the outer eye corners (landmarks 36 and 45) of the two clips
            left_eye = ((clips_frame_info[0][i][36][0] -
                         clips_frame_info[1][i][36][0])**2 +
                        (clips_frame_info[0][i][36][1] -
                         clips_frame_info[1][i][36][1])**2)**0.5
            right_eye = ((clips_frame_info[0][i][45][0] -
                          clips_frame_info[1][i][45][0])**2 +
                         (clips_frame_info[0][i][45][1] -
                          clips_frame_info[1][i][45][1])**2)**0.5
            total_diff = left_eye + right_eye
            if min_diff > total_diff:
                min_diff = total_diff
                min_idx = i

    return min_diff, (min_idx *
                      skip_frame_rate) / clip.fps  # the minimum distance and its position in seconds
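# calculate_distance expects moviepy-style clips (objects with get_frame, fps and duration)
# and a module-level skip_frame_rate. A hedged usage sketch; the file names and the skip
# value are placeholders, not part of the original code:
from moviepy.editor import VideoFileClip

skip_frame_rate = 5  # assumed: only analyse every 5th frame

if __name__ == "__main__":
    reference = VideoFileClip("reference.mp4")
    compare = VideoFileClip("compare.mp4")
    diff, seconds = calculate_distance(reference, compare)
    print("closest match: distance %.2f at %.2f s" % (diff, seconds))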
frame_check = 20
detect = dlib.get_frontal_face_detector()
predict = dlib.shape_predictor(r"C:\Users\akshaybahadur21\Documents\GitHub\Drowsiness_Detection\shape_predictor_68_face_landmarks.dat")  # the .dat model file is the crux of the code

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
cap = cv2.VideoCapture(0)
flag = 0
while True:
	ret, frame = cap.read()
	frame = imutils.resize(frame, width=450)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	subjects = detect(gray, 0)
	for subject in subjects:
		shape = predict(gray, subject)
		shape = face_utils.shape_to_np(shape)  # convert to a NumPy array
		leftEye = shape[lStart:lEnd]
		rightEye = shape[rStart:rEnd]
		leftEAR = eye_aspect_ratio(leftEye)
		rightEAR = eye_aspect_ratio(rightEye)
		ear = (leftEAR + rightEAR) / 2.0
		leftEyeHull = cv2.convexHull(leftEye)
		rightEyeHull = cv2.convexHull(rightEye)
		cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
		cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
		if ear < thresh:
			flag += 1
			print (flag)
			if flag >= frame_check:
				cv2.putText(frame, "****************ALERT!****************", (10, 30),
					cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
Example #7
def detect_landmarks(face, frame, scale_x=0, scale_y=0):
    (x, y, w, h) = (int(e) for e in face)
    rectangle = dlib.rectangle(x, y, x + w, y + h)
    face_landmarks = landmarks_detector(frame, rectangle)
    return face_utils.shape_to_np(face_landmarks)
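# detect_landmarks assumes a module-level landmarks_detector and an (x, y, w, h) face box,
# for example from an OpenCV Haar cascade. A hedged setup and usage sketch; the file names
# and the cascade choice are assumptions:
import cv2
import dlib

landmarks_detector = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
for face in face_cascade.detectMultiScale(gray, 1.3, 5):
    landmarks = detect_landmarks(face, gray)
    print(landmarks.shape)  # (68, 2)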
Example #8
def blinkCounter(video):
    directions = []
    TOTAL = 0
    COUNTER = 0
    Total_Frames = 0
    time_for_blink = []
    consecutive_frames = []
    print("starting blink detection on: ", video)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(data_set)
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    vs = FileVideoStream(video).start()
    fileStream = True
    time.sleep(1.0)
    while in_loop:
        if fileStream and not vs.more():
            return TOTAL, Total_Frames, time_for_blink, directions

        frame = vs.read()
        Total_Frames += 1
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            if ear < EYE_AR_THRESH:
                COUNTER += 1
                consecutive_frames.append(Total_Frames)
            else:
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                    if len(consecutive_frames) != 0:
                        timeStamp = consecutive_frames[0] / 30
                        time_for_blink.append(int(timeStamp))
                consecutive_frames = []
                COUNTER = 0
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            return TOTAL, Total_Frames, time_for_blink, directions
        if key == ord("7"):
            print("Upper Left")
            save_position("UL", Total_Frames, directions)
        if key == ord("8"):
            print("Up")
            save_position("U", Total_Frames, directions)
        if key == ord("9"):
            print("Upper Right")
            save_position("UR", Total_Frames, directions)
        if key == ord("4"):
            print("left")
            save_position("L", Total_Frames, directions)
        if key == ord("5"):
            print("middle")
            save_position("M", Total_Frames, directions)
        if key == ord("6"):
            print("right")
            save_position("R", Total_Frames, directions)
        if key == ord("1"):
            print("down left")
            save_position("DL", Total_Frames, directions)
        if key == ord("2"):
            print("down")
            save_position("D", Total_Frames, directions)
        if key == ord("3"):
            print("down right")
            save_position("DR", Total_Frames, directions)

    cv2.destroyAllWindows()
    vs.stop()
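# blinkCounter leans on several module-level names that the snippet does not define
# (in_loop, data_set, EYE_AR_THRESH, EYE_AR_CONSEC_FRAMES, save_position). A hedged sketch
# of plausible definitions; the threshold values are common defaults, not the author's:
in_loop = True
data_set = "shape_predictor_68_face_landmarks.dat"
EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 3

def save_position(direction, frame_number, directions):
    # remember which way the subject was looking and the frame at which it was logged
    directions.append((direction, frame_number))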
def auto_capturing(auto_path, man_path, mar_threshold, ear_threshold,
                   frame_waiter, verbose):
    """
    Creates a cv2 window that opens a web camera and, when the conditions hold (eyes open and smiling),
    takes a snapshot; manual shots are also available.

    :param auto_path: str :                 Directory name where automatically captured photos will be stored
    :param man_path: str :                  Directory name where manual captured photos will be stored
    :param mar_threshold:  (float, float) : Mouth Aspect Ratio threshold, (min, max):
                                            controls the smiling threshold; the default numbers come
                                            from trial and error and may differ for
                                            different mouth shapes. min controls smiling
                                            without teeth, max controls smiling with teeth
    :param ear_threshold: float :           Eye Aspect Ratio threshold; if EAR is greater than
                                            or equal to this number, the eyes are open
                                            and ready for a shot
    :param frame_waiter: float :            Number of frames that camera should wait before taking a snapshot
    :param verbose: bool:                   Show contours around facial features and MAR and EAR values
    :return:                                None
    """
    COUNTER = 0  # frame counter
    TOTAL = 0  # picture counter to update filename after each shot
    try:
        video_stream = VideoStream(src=0).start()
        time.sleep(1.0)
        cv2.namedWindow("auto-capture")

        while True:
            frame = video_stream.read()
            frame = imutils.resize(frame, width=840)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 0)

            for rect in rects:
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                # get mouth
                mouth = shape[mouth_start:mouth_end]
                MAR = smile(mouth)

                # get eyes
                left_eye = shape[left_eye_start:left_eye_end]
                right_eye = shape[right_eye_start:right_eye_end]
                left_EAR = blink(left_eye)
                right_EAR = blink(right_eye)
                EAR = (left_EAR + right_EAR) / 2

                # check if the eyes are open and the mouth is smiling
                if (MAR <= mar_threshold[0]
                        or MAR > mar_threshold[1]) and (EAR >= ear_threshold):
                    COUNTER += 1
                else:
                    # wait several frames before shooting:
                    # this checks whether the user keeps smiling or the smile was just momentary
                    if COUNTER >= frame_waiter:
                        TOTAL += 1
                        frame = video_stream.read(
                        )  # load the frame where user is ready (smiling)
                        # time.sleep(.05)
                        img_name = f"auto_capture_frame_{TOTAL}.png"
                        cv2.imwrite(auto_path + "/" + img_name, frame)
                        print(f"{img_name} written!")  # console output
                    COUNTER = 0

                if verbose:
                    # connect facial landmark points to make a contour around a landmark
                    mouth_hull = cv2.convexHull(mouth)
                    left_eye_hull = cv2.convexHull(left_eye)
                    right_eye_hull = cv2.convexHull(right_eye)

                    cv2.drawContours(frame, [mouth_hull], -1, (0, 255, 0), 1)
                    cv2.drawContours(frame, [left_eye_hull], -1,
                                     (214, 16, 232), 1)
                    cv2.drawContours(frame, [right_eye_hull], -1,
                                     (214, 16, 232), 1)

                    cv2.putText(frame, f"MAR: {MAR:.4f}", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.putText(frame, f"LEFT EAR: {left_EAR:.2f}", (10, 60),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (214, 16, 232),
                                2)
                    cv2.putText(frame, f"RIGHT EAR: {right_EAR:.2f}", (10, 90),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (214, 16, 232),
                                2)

            cv2.imshow("Frame", frame)

            key_pressed = cv2.waitKey(1) & 0xFF

            if key_pressed == ord('q') or key_pressed == 27:
                break
            elif key_pressed == 13 or key_pressed == 32:
                """
                When esc or q pressed take a manual shot, and continue the loop
                """
                frame = video_stream.read()
                time.sleep(.004)
                img_name = f"manual_capture_{TOTAL}.png"
                cv2.imwrite(man_path + "/" + img_name, frame)
                print(f"{img_name} written!")
                TOTAL += 1
                continue

        cv2.destroyAllWindows()
        video_stream.stop()
    except Exception:
        print("Make sure you have a Web Cam and it's connected to the device")
Example #10
def create_resource(ti=15,
                    v="Hybernation",
                    inac=5,
                    asleep=2,
                    fold="/home/satya/Pictures"):
    # Initialize Pygame and load music
    ft = 0
    pygame.mixer.init()
    pygame.mixer.music.load(r'assets\audio\faded.ogg')
    # Minimum threshold of eye aspect ratio below which the alarm is triggered
    EYE_ASPECT_RATIO_THRESHOLD = 0.3
    # Minimum consecutive frames for which eye ratio is below threshold for alarm to be triggered
    EYE_ASPECT_RATIO_CONSEC_FRAMES = 50
    # Counts the number of consecutive frames below the threshold value
    COUNTER = 0
    b = 1
    flag1 = True
    global inace
    # Load face cascade which will be used to draw a rectangle around detected faces.
    face_cascade = cv2.CascadeClassifier(
        r"assets\haarcascades\haarcascade_frontalface_default.xml")

    # This function calculates and return eye aspect ratio
    def eye_aspect_ratio(eye):
        A = distance.euclidean(eye[1], eye[5])
        B = distance.euclidean(eye[2], eye[4])
        C = distance.euclidean(eye[0], eye[3])
        # EAR = eye_aspect_ratio
        ear = (A + B) / (2 * C)
        return ear

    # Load face detector and predictor, uses dlib shape predictor file
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        r'assets\shape_predictor_68_face_landmarks.dat')
    # Extract indexes of facial landmarks for the left and right eye
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']
    # Start webcam video capture; optionally give the camera time to initialize, e.g. time.sleep(2)
    a = 1
    while (True):
        video_capture = cv2.VideoCapture(0)
        nowt = datetime.datetime.now()
        nt1 = nowt.minute
        newt = nowt + datetime.timedelta(minutes=ti)
        nt2 = newt.minute
        print("***********")
        print(nowt, newt)
        print("***********")

        while (True):
            # Read each frame and flip it, and convert to grayscale
            ret, frame = video_capture.read()
            frame = cv2.flip(frame, 1)
            if ret:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # Detect facial points through detector function
                faces = detector(gray, 0)
                # Detect faces through haarcascade_frontalface_default.xml
                face_rectangle = face_cascade.detectMultiScale(gray, 1.3, 5)
                # Draw rectangle around each face detected
                for (x, y, w, h) in face_rectangle:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)
                # Detect facial points
                # print(type(faces))
                if len(faces) == 0:
                    COUNTER = 0
                    inacs = datetime.datetime.now()
                    # print(inacs)
                    if flag1:
                        inace = inacs + datetime.timedelta(minutes=inac)
                        inace = inace.minute
                        flag1 = False
                    if (inacs.minute > inace and b == 1 and flag1 == False):
                        pygame.mixer.music.play(-1)
                        b = 0
                    print(inacs, inace)
                    if (b == 0 and flag1 == False and inacs.minute >
                        (inace + 1)):
                        flag1 = True
                        b = 1
                        pygame.mixer.music.stop()
                        pic = pyautogui.screenshot()
                        sour = fold + "/screenshot.png"
                        pic.save(sour)
                        ft = 1
                for face in faces:
                    if (b != 1):
                        flag1 = True
                        b = 1
                        pygame.mixer.music.stop()
                    shape = predictor(gray, face)
                    shape = face_utils.shape_to_np(shape)
                    # Get array of coordinates of leftEye and rightEye
                    leftEye = shape[lStart:lEnd]
                    rightEye = shape[rStart:rEnd]
                    # Calculate aspect ratio of both eyes
                    leftEyeAspectRatio = eye_aspect_ratio(leftEye)
                    rightEyeAspectRatio = eye_aspect_ratio(rightEye)
                    eyeAspectRatio = (leftEyeAspectRatio +
                                      rightEyeAspectRatio) / 2
                    # Use hull to remove convex contour discrepancies and draw eye shape around eyes
                    leftEyeHull = cv2.convexHull(leftEye)
                    rightEyeHull = cv2.convexHull(rightEye)
                    cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                    cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
                    # Detect if eye aspect ratio is less than threshold
                    # print(eyeAspectRatio)
                    if (eyeAspectRatio < EYE_ASPECT_RATIO_THRESHOLD):
                        COUNTER += 1
                        # If the number of consecutive frames exceeds the threshold, raise the alarm
                        print(COUNTER, EYE_ASPECT_RATIO_CONSEC_FRAMES)
                        if COUNTER >= EYE_ASPECT_RATIO_CONSEC_FRAMES and COUNTER >= asleep and a == 1:
                            pygame.mixer.music.play(-1)
                            a = 0
                            cv2.putText(frame, "You are Drowsy", (150, 200),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                                        (0, 0, 255), 2)
                        if COUNTER >= EYE_ASPECT_RATIO_CONSEC_FRAMES and COUNTER >= (
                                asleep + 20):
                            pic = pyautogui.screenshot()
                            sour = fold + "/screenshot.png"
                            pic.save(sour)
                            pygame.mixer.music.stop()
                            ft = 1
                            break
                    else:
                        pygame.mixer.music.stop()
                        a = 1
                        COUNTER = 0
                if ft == 1:
                    break
            cv2.imshow('Video', frame)
            kp = cv2.waitKey(1)
            nt1 = datetime.datetime.now().minute
            if (nt2 < nt1):
                break
            elif (kp & 0xFF == ord('q')):
                return
        if ft == 1:
            video_capture.release()
            cv2.destroyAllWindows()
            pygame.mixer.music.stop()
            break
        else:
            video_capture.release()
            cv2.destroyAllWindows()
            pygame.mixer.music.stop()
            time.sleep(60 * ti)
    if ft == 1:
        time.sleep(10)
        if v == "Hybernation":
            changeStatus.hybernation()
        else:
            changeStatus.shut_down()
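# create_resource hands control to a changeStatus helper that is not part of the snippet.
# A hedged sketch for Windows (the example already uses Windows-style asset paths); the
# shutdown commands are the standard Windows CLI flags, adjust for other platforms:
import os

class changeStatus:
    @staticmethod
    def hybernation():
        os.system("shutdown /h")       # hibernate the machine
    @staticmethod
    def shut_down():
        os.system("shutdown /s /t 0")  # shut down immediately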
def gitsearch():
    # This part contains the main code.

    video_capture = cv2.VideoCapture(
        0
    )  # starts the web cam; if one is attached externally use 1 or 2 (trial and error)
    detector = dlib.get_frontal_face_detector()
    predict_path = '/home/shaaran/PycharmProjects/shape_predictor_68_face_landmarks.dat'
    predictor = dlib.shape_predictor(predict_path)
    count = 0
    tfms = tfms_from_model(resnet34,
                           sz,
                           aug_tfms=transforms_side_on,
                           max_zoom=1.1)
    data = ImageClassifierData.from_paths(PATH, tfms=tfms)
    print(data.classes)

    learn = ConvLearner.pretrained(arch, data, precompute=True)
    print('loading requirements......')
    print(
        'This has been made by shaaran alias devshaaran, if you are using this code anywhere for research or educational purposes, please give reference.ENJOY!'
    )
    learn.precompute = False
    #learn.fit(1e-1, 1)
    learn.fit(1e-1, 3, cycle_len=1)
    learn.load('224_all')
    print('loading done !')

    # Initialize some variables
    face_locations = []

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face detection processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale image
        rects = detector(gray, 1)

        for (i, rect) in enumerate(rects):
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            for (x, y) in shape:
                cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(small_frame,
                                                         model="cnn")
        counts = 0
        counts += 1

        # Display the results
        for top, right, bottom, left in face_locations:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            lower_red = np.array([0, 0, 253])
            upper_red = np.array([0, 0, 255])

            # Extract the region of the image that contains the face
            face_image = frame[top:bottom, left:right]
            mask = cv2.inRange(face_image, lower_red, upper_red)
            res = cv2.bitwise_and(face_image, face_image, mask=mask)

            #face_landmarks_list = face_recognition.face_landmarks(face_image)

            #for face_landmarks in face_landmarks_list:

            #for facial_feature in facial_features:
            #d.line(face_landmarks[facial_feature], width=5)

            #pil_image.show()

            cv2.imshow('vid', face_image)
            cv2.imshow('res', res)
            count += 1
            cv2.imwrite('0.jpg', res)
            #cv2.imwrite((output_loc + '\\' + str(count)+ str(counts) + '.jpg'), res)

            try:

                # learn = ConvLearner.pretrained(arch, data, precompute=True)
                trn_tfms, val_tfms = tfms_from_model(arch, sz)
                im = val_tfms(open_image('0.jpg'))
                learn.precompute = False
                preds = learn.predict_array(im[None])
                #print(preds)
                print(data.classes[np.argmax(preds)])

            except Exception as e:
                print(e)

        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
Example #12
import dlib
from imutils import face_utils
import numpy as np
import cv2

video_capture = cv2.VideoCapture(0)

face_detector = dlib.get_frontal_face_detector()
predictor_path = "../DlibData/shape_predictor_68_face_landmarks.dat"
face_predictor = dlib.shape_predictor(predictor_path)

while True:
    ret, img = video_capture.read()
    img = cv2.flip(img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector(gray, 1)

    for face in faces:
        landmark = face_predictor(gray, face)
        landmark = face_utils.shape_to_np(landmark)

        for (i, (x, y)) in enumerate(landmark):
            cv2.circle(img, (x, y), 1, (0, 0, 255), -1)

    cv2.imshow("image", img)

    if cv2.waitKey(1) == ord('q'):
        break
    def videoLoop(self):

        try:

            EARG = 0
            MARG = 0

            print("[INFO] Cargando paquete predictor...")
            detector = dlib.get_frontal_face_detector()
            predictor = dlib.shape_predictor(
                "shape_predictor_68_face_landmarks.dat")

            time.sleep(1.0)

            (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
            (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
            (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

            # Auxiliary variables
            cont = 0
            contaux = 0
            SumaE = 0
            contp = 0
            i = 0
            contM = 0
            contMAu = 0
            conpat = 1

            # Timing variables
            t_ojos_cerrados = 0
            t_ini_ojos_cerrados = 0
            con_t_ojos = 0
            con_gene = 0

            contador_general_distraccion = 0

            t_boca_abierta = 0
            t_ini_boca_abierta = 0
            con_t_boca = 0

            t_distraccion = 0
            self.t_global = 0

            self.Facul = 0
            self.Pro = 0
            # Video variables

            Cont_video = 1

            self.Estado.set("ESPERANDO...")
            self.Facultad.set("ESPERANDO...")
            self.Tema.set("ESPERANDO...")

            nombre = eg.enterbox(msg='Ingrese su nombre COMPLETO:',
                                 title='Nombre del Usuario',
                                 strip=True,
                                 image=None)
            id = eg.enterbox(msg='Ingrese su numero de Identificacion:',
                             title='Identificacion del Usuario',
                             strip=True,
                             image=None)

            Lista_tiempo = []
            Lista_boca = []
            Lista_ojos = []
            Lista_tema = []
            Lista_estado = []

            self.Estado_lista = "CONCENTRADO"

            # Main loop for data acquisition
            messagebox.showinfo('Aviso', 'Calibracion de valores...')
            while not self.stopEvent.is_set() and self.salir == 0:

                self.Imprimir()

                if contaux == 0:
                    print("\033[1;36m" +
                          "[INFO] Calibrando data para ojos abiertos..." +
                          '\033[0;m')
                    messagebox.showinfo(
                        'Aviso',
                        'Por favor, mantenga los ojos ABIERTOS durante 3 segundos'
                    )
                    t_inicial = time.monotonic()

                elif contaux == 1:
                    print("\033[1;36m" +
                          "[INFO] Calibrando data para ojos cerrados..." +
                          '\033[0;m')
                    messagebox.showinfo(
                        'Aviso',
                        'Por favor, mantenga los ojos CERRADOS durante 3 segundos'
                    )
                    t_inicial = time.monotonic()

                elif contaux == 2:
                    print("\033[1;36m" +
                          "[INFO] Calibrando data para boca cerrada..." +
                          '\033[0;m')
                    messagebox.showinfo(
                        'Aviso',
                        'Por favor, mantenga la boca CERRADA durante 3 segundos'
                    )
                    t_inicial = time.monotonic()

                elif contaux == 3:
                    print("\033[1;36m" +
                          "[INFO] Calibrando data para boca abierta..." +
                          '\033[0;m')
                    messagebox.showinfo(
                        'Aviso',
                        'Por favor, mantenga la boca ABIERTA durante 3 segundos'
                    )
                    t_inicial = time.monotonic()

                self.frame = self.Video.read()
                self.frame = imutils.resize(self.frame, width=830)

                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 0)

                for rect in rects:

                    shape = predictor(gray, rect)
                    shape = face_utils.shape_to_np(shape)

                    leftEye = shape[lStart:lEnd]
                    rightEye = shape[rStart:rEnd]

                    Mouth = shape[mStart:mEnd]

                    EARG = round(self.eye_aspect_ratio(rightEye, leftEye), 4)
                    MARG = round(self.mouth_aspect_ratio(Mouth), 4)

                    if self.Empieza < 1:

                        self.Ear.set(str(round(EARG, 3)))
                        self.Mar.set(str(round(MARG, 3)))
                    else:
                        self.Ear.set("")
                        self.Mar.set("")

################################################################################################
                if cont == 4 and self.Empieza == 1:
                    if con_gene == 0:
                        self.t_global = time.monotonic()
                        con_gene = 1

                    self.Inge.place_forget()

                    self.Arqui.place_forget()
                    self.EARL.place_forget()
                    self.EARL1.place_forget()
                    self.MARL.place_forget()
                    self.MARL1.place_forget()
                    self.Titulo.place_forget()

                    ##############################################
                    ##############################################
                    if EARG <= Umbral_ojos or MARG >= Umbral_boca:

                        self.Estado_lista = "POCA ATENCIÓN"
                        t_boca_abierta = time.monotonic() - t_ini_boca_abierta
                        t_ojos_cerrados = time.monotonic(
                        ) - t_ini_ojos_cerrados

                        if t_boca_abierta >= t_ojos_cerrados:
                            t_distraccion = t_boca_abierta
                        else:
                            t_distraccion = t_ojos_cerrados

                    else:
                        self.Estado_lista = "CONCENTRADO"
                    ##############################################
                    ##############################################

                    if EARG <= Umbral_ojos and con_t_ojos == 0:

                        t_ini_ojos_cerrados = time.monotonic()
                        con_t_ojos = 1

                        #if (time.monotonic() - t_ini_ojos_cerrados) >=2:
                    #		self.Estado_lista = "POCA ATENCIÓN"

                    elif EARG > Umbral_ojos and con_t_ojos == 1:
                        t_ojos_cerrados = time.monotonic(
                        ) - t_ini_ojos_cerrados

                        con_t_ojos = 0

                        if t_ojos_cerrados >= 3:

                            contador_general_distraccion += 1
                            #self.Estado_lista = "CONCENTRADO"
                        t_ojos_cerrados = 0

                    if MARG >= Umbral_boca and con_t_boca == 0:

                        t_ini_boca_abierta = time.monotonic()
                        con_t_boca = 1

                        #if time.monotonic() - t_ini_boca_abierta >=2:
                        #	self.Estado_lista = "POCA ATENCIÓN"

                    elif MARG < Umbral_boca and con_t_boca == 1:

                        t_boca_abierta = time.monotonic() - t_ini_boca_abierta
                        con_t_boca = 0

                        if t_boca_abierta > 3:
                            t_distraccion = t_distraccion + t_boca_abierta
                            contador_general_distraccion += 1
                            #self.Estado_lista = "CONCENTRADO"

                    #########################################################
                    self.Imprimir()
                    t_actual_lista = round((time.monotonic() - self.t_global),
                                           4)

                    Lista_tiempo.append(t_actual_lista)
                    Lista_boca.append(MARG)
                    Lista_ojos.append(EARG)
                    Lista_tema.append(self.Tema_lista)
                    Lista_estado.append(self.Estado_lista)

                    ###########################################################
                    ###########################################################
                    if self.next == 1:
                        self.next = 0
                        self.Estado_lista = "CAMBIO DE TEMA"

                        t_actual_lista = round(
                            (time.monotonic() - self.t_global), 4)

                        Lista_tiempo.append(t_actual_lista)
                        Lista_boca.append(MARG)
                        Lista_ojos.append(EARG)
                        Lista_tema.append(self.Tema_lista)
                        Lista_estado.append(self.Estado_lista)

                        if Cont_video < 4:
                            self.Pro += 1
                            Cap = 1
                            self.video_clase.release()
                            self.video_clase = self.Video_Path(
                                self.Facul, self.Pro, Cap)
                            grabbed, frame = self.video_clase.read()
                            frame = imutils.resize(frame, width=950)
                            contador_general_distraccion = 0
                            Cont_video += 1
                        else:
                            messagebox.showinfo('Aviso', 'Fin de la sesion.')
                            break

                    if contador_general_distraccion >= 7:
                        self.Estado_lista = "UMBRAL DISTRACCIONES SUPERADO"
                        Op = messagebox.askyesno(
                            "Aviso!",
                            "Se detecto poco interes, ¿Desea cambiar de tema?")
                        if Op:
                            if Cont_video < 4:
                                self.Pro += 1
                                Cap = 1
                                self.video_clase.release()
                                self.video_clase = self.Video_Path(
                                    self.Facul, self.Pro, Cap)
                                grabbed, frame = self.video_clase.read()
                                frame = imutils.resize(frame, width=950)
                                contador_general_distraccion = 0
                                Cont_video += 1
                            else:
                                messagebox.showinfo('Aviso',
                                                    'Fin de la sesion.')
                                break

                        else:
                            contador_general_distraccion = 0

                    grabbed, frame = self.video_clase.read()
                    self.panel.place(x=10, y=10)
                    frame = imutils.resize(frame, width=950)
                    if not grabbed:

                        if Cont_video < 4:
                            if contador_general_distraccion >= 4:
                                self.Pro += 1
                                Cap = 1
                            else:
                                Cap += 1

                            self.video_clase.release()
                            self.video_clase = self.Video_Path(
                                self.Facul, self.Pro, Cap)
                            grabbed, frame = self.video_clase.read()
                            frame = imutils.resize(frame, width=830)
                            contador_general_distraccion = 0
                            Cont_video += 1
                        else:
                            messagebox.showinfo('Aviso', 'Fin de la sesion.')
                            break

                    else:

                        #image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        image = Image.fromarray(image)
                        image = ImageTk.PhotoImage(image)
#######################################################################################################
                else:
                    for (x, y) in shape:
                        cv2.circle(self.frame, (x, y), 1, (0, 0, 255), -1)

                    image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                    image = Image.fromarray(image)
                    image = ImageTk.PhotoImage(image)

                if self.panel is None:
                    self.panel = tki.Label(image=image,
                                           relief="sunken",
                                           borderwidth=3)
                    self.panel.image = image
                    self.panel.place(x=80, y=115)

                else:
                    self.panel.configure(image=image,
                                         relief="sunken",
                                         borderwidth=3)
                    self.panel.image = image

                t_act = time.monotonic() - t_inicial
                #########################################################################################################
                # Initial calibration stage to determine the baseline values for eyes and mouth
                # Eyes open
                if cont == 0:

                    contaux = -1
                    SumaE = SumaE + EARG
                    contp = contp + 1

                    if t_act >= 3:

                        promEarAbi = SumaE / contp
                        print(
                            "[INFO] Valor de Eye Aspect Ratio (EAR) para ojos abiertos: ",
                            promEarAbi)
                        cont = 1
                        contaux = 1
                        contp = 0
                        SumaE = 0

                # Eyes closed
                elif cont == 1:

                    contaux = -1
                    SumaE = SumaE + EARG
                    contp = contp + 1

                    if t_act >= 3:

                        promEarCe = SumaE / contp
                        print(
                            "[INFO] Valor de Eye Aspect Ratio (EAR) para ojos cerrados: ",
                            promEarCe)
                        cont = 2
                        contaux = 2
                        contp = 0
                        SumaE = 0

                # Mouth closed
                elif cont == 2:

                    contaux = -1
                    SumaE = SumaE + MARG
                    contp = contp + 1

                    contM += 1
                    contMAu += 1

                    if t_act >= 3:

                        promMarCe = round((SumaE / contp), 4)
                        print(
                            "[INFO] Valor de Mouth Aspect Ratio (MAR) para boca cerrada: ",
                            promMarCe)
                        cont = 3
                        contaux = 3
                        contp = 0
                        SumaE = 0

                # Mouth open
                elif cont == 3:

                    contaux = -1
                    SumaE = SumaE + MARG
                    contp = contp + 1
                    contMAu += 1

                    if t_act >= 3:

                        promMarAbi = round((SumaE / contp), 4)
                        print(
                            "[INFO] Valor de Mouth Aspect Ratio (MAR) para boca abierta: ",
                            promMarAbi)
                        cont = 4
                        contaux = 4
                        contp = 0
                        SumaE = 0
                        Umbral_ojos = round(
                            (((promEarAbi + promEarCe) / 2) - 0.05), 4)
                        Umbral_boca = round(((promMarCe + promMarAbi) / 2), 4)
                        print("\033[1;36m" +
                              "[INFO] Umbrales establecidos..." + '\033[0;m')
                        print("[INFO] Valor Umbral para ojos: ", Umbral_ojos)
                        print("[INFO] Valor Umbral para boca: ", Umbral_boca)
                        print("\033[1;33m" +
                              "[INFO] Calibracion finalizada..." + '\033[0;m')

                if self.loop:
                    break
            self.Close()
            self.t_global = time.monotonic() - self.t_global

            Lista_tiempo.append(self.t_global)
            Lista_boca.append(MARG)
            Lista_ojos.append(EARG)
            Lista_tema.append(self.Tema_lista)
            Lista_estado.append(self.Estado_lista)
            con_dist = 0
            for n in Lista_estado:
                if n == "POCA ATENCIÓN":
                    con_dist += 1

            t_distraccion = con_dist * 0.12

            print("[INFO] Tiempo Total: ", self.t_global)
            print("[INFO] Tiempo Distraido: ", t_distraccion)

            porcentaje = (round(t_distraccion / self.t_global, 4)) * 100
            porce_2 = (round(
                ((self.t_global - t_distraccion) / self.t_global), 4)) * 100

            print("[INFO] Porcentaje de distraccion: ", porcentaje, "%")
            print("[INFO] Porcentaje de atencion: ", porce_2, "%")

            self.Exportar(nombre, id, Lista_boca, Lista_ojos, Lista_tema,
                          Lista_estado, Lista_tiempo, porcentaje, porce_2,
                          promEarAbi, promEarCe, promMarCe, promMarAbi,
                          Umbral_ojos, Umbral_boca, t_distraccion)

        except RuntimeError:
            print("\033[1;31m" + "[ERROR] Error en RunTime " + '\033[0;m')
Example #14
def dlibFaceDetector(inputFilePath, goodPath, badPath):
    if printDetails:
        file.writelines(getTime + "\t" +
                        "Histogram of Oriented Gradients: (neighbours:\t")
    global badResult, goodResult
    inputFile = cv2.imread(inputFilePath)
    # shape[:2] is (height, width)
    # inputFile = imutils.resize(inputFile, 500)
    grayImage = cv2.cvtColor(inputFile, cv2.COLOR_BGR2GRAY)
    height, width = inputFile.shape[:2]
    print("width: " + str(width) + " height: " + str(height) + "\n")
    rects = detector(grayImage, 1)
    if len(rects) != 1:
        cv2.imwrite(badPath + pathlib.Path(inputFilePath).name, inputFile)
        badResult += 1
    else:
        goodResult += 1
        for (i, rect) in enumerate(rects):
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(grayImage, rect)
            shape = face_utils.shape_to_np(shape)

            # Show that a face was detected - can be skipped
            # cv2.rectangle(inputFile, (x, y), (x + w, y + int(h+(h*0.2))), (255, 0, 0), 2)
            # convert dlib's rectangle to a OpenCV-style bounding box
            # [i.e., (x, y, w, h)], then draw the face bounding box

            # proof that a face is detected
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            if x < 0:
                x = 0
            elif x > width:
                x = width - 1

            if (y < 0):
                y = 0
            elif y > height:
                y = height - 1

            if w < 0:
                w = 0
            elif w > width:
                w = width - 1

            if (h < 0):
                h = 0
            elif h > height:
                h = height - 1

            cv2.rectangle(inputFile, (x, y), (x + w, y + h), (0, 255, 0), 2)

            smart_h = int(h * chinHeightROI)
            roi_color = inputFile[y:y + h, x:x + w]
            #
            # if smart_h > h:
            #     roi_color = inputFile[y:y + (height - 1), x:x + w]
            # else:
            #     roi_color = inputFile[y:y + h + int(smart_h), x:x + w]

            roi_gray = grayImage[y:y + height, x:x + w]
            # croppedImage = cv2.clone
            # cv2.imshow("Output", roi_color)
            # cv2.waitKey(0)
            # show the face number
            # cv2.putText(inputFile, "Face #{}".format(i + 1), (x - 10, y - 10),
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # loop over the (x, y)-coordinates for the facial landmarks
            # and draw them on the image
            for (x, y) in shape:
                cv2.circle(inputFile, (x, y), 1, (0, 0, 255), -1)

            # show the output image with the face detections + facial landmarks
            cv2.imwrite(goodPath + pathlib.Path(inputFilePath).name, inputFile)
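# dlibFaceDetector reads several module-level globals that are not shown (printDetails, file,
# getTime, detector, predictor, chinHeightROI, badResult, goodResult). A hedged sketch of a
# plausible setup; the log file name and the chin margin value are assumptions:
import datetime
import dlib

printDetails = True
getTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
file = open("detector_log.txt", "a")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
chinHeightROI = 0.2  # extra chin margin as a fraction of the face height
badResult = 0
goodResult = 0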
Example #15
def run_detector():
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold for to set off the
    # alarm
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 48

    # initialize the frame counter as well as a boolean used to
    # indicate if the alarm is going off
    COUNTER = 0
    ALARM_ON = False

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    # vs = VideoStream(src=args["webcam"]).start()
    #     # time.sleep(1.0)
    cam = cv2.VideoCapture(1 + cv2.CAP_DSHOW)

    # Current time in millis
    start_time = int(round(time.time() * 1000))
    # time 5 seconds (5000 ms) later in millis
    stop_time = int(round(time.time() * 1000)) + 5000
    # loop over frames from the video stream
    while True:

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        ret, frame = cam.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

                # if the eyes were closed for a sufficient number of frames,
                # then sound the alarm
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    # if the alarm is not on, turn it on
                    if not ALARM_ON:
                        ALARM_ON = True

                        # check to see if an alarm file was supplied,
                        # and if so, start a thread to have the alarm
                        # sound played in the background
                        t = Thread(target=sound_alarm, args=("alarm.wav", ))
                        t.daemon = True
                        t.start()

                    # draw an alarm on the frame
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # otherwise, the eye aspect ratio is not below the blink
            # threshold, so reset the counter and alarm
            else:
                COUNTER = 0
                ALARM_ON = False

            # draw the computed eye aspect ratio on the frame to help
            # with debugging and setting the correct eye aspect ratio
            # thresholds and frame counters
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # show the frame
        cv2.imshow("Frame", frame)
        cv2.waitKey(1) & 0xFF

        # Time to run detector for
        # Currently set to 5 seconds for testing (see stop_time above)
        start_time = int(round(time.time() * 1000))
        if start_time > stop_time:
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    cam.release()
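# run_detector plays sound_alarm("alarm.wav") on a background thread, but sound_alarm is not
# defined in the snippet. A minimal sketch, assuming the playsound package and that alarm.wav
# sits next to the script:
from playsound import playsound

def sound_alarm(path):
    # blocking call, which is why the caller runs it on a daemon thread
    playsound(path)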
Example #16
def camera(yVal):
    cond = ''
    while (True):
        #Read each frame and flip it, and convert to grayscale
        ret, frame = video_capture.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, (300, 240))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #Detect facial points through detector function
        faces = detector(gray, 0)

        #making reference lines
        frame = cv2.line(frame, (0, yVal - 40), (1279, yVal - 40), (0, 0, 255),
                         2)
        frame = cv2.line(frame, (0, yVal), (1279, yVal), (0, 0, 255), 2)

        #         cv2.imshow('EYE_DETECTION',frame )

        #Detect facial points
        for face in faces:

            shape = predictor(gray, face)
            shape = face_utils.shape_to_np(shape)

            #Get array of coordinates of leftEye and rightEye
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            # Use hull to remove convex contour discrepancies and draw eye shape around eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            print(leftEye)
            #             print(leftEye[0][1]) #focus on y coordinate
            cv2.imshow('EYE_DETECTION', frame)

            if yVal - 40 <= leftEye[0][1] <= yVal:
                cond = 'stop'
                print("Good position")
#                 print(moveLinear(cond))
            elif leftEye[0][1] < yVal - 40:
                cond = 'up'
                print("Table is moving up")
#                 moveLinear(cond)
            elif leftEye[0][1] > yVal:
                cond = 'down'
                print("Table is moving down")
#                 moveLinear(cond)

#Show video feed
#         cv2.imshow('EYE_DETECTION',frame )
        if cv2.waitKey(1) & 0xFF == ord("q"):
            time.sleep(2)

            print('New Distance detecting... ')
            return

        elif len(faces) > 1:
            print("There is more than one person in the frame")
#             mainVoice('There are more than one person in the frame')
        elif len(faces) == 0:
            print('No face detected')
Example #17
def main():
    cap = cv2.VideoCapture(0)
    flag = 0
    openEye = 0
    final = ''
    text = ''
    finalString = []
    L = []
    closed = False
    timer = 0
    pts = deque(maxlen=512)

    while True:
        ret, frame = cap.read()
        #cam1 = Camera()
        #frame = cam1.get_frame()
        frame = imutils.resize(frame, width=640)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detect(gray, 0)
        for subject in subjects:
            shape = predict(gray, subject)
            shape = face_utils.shape_to_np(shape)  # converting to NumPy Array
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            if ear < thresh:  #closed eyes
                flag += 1
                pts.appendleft(flag)
                openEye = 0
            else:
                openEye += 1
                flag = 0
                pts.appendleft(flag)
            for i in range(1, len(pts)):
                if pts[i] > pts[i - 1]:
                    #print(pts[i - 1], pts[i])
                    if pts[i] > 30 and pts[i] < 60:
                        print("Eyes closed for 30-60 frames: dash")
                        L.append("-")
                        pts = deque(maxlen=512)
                        break
                    elif pts[i] > 15 and pts[i] < 30:
                        print("Eyes closed for 15-30 frames: dot")
                        L.append(".")
                        pts = deque(maxlen=512)
                        break

                    elif pts[i] > 60:
                        print("Eyes closed for more than 60 frames: delete last symbol")
                        L.pop()
                        pts = deque(maxlen=512)
                        break

        if L:
            print(L)
        if openEye > 60:
            if L:
                print(L)
            text = convertMorseToText(''.join(L))

            if text is not None:
                print(text)
                finalString.append(text)
                final = ''.join(finalString)
            L = []
        cv2.putText(frame, "Predicted :  " + final, (10, 470),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    cap.release()
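# The Morse-blink example above relies on a convertMorseToText() helper that is
# not defined in this excerpt. One plausible sketch, assuming the dots and dashes
# collected in L are joined into a single Morse sequence that maps to one
# character (the actual lookup table used by the author may differ):
MORSE_TO_CHAR = {
    '.-': 'A', '-...': 'B', '-.-.': 'C', '-..': 'D', '.': 'E',
    '..-.': 'F', '--.': 'G', '....': 'H', '..': 'I', '.---': 'J',
    '-.-': 'K', '.-..': 'L', '--': 'M', '-.': 'N', '---': 'O',
    '.--.': 'P', '--.-': 'Q', '.-.': 'R', '...': 'S', '-': 'T',
    '..-': 'U', '...-': 'V', '.--': 'W', '-..-': 'X', '-.--': 'Y',
    '--..': 'Z',
}

def convertMorseToText(code):
    # return the decoded character, or None if the sequence is not valid Morse
    return MORSE_TO_CHAR.get(code)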
Example #18
def main():
    global COUNTER
    global TOTAL

    shape_predictor_path = '../shape_predictor_68_face_landmarks.dat'
    video_path = '../blink_test.mp4'
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor_path)
    print('Loaded detector successfully')

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    vs = FileVideoStream(video_path).start()
    file_stream = True
    while True:

        if file_stream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1
            else:
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                COUNTER = 0
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
Example #19
    def navigationMode_1(self):

        print(
            "[INFO-NAVIGATION FUNCTION CLASS] : navigationMode_1() is called")
        print("[STATUS] : NAVIGATION MODE 1 STARTED")

        for f in self.stream:
            self.image = f.array
            print("[STATUS] : Reading Frame")
            self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
            print("[STATUS] : accesss opencv")
            self.rects = self.detector(self.gray, 0)
            print("[STATUS] : accesss detector")
            for rect in self.rects:
                self.shape = self.predictor(self.gray, rect)
                print("[STATUS] : LANDMARK POINT IS EXTRACTED")
                self.shape = face_utils.shape_to_np(self.shape)
                print("[STATUS] : LANDMARK POINT CONVERTED INTO NUMPY ARRAY")
                self.mouthPoint = self.shape[self.mStart:self.mEnd]
                print("[STATUS] : MOUTH PART IS EXTRACTED")
                self.leftEye = self.shape[self.lStart:self.lEnd]
                print("[STATUS] : LEFT EYE PART IS EXTRACTED")
                self.rightEye = self.shape[self.rStart:self.rEnd]
                print("[STATUS] : RIGHT EYE PART IS EXTRACTED")
                self.mouthMAR = self._mouth_aspect_ratio(self.mouthPoint)
                print("[STATUS] : MAR VALUE IS OBTAINED")
                self.leftEAR = self.eye_aspect_ratio(self.leftEye)
                print("[STATUS] : LEAR VALUE IS OBTAINED")
                self.rightEAR = self.eye_aspect_ratio(self.rightEye)
                print("[STATUS] : REAR VALUE IS OBTAINED")

                #_____STAGE_1___________________________________________________________________________________________________

                print("[STATUS] : STAGE 1")

                if self.mouthMAR < self.MOUTH_AR_THRESH_CLOSED:
                    print("[STATUS] : MOUTH CLOSE FOR THIS FRAME")
                    self.closed_Counter += 1
                    self.open_Counter = 0
                    self.Mouth_Open_progress.emit(self.open_Counter)
                    self.Mouth_Close_progress.emit(self.closed_Counter)

                else:
                    if self.mouthMAR > self.MOUTH_AR_THRESH_OPENED:
                        print("[STATUS] : MOUTH OPEN FOR THIS FRAME")
                        self.open_Counter += 1
                        self.closed_Counter = 0
                        self.Mouth_Open_progress.emit(self.open_Counter)
                        self.Mouth_Close_progress.emit(self.closed_Counter)

                    else:
                        print("[STATUS] : NO MOUTH DATA FOR THIS FRAME")
                        self.closed_Counter = 0
                        self.open_Counter = 0
                        self.Mouth_Open_progress.emit(self.open_Counter)
                        self.Mouth_Close_progress.emit(self.closed_Counter)

                if self.leftEAR < self.EYE_AR_THRESH:
                    print("[STATUS] : LEFT EYE CLOSE FOR THIS FRAME")
                    self.LEFT_CLOSE_COUNTER += 1
                    self.LEFT_OPEN_COUNTER = 0
                    self.LeftEye_Open_progress.emit(self.LEFT_OPEN_COUNTER)
                    self.LeftEye_Close_progress.emit(self.LEFT_CLOSE_COUNTER)
                else:
                    print("[STATUS] : LEFT EYE OPEN FOR THIS FRAME")
                    self.LEFT_OPEN_COUNTER += 1
                    self.LEFT_CLOSE_COUNTER = 0
                    self.LeftEye_Open_progress.emit(self.LEFT_OPEN_COUNTER)
                    self.LeftEye_Close_progress.emit(self.LEFT_CLOSE_COUNTER)

                if self.rightEAR < self.EYE_AR_THRESH:
                    print("[STATUS] : RIGHT EYE CLOSE FOR THIS FRAME")
                    self.RIGHT_CLOSE_COUNTER += 1
                    self.RIGHT_OPEN_COUNTER = 0
                    self.RightEye_Open_progress.emit(self.RIGHT_OPEN_COUNTER)
                    self.RightEye_Close_progress.emit(self.RIGHT_CLOSE_COUNTER)

                else:
                    print("[STATUS] : RIGHT EYE OPEN FOR THIS FRAME")
                    self.RIGHT_OPEN_COUNTER += 1
                    self.RIGHT_CLOSE_COUNTER = 0
                    self.RightEye_Open_progress.emit(self.RIGHT_OPEN_COUNTER)
                    self.RightEye_Close_progress.emit(self.RIGHT_CLOSE_COUNTER)

#_____STAGE_2___________________________________________________________________________________________________

                print("[STATUS] : STAGE 2")

                if self.LEFT_CLOSE_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                    print("[STATUS] : LEFT EYE CLOSE VALIDATED")
                    self.CURRENT_LEFT_EYE_STATE = "OPEN"
                    self.LEFT_EYE_OPEN_TOTAL += 1
                    self.LEFT_EYE_CLOSE_TOTAL = 0
                    self.LEFT_CLOSE_COUNTER = 0
                    self.LEFT_OPEN_COUNTER = 0
                    self.LeftEye_State_progress.emit(
                        self.CURRENT_LEFT_EYE_STATE)

                else:
                    if self.LEFT_OPEN_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                        print("[STATUS] : LEFT EYE OPEN VALIDATED")
                        self.CURRENT_LEFT_EYE_STATE = "CLOSE"
                        self.LEFT_EYE_CLOSE_TOTAL += 1
                        self.LEFT_EYE_OPEN_TOTAL = 0
                        self.LEFT_OPEN_COUNTER = 0
                        self.LEFT_CLOSE_TOTAL = 0
                        self.LeftEye_State_progress.emit(
                            self.CURRENT_LEFT_EYE_STATE)

                    else:
                        print("[STATUS] : LEFT EYE NOT DEFINED")
                        self.CURRENT_LEFT_EYE_STATE = "NOT DEFINED"
                        self.LeftEye_State_progress.emit(
                            self.CURRENT_LEFT_EYE_STATE)

                if self.RIGHT_CLOSE_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                    print("[STATUS] : RIGHT EYE CLOSE VALIDATED")
                    self.CURRENT_RIGHT_EYE_STATE = "OPEN"
                    self.RIGHT_EYE_OPEN_TOTAL += 1
                    self.RIGHT_EYE_CLOSE_TOTAL = 0
                    self.RIGHT_CLOSE_COUNTER = 0
                    self.RIGHT_OPEN_COUNTER = 0
                    self.RightEye_State_progress.emit(
                        self.CURRENT_RIGHT_EYE_STATE)

                else:
                    if self.RIGHT_OPEN_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                        print("[STATUS] : RIGHT EYE OPEN VALIDATED")
                        self.CURRENT_RIGHT_EYE_STATE = "CLOSE"
                        self.RIGHT_EYE_CLOSE_TOTAL += 1
                        self.RIGHT_EYE_OPEN_TOTAL = 0
                        self.RIGHT_OPEN_COUNTER = 0
                        self.RIGHT_CLOSE_COUNTER = 0
                        self.RightEye_State_progress.emit(
                            self.CURRENT_RIGHT_EYE_STATE)

                    else:
                        print("[STATUS] : RIGHT EYE NOT DEFINED")
                        self.CURRENT_RIGHT_EYE_STATE = "NOT DEFINED"
                        self.RightEye_State_progress.emit(
                            self.CURRENT_RIGHT_EYE_STATE)
                        print("[STATUS] : RIGHT EYE NOT DEFINED SIGNAL SENT")

                if self.open_Counter >= self.MOUTH_AR_CONSEC_FRAME:
                    print("[STATUS] : MOUTH OPEN VALIDATED")
                    self.CURRENT_MOUTH_STATE = "OPEN"
                    self.TOTAL_MOUTH_OPEN_TOTAL += 1
                    self.TOTAL_MOUTH_CLOSE_TOTAL = 0
                    self.Mouth_State_progress.emit(self.CURRENT_MOUTH_STATE)

                else:
                    if self.closed_Counter >= self.MOUTH_AR_CONSEC_FRAME:
                        print("[STATUS] : MOUTH CLOSE VALIDATED")
                        self.CURRENT_MOUTH_STATE = "CLOSE"
                        self.TOTAL_MOUTH_CLOSE_TOTAL += 1
                        self.TOTAL_MOUTH_OPEN_TOTAL = 0
                        self.Mouth_State_progress.emit(
                            self.CURRENT_MOUTH_STATE)

                    else:
                        print("[STATUS] : MOUTH NOT DEFINED VALIDATED")
                        self.CURRENT_MOUTH_STATE = "NOT DEFINED"
                        print(
                            "[STATUS] : MOUTH NOT DEFINED VALIDATED SIGNAL WILL BE SENT"
                        )
                        self.Mouth_State_progress.emit(
                            self.CURRENT_MOUTH_STATE)
                        print(
                            "[STATUS] : MOUTH NOT DEFINED VALIDATED SIGNAL SENT"
                        )

#_____STAGE_3___________________________________________________________________________________________________
                print("[STATUS] : DEBUGGER POINT 11_")

                if self.LEFT_EYE_OPEN_TOTAL != 1 or self.LEFT_EYE_CLOSE_TOTAL != 1:
                    self.PREVIOUS_LEFT_EYE_STATE = self.CURRENT_LEFT_EYE_STATE
                if self.RIGHT_EYE_OPEN_TOTAL != 1 or self.RIGHT_EYE_CLOSE_TOTAL != 1:
                    self.PREVIOUS_RIGHT_EYE_STATE = self.CURRENT_RIGHT_EYE_STATE
                if self.TOTAL_MOUTH_CLOSE_TOTAL != 1 or self.TOTAL_MOUTH_OPEN_TOTAL != 1:
                    self.PREVIOUS_MOUTH_STATE = self.CURRENT_MOUTH_STATE


                if self.PREVIOUS_LEFT_EYE_STATE == self.CURRENT_LEFT_EYE_STATE and \
                  self.PREVIOUS_RIGHT_EYE_STATE == self.CURRENT_RIGHT_EYE_STATE and self.PREVIOUS_MOUTH_STATE == self.CURRENT_MOUTH_STATE :
                    self.patternCaller += 1
                else:
                    self.patternCaller = 0

                if self.patternCaller > 4:
                    self.patternResult = eightCombo(self,
                                                    self.CURRENT_LEFT_EYE_STATE,
                                                    self.CURRENT_RIGHT_EYE_STATE,
                                                    self.CURRENT_MOUTH_STATE)
                    self.Pattern_progress.emit(self.patternResult)
                    self.finished.emit()
                    self.rawCapture.close()
                    self.camera.close()

            self.rawCapture.truncate(0)
            print("[STATUS] : DONE THIS FRAME.NEXT FRAME")

        self.rawCapture.close()
        self.camera.close()
def blink_detector(output_textfile, input_video):

    Q = Queue(maxsize=7)

    FRAME_MARGIN_BTW_2BLINKS = 3
    MIN_AMPLITUDE = 0.04
    MOUTH_AR_THRESH = 0.35
    MOUTH_AR_THRESH_ALERT = 0.30
    MOUTH_AR_CONSEC_FRAMES = 20

    EPSILON = 0.01  # for discrete derivative (avoiding zero derivative)

    class Blink():
        def __init__(self):

            self.start = 0  #frame
            self.startEAR = 1
            self.peak = 0  #frame
            self.peakEAR = 1
            self.end = 0  #frame
            self.endEAR = 0
            self.amplitude = (self.startEAR + self.endEAR -
                              2 * self.peakEAR) / 2
            self.duration = self.end - self.start + 1
            self.EAR_of_FOI = 0  #FrameOfInterest
            self.values = []
            self.velocity = 0  #Eye-closing velocity

    def eye_aspect_ratio(eye):
        # compute the euclidean distances between the two sets of
        # vertical eye landmarks (x, y)-coordinates
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])

        # compute the euclidean distance between the horizontal
        # eye landmark (x, y)-coordinates
        C = dist.euclidean(eye[0], eye[3])

        if C < 0.1:  #practical finetuning due to possible numerical issue as a result of optical flow
            ear = 0.3
        else:
            # compute the eye aspect ratio
            ear = (A + B) / (2.0 * C)
        if ear > 0.45:  #practical finetuning due to possible numerical issue as a result of optical flow
            ear = 0.45
        # return the eye aspect ratio
        return ear

    def mouth_aspect_ratio(mouth):

        A = dist.euclidean(mouth[14], mouth[18])

        C = dist.euclidean(mouth[12], mouth[16])

        if C < 0.1:  #practical finetuning
            mar = 0.2
        else:
            # compute the mouth aspect ratio
            mar = (A) / (C)

        # return the mouth aspect ratio
        return mar

    def EMERGENCY(ear, COUNTER):
        if ear < 0.21:
            COUNTER += 1

            if COUNTER >= 50:
                print('EMERGENCY SITUATION (EYES CLOSED FOR TOO LONG)')
                print(COUNTER)
                COUNTER = 0
        else:
            COUNTER = 0
        return COUNTER

    def Linear_Interpolate(start, end, N):
        m = (end - start) / (N + 1)
        x = np.linspace(1, N, N)
        y = m * (x - 0) + start
        return list(y)

    def Ultimate_Blink_Check():
        #Given the input "values", retrieve blinks and their quantities
        retrieved_blinks = []
        MISSED_BLINKS = False
        values = np.asarray(Last_Blink.values)
        THRESHOLD = 0.4 * np.min(values) + 0.6 * np.max(
            values)  # this is to split extrema in highs and lows
        N = len(values)
        Derivative = values[1:N] - values[0:N - 1]  # [-1 1] kernel for the discrete derivative
        i = np.where(Derivative == 0)
        if len(i[0]) != 0:
            for k in i[0]:
                if k == 0:
                    Derivative[0] = -EPSILON
                else:
                    Derivative[k] = EPSILON * Derivative[k - 1]
        M = N - 1  #len(Derivative)
        ZeroCrossing = Derivative[1:M] * Derivative[0:M - 1]
        x = np.where(ZeroCrossing < 0)
        xtrema_index = x[0] + 1
        XtremaEAR = values[xtrema_index]
        Updown = np.ones(len(xtrema_index))  # 1 means high, -1 means low for each extremum
        # mark whether each extremum occurs in the upper or lower half of the signal
        Updown[XtremaEAR < THRESHOLD] = -1
        #concatenate the beginning and end of the signal as positive high extrema
        Updown = np.concatenate(([1], Updown, [1]))
        XtremaEAR = np.concatenate(([values[0]], XtremaEAR, [values[N - 1]]))
        xtrema_index = np.concatenate(([0], xtrema_index, [N - 1]))
        ##################################################################

        Updown_XeroCrossing = Updown[1:len(Updown)] * Updown[0:len(Updown) - 1]
        jump_index = np.where(Updown_XeroCrossing < 0)
        numberOfblinks = int(len(jump_index[0]) / 2)
        selected_EAR_First = XtremaEAR[jump_index[0]]
        selected_EAR_Sec = XtremaEAR[jump_index[0] + 1]
        selected_index_First = xtrema_index[jump_index[0]]
        selected_index_Sec = xtrema_index[jump_index[0] + 1]
        if numberOfblinks > 1:
            MISSED_BLINKS = True
        if numberOfblinks == 0:
            print(Updown, Last_Blink.duration)
            print(values)
            print(Derivative)
        for j in range(numberOfblinks):
            detected_blink = Blink()
            detected_blink.start = selected_index_First[2 * j]
            detected_blink.peak = selected_index_Sec[2 * j]
            detected_blink.end = selected_index_Sec[2 * j + 1]

            detected_blink.startEAR = selected_EAR_First[2 * j]
            detected_blink.peakEAR = selected_EAR_Sec[2 * j]
            detected_blink.endEAR = selected_EAR_Sec[2 * j + 1]

            detected_blink.duration = detected_blink.end - detected_blink.start + 1
            detected_blink.amplitude = 0.5 * (
                detected_blink.startEAR - detected_blink.peakEAR) + 0.5 * (
                    detected_blink.endEAR - detected_blink.peakEAR)
            detected_blink.velocity = (
                detected_blink.endEAR - selected_EAR_First[2 * j + 1]) / (
                    detected_blink.end - selected_index_First[2 * j + 1] + 1
                )  #eye opening ave velocity
            retrieved_blinks.append(detected_blink)

        return MISSED_BLINKS, retrieved_blinks

    def Blink_Tracker(EAR, IF_Closed_Eyes, Counter4blinks, TOTAL_BLINKS, skip):
        BLINK_READY = False
        #If the eyes are closed
        if int(IF_Closed_Eyes) == 1:
            Current_Blink.values.append(EAR)
            Current_Blink.EAR_of_FOI = EAR  #Save to use later
            if Counter4blinks > 0:
                skip = False
            if Counter4blinks == 0:
                Current_Blink.startEAR = EAR  #EAR_series[6] is the EAR for the frame of interest(the middle one)
                Current_Blink.start = reference_frame - 6  #reference-6 points to the frame of interest which will be the 'start' of the blink
            Counter4blinks += 1
            if Current_Blink.peakEAR >= EAR:  #deciding the min point of the EAR signal
                Current_Blink.peakEAR = EAR
                Current_Blink.peak = reference_frame - 6

        # otherwise, the eyes are open in this frame
        else:

            if Counter4blinks < 2 and skip == False:  # Wait to approve or reject the last blink
                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                if ((reference_frame - 6) -
                        Last_Blink.end) > FRAME_MARGIN_BTW_2BLINKS:
                    # Check so the prev blink signal is not monotonic or too small (noise)
                    if Last_Blink.peakEAR < Last_Blink.startEAR and Last_Blink.peakEAR < Last_Blink.endEAR and Last_Blink.amplitude > MIN_AMPLITUDE and Last_Blink.start < Last_Blink.peak:
                        if ((Last_Blink.startEAR - Last_Blink.peakEAR) >
                            (Last_Blink.endEAR - Last_Blink.peakEAR) * 0.25 and
                            (Last_Blink.startEAR - Last_Blink.peakEAR) * 0.25 <
                            (Last_Blink.endEAR -
                             Last_Blink.peakEAR)):  # the amplitude is balanced
                            BLINK_READY = True
                            #####THE ULTIMATE BLINK Check

                            Last_Blink.values = signal.convolve1d(
                                Last_Blink.values, [1 / 3.0, 1 / 3.0, 1 / 3.0],
                                mode='nearest')
                            # Last_Blink.values=signal.median_filter(Last_Blink.values, 3, mode='reflect')   # smoothing the signal
                            [MISSED_BLINKS,
                             retrieved_blinks] = Ultimate_Blink_Check()
                            #####
                            TOTAL_BLINKS = TOTAL_BLINKS + len(
                                retrieved_blinks
                            )  # Finally, approving/counting the previous blink candidate
                            ###Now You can count on the info of the last separate and valid blink and analyze it
                            Counter4blinks = 0
                            print("MISSED BLINKS= {}".format(
                                len(retrieved_blinks)))
                            return retrieved_blinks, int(
                                TOTAL_BLINKS
                            ), Counter4blinks, BLINK_READY, skip
                        else:
                            skip = True
                            print('rejected due to imbalance')
                    else:
                        skip = True
                        print('rejected due to noise,magnitude is {}'.format(
                            Last_Blink.amplitude))
                        print(Last_Blink.start < Last_Blink.peak)

            # if the eyes were closed for a sufficient number of frames (2 or more)
            # then this is a valid CANDIDATE for a blink
            if Counter4blinks > 1:
                Current_Blink.end = reference_frame - 7  #reference-7 points to the last frame that eyes were closed
                Current_Blink.endEAR = Current_Blink.EAR_of_FOI
                Current_Blink.amplitude = (Current_Blink.startEAR +
                                           Current_Blink.endEAR -
                                           2 * Current_Blink.peakEAR) / 2
                Current_Blink.duration = Current_Blink.end - Current_Blink.start + 1

                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                # merge two blinks that are very close together
                if (Current_Blink.start - Last_Blink.end) <= FRAME_MARGIN_BTW_2BLINKS + 1:
                    print('Merging...')
                    frames_in_between = Current_Blink.start - Last_Blink.end - 1
                    print(Current_Blink.start, Last_Blink.end,
                          frames_in_between)
                    valuesBTW = Linear_Interpolate(Last_Blink.endEAR,
                                                   Current_Blink.startEAR,
                                                   frames_in_between)
                    Last_Blink.values = Last_Blink.values + valuesBTW + Current_Blink.values
                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR
                    if Last_Blink.peakEAR > Current_Blink.peakEAR:  #update the peak
                        Last_Blink.peakEAR = Current_Blink.peakEAR
                        Last_Blink.peak = Current_Blink.peak
                        #update duration and amplitude
                    Last_Blink.amplitude = (Last_Blink.startEAR +
                                            Last_Blink.endEAR -
                                            2 * Last_Blink.peakEAR) / 2
                    Last_Blink.duration = Last_Blink.end - Last_Blink.start + 1
                else:  #Should not Merge (a Separate blink)

                    Last_Blink.values = Current_Blink.values  #update the EAR list

                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR

                    Last_Blink.start = Current_Blink.start  #update the start
                    Last_Blink.startEAR = Current_Blink.startEAR

                    Last_Blink.peakEAR = Current_Blink.peakEAR  #update the peak
                    Last_Blink.peak = Current_Blink.peak

                    Last_Blink.amplitude = Current_Blink.amplitude
                    Last_Blink.duration = Current_Blink.duration

            # reset the eye frame counter
            Counter4blinks = 0
        retrieved_blinks = 0
        return retrieved_blinks, int(
            TOTAL_BLINKS), Counter4blinks, BLINK_READY, skip

    print('hello')
    #

    # initialize the frame counters and the total number of yawnings
    COUNTER = 0
    MCOUNTER = 0
    TOTAL = 0
    MTOTAL = 0
    TOTAL_BLINKS = 0
    Counter4blinks = 0
    skip = False  # to make sure a blink is not counted twice in the Blink_Tracker function
    Last_Blink = Blink()

    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    #Load the Facial Landmark Detector
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    #Load the Blink Detector
    loaded_svm = pickle.load(
        open('Trained_SVM_C=1000_gamma=0.1_for 7kNegSample.sav', 'rb'))
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
    print("[INFO] starting video stream thread...")

    lk_params = dict(winSize=(13, 13),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    EAR_series = np.zeros([13])
    Frame_series = np.linspace(1, 13, 13)
    reference_frame = 0
    First_frame = True
    top = tk.Tk()
    frame1 = Frame(top)
    frame1.grid(row=0, column=0)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_frame = FigureCanvasTkAgg(fig, master=frame1)
    plot_frame.get_tk_widget().pack(side=tk.BOTTOM, expand=True)
    plt.ylim([0.0, 0.5])
    line, = ax.plot(Frame_series, EAR_series)
    plot_frame.draw()

    # loop over frames from the video stream

    stream = cv2.VideoCapture(input_video)
    start = datetime.datetime.now()
    number_of_frames = 0
    while True:
        (grabbed, frame) = stream.read()
        if not grabbed:
            print('not grabbed')
            print(number_of_frames)
            break

        frame = imutils.resize(frame, width=450)

        # To Rotate by 90 degreees
        # rows=np.shape(frame)[0]
        # cols = np.shape(frame)[1]
        # M = cv2.getRotationMatrix2D((cols / 2, rows / 2),-90, 1)
        # frame = cv2.warpAffine(frame, M, (cols, rows))

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        reference_frame = reference_frame + 1
        # brighten the image (gamma correction)
        gray = adjust_gamma(gray, gamma=1.5)
        Q.put(frame)
        end = datetime.datetime.now()
        ElapsedTime = (end - start).total_seconds()

        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        if np.size(rects) != 0:
            number_of_frames = number_of_frames + 1  # only count frames in which a face is detected
            First_frame = False
            old_gray = gray.copy()
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rects[0])
            shape = face_utils.shape_to_np(shape)

            ###############YAWNING##################
            #######################################
            Mouth = shape[mStart:mEnd]
            MAR = mouth_aspect_ratio(Mouth)

            MouthHull = cv2.convexHull(Mouth)
            cv2.drawContours(frame, [MouthHull], -1, (255, 0, 0), 1)

            if MAR > MOUTH_AR_THRESH:
                MCOUNTER += 1

            elif MAR < MOUTH_AR_THRESH_ALERT:

                if MCOUNTER >= MOUTH_AR_CONSEC_FRAMES:
                    MTOTAL += 1

                MCOUNTER = 0

            ##############YAWNING####################
            #########################################

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            #EAR_series[reference_frame]=ear
            EAR_series = shift(EAR_series, -1, cval=ear)

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            ############HANDLING THE EMERGENCY SITATION################
            ###########################################################
            ###########################################################
            COUNTER = EMERGENCY(ear, COUNTER)

            # EMERGENCY SITUATION (EYES TOO LONG CLOSED) ALERT THE DRIVER IMMEDIATELY
            ############HANDLING THE EMERGENCY SITATION################
            ###########################################################
            ###########################################################

            # make sure the frame of interest for the EAR vector sits in the middle of the queue
            if Q.full() and (reference_frame > 15):
                EAR_table = EAR_series
                IF_Closed_Eyes = loaded_svm.predict(EAR_series.reshape(1, -1))
                if Counter4blinks == 0:
                    Current_Blink = Blink()
                retrieved_blinks, TOTAL_BLINKS, Counter4blinks, BLINK_READY, skip = Blink_Tracker(
                    EAR_series[6], IF_Closed_Eyes, Counter4blinks,
                    TOTAL_BLINKS, skip)
                if (BLINK_READY == True):
                    reference_frame = 20  # reset to a small value to avoid overflow on long videos
                    skip = True
                    #####
                    BLINK_FRAME_FREQ = TOTAL_BLINKS / number_of_frames
                    for detected_blink in retrieved_blinks:
                        print(detected_blink.amplitude, Last_Blink.amplitude)
                        print(detected_blink.duration, detected_blink.velocity)
                        print('-------------------')

                        if (detected_blink.velocity > 0):
                            with open(output_textfile, 'ab') as f_handle:
                                f_handle.write(b'\n')
                                np.savetxt(f_handle, [
                                    TOTAL_BLINKS, BLINK_FRAME_FREQ * 100,
                                    detected_blink.amplitude,
                                    detected_blink.duration,
                                    detected_blink.velocity
                                ],
                                           delimiter=', ',
                                           newline=' ',
                                           fmt='%.4f')

                    Last_Blink.end = -10  # re initialization
                    #####

                line.set_ydata(EAR_series)
                plot_frame.draw()
                frameMinus7 = Q.get()
                cv2.imshow("Frame", frameMinus7)
            elif Q.full():
                # discard the oldest frame to make room for new input when the queue is full
                junk = Q.get()

            key = cv2.waitKey(1) & 0xFF

            # if any key was pressed, break from the loop
            if key != 0xFF:
                break
        #Does not detect any face
        else:
            ###################Using Optical Flow############
            ###################    (Optional)    ############
            st = 0
            st2 = 0
            if (First_frame == False):
                leftEye = leftEye.astype(np.float32)
                rightEye = rightEye.astype(np.float32)
                p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, gray, leftEye,
                                                       None, **lk_params)
                p2, st2, err2 = cv2.calcOpticalFlowPyrLK(
                    old_gray, gray, rightEye, None, **lk_params)

            if np.sum(st) + np.sum(st2) == 12 and First_frame == False:

                p1 = np.round(p1).astype(int)
                p2 = np.round(p2).astype(int)
                #print(p1)

                leftEAR = eye_aspect_ratio(p1)
                rightEAR = eye_aspect_ratio(p2)

                ear = (leftEAR + rightEAR) / 2.0
                EAR_series = shift(EAR_series, -1, cval=ear)
                #EAR_series[reference_frame] = ear
                leftEyeHull = cv2.convexHull(p1)
                rightEyeHull = cv2.convexHull(p2)
                cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
                old_gray = gray.copy()
                leftEye = p1
                rightEye = p2
                ############HANDLING THE EMERGENCY SITATION################
                ###########################################################
                ###########################################################
                COUNTER = EMERGENCY(ear, COUNTER)
                ############HANDLING THE EMERGENCY SITATION################
                ###########################################################
                ###########################################################

            ###################Using Optical Flow############
            ###################                  ############

            if Q.full() and (reference_frame > 15):
                EAR_table = EAR_series
                IF_Closed_Eyes = loaded_svm.predict(EAR_series.reshape(1, -1))
                if Counter4blinks == 0:
                    Current_Blink = Blink()
                retrieved_blinks, TOTAL_BLINKS, Counter4blinks, BLINK_READY, skip = Blink_Tracker(
                    EAR_series[6], IF_Closed_Eyes, Counter4blinks,
                    TOTAL_BLINKS, skip)
                if (BLINK_READY == True):
                    reference_frame = 20  # reset to a small value to avoid overflow on long videos
                    skip = True
                    #####
                    BLINK_FRAME_FREQ = TOTAL_BLINKS / number_of_frames
                    for detected_blink in retrieved_blinks:
                        print(detected_blink.amplitude, Last_Blink.amplitude)
                        print(detected_blink.duration, Last_Blink.duration)
                        print('-------------------')
                        with open(output_textfile, 'ab') as f_handle:
                            f_handle.write(b'\n')
                            np.savetxt(f_handle, [
                                TOTAL_BLINKS, BLINK_FRAME_FREQ * 100,
                                detected_blink.amplitude,
                                detected_blink.duration,
                                detected_blink.velocity
                            ],
                                       delimiter=', ',
                                       newline=' ',
                                       fmt='%.4f')

                    Last_Blink.end = -10  # re initialization

                    #####

                line.set_ydata(EAR_series)
                plot_frame.draw()
                frameMinus7 = Q.get()
                cv2.imshow("Frame", frameMinus7)
            elif Q.full():
                junk = Q.get()

            key = cv2.waitKey(1) & 0xFF

            if key != 0xFF:
                break

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
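# blink_detector() above brightens each grayscale frame with adjust_gamma(),
# which is not defined in this excerpt. A common lookup-table implementation is
# sketched below; the author's version may differ.
import cv2
import numpy as np

def adjust_gamma(image, gamma=1.5):
    # build a lookup table mapping each pixel value [0, 255] to its
    # gamma-corrected value, then apply it to the whole image in one pass
    inv_gamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** inv_gamma) * 255
                      for i in range(256)]).astype("uint8")
    return cv2.LUT(image, table)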
Example #21
    def navigationMode_2(self):

        print(
            "[INFO-NAVIGATION FUNCTION CLASS] : navigationMode_1() is called")
        print("[STATUS] : NAVIGATION MODE 2 STARTED")

        for f in self.stream:
            self.image = f.array
            print("[STATUS] : Reading Frame")
            self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
            print("[STATUS] : accesss opencv")
            self.rects = self.detector(self.gray, 0)
            print("[STATUS] : accesss detector")
            for rect in self.rects:
                self.shape = self.predictor(self.gray, rect)
                print("[STATUS] : LANDMARK POINT IS EXTRACTED")
                self.shape = face_utils.shape_to_np(self.shape)
                print("[STATUS] : LANDMARK POINT CONVERTED INTO NUMPY ARRAY")

                self.leftEye = self.shape[self.lStart:self.lEnd]
                print("[STATUS] : LEFT EYE PART IS EXTRACTED")
                self.rightEye = self.shape[self.rStart:self.rEnd]
                print("[STATUS] : RIGHT EYE PART IS EXTRACTED")
                self.leftEAR = self.eye_aspect_ratio(self.leftEye)
                print("[STATUS] : LEAR VALUE IS OBTAINED")
                self.rightEAR = self.eye_aspect_ratio(self.rightEye)
                print("[STATUS] : REAR VALUE IS OBTAINED")

                #_____STAGE_1___________________________________________________________________________________________________

                print("[STATUS] : STAGE 1")

                if self.leftEAR < self.EYE_AR_THRESH:
                    print("[STATUS] : LEFT EYE CLOSE FOR THIS FRAME")
                    self.LEFT_CLOSE_COUNTER += 1
                    self.LEFT_OPEN_COUNTER = 0
                    self.LeftEye_Open_progress.emit(self.LEFT_OPEN_COUNTER)
                    self.LeftEye_Close_progress.emit(self.LEFT_CLOSE_COUNTER)
                else:
                    print("[STATUS] : LEFT EYE OPEN FOR THIS FRAME")
                    self.LEFT_OPEN_COUNTER += 1
                    self.LEFT_CLOSE_COUNTER = 0
                    self.LeftEye_Open_progress.emit(self.LEFT_OPEN_COUNTER)
                    self.LeftEye_Close_progress.emit(self.LEFT_CLOSE_COUNTER)

                if self.rightEAR < self.EYE_AR_THRESH:
                    print("[STATUS] : RIGHT EYE CLOSE FOR THIS FRAME")
                    self.RIGHT_CLOSE_COUNTER += 1
                    self.RIGHT_OPEN_COUNTER = 0
                    self.RightEye_Open_progress.emit(self.RIGHT_OPEN_COUNTER)
                    self.RightEye_Close_progress.emit(self.RIGHT_CLOSE_COUNTER)

                else:
                    print("[STATUS] : RIGHT EYE OPEN FOR THIS FRAME")
                    self.RIGHT_OPEN_COUNTER += 1
                    self.RIGHT_CLOSE_COUNTER = 0
                    self.RightEye_Open_progress.emit(self.RIGHT_OPEN_COUNTER)
                    self.RightEye_Close_progress.emit(self.RIGHT_CLOSE_COUNTER)

#_____STAGE_2___________________________________________________________________________________________________

                print("[STATUS] : STAGE 2")

                if self.LEFT_CLOSE_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                    print("[STATUS] : LEFT EYE CLOSE VALIDATED")
                    self.CURRENT_LEFT_EYE_STATE = "OPEN"
                    self.LEFT_EYE_OPEN_TOTAL += 1
                    self.LEFT_EYE_CLOSE_TOTAL = 0
                    self.LEFT_CLOSE_COUNTER = 0
                    self.LEFT_OPEN_COUNTER = 0
                    self.LeftEye_State_progress.emit(
                        self.CURRENT_LEFT_EYE_STATE)

                else:
                    if self.LEFT_OPEN_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                        print("[STATUS] : LEFT EYE OPEN VALIDATED")
                        self.CURRENT_LEFT_EYE_STATE = "CLOSE"
                        self.LEFT_EYE_CLOSE_TOTAL += 1
                        self.LEFT_EYE_OPEN_TOTAL = 0
                        self.LEFT_OPEN_COUNTER = 0
                        self.LEFT_CLOSE_TOTAL = 0
                        self.LeftEye_State_progress.emit(
                            self.CURRENT_LEFT_EYE_STATE)

                    else:
                        print("[STATUS] : LEFT EYE NOT DEFINED")
                        self.CURRENT_LEFT_EYE_STATE = "NOT DEFINED"
                        self.LeftEye_State_progress.emit(
                            self.CURRENT_LEFT_EYE_STATE)

                if self.RIGHT_CLOSE_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                    print("[STATUS] : RIGHT EYE CLOSE VALIDATED")
                    self.CURRENT_RIGHT_EYE_STATE = "OPEN"
                    self.RIGHT_EYE_OPEN_TOTAL += 1
                    self.RIGHT_EYE_CLOSE_TOTAL = 0
                    self.RIGHT_CLOSE_COUNTER = 0
                    self.RIGHT_OPEN_COUNTER = 0
                    self.RightEye_State_progress.emit(
                        self.CURRENT_RIGHT_EYE_STATE)

                else:
                    if self.RIGHT_OPEN_COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                        print("[STATUS] : RIGHT EYE OPEN VALIDATED")
                        self.CURRENT_RIGHT_EYE_STATE = "CLOSE"
                        self.RIGHT_EYE_CLOSE_TOTAL += 1
                        self.RIGHT_EYE_OPEN_TOTAL = 0
                        self.RIGHT_OPEN_COUNTER = 0
                        self.RIGHT_CLOSE_COUNTER = 0
                        self.RightEye_State_progress.emit(
                            self.CURRENT_RIGHT_EYE_STATE)

                    else:
                        print("[STATUS] : RIGHT EYE NOT DEFINED")
                        self.CURRENT_RIGHT_EYE_STATE = "NOT DEFINED"
                        self.RightEye_State_progress.emit(
                            self.CURRENT_RIGHT_EYE_STATE)
                        print("[STATUS] : RIGHT EYE NOT DEFINED SIGNAL SENT")

#_____STAGE_3___________________________________________________________________________________________________
                print("[STATUS] : DEBUGGER POINT 11_")

                if self.LEFT_EYE_OPEN_TOTAL != 1 or self.LEFT_EYE_CLOSE_TOTAL != 1:
                    self.PREVIOUS_LEFT_EYE_STATE = self.CURRENT_LEFT_EYE_STATE

                print("[STATUS] : DEBUGGER POINT 12_")

                if self.RIGHT_EYE_OPEN_TOTAL != 1 or self.RIGHT_EYE_CLOSE_TOTAL != 1:
                    self.PREVIOUS_RIGHT_EYE_STATE = self.CURRENT_RIGHT_EYE_STATE

                print("[STATUS] : DEBUGGER POINT 13_")

                if self.PREVIOUS_LEFT_EYE_STATE == self.CURRENT_LEFT_EYE_STATE and \
                  self.PREVIOUS_RIGHT_EYE_STATE == self.CURRENT_RIGHT_EYE_STATE :
                    self.patternCaller += 1
                else:
                    self.patternCaller = 0

                if self.patternCaller > 3:
                    self.patternResult = threeCombo(self,
                                                    self.CURRENT_LEFT_EYE_STATE,
                                                    self.CURRENT_RIGHT_EYE_STATE)
                    if self.patternResult == 'PATTERN_11':
                        self.rawCapture.close()
                        self.camera.close()

                    else:
                        print("signal emitter")

            self.rawCapture.truncate(0)
            print("[INFO] : DONE THIS FRAME.NEXT FRAME")

        self.rawCapture.close()
        self.camera.close()
def eye_state():
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
    COUNTER = 0
    while True:
        img = vs.read()
        img = imutils.resize(img, width=700)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 0)

        for face in faces:
            shapes = predictor(gray, face)
            shapes = face_utils.shape_to_np(shapes)

            eye_img_l, eye_rect_l = crop_eye(gray, eye_points=shapes[36:42])
            eye_img_r, eye_rect_r = crop_eye(gray, eye_points=shapes[42:48])

            eye_img_l = cv2.resize(eye_img_l, dsize=IMG_SIZE)
            eye_img_r = cv2.resize(eye_img_r, dsize=IMG_SIZE)
            eye_img_r = cv2.flip(eye_img_r, flipCode=1)

            # cv2.imshow('l', eye_img_l)
            # cv2.imshow('r', eye_img_r)

            eye_input_l = eye_img_l.copy().reshape(
                (1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.
            eye_input_r = eye_img_r.copy().reshape(
                (1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.

            pred_l = model.predict(eye_input_l)
            pred_r = model.predict(eye_input_r)

            # visualize
            state_l = 'O %.1f' if pred_l > 0.1 else '- %.1f'
            state_r = 'O %.1f' if pred_r > 0.1 else '- %.1f'

            state_l = state_l % pred_l
            state_r = state_r % pred_r

            cv2.rectangle(img,
                          pt1=tuple(eye_rect_l[0:2]),
                          pt2=tuple(eye_rect_l[2:4]),
                          color=(255, 255, 255),
                          thickness=2)
            cv2.rectangle(img,
                          pt1=tuple(eye_rect_r[0:2]),
                          pt2=tuple(eye_rect_r[2:4]),
                          color=(255, 255, 255),
                          thickness=2)

            cv2.putText(img, state_l, tuple(eye_rect_l[0:2]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
            cv2.putText(img, state_r, tuple(eye_rect_r[0:2]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            if state_l[0] == '-' and state_r[0] == '-':
                COUNTER += 1
                print(COUNTER)
                if COUNTER >= 50:
                    cv2.putText(img, "You Died", (230, 250),
                                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 7)
            else:
                COUNTER = 0

        cv2.imshow('result', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
    vs.stop()
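# eye_state() above depends on a crop_eye() helper and an IMG_SIZE constant that
# are not shown here. The sketch below is an assumption of what such a helper
# might look like: it pads the landmark bounding box to the aspect ratio of the
# eye-state CNN input. Both the helper body and the IMG_SIZE value are assumptions.
import numpy as np

IMG_SIZE = (34, 26)  # assumed (width, height) of the eye-state model input

def crop_eye(gray, eye_points):
    # bounding box around the six eye landmarks
    x1, y1 = np.amin(eye_points, axis=0)
    x2, y2 = np.amax(eye_points, axis=0)
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2

    # pad the box so its aspect ratio matches IMG_SIZE
    w = (x2 - x1) * 1.2
    h = w * IMG_SIZE[1] / IMG_SIZE[0]
    margin_x, margin_y = w / 2, h / 2

    eye_rect = np.rint([cx - margin_x, cy - margin_y,
                        cx + margin_x, cy + margin_y]).astype(int)
    eye_img = gray[eye_rect[1]:eye_rect[3], eye_rect[0]:eye_rect[2]]
    return eye_img, eye_rect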
def find_eyes_in_rect(frame, rect):
    shapes = shape_predictor(frame, rect)
    shapes = face_utils.shape_to_np(shapes)
    left_eye = shapes[left_eye_start:left_eye_end]
    right_eye = shapes[right_eye_start:right_eye_end]
    return (left_eye, right_eye)
Example #24
    tick = cv2.getTickCount()

    ret, rgb = cap.read()
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray,
                                          scaleFactor=1.11,
                                          minNeighbors=3,
                                          minSize=(100, 100))

    if len(faces) == 1:
        x, y, w, h = faces[0, :]
        cv2.rectangle(rgb, (x, y), (x + w, y + h), (255, 0, 0), 2)

        face = dlib.rectangle(x, y, x + w, y + h)
        face_parts = face_parts_detector(gray, face)
        face_parts = face_utils.shape_to_np(face_parts)

        for i, (x, y) in enumerate(face_parts):
            cv2.circle(rgb, (x, y), 1, (0, 255, 0), -1)
            cv2.putText(rgb, str(i), (x + 2, y - 2), cv2.FONT_HERSHEY_SIMPLEX,
                        0.3, (0, 255, 0), 1)

    fps = cv2.getTickFrequency() / (cv2.getTickCount() - tick)
    cv2.putText(rgb, "FPS:{} ".format(int(fps)), (10, 50),
                cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 2, cv2.LINE_AA)

    cv2.imshow('frame', rgb)
    if cv2.waitKey(1) == 27:
        break  # esc to quit

cap.release()
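# This fragment assumes that cap, face_cascade, and face_parts_detector were
# created earlier, outside the excerpt. A typical setup is sketched below; the
# model paths are assumptions, not the author's actual configuration.
import cv2
import dlib

cap = cv2.VideoCapture(0)
# Haar cascade bundled with opencv-python
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
# path to the dlib 68-point landmark model is assumed
face_parts_detector = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")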
Example #25
def calculate_features(path, out_path=''):
    frame = cv2.imread(path, cv2.IMREAD_COLOR)
    landmarks = {}
    # detect faces in the frame
    rects = face_detector(frame, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = landmark_shape_predictor(frame, rect)
        shape = face_utils.shape_to_np(shape)
        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        c = 0
        for (x, y) in shape:
            c += 1
            if c in pointsConsider:
                landmarks[c] = (x, y)
            cv2.putText(frame, str(c), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                        (0, 0, 255), 1, cv2.LINE_AA)

    current_eyebrow_left = 0
    current_eyebrow_right = 0
    current_angle_with_nose_left = 0
    current_angle_with_nose_right = 0
    mouth_width_distance = 0
    mouth_height_distance = 0

    for pair in pairs:
        p1 = pair[0]
        p2 = pair[1]
        try:
            l1 = landmarks[p1]
            l2 = landmarks[p2]
            distance = get_distance(l1, l2)
            lip_normal_distance = get_distance(landmarks[34], landmarks[52])
            eye_norm_distance_left = get_distance(landmarks[40], landmarks[22])
            eye_norm_distance_right = get_distance(landmarks[43],
                                                   landmarks[23])

            if pair in leftEyebrow:
                current_eyebrow_left += distance / eye_norm_distance_left
            elif pair in rightEyebrow:
                current_eyebrow_right += distance / eye_norm_distance_right
            elif pair in mouthWidth:
                mouth_width_distance = distance / lip_normal_distance
            elif pair in mouthHeight:
                mouth_height_distance = distance / lip_normal_distance
            elif pair in angleWithNoseLeft:
                current_angle_with_nose_left += distance / lip_normal_distance
            elif pair in angleWithNoseRight:
                current_angle_with_nose_right += distance / lip_normal_distance
            cv2.line(frame, l1, l2, (0, 255, 0), 1, cv2.LINE_AA)
        except KeyError:
            print('not found pairs 1', p1)
            print('not found pairs 2', p2)

    out = np.array([
        current_eyebrow_left, current_eyebrow_right,
        current_angle_with_nose_left, current_angle_with_nose_right,
        mouth_width_distance, mouth_height_distance
    ])
    # print('out:', out)
    if len(out_path) > 0:
        cv2.imwrite(out_path, frame)
    return out
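# calculate_features() above normalizes landmark pairs with a get_distance()
# helper that is not included in this excerpt; it is presumably a plain
# Euclidean distance between two (x, y) points. A sketch under that assumption:
import math

def get_distance(p1, p2):
    # Euclidean distance between two (x, y) landmark points
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])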
Example #26
vs = VideoStream(src=0).start()
fileStream = False
time.sleep(1.0)
while True:
    if fileStream and not vs.more():
        break
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)
    #print(len(rects))
    for rect in rects:
        print('-'*20)
        print(rect)
        shape = predictor(gray, rect)
        points = face_utils.shape_to_np(shape)# convert the facial landmark (x, y)-coordinates to a NumPy array
        # points = shape.parts()
        leftEye = points[lStart:lEnd]
        rightEye = points[rStart:rEnd]

        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        print('leftEAR = {0}'.format(leftEAR))
        print('rightEAR = {0}'.format(rightEAR))

        ear = (leftEAR+rightEAR)/2.0

        leftEyeHull = cv2.convexHull(leftEye)
        #print(leftEyeHull)
        rightEyeHull = cv2.convexHull(rightEye)
        xmaxl=max(leftEyeHull, key=lambda x: x[0][0])[0][0]
def cvloop(run_event):
    global panelA
    global SPRITES

    dir_ = "./sprites/flyes/"
    flies = [f for f in listdir(dir_) if isfile(join(dir_, f))] #image of flies to make the "animation"
    i = 0
    video_capture = cv2.VideoCapture(0) #read from webcam
    (x,y,w,h) = (0,0,10,10) #whatever initial values

    #Filters path
    detector = dlib.get_frontal_face_detector()

    #Facial landmarks
    print("[INFO] loading facial landmark predictor...")
    model = "filters/shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(model) # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2

    while run_event.is_set(): #while the thread is active we loop
        ret, image = video_capture.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 0)

        for face in faces: #if there are faces
            (x,y,w,h) = (face.left(), face.top(), face.width(), face.height())
            # *** Facial Landmarks detection
            shape = predictor(gray, face)
            shape = face_utils.shape_to_np(shape)
            incl = calculate_inclination(shape[17], shape[26]) #inclination based on eyebrows

            # condition to see if mouth is open
            is_mouth_open = (shape[66][1] - shape[62][1]) >= 10  # y coordinates of the inner-lip landmarks

            #hat condition
            if SPRITES[0]:
                apply_sprite(image, "./sprites/hat.png",w,x,y, incl)

            #mustache condition
            if SPRITES[1]:
                (x1,y1,w1,h1) = get_face_boundbox(shape, 6)
                apply_sprite(image, "./sprites/mustache.png",w1,x1,y1, incl)

            #glasses condition
            if SPRITES[3]:
                (x3,y3,_,h3) = get_face_boundbox(shape, 1)
                apply_sprite(image, "./sprites/glasses.png",w,x,y3, incl, ontop = False)

            #flies condition
            if SPRITES[2]:
                # to make the "animation" we read a different image from the folder on each frame
                # the images are named in the correct order to give the impression of animation
                apply_sprite(image, dir_+flies[i],w,x,y, incl)
                i+=1
                i = 0 if i >= len(flies) else i #when done with all images of that folder, begin again

            #doggy condition
            (x0,y0,w0,h0) = get_face_boundbox(shape, 6) #bound box of mouth
            if SPRITES[4]:
                (x3,y3,w3,h3) = get_face_boundbox(shape, 5) #nose
                apply_sprite(image, "./sprites/doggy_nose.png",w3,x3,y3, incl, ontop = False)

                apply_sprite(image, "./sprites/doggy_ears.png",w,x,y, incl)

                if is_mouth_open:
                    apply_sprite(image, "./sprites/doggy_tongue.png",w0,x0,y0, incl, ontop = False)
            else:
                if is_mouth_open:
                    apply_sprite(image, "./sprites/rainbow.png",w0,x0,y0, incl, ontop = False)


        # OpenCV stores images as BGR but PIL expects RGB, so change the channel order
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # convert to PIL format
        image = Image.fromarray(image)
        # convert to a Tk-compatible image so it can be shown in the GUI
        image = ImageTk.PhotoImage(image)
        # update the image in the panel to show it
        panelA.configure(image=image)
        panelA.image = image

    video_capture.release()
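`calculate_inclination` (used above to tilt the sprites with the eyebrows) is not included in this snippet; a plausible sketch that returns the angle, in degrees, of the line through the two outer eyebrow landmarks:

import math

def calculate_inclination(point1, point2):
    # angle in degrees of the line from point1 to point2 relative to the horizontal
    x1, y1 = point1
    x2, y2 = point2
    return math.degrees(math.atan2(float(y2 - y1), float(x2 - x1)))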
Example #28
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    "./shape_predictor_68_face_landmarks.dat")  # the .dat file holds the trained landmark model

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
cap = cv2.VideoCapture(0)
frame_count = 0
while True:
    ret, frame = cap.read()
    cv2.namedWindow("VideoCapture", cv2.WINDOW_NORMAL)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 0)
    for face in faces:
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)  # converting to NumPy Array
        left_Eye = shape[lStart:lEnd]
        right_Eye = shape[rStart:rEnd]
        left_EAR = eye_aspect_ratio(left_Eye)
        right_EAR = eye_aspect_ratio(right_Eye)
        ear = (left_EAR + right_EAR) / 2.0
        leftEyeHull = cv2.convexHull(left_Eye)
        rightEyeHull = cv2.convexHull(right_Eye)
        cv2.drawContours(frame, [leftEyeHull], -1, (255, 0, 85), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (255, 0, 85), 1)
        if ear < min_ear:
            frame_count += 1
            print(frame_count)
            if frame_count >= max_frame:
                cv2.putText(frame,
                            "!!!!!!!  ALERT! YOU ARE ABOUT TO SLEEP !!!!!!!",
    def calculate(self,frame):
        frame = imutils.resize(frame, width=640)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        subjects = self.detect(gray, 0)
        for subject in subjects:
            shape = self.predict(gray, subject)
            shape = face_utils.shape_to_np(shape)  # converting to NumPy Array
            leftEye = shape[self.lStart:self.lEnd]
            rightEye = shape[self.rStart:self.rEnd]
            leftEAR = self.eye_aspect_ratio(leftEye)
            rightEAR = self.eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            if ear < self.thresh:  # closed eyes
                self.flag += 1
                self.pts.appendleft(self.flag)
                self.openEye = 0
            else:
                self.openEye += 1
                self.flag = 0
                self.pts.appendleft(self.flag)
            for i in range(1, len(self.pts)):
                if self.pts[i] > self.pts[i - 1]:
                    # print(pts[i - 1], pts[i])
                    if 30 < self.pts[i] < 60:
                        print("Eyes have been closed for 30-60 frames - append '-'")
                        log("Eyes have been closed for 30-60 frames - append '-'")
                        self.L.append("-")
                        self.pts = deque(maxlen=512)
                        break
                    elif 15 < self.pts[i] < 30:
                        print("Eyes have been closed for 15-30 frames - append '.'")
                        log("Eyes have been closed for 15-30 frames - append '.'")
                        self.L.append(".")
                        self.pts = deque(maxlen=512)
                        break
                    elif self.pts[i] > 60:
                        print("Eyes have been closed for more than 60 frames - remove last Morse character")
                        log("Eyes have been closed for more than 60 frames - remove last Morse character")
                        self.L.pop()
                        self.pts = deque(maxlen=512)
                        break

        if self.L:
            print(self.L)
        if self.openEye > 60:
            if self.L:
                print(self.L)
            self.str = convertMorseToText(''.join(self.L))

            if self.str is not None:
                print(self.str)
                self.finalString.append(self.str)
                self.final = ''.join(self.finalString)
            self.L = []
        cv2.putText(frame, "Predicted :  " + self.final, (10, 470),
                    cv2.FONT_HERSHEY_DUPLEX, 0.7, (52, 152, 219), 2)

        return frame
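`convertMorseToText` is defined elsewhere; a minimal sketch that maps one dot/dash sequence to a letter and returns None for anything unrecognized (matching the None check in the caller) might be:

MORSE_TO_CHAR = {
    '.-': 'A', '-...': 'B', '-.-.': 'C', '-..': 'D', '.': 'E',
    '..-.': 'F', '--.': 'G', '....': 'H', '..': 'I', '.---': 'J',
    '-.-': 'K', '.-..': 'L', '--': 'M', '-.': 'N', '---': 'O',
    '.--.': 'P', '--.-': 'Q', '.-.': 'R', '...': 'S', '-': 'T',
    '..-': 'U', '...-': 'V', '.--': 'W', '-..-': 'X', '-.--': 'Y', '--..': 'Z',
}

def convertMorseToText(code):
    # return the decoded letter, or None if the sequence is not valid Morse
    return MORSE_TO_CHAR.get(code)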
    # grab the frame from the video stream, resize it, and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
def main():
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 30
    ALARM_ON = False
    COUNTER = 0

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    # print("-> Starting Video Stream")
    vs = VideoStream(0).start()
    # for Raspberry Pi: vs = VideoStream(usePiCamera=True).start()
    time.sleep(1.0)

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            eye = final_ear(shape)
            ear = eye[0]
            leftEye = eye[1]
            rightEye = eye[2]



            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1

                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    if not ALARM_ON:
                        ALARM_ON = True
                        t = Thread(target=sound_alarm,
                                   args=(["alarm"],))
                        t.daemon = True
                        t.start()
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


            else:
                COUNTER = 0
                ALARM_ON = False

            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
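`final_ear` is assumed to bundle the per-eye EAR computation; a sketch built on the same `eye_aspect_ratio` helper and the imutils 68-point index ranges:

from imutils import face_utils

def final_ear(shape):
    # slice both eyes out of the 68-point landmark array and average their EARs
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]
    ear = (eye_aspect_ratio(leftEye) + eye_aspect_ratio(rightEye)) / 2.0
    return (ear, leftEye, rightEye)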
Example #32
    ret, frame = cap.read()
    if not ret:
        print(
            'Failed to capture frame from camera. Check camera index in cv2.VideoCapture(0) \n'
        )
        break
        # cv2.imshow(frame)

    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)

    for rect in rects:
        # determine the facial landmarks for the face region
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)  # converting to numpy array

        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]

        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        ear = (leftEAR + rightEAR) / 2.0

        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
Example #33
def head_posture(rect, gray, predictor, size):
    shape0 = predictor(gray, rect)
    shape0 = np.array(face_utils.shape_to_np(shape0))
    # 2D image points. If you change the image, you need to change vector
    image_points = np.array(
        [
            (shape0[33, :]),  # Nose tip
            (shape0[8, :]),  # Chin
            (shape0[36, :]),  # Left eye left corner
            (shape0[45, :]),  # Right eye right corner
            (shape0[48, :]),  # Left Mouth corner
            (shape0[54, :])  # Right mouth corner
        ],
        dtype="double")
    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])
    # Camera internals
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")
    # print("Camera Matrix :\n {0}".format(camera_matrix))

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    (success, setting.rotation_vector,
     setting.translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_ITERATIVE)

    # print("Rotation Vector:\n {0}".format(setting.rotation_vector))
    # print("Translation Vector:\n {0}".format(setting.translation_vector))
    # Project a 3D point (0, 0, 1000.0) onto the image plane.
    # We use this to draw a line sticking out of the nose

    (nose_end_point2D,
     jacobian) = cv2.projectPoints(np.array([
         (0.0, 0.0, 1000.0)
     ]), setting.rotation_vector, setting.translation_vector, camera_matrix,
                                   dist_coeffs)

    for p in image_points:
        cv2.circle(setting.frame, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

    ty = setting.translation_vector[1][0]
    if 0 < ty < 300:
        setting.HP_CODE = 1
    elif 300 <= ty <= 600:
        setting.HP_CODE = 0
    elif ty > 600:
        setting.HP_CODE = 2
    elif ty < 0:
        setting.HP_CODE = 3
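head_posture stores the solvePnP rotation and translation vectors but never converts them to angles; a common follow-up (an assumption, not part of this snippet) turns the rotation vector into Euler angles with cv2.Rodrigues and cv2.decomposeProjectionMatrix:

import cv2

def rotation_vector_to_euler(rotation_vector, translation_vector):
    # build a 3x4 pose matrix and let OpenCV decompose it into Euler angles (degrees)
    rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
    pose_matrix = cv2.hconcat([rotation_matrix, translation_vector])
    euler_angles = cv2.decomposeProjectionMatrix(pose_matrix)[-1]
    return euler_angles  # 3x1 array: pitch, yaw, roll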
Example #34
def StartDetection(CameraQueue,FrameQueue,RectQueue,FacepointQueue,SpeakerQueue):
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("../models/dlib/shape_predictor_68_face_landmarks.dat")
    #fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #outVideo = cv2.VideoWriter('outputRoboy.mp4',fourcc, 20.0, (800,533))
    vs = cv2.VideoCapture(0)
    detect_net = load_net("../darknet/cfg/yolo.cfg", "../darknet/yolo.weights", 0)
    detect_meta = load_meta("../darknet/cfg/coco.data")
    counter = 0
    while True:
        """
        grab the frame from the threaded video stream, resize it to
        have a maximum width of 800 pixels, and convert it to
        grayscale"""
        ok, frame = vs.read()
        if not ok:
            break
        # frame = imutils.resize(frame, width=800)
        frame = frame[0:376, 0:500]

        detectObjects(frame, detect_net, detect_meta)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        # loop over the face detections
        counter=0
        facepoints = dict()
        SpeakerDict = {}
        try:
            SpeakerDict = SpeakerQueue.get_nowait()
        except:
            pass 
        for rect in rects:
            counter +=1
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame,str(counter),(rect.left()-10,rect.top()-10),\
                        font, 2, (200,255,155), 13,\
                       cv2.LINE_AA)
            p1 = (int(rect.left()),int(rect.top()))
            p2 = (int(rect.right()),int(rect.bottom()))
            cv2.rectangle(frame, p1, p2, (0,0,255,10))
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            facepoints[counter] = shape
            #print ("Rect from face_detect:",rect,"Detected face width: ",rect.right()-rect.left()," height:",rect.bottom()-rect.top())
            if SpeakerDict:
                #print("Person:",SpeakerDict[counter])
                try:
                    if SpeakerDict[counter]:
                        for (x, y) in shape:    
                            cv2.circle(frame, (x, y), 2, (0, 255,0), -1)
                    else:
                        for (x, y) in shape:    
                            cv2.circle(frame, (x, y), 2, (0, 0,255), -1)
                except:
                    pass
            else:
                for (x, y) in shape:
                    cv2.circle(frame, (x, y), 2, (0, 0, 255), -1)

        FacepointQueue.put(pickle.dumps(facepoints))
        FrameQueue.put(frame)  
     #   outVideo.write(frame)
        RectQueue.put(rects)
Example #35
        break

    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    results = pose.process(image)
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, dsize=(0, 0), fx=fx, fy=fy)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    try:
        face = detector(gray)[0]

        # for face in faces:
        shapes = predictor(gray, face)
        shapes = face_utils.shape_to_np(shapes)

        eye_img_l, eye_rect_l = crop_eye(gray, eye_points=shapes[36:42])
        eye_img_r, eye_rect_r = crop_eye(gray, eye_points=shapes[42:48])

        eye_img_l = cv2.resize(eye_img_l, dsize=IMG_SIZE)
        eye_img_r = cv2.resize(eye_img_r, dsize=IMG_SIZE)
        eye_img_r = cv2.flip(eye_img_r, flipCode=1)

        eye_input_l = eye_img_l.copy().reshape(
            (1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.
        eye_input_r = eye_img_r.copy().reshape(
            (1, IMG_SIZE[1], IMG_SIZE[0], 1)).astype(np.float32) / 255.

        pred_l = model.predict(eye_input_l)
        pred_r = model.predict(eye_input_r)
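`crop_eye` is defined elsewhere; a sketch that cuts a padded box around the six eye landmarks, assuming the global IMG_SIZE is the (width, height) the model expects and that the 1.2 padding factor is an illustrative choice:

import numpy as np

def crop_eye(gray, eye_points, pad=1.2):
    # padded bounding box around the six eye landmarks, keeping the model's aspect ratio
    x1, y1 = np.amin(eye_points, axis=0)
    x2, y2 = np.amax(eye_points, axis=0)
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    w = (x2 - x1) * pad
    h = w * IMG_SIZE[1] / IMG_SIZE[0]
    rect = np.rint([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]).astype(np.int32)
    eye_img = gray[rect[1]:rect[3], rect[0]:rect[2]]
    return eye_img, rect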
(rightStart, rightEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]  # Gets right eye indices
vs = VideoStream(0).start()
time.sleep(1.0)  # pause for feed

while True:
    frame = vs.read()  # Get frame from video
    # frame = imutils.resize(frame, width=450)  # Resize the frame
    # frame = cv2.resize(frame,(frame.height,450))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
    # Detect faces in image
    faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
                                      flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        face = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))   # Creates a rectangle for the face
        shape = predictor(gray, face)  # Get facial coordinates
        shape = face_utils.shape_to_np(shape)  # Convert facial cartesian coordinates to a numpy array
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[leftStart:leftEnd]  # Slice of coordinates corresponding to left eye
        rightEye = shape[rightStart:rightEnd]  # Slice of coordinates corresponding to right eye
        leftAspectRatio = aspectRatio(leftEye)  # Get aspect ratio of left eye
        rightAspectRatio = aspectRatio(rightEye)  # Get aspect ratio of right eye
        avgAspectRatio = (leftAspectRatio + rightAspectRatio) / 2.0  # Average of both eyes
        cv2.drawContours(frame, [cv2.convexHull(leftEye)], -1, (0, 255, 0), 1)  # Drawing contour around the left eye
        cv2.drawContours(frame, [cv2.convexHull(rightEye)], -1, (0, 255, 0), 1)  # Drawing contour around the right eye
        if avgAspectRatio < ASPECT_RATIO_THRESHOLD:  # If Aspect ratio dips below threshold
            counter += 1
            print(counter)
            if counter >= MAX_CONSEC_FRAMES:  # If eyes are closed for long enough
                # if the alarm is not on, turn it on
                #if not is_alarm_on:
def cvloop(run_event):
    global panelA
    global SPRITES
    global image_path
    i = 0
    # video_capture = cv2.VideoCapture(0) #read from webcam
    # video_capture = cv2.VideoCapture('http://192.168.43.247:4747/mjpegfeed') #read from webcam
    video_capture = cv2.VideoCapture(
        'http://192.168.0.100:4747/mjpegfeed')  #read from webcam
    (x, y, w, h) = (0, 0, 10, 10)  #whatever initial values

    # Face detector
    detector = dlib.get_frontal_face_detector()

    model = "C:\\Users\\amitt\\Desktop\\Folder\\E-Dressing-master\\Project\\data\\shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(
        model
    )  # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2

    while run_event.is_set():
        ret, image = video_capture.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 0)

        for face in faces:
            (x, y, w, h) = (face.left(), face.top(), face.width(),
                            face.height())

            shape = predictor(gray, face)
            shape = face_utils.shape_to_np(shape)
            incl = calculate_inclination(
                shape[17], shape[26])  #inclination based on eyebrows

            # condition to see if mouth is open
            is_mouth_open = (shape[66][1] - shape[62][1]
                             ) >= 10  # y coordinates of the inner-lip landmarks

            if SPRITES[0]:

                apply_sprite(image, image_path, w, x, y + 40, incl, ontop=True)

            if SPRITES[1]:
                (x1, y1, w1, h1) = get_face_boundbox(shape, 6)
                apply_sprite(image, image_path, w1, x1, y1 + 275, incl)

            if SPRITES[3]:
                (x3, y3, _, h3) = get_face_boundbox(shape, 1)
                apply_sprite(image, image_path, w, x, y3, incl, ontop=False)

            (x0, y0, w0, h0) = get_face_boundbox(shape, 6)  #bound box of mouth

            if SPRITES[4]:
                (x3, y3, w3, h3) = get_face_boundbox(shape, 7)  # ears
                apply_sprite(image, image_path, w3, x3 - 20, y3 + 25, incl)
                (x3, y3, w3, h3) = get_face_boundbox(shape, 8)  # ears
                apply_sprite(image, image_path, w3, x3 + 20, y3 + 25, incl)

            if SPRITES[5]:
                findRects = []
                upperPath = "C:\\Users\\amitt\\Desktop\\Folder\\E-Dressing-master\\Project\\data\\haarcascade_upperbody.xml"
                imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                upperCascade = cv2.CascadeClassifier(upperPath)
                upperRect = upperCascade.detectMultiScale(imageGray,
                                                          scaleFactor=1.1,
                                                          minNeighbors=1,
                                                          minSize=(1, 1))

                if len(upperRect) > 0:
                    findRects.append(upperRect[0])

                for obj in findRects:
                    # img = cv2.rectangle(img, (obj[0],obj[1]), (obj[0]+obj[2], obj[1]+obj[3]), (0, 255, 0), 2)
                    sprite = cv2.imread(image_path, -1)
                    image = draw_sprite(image, sprite, obj[0], obj[1])

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)
        panelA.configure(image=image)
        panelA.image = image

    video_capture.release()
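Both cvloop variants lean on apply_sprite/draw_sprite helpers that are not reproduced here; a simplified sketch that alpha-blends a 4-channel PNG onto the frame (rotation by the inclination angle and the finer placement logic of the real helpers are omitted):

import cv2

def draw_sprite(frame, sprite, x_offset, y_offset):
    # alpha-blend a BGRA sprite onto a BGR frame, clipped to the frame borders
    h, w = sprite.shape[:2]
    fh, fw = frame.shape[:2]
    h = min(h, fh - y_offset)
    w = min(w, fw - x_offset)
    if h <= 0 or w <= 0:
        return frame
    alpha = sprite[:h, :w, 3] / 255.0  # assumes the PNG has an alpha channel
    for c in range(3):
        frame[y_offset:y_offset + h, x_offset:x_offset + w, c] = (
            alpha * sprite[:h, :w, c]
            + (1 - alpha) * frame[y_offset:y_offset + h, x_offset:x_offset + w, c])
    return frame

def apply_sprite(frame, sprite_path, width, x, y, angle, ontop=True):
    # load the PNG with its alpha channel, scale it to the face width, and draw it
    # (`angle`, the eyebrow inclination, is ignored in this simplified sketch)
    sprite = cv2.imread(sprite_path, cv2.IMREAD_UNCHANGED)
    scale = width / sprite.shape[1]
    sprite = cv2.resize(sprite, (0, 0), fx=scale, fy=scale)
    y_offset = max(0, y - sprite.shape[0]) if ontop else y  # place above or below the anchor
    draw_sprite(frame, sprite, max(0, x), y_offset)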
Example #38
def rotateImage(imageObj, correctionAngle, nosePoint):
    rotationMatrix = cv2.getRotationMatrix2D(nosePoint, correctionAngle, 1.0)
    return cv2.warpAffine(imageObj, rotationMatrix, (imageObj.shape[1], imageObj.shape[0]), flags=cv2.INTER_LINEAR)


for person in os.listdir("Users"):
    recognizer = cv2.face.createLBPHFaceRecognizer()
    for faceImageFile in os.listdir("Users/" + person + "/originals"):
        if faceImageFile.endswith(".png"):
            gray = cv2.imread("Users/" + person + "/originals/" + faceImageFile,
                              cv2.IMREAD_GRAYSCALE)  # load directly as grayscale
            rects = detector(gray, 1)

            for (j, rect) in enumerate(rects):
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                angle = calculateFaceTilt(Point(shape[39]), Point(shape[42]))

                gray = rotateImage(gray, angle, tuple(shape[33]))
                # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 1)

                # loop over the face detections
                for (k, rect) in enumerate(rects):
                    shape = predictor(gray, rect)
                    shape = face_utils.shape_to_np(shape)
                    eye = Point(shape[37])
                    eyebrow = Point(shape[19])
                    left = Point(min(shape, key=itemgetter(0)))
                    top = Point(min(shape, key=itemgetter(1)))
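`calculateFaceTilt` is not shown; a sketch that returns the roll angle, in degrees, of the line through the two inner eye-corner landmarks (the `Point` arguments are assumed to expose `.x` and `.y`):

import math

def calculateFaceTilt(leftEyeCorner, rightEyeCorner):
    # roll angle in degrees of the line joining the inner eye corners,
    # later used to de-rotate the face so the eyes sit level
    dy = rightEyeCorner.y - leftEyeCorner.y
    dx = rightEyeCorner.x - leftEyeCorner.x
    return math.degrees(math.atan2(dy, dx))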
Example #39
    def get_frame(self):
        pickle_in = open("New_testing_dlib_normalized.pickle", "rb")
        model = pickle.load(pickle_in)
        while self.video.isOpened():
            self.frame += 1
            ret, frame = self.video.read()
            if ret is True:
                if self.frame % 5 == 0:
                    # print(str(self.frame)+" frame")
                    #ret, frame = self.video.read()
                    # if ret == True:
                    self.frame+=1
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    file = open("Expressions.csv", "w")
                    face = detector(gray,0)
                    # print("lala")
                    # print(type(face))
                    # print(face)

                    # print("Number of Faces {}".format(len(face)))
                    my_list = []
                    count_interested = 0
                    count_bore = 0
                    count_neutral = 0

                    for (J, rect) in enumerate(face):
                        shap = predictor(frame, rect)
                        xlist = []
                        ylist = []
                        shap = face_utils.shape_to_np(shap)
                        Centre = (shap[30])
                        centre_x = Centre[0]
                        centre_y = Centre[1]
                        shap = shap[18:68]
                        for i in shap:
                            xlist.append(i[0])
                            ylist.append(i[1])
                        forx = []
                        fory = []
                        for x in xlist:
                            forx.append((x - centre_x) ** 2)
                        for y in ylist:
                            fory.append((y - centre_y) ** 2)
                        listsum = [sum(x) for x in zip(forx, fory)]
                        features = []
                        for i in listsum:
                            k = mpmath.sqrt(float(i))
                            features.append(float(k))
                        maxx = (max(features))
                        final = []
                        for i in features:
                            if (i == 0.0):
                                continue
                            F = i / maxx
                            final.append(F)
                        numpy_array = np.array(final)


                        # prediction by the model

                        prediction = model.predict([numpy_array])[0]

                        (x, y, w, h) = face_utils.rect_to_bb(rect)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                        # display the image and the prediction
                        cv2.putText(frame, prediction, (x-7, y-6), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                    (0, 255, 0), 2)
                        cv2.circle(frame, (centre_x, centre_y), 1, (0, 0, 0), 10)
                        for (x, y) in shap:
                            cv2.circle(frame, (x, y), 1, (0, 0, 255), 2)
                            cv2.line(frame,(centre_x,centre_y),(x,y),(0,255,1) )

                        if prediction == "INTERESTED":
                            count_interested +=1
                            # self.Expression.append(1)

                        elif prediction == "BORE":
                            count_bore +=1
                        else:
                            count_neutral +=1

                        ret, jpeg = cv2.imencode('.jpg', frame)
                        my_list.append(jpeg.tobytes())

                    if (count_interested > count_bore) and (count_interested > count_neutral):
                        self.Expression.append(1)

                    elif (count_bore > count_interested) and (count_bore > count_neutral):
                        self.Expression.append(-1)
                    else:
                        self.Expression.append(0)

                    with file:
                        writer = csv.writer(file)
                        writer.writerow(self.Expression)


                    # Here will be the code for the best expression...

                    # print(self.int_count)
                    if(count_interested > self.int_count):
                        print("BEST frame has been caught")

                        file_storage = best_int_path + "\\" + "BEST_PIC" + '.jpg'
                        cv2.imwrite(file_storage, gray)
                        # global self.int_count
                        print(self.int_count)
                        self.int_count = count_interested
                        print("best frame has been saved to record")

                    # send the list of encoded frames to app.py to be displayed in the browser
                    return (my_list)
            else:
                break
Example #40
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """

        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run()
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        global smileneighbour, mqLoop, smilecount, eyetot
        # if self.bpm_plot:
        # self.make_bpm_plot()
        x = str(datetime.datetime.now())

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(gray, 0)
        #####Record self-report#####
        key = cv2.waitKey(1) & 0xFF
        if key == ord("."):
            c.write(str(md.datestr2num(x)) + " " + str(500) + "\n")
            ############################

        for rect in rects:
            shape = self.predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            shape2 = shape.copy()
            newline = ""
            for (w,z) in shape2:
                cv2.circle(frame, (w, z), 1, (0, 0, 255), -1)
                new = str((w,z)) + " "
                newline += new

            #######################
            ########Raw Data#######
            #######################

            raw.write(str(md.datestr2num(x)) + newline + "\n")

            #######################
            ######New Things#######

            leftBrow = shape[21]
            rightBrow = shape[22]
            distance = dist.euclidean(leftBrow, rightBrow)

            upper1 = shape[37]
            lower1 = shape[41]
            right1 = shape[38]
            upper2 = shape[43]
            lower2 = shape[47]
            right2 = shape[44]
            average = (dist.euclidean(upper1, lower1) * dist.euclidean(upper1, right1)
                       + dist.euclidean(upper2, lower2) * dist.euclidean(upper2, right2))

            leftmouth = shape[48]
            rightmouth = shape[54]

            upperlip = shape[62]
            lowerlip = shape[66]

            ##########End##########
            #######################
            # Time, Frown, Eye size, Mouth width, Mouth height, Brow raise, Length of face, and Width of face
            all.write(str(md.datestr2num(x)) + " " + str(distance) + " " + str(average) + " "
                      + str(dist.euclidean(leftmouth, rightmouth)) + " "
                      + str(dist.euclidean(upperlip, lowerlip)) + " "
                      + str(dist.euclidean(shape[24], shape[44]) + dist.euclidean(shape[19], shape[37])) + " "
                      + str(dist.euclidean(shape[27], shape[8])) + " "
                      + str(dist.euclidean(shape[2], shape[14])) + "\n")
            #######################
            ######Single File######
        cv2.imshow("Frame", frame)

        if mqLoop >= 1:

            sm.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            e.write(str(md.datestr2num(x)) + " " + str(eyetot) + "\n")
            hr.write(str(md.datestr2num(x)) + " " + str(self.processor.show_bpm_text.bpm) + "\n")
            smileneighbour += 2 * eyetot
            smileneighbour /= 100
            if (self.processor.show_bpm_text.bpm) > dhr:
                # print (self.processor.fft.samples[-1]/2, self.processor.fft.samples[-1]-dhr/2)
                # overbeat = (self.processor.fft.samples[-1]-dhr)*(self.processor.fft.samples[-1]-dhr)
                smileneighbour += (self.processor.show_bpm_text.bpm - dhr)

            f.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            mqLoop = 0
        else:
            mqLoop += 0.9
        img = cv.QueryFrame(capture)
        smileneighbour = 0
        eyetot = 0
        if img:
            image = DetectRedEyes(img, faceCascade, smileCascade, eyeCascade)
            cv.ShowImage("camera", image)
        # handle any key presses
        self.key_handler()
Example #41
    def sendFaceImages(self):
        self.statusLabel.set_text("Obrada uzoraka")  # "Processing samples"
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor("ldm.dat")

        for i in range(0, 7):
            gray = cv2.imread("Camera/Resources/" + str(i) + '.png', cv2.IMREAD_GRAYSCALE)
            #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            rects = detector(gray, 1)

            # loop over the face detections
            for (j, rect) in enumerate(rects):
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                angle = self.calculateFaceTilt(Point(shape[39]), Point(shape[42]))

                gray = self.rotateImage(gray, angle, tuple(shape[33]))
                #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 1)

                # loop over the face detections
                for (k, rect) in enumerate(rects):

                    shape = predictor(gray, rect)
                    shape = face_utils.shape_to_np(shape)
                    eye = Point(shape[37])
                    eyebrow = Point(shape[19])
                    left = Point(min(shape, key=itemgetter(0)))
                    top = Point(min(shape, key=itemgetter(1)))
                    right = Point(max(shape, key=itemgetter(0)))
                    bottom = Point(max(shape, key=itemgetter(1)))

                    gray = gray[int(top.y - eye.distance(eyebrow) / 2):int(top.y + top.distance(bottom)),
                            int(left.x):int(left.x + left.distance(right))]

                    # histogram equalization
                    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
                    gray = clahe.apply(gray)
                    #gray = cv2.bilateralFilter(gray, 9, 75, 75)

                    ratio = 300.0 / gray.shape[1]
                    dimensions = (300, int(gray.shape[0] * ratio))

                    gray = cv2.resize(gray, dimensions, interpolation=cv2.INTER_AREA)
                    cv2.imwrite("Camera/Resources/" + str(i) + '.png', gray)

        for i in range(0, 7):
            self.statusLabel.set_text("Slanje uzoraka")  # "Sending samples"
            client.send("ftp:" + str(i))
            self.waitForResponse()
            imageFile = open("Camera/Resources/" + str(i) + '.png', "rb")
            offset = 0
            while True:
                sent = sendfile(client.fileno(), imageFile.fileno(), offset, 4096)
                if sent == 0:
                    client.send("EOF")
                    break  # EOF
                offset += sent
            self.statusLabel.set_text("Potvrdite PIN")  # "Confirm PIN"
            self.btnConfirm.set_sensitive(True)
            self.waitForResponse()