Example no. 1
def Eye_tracking(yolo):
    import cv2
    import numpy as np
    from PIL import Image
    from gaze_tracking import GazeTracking

    cam = cv2.VideoCapture(0)
    cam.set(3, 1000)  # frame width
    cam.set(4, 600)   # frame height
    gaze = GazeTracking()
    while True:
        _, frame = cam.read()
        image = Image.fromarray(frame)
        res, Faces = yolo.detect_image_with_coord(image)

        output = np.array(res)

        for face in Faces:
            x1, y1, x2, y2 = face

            #Face_img = frame[y1:y2,(x1-10):(x2+10)]
            gaze.refresh(output)

            Ox1, Oy1, Ox2, Oy2 = gaze.annotated_frame(x1, y1)
            color = (0, 0, 255)
            cv2.line(output, (Ox1 - 5, Oy1), (Ox1 + 5, Oy1), color)
            cv2.line(output, (Ox1, Oy1 - 5), (Ox1, Oy1 + 5), color)
            cv2.line(output, (Ox2 - 5, Oy2), (Ox2 + 5, Oy2), color)
            cv2.line(output, (Ox2, Oy2 - 5), (Ox2, Oy2 + 5), color)

            # 60 130 165
            text = ""
            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"

            left_pupil = gaze.pupil_left_relative_coords()
            right_pupil = gaze.pupil_right_relative_coords()
            cv2.putText(output, text, (x1, y1 + 20), cv2.FONT_HERSHEY_DUPLEX,
                        0.8, (147, 58, 31), 2)
            cv2.putText(output, "Left pupil:  " + str(left_pupil),
                        (x1, y1 + 50), cv2.FONT_HERSHEY_DUPLEX, 0.4,
                        (147, 58, 31), 1)
            cv2.putText(output, "Right pupil: " + str(right_pupil),
                        (x1, y1 + 65), cv2.FONT_HERSHEY_DUPLEX, 0.4,
                        (147, 58, 31), 1)

        cv2.imshow("Camera", output)
        if cv2.waitKey(5) == 27:
            break

    yolo.close_session()
Example no. 2
class GazeExtractor:
    def __init__(self):
        self.cv_bridge = CvBridge()
        self.gaze = GazeTracking()
        self.publish_annotated_frame = rospy.get_param(
            "~publish_annotated_frame", True)
        if self.publish_annotated_frame:
            self.annotated_frame_publisher = rospy.Publisher(
                'image_annotated_raw', Image, queue_size=10)
        self.gaze_publisher = rospy.Publisher('gaze_state',
                                              GazeState,
                                              queue_size=10)

    def extract_from_image(self, img_msg):

        #convert image to opencv type
        try:
            cv_image = self.cv_bridge.imgmsg_to_cv2(img_msg, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        #run gaze detection
        self.gaze.refresh(cv_image)

        #if desired, publish annotated frame
        if self.publish_annotated_frame:
            annotated_image_msg = self.cv_bridge.cv2_to_imgmsg(
                self.gaze.annotated_frame(), "bgr8")
            self.annotated_frame_publisher.publish(annotated_image_msg)

        #if no pupils detected, stop here
        if not self.gaze.pupils_located:
            return

        #pack gaze tracking result into a GazeState message and publish
        result_msg = GazeState()
        result_msg.header = img_msg.header
        result_msg.is_left = self.gaze.is_left()
        result_msg.is_right = self.gaze.is_right()
        result_msg.is_center = self.gaze.is_center()
        result_msg.is_blinking = self.gaze.is_blinking()
        result_msg.pupil_left_coords.x = self.gaze.pupil_left_coords()[0]
        result_msg.pupil_left_coords.y = self.gaze.pupil_left_coords()[1]
        result_msg.pupil_right_coords.x = self.gaze.pupil_right_coords()[0]
        result_msg.pupil_right_coords.y = self.gaze.pupil_right_coords()[1]
        result_msg.horizontal_ratio = self.gaze.horizontal_ratio()
        result_msg.vertical_ratio = self.gaze.vertical_ratio()

        self.gaze_publisher.publish(result_msg)
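A minimal way to run the class above as a ROS node might look like the sketch below; the node name, the image_raw topic and the main guard are assumptions, while GazeState comes from the project's own message package.

#!/usr/bin/env python
# Hypothetical entry point for the GazeExtractor class above.
import rospy
from sensor_msgs.msg import Image

if __name__ == '__main__':
    rospy.init_node('gaze_extractor')
    extractor = GazeExtractor()
    # Feed every incoming camera frame to the gaze extractor
    rospy.Subscriber('image_raw', Image, extractor.extract_from_image)
    rospy.spin()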
Example no. 3
def eye_tracking(image_path):
    gaze = GazeTracking()
    frame = cv2.imread(image_path)
    gaze.refresh(frame)

    frame = gaze.annotated_frame()

    if gaze.is_right():
        value = 0.5
    elif gaze.is_left():
        value = 0.5
    elif gaze.is_center():
        value = 1
    else:
        value = 0
    return value
Example no. 4
def run_gazetracker(seconds):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    current_time = time.time()
    gaze_matrix = []
    while time.time() - current_time <= seconds:
        # We get a new frame from the webcam
        success, frame = webcam.read()
        if not success:
            print('NOT SUCCESSFUL')
            break
        else:

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"

            cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (147, 58, 31), 2)

            left_pupil = gaze.pupil_left_coords()
            right_pupil = gaze.pupil_right_coords()
            cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

            cv2.imshow("Demo", frame)
            gaze_matrix.append([time.time(), left_pupil, right_pupil])

            if cv2.waitKey(1) == 27:  # exit on Esc
                break
            print(left_pupil)
            print(right_pupil)
            print(gaze_matrix)

    webcam.release()
    cv2.destroyAllWindows()
    return gaze_matrix
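A quick way to exercise run_gazetracker (as amended above to return gaze_matrix) and persist the timestamped pupil samples might be the following sketch; the CSV filename is only illustrative.

import csv

# Hypothetical caller: track for 30 seconds, then dump the samples to CSV.
samples = run_gazetracker(30)
with open("gaze_samples.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["timestamp", "left_pupil", "right_pupil"])
    writer.writerows(samples)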
Example no. 5
    def get_frame(self):
        '''success, image = self.video.read()
        image=cv2.resize(image,None,fx=ds_factor,fy=ds_factor,interpolation=cv2.INTER_AREA)
        gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        face_rects=face_cascade.detectMultiScale(gray,1.3,5)
        for (x,y,w,h) in face_rects:
        	cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        	break
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()'''
        gaze = GazeTracking()
        webcam = cv2.VideoCapture(0)

        while True:
            # We get a new frame from the webcam
            _, frame = webcam.read()

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right,please look at the screen"
            elif gaze.is_left():
                text = "Looking left,please look at the screen"
            elif gaze.is_center():
                text = "Looking center"

            cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (147, 58, 31), 2)

            left_pupil = gaze.pupil_left_coords()
            right_pupil = gaze.pupil_right_coords()
            cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            '''cv2.imshow("Demo", frame)
            if cv2.waitKey(1) == 27:
                break'''
            # comment out the part below and uncomment the block above to run in a single tab
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
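This get_frame method is written for an MJPEG streaming setup; a minimal Flask route that could consume it is sketched below, where the VideoCamera class name and the /video_feed route are assumptions rather than part of the original project.

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Pull JPEG-encoded frames from get_frame() and emit them as an MJPEG stream
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')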
Example no. 6
def getEyeResults():
    gaze = GazeTracking()
    frame = cv2.imread("./images/analysis/proctor.png")
    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"
    print(text)
    return text
Example no. 7
def GazeYourEye(video, student):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(video)
    result = []
    while True:
        value, frame = webcam.read()
        if not value:
            break
        gaze.refresh(frame)

        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            result.append('B')
        elif gaze.is_right():
            result.append('R')
        elif gaze.is_left():
            result.append('L')
        elif gaze.is_center():
            result.append('C')

        if cv2.waitKey(1) == 27:
            break

    webcam.release()

    whole = len(result)
    if whole == 0:
        return  # no frames could be analyzed

    ret = [
        round(result.count('C') / whole * 100, 2),
        round(result.count('B') / whole * 100, 2),
        round(result.count('L') / whole * 100, 2),
        round(result.count('R') / whole * 100, 2)
    ]

    student = Students.query.filter(
        Students.student_number == student.student_number)
    student.update({
        'eye_ratio_center': ret[0],
        'eye_ratio_blink': ret[1],
        'eye_ratio_left': ret[2],
        'eye_ratio_right': ret[3]
    })

    data = np.array([[ret[0], ret[1], ret[2], ret[3]]])
    [result] = load_model.predict(data)
    student.update({'eye_result': bool(result)})
    db.session.commit()
Example no. 8
def startCam():
    import cv2
    from gaze_tracking import GazeTracking
    import time

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    startTime = time.time()
    totalFrames = 0
    framesDistracted = 0
    framesFocused = 0

    while True:
        _, frame = webcam.read()
        totalFrames += 1
        gaze.refresh(frame)
        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            framesDistracted += 1
        elif gaze.is_right():
            framesDistracted += 1
        elif gaze.is_left():
            framesDistracted += 1
        elif gaze.is_center():
            framesFocused += 1
        else:
            framesDistracted += 1

        cv2.imshow("Camera", frame)

        if cv2.waitKey(1) == ord('q'):
            break

    webcam.release()
    cv2.destroyAllWindows()

    totalTime = truncate(time.time() - startTime, 2)
    percentFocused = truncate((framesFocused / totalFrames) * 100, 2)
    percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)

    return totalTime, percentFocused, percentDistracted
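startCam calls a truncate helper that is not shown here; a simple version consistent with how it is used (keep a fixed number of decimal places without rounding) might be:

import math

def truncate(value, decimals=2):
    # Drop digits beyond `decimals` places without rounding,
    # e.g. truncate(12.3456, 2) -> 12.34
    factor = 10 ** decimals
    return math.floor(value * factor) / factor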
Example no. 9
    def eyeTrack(self):

        gaze = GazeTracking()
        blinkCount = 0

        while True:

            # Grab a single frame of video
            ret, frame = self.video_capture.read()

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Goz Kirpildi"  # Turkish: "Blinked"
                blinkCount += 1
            elif gaze.is_right():
                text = "Saga Bakildi"  # "Looked right"
            elif gaze.is_left():
                text = "Sola Bakildi"  # "Looked left"
            elif gaze.is_center():
                text = "Merkeze Bakildi"  # "Looked at the center"

            cv2.putText(frame, text, (0, 30), cv2.FONT_HERSHEY_DUPLEX, 1,
                        (147, 58, 31), 2)

            # Display the resulting image
            cv2.imshow('Video', frame)
            print("Goz Kırpma: " + str(blinkCount))

            if blinkCount >= 3:
                return 1

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example no. 10
def main(args):
    filename = args["input_file"]
    faceCascade = cv2.CascadeClassifier(
        'models/haarcascade_frontalface_default.xml')
    model = load_model('models/facenet_keras.h5')

    if filename is None:
        isVideo = False
        webcam = cv2.VideoCapture(0)
        webcam.set(3, args['wh'][0])
        webcam.set(4, args['wh'][1])
    else:
        isVideo = True
        webcam = cv2.VideoCapture(filename)
        fps = webcam.get(cv2.CAP_PROP_FPS)
        width = int(webcam.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(webcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        name, ext = osp.splitext(filename)
        out = cv2.VideoWriter(args["output_file"], fourcc, fps,
                              (width, height))

    # Variable Setting
    hpd = headpose.HeadposeDetection(
        args["landmark_type"], args["landmark_predictor"])  #import headpose
    gaze = GazeTracking()  # import gazetracking
    yellocard = 0
    redcard = 0
    tempval = 0
    timee = int(
        input("시험 시간을 입력하세요(Minute): "))  # Korean prompt: "Enter the exam time (minutes)"; used as the time limit
    max_time_end = time.time() + (60 * timee)

    # Infinity Loop for Detect Cheating for Online test
    while (webcam.isOpened()):

        ret, frame = webcam.read()  # Read a frame from the webcam
        if not ret:
            break
        gaze.refresh(frame)
        frame = gaze.annotated_frame()  # Mark pupil for frame

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=3,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)  # face structure

        # Score suspicion points from the gaze direction
        if gaze.is_blinking():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_right():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_left():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_center():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        else:
            yellocard = yellocard + 2

        # Red card escalation: too many accumulated points become a red card
        if yellocard > 50:
            yellocard = 0
            tempval = tempval + 1
            redcard = redcard + 1

        # After a red card, give an audible and on-screen warning (repeats each loop)
        if tempval == 1:
            text1 = "WARNING"
            cv2.putText(frame, text1, (10, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (0, 0, 255), 2)
            my_thread = threading.Thread(target=Sound)
            my_thread.start()
            tempval = 0

    # If you are not in a GPU environment, comment out this block # --------------
    # After a second red card, show a picture warning (once)
        if redcard == 2:
            warn_img = cv2.imread("Warning/warning.png", cv2.IMREAD_COLOR)
            cv2.imshow('Warning', warn_img)
            cv2.waitKey(1)
            redcard = 2.1
    # -----------------------------------------------------------------------
    # Continuously log the current state
        print("<< *의심수준:", yellocard, " || ", "*경고횟수:", redcard, " >>")  # "suspicion level" / "warning count"

        #Detect head position
        if isVideo:
            frame, angles = hpd.process_image(frame)
            if frame is None:
                break
            else:
                out.write(frame)
        else:
            frame, angles = hpd.process_image(frame)

            if angles is None:
                pass
            else:  #angles = [x,y,z] , get point from headposition
                if angles[0] > 15 or angles[0] < -15 or angles[
                        1] > 15 or angles[1] < -15 or angles[2] > 15 or angles[
                            2] < -15:
                    yellocard = yellocard + 2
                else:
                    yellocard = yellocard - 1
                    yellocard = notnegative(yellocard)

        yellocard = yellocard + hpd.yello(frame)
        if yellocard < 0:
            yellocard = notnegative(yellocard)

    # Draw a rectangle around the faces and predict the face name
        for (x, y, w, h) in faces:
            # draw a box around the face and take the face pixels from the frame
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            crop_frame = frame[y:y + h, x:x + w]
            # turn the face pixels back into an image and resize it to the
            # 160x160 input size required by FaceNet
            new_crop = Image.fromarray(crop_frame)
            new_crop = new_crop.resize((160, 160))
            crop_frame = np.asarray(new_crop)
            # get the face embedding using the FaceNet model
            face_embed = get_embedding(model, crop_frame)
            # it is a 1d array; reshape it as a 2d tensor for the SVM
            face_embed = face_embed.reshape(-1, face_embed.shape[0])
            # predict with our SVM model; pred_prob holds the probability of each class
            pred = svm.predict(face_embed)
            pred_prob = svm.predict_proba(face_embed)

            # get name
            class_index = pred[0]
            class_probability = pred_prob[0, class_index] * 100
            predict_names = out_encoder.inverse_transform(pred)
            text = 'Predicted: %s (%.3f%%)' % (predict_names[0],
                                               class_probability)

            #add the name to frame but only if the pred is above a certain threshold
            if (class_probability > 70):
                cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                            (0, 0, 255), 2)

        # Display the resulting frame
        cv2.imshow('POCAS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("관리자에 의해 시험이 강제 종료 되었습니다")  # "The exam was force-ended by the administrator"
            PrintResult(yellocard, redcard)
            Fail(timee, redcard)
            break
        elif time.time() > max_time_end:
            print(timee, "분의 시험이 종료되었습니다.")  # "The {timee}-minute exam has ended."
            PrintResult(yellocard, redcard)
            Fail(timee, redcard)
            break

    # When everything done, release the webcam
    webcam.release()
    if isVideo:
        out.release()
        cv2.destroyAllWindows()
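This example and the next rely on a notnegative helper that is defined elsewhere in the project; given how it is used (clamping the suspicion score at zero), a plausible one-liner is:

def notnegative(value):
    # Clamp the suspicion score so it never drops below zero
    return max(value, 0)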
Example no. 11
def main(args):
    filename = args["input_file"]
    faceCascade = cv2.CascadeClassifier(
        'models/haarcascade_frontalface_default.xml')
    model = load_model('models/facenet_keras.h5')

    if filename is None:
        isVideo = False
        #url='http://192.168.0.06:8091/?action=stream'
        #webcam = cv2.VideoCapture(url)

        webcam = cv2.VideoCapture(0)  # grab frames from the webcam
        webcam.set(3, args['wh'][0])
        webcam.set(4, args['wh'][1])
    else:
        isVideo = True
        webcam = cv2.VideoCapture(filename)
        fps = webcam.get(cv2.CAP_PROP_FPS)
        width = int(webcam.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(webcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        name, ext = osp.splitext(filename)
        out = cv2.VideoWriter(args["output_file"], fourcc, fps,
                              (width, height))

    ##############################################################################################

    # Test that the name is passed in correctly -> it is
    # print('main')
    # print(name1)
    # print(time1)

    # ##############################
    UserName = name1
    f.write(UserName + "   ")
    checktime = 1
    start_check = time.time() + (10 * checktime)
    checktime_end = time.time() + (60 * checktime)  # check identity for 1 minute (60 s)
    # Identity-verification loop: confirm the test taker's face before the exam starts
    while webcam.isOpened():

        ret, frame = webcam.read()  # Read a frame from the webcam
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=3,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)  # face structure
        for (x, y, w, h) in faces:
            # draw a box around the face and take the face pixels from the frame
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            crop_frame = frame[y:y + h, x:x + w]
            # turn the face pixels back into an image and resize it to the
            # 160x160 input size required by FaceNet
            new_crop = Image.fromarray(crop_frame)
            new_crop = new_crop.resize((160, 160))
            crop_frame = np.asarray(new_crop)
            # get the face embedding using the FaceNet model
            face_embed = get_embedding(model, crop_frame)
            # it is a 1d array; reshape it as a 2d tensor for the SVM
            face_embed = face_embed.reshape(-1, face_embed.shape[0])
            # predict with our SVM model; pred_prob holds the probability of each class
            pred = svm.predict(face_embed)
            pred_prob = svm.predict_proba(face_embed)

            # get name
            class_index = pred[0]
            class_probability = pred_prob[0, class_index] * 100
            predict_names = out_encoder.inverse_transform(pred)
            text = '%s (%.3f%%)' % (predict_names[0], class_probability)
            cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

        cv2.imshow('capstone', frame)  # -> this displays correctly
        #label.after(20,frame)  # -> tried to open the video inside the Tk window, but it did not work

        cv2.waitKey(1)

        if (time.time() > start_check and predict_names[0] == UserName
                and class_probability > 80):

            # "Face check": "Faces match. The exam will start."
            messagebox.showinfo("얼굴 확인", "얼굴이 일치합니다. 시험을 시작합니다")
            break

        if time.time() > checktime_end:
            # "Face check": "Faces do not match, so you cannot take the exam."
            messagebox.showerror("얼굴 확인", "얼굴이 일치하지 않아 시험에 응시하지 못합니다.")
            f.write("    얼굴 불일치" + '\n')  # "face mismatch"

            TxtOpen()  # works

            quit()
            window.destroy()  # close the UI window
            break

    ##################################################################################

    #########################################################################
    # Variable Setting
    hpd = headpose.HeadposeDetection(
        args["landmark_type"], args["landmark_predictor"])  # import headpose
    gaze = GazeTracking()  # import gazetracking
    yellocard = 0
    redcard = 0
    tempval = 0

    # Exam duration (minutes) used as the time limit
    timee = int(time1)
    #timee = int(input("시험 시간을 입력하세요(Minute): "))  # Korean prompt: "Enter the exam time (minutes)"
    max_time_end = time.time() + (60 * timee)

    check_angle = time.time() + (10 * checktime)
    # Main cheating-detection loop for the online test
    while webcam.isOpened():
        ret, frame = webcam.read()  # Read a frame from the webcam
        if not ret:
            break
        gaze.refresh(frame)
        frame = gaze.annotated_frame()  # Mark pupil for frame

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=3,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)  # face structure

        # Score suspicion points from the gaze direction
        if gaze.is_blinking():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_right():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_left():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_center():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        else:
            yellocard = yellocard + 2

        # Red card escalation: too many accumulated points become a red card
        if yellocard > 50:
            yellocard = 0
            tempval = tempval + 1
            redcard = redcard + 1

        # After a red card, give an audible and on-screen warning (repeats each loop)
        if tempval == 1:
            text1 = "WARNING"
            cv2.putText(frame, text1, (10, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (0, 0, 255), 2)
            my_thread = threading.Thread(target=Sound)
            my_thread.start()
            tempval = 0

    # If you are not in a GPU environment, comment out this block # --------------
    # After a second red card, show a picture warning (once)
        if redcard == 2:
            warn_img = cv2.imread("Warning/warning.png", cv2.IMREAD_COLOR)
            cv2.imshow('Warning', warn_img)
            cv2.waitKey(1)
            redcard = 2.1
    # -----------------------------------------------------------------------
    # Continuously log the current state
        print("<< *의심수준:", yellocard, " || ", "*경고횟수:", redcard, " >>")  # "suspicion level" / "warning count"
        #cv2.destroyWindow('Warning')
        # Detect head position
        if isVideo:
            frame, angles = hpd.process_image(frame)
            if frame is None:
                break
            else:
                out.write(frame)
        else:
            frame, angles = hpd.process_image(frame)
            if angles is None:
                # (disabled) warning: "The test taker has disappeared from view"
                if time.time() > check_angle:
                    redcard = timee / 3 + redcard
                    # "Warning": "You have repeatedly left the camera angle, so the exam is being force-ended."
                    messagebox.showerror(
                        "경고", "지속적으로 카메라 앵글 밖으로 나갔으므로, 시험을 강제종료합니다.")

                    PrintResult(yellocard, redcard)
                    Fail(timee, redcard)

                    TxtOpen()

                    window.destroy()
                    quit()

                else:
                    pass

            else:  # angles = [x,y,z] , get point from headposition
                if angles[0] > 15 or angles[0] < -15 or angles[
                        1] > 15 or angles[1] < -15 or angles[2] > 15 or angles[
                            2] < -15:
                    yellocard = yellocard + 2
                else:
                    yellocard = yellocard - 1
                    yellocard = notnegative(yellocard)

        yellocard = yellocard + hpd.yello(frame)
        if yellocard < 0:
            yellocard = notnegative(yellocard)

        # Display the resulting frame
        cv2.imshow('capstone', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #print("관리자에 의해 시험이 강제 종료 되었습니다")  # "The exam was force-ended by the administrator"

            f.write("경고 횟수 : %s" % (redcard))  # "warning count"
            f.write("   -> 강제 종료 \n")  # "-> force-ended"

            messagebox.showerror("경고", "관리자에 의해 시험이 강제 종료 되었습니다")

            PrintResult(yellocard, redcard)
            Fail(timee, redcard)

            # messagebox.showinfo("결과출력","결과가 출력됩니다.")
            # #messagebox("결과출력","결과가 출력됩니다.")
            # data = open('C:/Capstone/result_data.txt', 'r')
            # contents = data.read()
            # messagebox.showinfo("결과 출력",contents)

            TxtOpen()

            window.destroy()
            #  f.close()
            break
        elif time.time() > max_time_end:
            #print(timee, "분의 시험이 종료되었습니다.")  # "The {timee}-minute exam has ended."
            f.write("    -> 정상 종료 \n")  # "-> ended normally"

            PrintResult(yellocard, redcard)
            Fail(timee, redcard)

            messagebox.showinfo("시험 종료", "시험이 종료되었습니다.")

            #messagebox.showinfo("결과출력","결과가 출력됩니다.")
            # data = open('C:/Capstone/result_data.txt', 'r')
            # contents = data.read()
            # messagebox.showinfo("결과 출력",contents)

            # f.close()

            TxtOpen()

            window.destroy()
            break

    # When everything done, release the webcam
    webcam.release()
    cv2.destroyAllWindows()

    #TxtOpen()  # -> opens a new UI window; probably should not be called here

    quit()
    window.destroy()
    if isVideo:
        out.release()
        cv2.destroyAllWindows()
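Examples 10 and 11 also call a get_embedding helper that is not included; a sketch following the usual FaceNet preprocessing (standardize the crop, add a batch dimension, run the Keras model) could be:

import numpy as np

def get_embedding(model, face_pixels):
    # Standardize pixel values across the face crop
    face_pixels = face_pixels.astype('float32')
    mean, std = face_pixels.mean(), face_pixels.std()
    face_pixels = (face_pixels - mean) / std
    # The Keras model expects a batch, so add a leading dimension
    sample = np.expand_dims(face_pixels, axis=0)
    embedding = model.predict(sample)
    return embedding[0]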
Example no. 12
def eyeGaze():
    gaze = GazeTracking()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        help=
        'Path to input image or video file. Skip this argument to capture frames from a camera.'
    )

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "pretrained_model/shape_predictor_68_face_landmarks.dat")

    args = parser.parse_args()
    cap = cv2.VideoCapture(args.i if args.i else 0)

    while cv2.waitKey(1) < 0:
        t = time.time()
        ret, frame = cap.read()
        if not ret:
            cv2.waitKey()
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = detector(gray)
        if faces is not None:
            i = np.zeros(shape=(frame.shape), dtype=np.uint8)
        for face in faces:
            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(frame, (left, top), (right, bottom), (147, 58, 31),
                          2)
            cv2.rectangle(frame, (left, bottom - 10), (right, bottom),
                          (147, 58, 31), cv2.FILLED)
            cv2.putText(frame, text, (left + 2, bottom - 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        #cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1, (147, 58, 31), 2)
        '''left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.7, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.7, (147, 58, 31), 1)'''

        cv2.imshow("Demo", frame)

        print("Time : {:.3f}".format(time.time() - t))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('[INFO] Stopping System')
    cap.release()
    cv2.destroyAllWindows()
Example no. 13
class stream_video():
    def __init__(self):
        self.model = model_face()
        self.model.load_weights('model/model.h5')

        self.gaze = GazeTracking()

        # cv2.ocl.setUseOpenCL(True)

        self.emotion_dict = {
            0: "Angry",
            1: "Disgusted",
            2: "Fearful",
            3: "Happy",
            4: "Neutral",
            5: "Sad",
            6: "Surprised"
        }

        self.cap = cv2.VideoCapture(0)
        self.facecasc = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')

    def take_frame(self):
        _, self.frame = self.cap.read()

        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        faces = self.facecasc.detectMultiScale(gray,
                                               scaleFactor=1.3,
                                               minNeighbors=5)

        for (x, y, w, h) in faces:
            cv2.rectangle(self.frame, (x, y - 50), (x + w, y + h + 10),
                          (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            cropped_img = np.expand_dims(
                np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = self.model.predict(cropped_img)

            maxindex = int(np.argmax(prediction))
            cv2.putText(self.frame, self.emotion_dict[maxindex],
                        (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)

    def eye_construction(self):
        self.gaze.refresh(self.frame)
        self.frame = self.gaze.annotated_frame()

        text = ""

        if self.gaze.is_blinking():
            text = "Blinking"
        elif self.gaze.is_right():
            text = "Looking right"
        elif self.gaze.is_left():
            text = "Looking left"
        elif self.gaze.is_center():
            text = "Looking center"

        cv2.putText(self.frame, text, (20, 100), cv2.FONT_HERSHEY_DUPLEX, 1.2,
                    (147, 58, 31), 2)

        left_pupil = self.gaze.pupil_left_coords()
        right_pupil = self.gaze.pupil_right_coords()
        cv2.putText(self.frame, "Left pupil:  " + str(left_pupil), (5, 30),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(self.frame, "Right pupil: " + str(right_pupil), (5, 60),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    def print_frame(self):
        cv2.imshow(
            'Video',
            cv2.resize(self.frame, (1600, 960), interpolation=cv2.INTER_CUBIC))
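No driver loop is included for stream_video; a minimal sketch of how its three methods are meant to be chained (the loop itself is an assumption) could be:

import cv2

# Hypothetical driver: grab a frame, run emotion + gaze annotation, display it.
stream = stream_video()
while True:
    stream.take_frame()        # face detection and emotion label on self.frame
    stream.eye_construction()  # gaze annotation on the same frame
    stream.print_frame()       # show the combined result
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
stream.cap.release()
cv2.destroyAllWindows()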
Example no. 14
def gaze():

    gaze = GazeTracking()
    # webcam = cv2.VideoCapture(0)
    distraction_point = 0
    photo_block = []
    curr_location = [0, 0]  # last (horizontal, vertical) gaze-ratio sample
    prev_second2 = -1       # second at which the previous sample was taken
    distance = []           # squared gaze displacement collected per second

    while True:
        tm = time.localtime()  # refresh the clock each iteration
        # We get a new frame from the webcam
        # _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        # gaze.refresh(frame)

        # frame = gaze.annotated_frame()
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            # print("blinking")

        if gaze.is_right():
            text = "Looking right"
            # print("right")

        elif gaze.is_left():
            text = "Looking left"
            # print("left")

        elif gaze.is_center():
            text = "Looking center"
            # print("center")

        ## start of edited section

        hori_ratio = gaze.horizontal_ratio()
        verti_ratio = gaze.vertical_ratio()

        try:

            if curr_location == [0, 0]:
                curr_location = [hori_ratio, verti_ratio]
                print(curr_location)
            else:
                prev_location = curr_location
                curr_location = [hori_ratio, verti_ratio]
                hori_diff = curr_location[0] - prev_location[0]
                verti_diff = curr_location[1] - prev_location[1]

                if prev_second2 == -1:
                    prev_second2 = tm.tm_sec
                    print(prev_second2)
                else:
                    curr_second2 = tm.tm_sec
                    if curr_second2 - prev_second2 == 1 or curr_second2 - prev_second2 < 0:
                        distance.append((hori_diff**2) + (verti_diff**2))
                        prev_second2 = curr_second2

                        if len(photo_block) < 3:
                            photo_block.append((hori_diff**2))

                # thresholds for len(distance) and sum(distance) are arbitrary
                if len(distance) > 59:
                    if sum(distance) > 1:
                        print('주의 산만')  # "distracted"
                        distraction_point += 1
                        distance = distance[1:]

        except:
            curr_location = [0.5, 0.5]
Example no. 15
def run(models):
    # open the camera
    cap = cv2.VideoCapture(0)
    gaze = GazeTracking()
    cheat_cnt = 0
    right_cnt = 0
    left_cnt = 0
    up_cnt = 0
    down_cnt = 0
    name = ""

    while True:
        # read one frame from the camera
        ret, frame = cap.read()

        gaze.refresh(frame)
        # try to detect a face
        image, face = face_detector(frame)
        try:
            min_score = 999  # lowest (best) prediction score seen so far
            min_score_name = ""  # name of the person with that lowest score

            # convert the detected face to grayscale
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

            # try a prediction with each of the models trained above
            for key, model in models.items():
                result = model.predict(face)
                if min_score > result[1]:
                    min_score = result[1]
                    min_score_name = key

            # min_score is the confidence score; the closer to 0, the better the match
            display_string = ""
            confidence = 0
            if min_score < 500:
                confidence = int(100 * (1 - (min_score) / 300))
                # show the similarity on screen
                display_string = str(
                    confidence) + '% Confidence it is ' + min_score_name
                name = min_score_name
            cv2.putText(image, display_string, (90, 170),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (250, 120, 255), 2)
            # above 75, treat as the same person: Match!
            if confidence > 75:
                cv2.putText(image, "Match : " + min_score_name, (250, 450),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Detection', image)
                frame = gaze.annotated_frame()
                text = ""

                if gaze.is_blinking():
                    text = "Blinking"
                elif gaze.is_right():
                    text = "Looking right"
                    right_cnt += 1
                elif gaze.is_left():
                    text = "Looking left"
                    left_cnt += 1
                elif gaze.is_up():
                    text = "Looking up"
                    up_cnt += 1
                elif gaze.is_down():
                    text = "Looking down"
                    down_cnt += 1
                elif gaze.is_center():
                    text = "Looking center"

                cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX,
                            1.6, (147, 58, 31), 2)

                left_pupil = gaze.pupil_left_coords()
                right_pupil = gaze.pupil_right_coords()

                cv2.putText(frame, "Left pupil:  " + str(left_pupil),
                            (90, 100), cv2.FONT_HERSHEY_DUPLEX, 0.9,
                            (147, 58, 31), 1)
                cv2.putText(frame, "Right pupil: " + str(right_pupil),
                            (90, 135), cv2.FONT_HERSHEY_DUPLEX, 0.9,
                            (147, 58, 31), 1)

                cv2.imshow("Face Detection", frame)

                if left_pupil is None and right_pupil is None:
                    cheat_cnt += 1
                elif right_cnt > 5:
                    cheat_cnt += 1
                    print('Too much looking right')
                elif left_cnt > 5:
                    cheat_cnt += 1
                    print('Too much looking left')
                elif up_cnt > 5:
                    cheat_cnt += 1
                    print('Too much looking up')
                elif down_cnt > 5:
                    cheat_cnt += 1
                    print('Too much looking down')
            else:
                # 75 or below: Unmatch
                cv2.putText(image, "Unmatch", (250, 450),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                cv2.imshow('Face Detection', image)
        except:
            # face not detected
            cv2.putText(image, "Face Not Found", (250, 450),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            cv2.imshow('Face Detection', image)
            pass

        if cheat_cnt > 100:
            print(name + " Cheating Probability is high")
            break
        if cv2.waitKey(1) == 13:
            break
    cap.release()
    cv2.destroyAllWindows()
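run depends on a face_detector helper that is not part of this snippet; a plausible Haar-cascade version that returns the annotated image and a fixed-size crop of the detected face (the 200x200 size is an assumption) might be:

import cv2

face_classifier = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def face_detector(img):
    # Hypothetical helper: annotate the frame and return a crop of the last detected face
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        roi = img[y:y + h, x:x + w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi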
Example no. 16
class face_detector():
	def __init__(self):
		# Load the parameters
		self.conf = config()
		# initialize dlib's face detector (HOG-based) and then create the
		# facial landmark predictor
		print("[INFO] loading facial landmark predictor...")
		self.detector = dlib.get_frontal_face_detector()
		self.predictor = dlib.shape_predictor(self.conf.shape_predictor_path)
		
		# grab the indexes of the facial landmarks for the left and
		# right eye, respectively
		(self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
		(self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
		
		# initialize the video stream and sleep for a bit, allowing the
		# camera sensor to warm up
		self.cap = cv2.VideoCapture(0)
		if self.conf.vedio_path == 0:
			self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
		_, sample_frame = self.cap.read()
		
		# Introduce mark_detector to detect landmarks.
		self.mark_detector = MarkDetector()
		
		# Setup process and queues for multiprocessing.
		self.img_queue = Queue()
		self.box_queue = Queue()
		self.img_queue.put(sample_frame)
		self.box_process = Process(target=get_face, args=(
			self.mark_detector, self.img_queue, self.box_queue,))
		self.box_process.start()
		
		# Introduce pose estimator to solve pose. Get one frame to setup the
		# estimator according to the image size.
		self.height, self.width = sample_frame.shape[:2]
		self.pose_estimator = PoseEstimator(img_size=(self.height, self.width))
		
		# Introduce scalar stabilizers for pose.
		self.pose_stabilizers = [Stabilizer(
			state_num=2,
			measure_num=1,
			cov_process=0.1,
			cov_measure=0.1) for _ in range(6)]
		
		self.tm = cv2.TickMeter()
		# Gaze tracking
		self.gaze = GazeTracking()
	
	def detect(self):
		# loop over the frames from the video stream
		temp_steady_pose = 0
		# initialize the blink frame counter and the total number of blinks
		# once, so they accumulate across frames
		COUNTER = 0
		TOTAL = 0
		while True:
			# grab the frame from the threaded video stream, resize it to
			# have a maximum width of 400 pixels, and convert it to
			# grayscale
			frame_got, frame = self.cap.read()
			
			# Empty frame
			frame_empty = np.zeros(frame.shape)
			
			# frame = imutils.rotate(frame, 90)
			frame = imutils.resize(frame, width=400)
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			
			# detect faces in the grayscale frame
			rects = self.detector(gray, 0)
			
			# loop over the face detections
			for (i, rect) in enumerate(rects):
				# determine the facial landmarks for the face region, then
				# convert the facial landmark (x, y)-coordinates to a NumPy
				# array
				self.shape = self.predictor(gray, rect)
				self.shape = face_utils.shape_to_np(self.shape)
				
				# ********************************
				# Blink detection
				# extract the left and right eye coordinates, then use the
				# coordinates to compute the eye aspect ratio for both eyes
				self.leftEye = self.shape[self.lStart:self.lEnd]
				self.rightEye = self.shape[self.rStart:self.rEnd]
				self.leftEAR = eye_aspect_ratio(self.leftEye)
				self.rightEAR = eye_aspect_ratio(self.rightEye)
				
				# average the eye aspect ratio together for both eyes
				ear = (self.leftEAR + self.rightEAR) / 2.0
				
				# check to see if the eye aspect ratio is below the blink
				# threshold, and if so, increment the blink frame counter
				if ear < self.conf.EYE_AR_THRESH:
					COUNTER += 1
				
				# otherwise, the eye aspect ratio is not below the blink
				# threshold
				else:
					# if the eyes were closed for a sufficient number of
					# then increment the total number of blinks
					if COUNTER >= self.conf.EYE_AR_CONSEC_FRAMES:
						TOTAL += 1
					
					# reset the eye frame counter
					COUNTER = 0
				
				# Frame empty
				cv2.putText(frame_empty, "Blinks: {}".format(TOTAL), (30, 60),
							cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 2)
				cv2.putText(frame_empty, "EAR: {:.2f}".format(ear), (30, 90),
							cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 2)
				
				# ********************************
				# convert dlib's rectangle to a OpenCV-style bounding box
				# [i.e., (x, y, w, h)], then draw the face bounding box
				(x, y, w, h) = face_utils.rect_to_bb(rect)
				self.bounding_box = (x, y, w, h)
				# cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
				
				# Frame empty
				cv2.rectangle(frame_empty, (x, y), (x + w, y + h), (0, 255, 0), 2)
				
				# show the face number
				cv2.putText(frame_empty, "Face #{}".format(i + 1), (30, 120),
							cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 2)
				
				# loop over the (x, y)-coordinates for the facial landmarks
				# and draw them on the image
				for (x, y) in self.shape:
					# cv2.circle(frame, (x, y), 1, (0, 255, 255), -1)
					cv2.circle(frame_empty, (x, y), 1, (0, 255, 255), -1)
			
			# **********************************************************
			if frame_got is False:
				break
			
			# If frame comes from webcam, flip it so it looks like a mirror.
			if self.conf.vedio_path == 0:
				frame = cv2.flip(frame, 2)
			
			# Pose estimation by 3 steps:
			# 1. detect face;
			# 2. detect landmarks;
			# 3. estimate pose
			
			# Feed frame to image queue.
			self.img_queue.put(frame)
			
			# Get face from box queue.
			self.facebox = self.box_queue.get()
			
			if self.facebox is not None:
				# Detect landmarks from image of 128x128.
				face_img = frame[self.facebox[1]: self.facebox[3],
						   self.facebox[0]: self.facebox[2]]
				face_img = cv2.resize(face_img, (self.conf.CNN_INPUT_SIZE, self.conf.CNN_INPUT_SIZE))
				face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
				
				self.tm.start()
				# marks = self.mark_detector.detect_marks([face_img])
				self.tm.stop()
				
				# Convert the marks locations from local CNN to global image.
				self.shape *= (self.facebox[2] - self.facebox[0])
				self.shape[:, 0] += self.facebox[0]
				self.shape[:, 1] += self.facebox[1]
				
				# Uncomment following line to show raw marks.
				# mark_detector.draw_marks(
				#     frame, marks, color=(0, 255, 0))
				
				# Uncomment following line to show facebox.
				# mark_detector.draw_box(frame, [facebox])
				
				# Try pose estimation with 68 points.
				self.pose = self.pose_estimator.solve_pose_by_68_points(self.shape)
				
				# Stabilize the pose.
				self.steady_pose = []
				pose_np = np.array(self.pose).flatten()
				for value, ps_stb in zip(pose_np, self.pose_stabilizers):
					ps_stb.update([value])
					self.steady_pose.append(ps_stb.state[0])
				self.steady_pose = np.reshape(self.steady_pose, (-1, 3))
				
				# Uncomment following line to draw pose annotation on frame.
				# pose_estimator.draw_annotation_box(
				# 	frame, pose[0], pose[1], color=(255, 128, 128))
				
				# Uncomment following line to draw stabile pose annotation on frame.
				# pose_estimator.draw_annotation_box(frame, steady_pose[0], steady_pose[1], color=(128, 255, 128))
				
				# Uncomment following line to draw head axes on frame.
				# pose_estimator.draw_axes(frame, steady_pose[0], steady_pose[1])
				self.pose_estimator.draw_axes(frame_empty, self.steady_pose[0], self.steady_pose[1])
				print('steady pose vector: {}, {}'.format(self.steady_pose[0], self.steady_pose[1]))
			else:
				# cv2.putText(frame, "Signal loss", (200, 200),
				# 			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
				cv2.putText(frame_empty, "Signal loss", (200, 200),
							cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			# ******************************************************************
			# We send this frame to GazeTracking to analyze it
			self.gaze.refresh(frame)
			
			frame = self.gaze.annotated_frame()
			text = ""
			
			if self.gaze.is_blinking():
				text = "Blinking"
			elif self.gaze.is_right():
				text = "Looking right"
			elif self.gaze.is_left():
				text = "Looking left"
			elif self.gaze.is_center():
				text = "Looking center"
			
			cv2.putText(frame_empty, text, (250, 250), cv2.FONT_HERSHEY_DUPLEX, 0.5, (147, 58, 31), 2)
			
			left_pupil = self.gaze.pupil_left_coords()
			right_pupil = self.gaze.pupil_right_coords()
			cv2.putText(frame_empty, "Left pupil:  " + str(left_pupil), (250, 280), cv2.FONT_HERSHEY_DUPLEX, 0.5,
						(147, 58, 31), 1)
			cv2.putText(frame_empty, "Right pupil: " + str(right_pupil), (250, 310), cv2.FONT_HERSHEY_DUPLEX, 0.5,
						(147, 58, 31), 1)
			
			# ********************************************************************
			# show the frame
			# cv2.imshow("Frame", frame)
			cv2.imshow("Frame", frame_empty)
			key = cv2.waitKey(1) & 0xFF
			
			self.pass_variable = np.array(1)
			
			try:
				self._listener(self.pass_variable)
			except:
				pass
			
			# if the `q` key was pressed, break from the loop
			if key == ord("q"):
				break
		# do a bit of cleanup
		cv2.destroyAllWindows()
		# self.cap.stop()
	
	def set_listener(self, listener):
		self._listener = listener
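The detect method above uses an eye_aspect_ratio helper that is not shown; the usual definition from the dlib blink-detection recipe (ratio of the vertical eye-landmark distances to the horizontal one) is:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # eye: the six (x, y) landmarks of one eye in dlib's 68-point ordering
    A = dist.euclidean(eye[1], eye[5])  # vertical distance 1
    B = dist.euclidean(eye[2], eye[4])  # vertical distance 2
    C = dist.euclidean(eye[0], eye[3])  # horizontal distance
    # The ratio drops sharply while the eye is closed
    return (A + B) / (2.0 * C)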
Example no. 17
def TrackImages():
    check_haarcascadefile()
    assure_path_exists("Attendance/")
    assure_path_exists("StudentDetails/")
    for k in tv.get_children():
        tv.delete(k)
    msg = ''
    i = 0
    j = 0

    recognizer = cv2.face.LBPHFaceRecognizer_create(
    )  # cv2.createLBPHFaceRecognizer()
    exists3 = os.path.isfile("TrainingImageLabel\Trainner.yml")
    if exists3:
        recognizer.read("TrainingImageLabel\Trainner.yml")
    else:
        mess._show(title='Data Missing',
                   message='Please click on Save Profile to reset data!!')
        return
    harcascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadePath)

    cam = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    col_names = ['Id', '', 'Name', '', 'Date', '', 'Time']
    exists1 = os.path.isfile("StudentDetails\StudentDetails.csv")
    if exists1:
        df = pd.read_csv("StudentDetails\StudentDetails.csv")
    else:
        mess._show(title='Details Missing',
                   message='Students details are missing, please check!')
        cam.release()
        cv2.destroyAllWindows()
        window.destroy()

    gaze = GazeTracking()
    l = 0
    r = 0
    l_done = 0
    r_done = 0
    ok = 0
    all_ok = 0
    LARGE_FONT = ("Verdana", 12)
    NORM_FONT = ("Helvetica", 10)
    SMALL_FONT = ("Helvetica", 8)
    while True:
        ret, im = cam.read()
        _, frame = cam.read()  #---------

        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.2, 5)

        gaze.refresh(frame)  #-------
        im = gaze.annotated_frame()  #-----
        text = ""  #------
        msgg = ""  #-----

        if gaze.is_blinking():
            text = "Blinking"
        elif gaze.is_right():
            text = "Looking right"
            r_done = 1
        elif gaze.is_left():
            text = "Looking left"
            l_done = 1
        elif gaze.is_center():
            text = "Looking center"

        if (l_done == 0 and r_done == 0):
            msgg = "Please turn your eyes to the left side"
        elif (l_done == 1 and r_done == 0):
            msgg = "Please turn your eyes to the right side"
        elif (l_done == 0 and r_done == 1):
            msgg = "Please turn your eyes to the left side"
        elif (l_done == 1 and r_done == 1):
            msgg = "Eye movement done"
            ok = 1

        for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
            serial, conf = recognizer.predict(gray[y:y + h, x:x + w])
            if (conf < 50):
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime(
                    '%H:%M:%S')
                aa = df.loc[df['SERIAL NO.'] == serial]['NAME'].values
                ID = df.loc[df['SERIAL NO.'] == serial]['ID'].values
                ID = str(ID)
                ID = ID[1:-1]
                bb = str(aa)
                bb = bb[2:-2]
                attendance = [
                    str(ID), '', bb, '',
                    str(date), '',
                    str(timeStamp)
                ]
                if (ok == 1):
                    cv2.putText(im,
                                "{} Press 'Q' for save and exit !".format(bb),
                                (50, 30), font, 0.7, (0, 0, 255), 2)
                    all_ok = 1
                else:
                    cv2.putText(
                        im,
                        "{} Follow The Eye Movement Instructions !".format(bb),
                        (30, 30), font, 0.7, (0, 0, 255), 2)

            else:
                Id = 'Unknown'
                bb = str(Id)
                cv2.putText(im, "You are unregistered !", (30, 30), font, 0.7,
                            (0, 0, 255), 2)
            cv2.putText(im, str(bb), (x, y + h), font, 1, (255, 255, 255), 2)

        cv2.putText(im, "Eye position {}".format(text), (2, 70), font, 0.5,
                    (95, 106, 106), 2)  #-------
        cv2.putText(im, "{}".format(msgg), (150, 450), font, 0.6, (0, 0, 255),
                    2)  #-------
        cv2.imshow('Taking Attendance', im)

        if (cv2.waitKey(1) == ord('q')):
            break

    if (all_ok == 1):
        ts = time.time()
        date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
        exists = os.path.isfile("Attendance\Attendance_" + date + ".csv")
        if exists:
            with open("Attendance\Attendance_" + date + ".csv",
                      'a+') as csvFile1:
                writer = csv.writer(csvFile1)
                writer.writerow(attendance)
            csvFile1.close()
        else:
            with open("Attendance\Attendance_" + date + ".csv",
                      'a+') as csvFile1:
                writer = csv.writer(csvFile1)
                writer.writerow(col_names)
                writer.writerow(attendance)
            csvFile1.close()
        with open("Attendance\Attendance_" + date + ".csv", 'r') as csvFile1:
            reader1 = csv.reader(csvFile1)
            for lines in reader1:
                i = i + 1
                if (i > 1):
                    if (i % 2 != 0):
                        iidd = str(lines[0]) + '   '
                        tv.insert('',
                                  0,
                                  text=iidd,
                                  values=(str(lines[2]), str(lines[4]),
                                          str(lines[6])))
        csvFile1.close()
        cam.release()
        cv2.destroyAllWindows()
    else:
        popup = tk.Tk()
        popup.wm_title("!")
        label = ttk.Label(popup, text="Something Wrong !", font=NORM_FONT)
        label.pack(side="top", fill="x", pady=10)
        B1 = ttk.Button(popup, text="Okay", command=popup.destroy)
        B1.pack()
        #popup.mainloop()
        cam.release()
        cv2.destroyAllWindows()
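TrackImages calls check_haarcascadefile and assure_path_exists, which are defined elsewhere in that project; simple versions consistent with how they are used might be:

import os

def assure_path_exists(path):
    # Create the directory (and any parents) if it does not exist yet
    directory = os.path.dirname(path)
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)

def check_haarcascadefile():
    # Stop early if the Haar cascade file is missing from the working directory
    if not os.path.isfile("haarcascade_frontalface_default.xml"):
        raise FileNotFoundError("haarcascade_frontalface_default.xml is missing")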
Example no. 18
def get_data():
    l = []
    center_left = []
    center_right = []
    l_x = []
    l_y = []
    r_x = []
    r_y = []
    d = {'Time': [], 'Left eye': [], 'Right eye': []}

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            l.append(datetime.datetime.now())
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking center"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        try:
            l_x.append(gaze.pupil_left_coords()[0])
            l_y.append(gaze.pupil_left_coords()[1])
            r_x.append(gaze.pupil_right_coords()[0])
            r_y.append(gaze.pupil_right_coords()[1])
        except:
            l_x.append(0)
            l_y.append(0)
            r_x.append(0)
            r_y.append(0)
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        #print((left_pupil,right_pupil))
        try:
            d['Left eye'].append((left_pupil[0], left_pupil[1]))
            d['Right eye'].append((right_pupil[0], right_pupil[1]))
            d['Time'].append(datetime.datetime.now())
        except:
            d['Left eye'].append(0)
            d['Right eye'].append(0)
            d['Time'].append(datetime.datetime.now())

        cv2.imshow("Frame", frame)

        if cv2.waitKey(1) == 27:  # exit on Esc
            break

    webcam.release()
    cv2.destroyAllWindows()

    eye_coordinates = pd.DataFrame(d)
    eye_coordinates.columns = ['Time', 'Left eye', 'Right eye']
    eye_blinking = pd.Series(l)
    return eye_coordinates  #,eye_blinking,center_right,center_left,l_x,l_y,r_x,r_y
Example no. 19
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    _, frame = webcam.read()

    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    cv2.putText(frame, "Left Eye Coords :  " + str(left_pupil), (90, 130),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Right Eye Coords: " + str(right_pupil), (90, 165),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    cv2.imshow("Demo", frame)

    if cv2.waitKey(1) == 27:
        break
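The loop above never releases the capture device; a short cleanup after the loop (an addition, not part of the original snippet) avoids leaving the webcam locked.

webcam.release()
cv2.destroyAllWindows()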
Example n. 20
0
def calculate_cog_load():
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    pupil_position = Pupil_position('center', 0)

    t = dt.datetime.now()
    start_time = dt.datetime.now()
    blink_count = 0
    saccades = 0
    pupil_dilation_x = []
    pupil_dilation_y = []
    fixations = [0]
    minute = 0
    blink_rate = 0
    saccades_rate = 0
    pup_dil_x = 0
    pup_dil_y = 0
    fixation_avg = 0
    cogload = 0

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)
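        # Note: the Fitbit heart-rate endpoint below is polled on every frame, and
        # `header` (the OAuth Authorization header) is assumed to be defined
        # earlier in the original script.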

        response = requests.get(
            "https://api.fitbit.com/1/user/7QCRW3/activities/heart/date/today/today.json",
            headers=header).json()

        frame = gaze.annotated_frame()
        text = ""

        # Record the gaze ratios only when they were actually measured
        # (they are None while no pupil is detected), and only once per frame.
        horizontal_ratio = gaze.horizontal_ratio()
        vertical_ratio = gaze.vertical_ratio()

        if horizontal_ratio is not None:
            pupil_dilation_x.append(horizontal_ratio)

        if vertical_ratio is not None:
            pupil_dilation_y.append(vertical_ratio)

        if gaze.is_blinking():
            text = "Blinking"
            blink_count = blink_count + 1

        elif gaze.is_right():
            delta = dt.datetime.now() - t
            position = Pupil_position('right', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking right"

        elif gaze.is_left():
            delta = dt.datetime.now() - t
            position = Pupil_position('left', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking left"

        elif gaze.is_center():
            delta = dt.datetime.now() - t
            position = Pupil_position('center', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking center"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()

        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Blink Rate: " + str(blink_rate), (90, 195),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Saccades Rate: " + str(saccades_rate), (90, 225),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation x: " + str(pup_dil_x), (90, 255),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation y: " + str(pup_dil_y), (90, 285),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Fixation: " + str(fixation_avg), (90, 315),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Cognitive Load: " + str(cogload), (90, 345),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        delta = dt.datetime.now() - t

        elapsed_time = dt.datetime.now() - start_time
        elapsed_time_second = elapsed_time.seconds

        cv2.putText(frame, "Elapsed Time: " + str(elapsed_time_second),
                    (90, 375), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.imshow("Demo", frame)

        if delta.seconds >= 10:
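            # "minute" counts 10-second windows; the rates below are per second
            # over the window that just ended.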
            minute = minute + 1
            blink_rate = blink_count / 10
            saccades_rate = saccades / 10

            # Drop any None samples, then average; guard against an empty
            # window (e.g. no pupil detected during the last 10 seconds).
            pupil_dilation_x = [x for x in pupil_dilation_x if x is not None]
            pupil_dilation_y = [y for y in pupil_dilation_y if y is not None]

            if pupil_dilation_x:
                pup_dil_x = sum(pupil_dilation_x) / len(pupil_dilation_x)
            if pupil_dilation_y:
                pup_dil_y = sum(pupil_dilation_y) / len(pupil_dilation_y)

            fixation_avg = sum(fixations) / len(fixations)

            blink_count = 0
            saccades = 0

            pupil_position = Pupil_position('center', 0)

            t = dt.datetime.now()

            pupil_dilation_x = []
            pupil_dilation_y = []
            fixations = [0]

            print(
                response['activities-heart-intraday']['dataset'][-1]['value'])
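            # Ad-hoc cognitive-load score used in this example: blink rate, plus the
            # magnitude of the averaged gaze ratios, plus saccade rate, minus the
            # average fixation duration.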

            cogload = (blink_rate
                       + math.sqrt(pup_dil_x * pup_dil_x + pup_dil_y * pup_dil_y)
                       + saccades_rate
                       - fixation_avg)

            print(blink_rate)
            print(pup_dil_x)
            print(pup_dil_y)
            print(saccades_rate)
            print(fixation_avg)
            print(cogload)
            write_csv('data.csv', minute, blink_rate, pup_dil_x, pup_dil_y,
                      fixation_avg, saccades_rate, cogload)

        if cv2.waitKey(33) == 27:
            break
        time.sleep(0.25)
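This example relies on a Pupil_position helper and a write_csv function defined elsewhere in its project. Minimal sketches of what they plausibly look like, inferred only from how they are called above (the field names and CSV layout are assumptions):

import csv


class Pupil_position:
    # Gaze-direction label together with the elapsed time (in seconds) at which it was seen.
    def __init__(self, position, time):
        self.position = position  # 'left', 'right' or 'center'
        self.time = time          # seconds into the current measurement window


def write_csv(filename, minute, blink_rate, pup_dil_x, pup_dil_y,
              fixation_avg, saccades_rate, cogload):
    # Append one row of aggregated metrics per 10-second window.
    with open(filename, 'a', newline='') as f:
        csv.writer(f).writerow([minute, blink_rate, pup_dil_x, pup_dil_y,
                                fixation_avg, saccades_rate, cogload])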
Example n. 21
0
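This excerpt is only the body of a capture loop. The setup and loop header below are assumptions added so the fragment reads in context: the serial port name is hypothetical, and gaze.is_up() suggests a modified fork of GazeTracking, since the upstream library only exposes the left/right/center/blinking checks.

import cv2
import serial  # assumed: the ser.write() calls suggest a serial-controlled device
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
ser = serial.Serial("/dev/ttyUSB0", 9600)  # hypothetical port and baud rate

while True: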
    _, frame = webcam.read()

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Down"
        #ser.write(b'B')
    elif gaze.is_right():
        text = "Right"
        #ser.write(b'R')
    elif gaze.is_left():
        text = "Left"
        #ser.write(b'L')
    elif gaze.is_up():
        text = "Up"
        ser.write(b'F')
    elif gaze.is_center():
        text = "Center"
        #ser.write(b'S')
       
    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (0, 0, 255), 2)

    cv2.imshow("Eye Tracking", frame)

    if cv2.waitKey(1) == 27:
        break
Example n. 22
0
File: test.py Project: yy-oung/test
def gen_frames():
    camera = cv2.VideoCapture(0)
    time.sleep(0.2)
    lastTime = time.time() * 1000.0

    # gaze tracking
    gaze = GazeTracking()

    while True:

        ret, image = camera.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
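        # faceCascade is assumed to be a cv2.CascadeClassifier loaded elsewhere
        # in test.py (e.g. OpenCV's haarcascade_frontalface_default.xml).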

        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.3,
                                             minNeighbors=5)

        delt = time.time() * 1000.0 - lastTime
        s = str(int(delt))
        #print (delt," Found {0} faces!".format(len(faces)) )
        lastTime = time.time() * 1000.0

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.circle(image, (int(x + w / 2), int(y + h / 2)), int(
                (w + h) / 3), (255, 255, 255), 3)
            cv2.putText(image, s, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 255, 0), 2)

        now = datetime.datetime.now()
        timeString = now.strftime("%Y-%m-%d %H:%M")
        cv2.putText(image, timeString, (10, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (255, 0, 0), 2)
        #cv2.imshow("Frame", image)

        # gaze tracking
        gaze.refresh(image)
        image = gaze.annotated_frame()

        text = ""

        if gaze.is_right():
            text = "Looking right"
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking center"

        cv2.putText(image, text, (10, 65), cv2.FONT_HERSHEY_DUPLEX, 0.7,
                    (255, 0, 0), 2)
        cv2.imshow("Demo", image)

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        ret, buffer = cv2.imencode('.jpg', image)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
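gen_frames() yields a multipart MJPEG stream, which is the usual shape for a Flask video endpoint; a minimal sketch of the route that would consume it (assumed, the excerpt does not show it):

from flask import Flask, Response

app = Flask(__name__)


@app.route('/video_feed')
def video_feed():
    # Stream the annotated frames as multipart JPEGs; the 'frame' boundary
    # matches the b'--frame' marker yielded by gen_frames().
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')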