Example No. 1
def main():
    # Initialize webcam
    webcam = cv2.VideoCapture(0)

    # Start GazeTracking
    gaze = GazeTracking()
    thread_gaze = Thread(target=gaze.run, args=(webcam, ))
    thread_gaze.name = "ThreadGaze"
    #thread_gaze.daemon = True
    thread_gaze.start()

    # Calibrate
    size = pyautogui.size()
    # changed manually to test only in my primary monitor
    size = (1920, 1080)
    calibration = Calibration(gaze, size)

    # Show annotated camera
    annotated_frame = AnnotatedFrame(gaze)
    thread_annotated = Thread(target=annotated_frame.show)
    thread_annotated.name = "ThreadAnnotated"
    #thread_annotated.daemon = True
    thread_annotated.start()

    # Initialize mouse control
    mouse_control = MouseControl(gaze, calibration)
    mouse_control.run()
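
The stock gaze_tracking.GazeTracking class exposes refresh() but no run() method, so this example evidently wraps the tracker in something that keeps refreshing frames on a background thread. A minimal sketch of such a wrapper, assuming only the library's refresh() API (the class name here is hypothetical):

import cv2
from gaze_tracking import GazeTracking


class ThreadedGaze(GazeTracking):
    """Hypothetical wrapper: keeps refreshing the tracker from a capture device."""

    def run(self, webcam):
        while True:
            ok, frame = webcam.read()
            if not ok:
                break
            self.refresh(frame)  # analyze the most recent frame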
Example No. 2
    def __init__(self, testMode, rotation):
        self.blinked = False
        self.captured = False
        self.rotation = rotation
        self.photoList = dict()
        self.genderList = list()
        self.ageList = list()
        self.timeList = list()

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        self.W = None
        self.H = None

        # Model
        self.face_cascade = cv2.CascadeClassifier()
        if not self.face_cascade.load('haarcascade_frontalface_alt.xml'):
            print("face detector model not loaded")
        self.age_net = cv2.dnn.readNetFromCaffe(
            model_directory + '/deploy_age.prototxt',
            model_directory + '/age_net.caffemodel')
        self.gender_net = cv2.dnn.readNetFromCaffe(
            model_directory + '/deploy_gender.prototxt',
            model_directory + '/gender_net.caffemodel')

        # Edit this for development
        print("Running on " + platform.system())
        if platform.system() == 'Linux':  # platform.system() returns 'Linux'; 'linux2' is a sys.platform value
            self.cam = cv2.VideoCapture(-1)
        else:
            self.cam = cv2.VideoCapture(1)
        self.cam.set(3, 1920)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
        self.cam.set(4, 1920)  # 4 = cv2.CAP_PROP_FRAME_HEIGHT
        self.gaze = GazeTracking(testMode)
Example No. 3
 def __init__(self):
     self.gaze = GazeTracking()
     self.webcam = cv2.VideoCapture(0)
     self.hori = []
     self.verti = []
     self.circle = []
     self.i = 0
     self.window1 = Tk()
     pass
Example No. 4
 def __init__(self):
     self.cv_bridge = CvBridge()
     self.gaze = GazeTracking()
     self.publish_annotated_frame = rospy.get_param(
         "~publish_annotated_frame", True)
     if self.publish_annotated_frame:
         self.annotated_frame_publisher = rospy.Publisher(
             'image_annotated_raw', Image, queue_size=10)
     self.gaze_publisher = rospy.Publisher('gaze_state',
                                           GazeState,
                                           queue_size=10)
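
Only the publishers are wired up in this constructor; a subscriber callback would normally feed camera frames into the tracker. A hedged sketch of that callback, assuming a cv_bridge conversion from a sensor_msgs/Image topic (the method name and topic wiring are not part of the original snippet, and the GazeState fields are left aside):

    def image_callback(self, msg):
        # Convert the ROS image message to an OpenCV frame and analyze it
        frame = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        self.gaze.refresh(frame)
        # Republish the annotated frame when enabled
        if self.publish_annotated_frame:
            annotated = self.gaze.annotated_frame()
            self.annotated_frame_publisher.publish(
                self.cv_bridge.cv2_to_imgmsg(annotated, encoding="bgr8"))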
Example No. 5
def Eye_tracking(yolo):
    import cv2
    import numpy as np
    from PIL import Image
    cam = cv2.VideoCapture(0)
    cam.set(3, 1000)
    cam.set(4, 600)
    gaze = GazeTracking()
    while True:
        _, frame = cam.read()
        image = Image.fromarray(frame)
        res, Faces = yolo.detect_image_with_coord(image)

        output = np.array(res)

        for face in Faces:
            x1, y1, x2, y2 = face

            #Face_img = frame[y1:y2,(x1-10):(x2+10)]
            gaze.refresh(output)

            Ox1, Oy1, Ox2, Oy2 = gaze.annotated_frame(x1, y1)
            color = (0, 0, 255)
            cv2.line(output, (Ox1 - 5, Oy1), (Ox1 + 5, Oy1), color)
            cv2.line(output, (Ox1, Oy1 - 5), (Ox1, Oy1 + 5), color)
            cv2.line(output, (Ox2 - 5, Oy2), (Ox2 + 5, Oy2), color)
            cv2.line(output, (Ox2, Oy2 - 5), (Ox2, Oy2 + 5), color)

            # 60 130 165
            text = ""
            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"

            left_pupil = gaze.pupil_left_relative_coords()
            right_pupil = gaze.pupil_right_relative_coords()
            cv2.putText(output, text, (x1, y1 + 20), cv2.FONT_HERSHEY_DUPLEX,
                        0.8, (147, 58, 31), 2)
            cv2.putText(output, "Left pupil:  " + str(left_pupil),
                        (x1, y1 + 50), cv2.FONT_HERSHEY_DUPLEX, 0.4,
                        (147, 58, 31), 1)
            cv2.putText(output, "Right pupil: " + str(right_pupil),
                        (x1, y1 + 65), cv2.FONT_HERSHEY_DUPLEX, 0.4,
                        (147, 58, 31), 1)

        cv2.imshow("Camera", output)
        if cv2.waitKey(5) == 27:
            break

    yolo.close_session()
Example No. 6
def imagecov(photoname, relative_eye_size=1.5):
    global count
    '''
    Keep the image in the folder source_image and 
    put in the name of image in photoname
    '''
    sourcename = DIRNAME + '/source_img/' + photoname
    finalname = DIRNAME + '/static/' + str(count) + ".jpg"
    '''
    You can change the relative eye size to optimize the image further
    '''
    # relative_eye_size = 1.5

    gaze = GazeTracking()
    frame = cv2.imread(sourcename)

    # cv2.imshow("Demo1", frame)

    gaze.refresh(frame)
    frame = gaze.annotated_frame()

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    try:
        distance = (left_pupil[0] - right_pupil[0]) * (
            left_pupil[0] - right_pupil[0]
        ) + (left_pupil[1] - right_pupil[1]) * (left_pupil[1] - right_pupil[1])
    except:
        return False
    distance = np.sqrt(distance)
    print(distance)
    face_image = Image.open(sourcename)
    eye_image = Image.open(DIRNAME + '/source_img/redeye.png')

    eye_image = eye_image.resize((int(distance * 2 * relative_eye_size),
                                  int(distance * relative_eye_size)))
    eye_image = eye_image.rotate(15)

    Image.Image.paste(face_image, eye_image,
                      (left_pupil[0] - int(distance * relative_eye_size),
                       left_pupil[1] - int(distance * relative_eye_size / 2)),
                      eye_image)
    Image.Image.paste(face_image, eye_image,
                      (right_pupil[0] - int(distance * relative_eye_size),
                       right_pupil[1] - int(distance * relative_eye_size / 2)),
                      eye_image)
    count += 1
    # face_image.show()
    face_image.save(finalname)
    # eye_image.show()
    return True
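
A minimal usage sketch for the helper above, assuming DIRNAME, count and the source_img/static folders are already configured as in the snippet; the file name is hypothetical:

if imagecov("portrait.jpg", relative_eye_size=1.5):
    print("Red-eye overlay saved to the static folder")
else:
    print("Pupils not detected; nothing was written")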
Example No. 7
    def __init__(self):
        user32 = ctypes.windll.user32

        self.img_width = user32.GetSystemMetrics(0) // 5
        self.img_height = self.img_width // 2

        self.act = ""
        self.acts = []
        self.act_time = 0
        self.act_started = False
        self.act_ended = True
        self.act_start_time = time.time()
        self.act_stop_time = time.time()
        self.current_gesture = ""
        self.gesture_text = ""
        self.end_of_gesture = False
        self.gesture_end_start_time = time.time()
        self.gesture_end_stop_time = time.time()
        self.gests = dict()
        self.detection_of_end = False
        self.list_of_acts = {"Closed left": 0, "Closed right": 0, "Neither": 0}
        self.counter = 0
        self.time_of_output = 0
        self.end_of_display_image = True
        self.current_act = ""

        self.read_settings()

        self.gaze = GazeTracking()

        self.webcam = cv2.VideoCapture(self.webcam_number)
        self.webcam.set(3, self.webcam_width)
        self.webcam.set(4, self.webcam_height)

        self.screen_width = int(self.webcam.get(3))
        self.screen_height = int(self.webcam.get(4))

        window_name = "Aurelium"
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        self.dim = (self.img_width, self.img_height)

        self.load_graphics()

        self.run()

        cv2.destroyAllWindows()
Example No. 8
def adhdRes():

    #app3 code
    # p = multiprocessing.Process(target = ValuePredictor, name="valuePredictor", args=())
    # p.start()
    # time.sleep(10)
    # p.terminate()

    sns.set(style="ticks", context="talk")
    plt.style.use("dark_background")

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        if (left_pupil == (0, 0) or right_pupil == (0, 0)):
            pass
        else:
            plt.plot(left_pupil, right_pupil)

        cv2.imshow("Demo", frame)

        if cv2.waitKey(1) == ord(' '):
            break

    plt.savefig('2.png')

    img = open_image('./2.png')
    result = ValuePredictor(img)
    if result == 'ASD':
        prediction = "ADHD"
    else:
        prediction = "No ADHD"
    return render_template("./test/adhd.html", prediction=prediction)
Example No. 9
def gaze():

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    face_detect = 1

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        if face_detect:
            phonix_tracking.detect_video()

        frame = gaze.annotated_frame()
        text = ""

        # if gaze.is_blinking():
        #     text = "Blinking"
        #     print("blinking")

        if gaze.is_right():
            text = "Looking right"
            print("right")

        elif gaze.is_left():
            text = "Looking left"
            print("left")

        # elif gaze.is_center():
        #     text = "Looking center"
        #     print("center")


        # cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        # cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        # cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        # cv2.imshow("Demo", frame)

        # Press Esc (key 27) to exit the program
        if cv2.waitKey(1) == 27:
            break
Example No. 10
def eye_tracking(image_path):
    gaze = GazeTracking()
    frame = cv2.imread(image_path)
    gaze.refresh(frame)

    frame = gaze.annotated_frame()

    if gaze.is_right():
        value = 0.5
    elif gaze.is_left():
        value = 0.5
    elif gaze.is_center():
        value = 1
    else:
        value = 0
    return value
Example No. 11
def run_gazetracker(seconds):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    current_time = time.time()
    gaze_matrix = []
    while time.time() - current_time <= seconds:
        # We get a new frame from the webcam
        success, frame = webcam.read()
        if not success:
            print('NOT SUCCESSFUL')
            break
        else:

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"

            cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (147, 58, 31), 2)

            left_pupil = gaze.pupil_left_coords()
            right_pupil = gaze.pupil_right_coords()
            cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

            cv2.imshow("Demo", frame)
            gaze_matrix.append([time.time(), left_pupil, right_pupil])

            if cv2.waitKey(1) == 27:
                break
            print(left_pupil)
            print(right_pupil)
            print(gaze_matrix)
Example No. 12
    def get_frame(self):
        '''success, image = self.video.read()
        image=cv2.resize(image,None,fx=ds_factor,fy=ds_factor,interpolation=cv2.INTER_AREA)
        gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        face_rects=face_cascade.detectMultiScale(gray,1.3,5)
        for (x,y,w,h) in face_rects:
        	cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        	break
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()'''
        gaze = GazeTracking()
        webcam = cv2.VideoCapture(0)

        while True:
            # We get a new frame from the webcam
            _, frame = webcam.read()

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right, please look at the screen"
            elif gaze.is_left():
                text = "Looking left, please look at the screen"
            elif gaze.is_center():
                text = "Looking center"

            cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (147, 58, 31), 2)

            left_pupil = gaze.pupil_left_coords()
            right_pupil = gaze.pupil_right_coords()
            cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            '''cv2.imshow("Demo", frame)
            if cv2.waitKey(1) == 27:
                break'''
            #comment lower part and decomment upper part for unique tab
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
Example No. 13
	def __init__(self):
		# Load the parameters
		self.conf = config()
		# initialize dlib's face detector (HOG-based) and then create the
		# facial landmark predictor
		print("[INFO] loading facial landmark predictor...")
		self.detector = dlib.get_frontal_face_detector()
		self.predictor = dlib.shape_predictor(self.conf.shape_predictor_path)
		
		# grab the indexes of the facial landmarks for the left and
		# right eye, respectively
		(self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
		(self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
		
		# initialize the video stream and sleep for a bit, allowing the
		# camera sensor to warm up
		self.cap = cv2.VideoCapture(0)
		if self.conf.vedio_path == 0:
			self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
		_, sample_frame = self.cap.read()
		
		# Introduce mark_detector to detect landmarks.
		self.mark_detector = MarkDetector()
		
		# Setup process and queues for multiprocessing.
		self.img_queue = Queue()
		self.box_queue = Queue()
		self.img_queue.put(sample_frame)
		self.box_process = Process(target=get_face, args=(
			self.mark_detector, self.img_queue, self.box_queue,))
		self.box_process.start()
		
		# Introduce pose estimator to solve pose. Get one frame to setup the
		# estimator according to the image size.
		self.height, self.width = sample_frame.shape[:2]
		self.pose_estimator = PoseEstimator(img_size=(self.height, self.width))
		
		# Introduce scalar stabilizers for pose.
		self.pose_stabilizers = [Stabilizer(
			state_num=2,
			measure_num=1,
			cov_process=0.1,
			cov_measure=0.1) for _ in range(6)]
		
		self.tm = cv2.TickMeter()
		# Gaze tracking
		self.gaze = GazeTracking()
Example No. 14
def getEyeResults():
    gaze = GazeTracking()
    frame = cv2.imread("./images/analysis/proctor.png")
    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"
    print(text)
    return text
Example No. 15
def read_gaze(dirPath):
    # Reading sub-directories and files recursively is left for later;
    # for now, only the given directory is processed.
    print("[O] Read left eye gaze is running...")
    gaze = GazeTracking()
    left = []
    target_dir = dirPath
    fileList = os.listdir(target_dir)
    for file in fileList:
        image = Image.open(os.path.join(target_dir, file))
        data = np.asarray(image)
        # print(data.shape)     # (720 ,1280, 3)
        gaze.refresh(data)
        left_pupil = gaze.pupil_left_coords()
        left.append(left_pupil)
    print(left)
    print("[=] Read left eye gaze is closing...")
    return left
Example No. 16
def GazeYourEye(video, student):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(video)
    result = []
    while True:
        value, frame = webcam.read()
        if not value:
            break
        gaze.refresh(frame)

        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            result.append('B')
        elif gaze.is_right():
            result.append('R')
        elif gaze.is_left():
            result.append('L')
        elif gaze.is_center():
            result.append('C')

        if cv2.waitKey(1) == 27:
            break
    whole = len(result)

    ret = [
        round(result.count('C') / whole * 100, 2),
        round(result.count('B') / whole * 100, 2),
        round(result.count('L') / whole * 100, 2),
        round(result.count('R') / whole * 100, 2)
    ]

    student = Students.query.filter(
        Students.student_number == student.student_number)
    student.update({
        'eye_ratio_center': ret[0],
        'eye_ratio_blink': ret[1],
        'eye_ratio_left': ret[2],
        'eye_ratio_right': ret[3]
    })

    data = np.array([[ret[0], ret[1], ret[2], ret[3]]])
    [result] = load_model.predict(data)
    student.update({'eye_result': bool(result)})
    db.session.commit()
Example No. 17
def get_data():
    x = []
    y = []
    gaze = GazeTracking()
    for files in os.listdir('output/mouse_old3/'):
        if 'jpg' in files:
            continue
        obj = []
        txt = open(os.path.join('output/mouse_old3/',
                                files)).read().split('\n')
        center = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_center.jpg'))
        left = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_1.jpg'))
        right = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_2.jpg'))
        stop = False
        try:
            for im in [center, left, right]:
                gaze.refresh(im)
                if (gaze.pupil_left_coords() is None
                        or gaze.pupil_right_coords() is None):
                    stop = True
                    break
                obj.extend([
                    gaze.eye_left.center[0],
                    gaze.eye_left.center[1],
                    gaze.eye_right.center[0],
                    gaze.eye_right.center[1],
                    gaze.pupil_left_coords()[0],
                    gaze.pupil_left_coords()[1],
                    gaze.pupil_right_coords()[0],
                    gaze.pupil_right_coords()[1],
                ])
            if stop:
                continue
            for line in txt:
                if len(line.split(',')) == 2:
                    a, b = line.split(',')
                    y.append(np.array((float(a) / 1920, float(b) / 1080)))
            x.append(np.array(obj))
        except cv2.error:
            pass
    return np.array(x, dtype='float32'), np.array(y)
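
The feature matrix x (eye centers plus pupil coordinates) and target matrix y (normalized screen positions) are presumably fed to a regressor downstream. One possible way to do that, sketched with scikit-learn purely as an assumption about the next step:

from sklearn.neural_network import MLPRegressor

x, y = get_data()
# Map eye/pupil features to normalized screen positions,
# assuming x and y line up one sample per capture.
model = MLPRegressor(hidden_layer_sizes=(64, 64), max_iter=2000).fit(x, y)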
Example No. 18
def startCam():
    import cv2
    from gaze_tracking import GazeTracking
    import time

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    startTime = time.time()
    totalFrames = 0
    framesDistracted = 0
    framesFocused = 0

    while True:
        _, frame = webcam.read()
        totalFrames += 1
        gaze.refresh(frame)
        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            framesDistracted += 1
        elif gaze.is_right():
            framesDistracted += 1
        elif gaze.is_left():
            framesDistracted += 1
        elif gaze.is_center():
            framesFocused += 1
        else:
            framesDistracted += 1

        cv2.imshow("Camera", frame)

        if cv2.waitKey(1) == ord('q'):
            break

    webcam.release()
    cv2.destroyAllWindows()

    totalTime = truncate(time.time() - startTime, 2)
    percentFocused = truncate((framesFocused / totalFrames) * 100, 2)
    percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)

    return totalTime, percentFocused, percentDistracted
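
truncate() is not defined in this snippet; from its usage it appears to cut a float to a fixed number of decimals. A plausible stand-in under that assumption:

import math


def truncate(number, digits):
    # Drop (rather than round) everything past the requested number of decimals
    factor = 10 ** digits
    return math.trunc(number * factor) / factor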
Example No. 19
    def __init__(self):
        self.model = model_face()
        self.model.load_weights('model/model.h5')

        self.gaze = GazeTracking()

        # cv2.ocl.setUseOpenCL(True)

        self.emotion_dict = {
            0: "Angry",
            1: "Disgusted",
            2: "Fearful",
            3: "Happy",
            4: "Neutral",
            5: "Sad",
            6: "Surprised"
        }

        self.cap = cv2.VideoCapture(0)
        self.facecasc = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
Example No. 20
    def eyeTrack(self):

        gaze = GazeTracking()
        blinkCount = 0

        while True:

            # Grab a single frame of video
            ret, frame = self.video_capture.read()

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Goz Kirpildi"  # "Blinked"
                blinkCount += 1
            elif gaze.is_right():
                text = "Saga Bakildi"  # "Looked right"
            elif gaze.is_left():
                text = "Sola Bakildi"  # "Looked left"
            elif gaze.is_center():
                text = "Merkeze Bakildi"  # "Looked at the center"

            cv2.putText(frame, text, (0, 30), cv2.FONT_HERSHEY_DUPLEX, 1,
                        (147, 58, 31), 2)

            # Display the resulting image
            cv2.imshow('Video', frame)
            print("Goz Kırpma: " + str(blinkCount))

            if blinkCount >= 3:
                return 1

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example No. 21
        #           gaze2.pupil_left_coords()[0], gaze2.pupil_left_coords()[1], gaze2.pupil_right_coords()[0], gaze2.pupil_right_coords()[1],
        #           gazecenter.eye_left.center[0], gazecenter.eye_left.center[1], gazecenter.eye_right.center[0], gazecenter.eye_right.center[1],
        #           gazecenter.pupil_left_coords()[0], gazecenter.pupil_left_coords()[1], gazecenter.pupil_right_coords()[0], gazecenter.pupil_right_coords()[1],
        #           ]
        # samples.append(np.array(sample, dtype='float32'))
        # targets.append(np.array([x / 1920, y / 1080], dtype='float32'))
        return


if __name__ == '__main__':
    raw_show = np.zeros((1080, 1920, 3), dtype='uint8')
    cv2.namedWindow("d", cv2.WND_PROP_FULLSCREEN)
    cv2.setMouseCallback('d', draw_events)
    cv2.setWindowProperty("d", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow('d', raw_show)
    gaze1 = GazeTracking()
    gaze2 = GazeTracking()
    gazecenter = GazeTracking()
    webcam1 = cam.WebcamThread(0, "Face detector 1").start()

    # webcam1.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    # webcam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    n = len(os.listdir('/home/palm/PycharmProjects/true/output/mouse')) // 3
    samples = []
    targets = []
    trained = False
    while True:
        # We get a new frame from the webcam
        t = time.time()
        try:
            _, frame1 = webcam1.read()
Example No. 22
import cv2.cv2 as cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking(0)


def analyze(frame):

    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""
    eye_position = 0

    if gaze.is_blinking():
        text = "Blinking"
        eye_position = 0
    elif gaze.is_left():
        text = "Looking left"
        eye_position = 1
    elif gaze.is_center():
        text = "Looking center"
        eye_position = 2
    elif gaze.is_right():
        text = "Looking right"
        eye_position = 3
    elif gaze.not_found_face():
        text = "Not found face"
        eye_position = -1

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
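
The snippet is cut off right after the pupil lookup; a minimal driver loop for the analyze() helper above, assuming a local webcam at index 0, could look like:

if __name__ == "__main__":
    webcam = cv2.VideoCapture(0)
    while True:
        ok, frame = webcam.read()
        if not ok:
            break
        analyze(frame)  # updates the module-level gaze state for this frame
        if cv2.waitKey(1) == 27:  # Esc quits
            break
    webcam.release()
    cv2.destroyAllWindows()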
Example No. 23
def get_data():
    l = []
    center_left = []
    center_right = []
    l_x = []
    l_y = []
    r_x = []
    r_y = []
    d = {'Time': [], 'Left eye': [], 'Right eye': []}

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            l.append(datetime.datetime.now())
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking center"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        try:
            l_x.append(gaze.pupil_left_coords()[0])
            l_y.append(gaze.pupil_left_coords()[1])
            r_x.append(gaze.pupil_right_coords()[0])
            r_y.append(gaze.pupil_right_coords()[1])
        except:
            l_x.append(0)
            l_y.append(0)
            r_x.append(0)
            r_y.append(0)
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        #print((left_pupil,right_pupil))
        try:
            d['Left eye'].append((left_pupil[0], left_pupil[1]))
            d['Right eye'].append((right_pupil[0], right_pupil[1]))
            d['Time'].append(datetime.datetime.now())
        except:
            d['Left eye'].append(0)
            d['Right eye'].append(0)
            d['Time'].append(datetime.datetime.now())

        cv2.imshow("Frame", frame)

        if cv2.waitKey(1) == 27:
            break

    eye_coordinates = pd.DataFrame(d)
    eye_coordinates.columns = ['Time', 'Left eye', 'Right eye']
    eye_blinking = pd.Series(l)
    return eye_coordinates  #,eye_blinking,center_right,center_left,l_x,l_y,r_x,r_y
Example No. 24
def main():
    m = get_monitors()[0]
    h, w = m.height, m.width
    h = (h // 3) - 45  #adjust for different screen size
    w = (w // 3) - 35

    sg.theme('DarkGrey3')
    layout = [[
        sg.Frame('', [[
            sg.Image(
                'images/test.png', size=(w, h), key='img' + str(fr * 3 + fc))
        ]],
                 pad=(5, 5),
                 background_color='yellow',
                 key='frm' + str(fr * 3 + fc)) for fc in range(3)
    ] for fr in range(3)]
    layout.extend([[sg.Button('Exit', size=(10, 1), font='Helvetica 14')]])
    #update image with update(filename='')
    #enable events for images so they're clickable
    window = sg.Window('GazePOD', layout, finalize=True, resizable=True)
    #window.Maximize()

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    calibrate(gaze, webcam, window)

    h = []
    v = []
    d = []
    i = 0
    while True:
        #get event or timeout
        e, val = window.read(timeout=TO)
        if e == 'Exit' or e == sg.WIN_CLOSED:
            break
        elif e == 'Calibrate':
            calibrate(gaze, webcam, window)
            continue

        #get frame and gaze
        _, frame = webcam.read()
        gaze.refresh(frame)
        sh = gaze.horizontal_ratio()
        sv = gaze.vertical_ratio()

        #add to queue
        if sh is not None:
            h.append(sh)
            if len(h) > QLEN:
                h.pop(0)
        else:
            continue
        if sv is not None:
            v.append(sv)
            if len(v) > QLEN:
                v.pop(0)
        else:
            continue

        #get average gaze direction
        if i == QLEN:
            i = -QLEN
            s = get_dir(mean(h), mean(v))
            key = 'img' + str(s)
            window[key].update(visible=False)
            d.append(s)
            if len(d) > DLEN:
                d.pop(0)
        else:
            i += 1
            continue

        #select image
        if d.count(d[0]) == len(d):
            #select direction
            key = 'img' + str(d[0])
            window[key].update(visible=True)
        else:
            #set background color of d[0]
            pass

    window.close()
Example No. 25
def main(args):
    filename = args["input_file"]
    faceCascade = cv2.CascadeClassifier(
        'models/haarcascade_frontalface_default.xml')
    model = load_model('models/facenet_keras.h5')

    if filename is None:
        isVideo = False
        webcam = cv2.VideoCapture(0)
        webcam.set(3, args['wh'][0])
        webcam.set(4, args['wh'][1])
    else:
        isVideo = True
        webcam = cv2.VideoCapture(filename)
        fps = webcam.get(cv2.CAP_PROP_FPS)
        width = int(webcam.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(webcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        name, ext = osp.splitext(filename)
        out = cv2.VideoWriter(args["output_file"], fourcc, fps,
                              (width, height))

    # Variable Setting
    hpd = headpose.HeadposeDetection(
        args["landmark_type"], args["landmark_predictor"])  # head-pose detector
    gaze = GazeTracking()  # gaze tracker
    yellocard = 0
    redcard = 0
    tempval = 0
    timee = int(input("시험 시간을 입력하세요(Minute): "))  # Korean prompt: "Enter the exam duration (minutes)"
    max_time_end = time.time() + (60 * timee)

    # Infinity Loop for Detect Cheating for Online test
    while (webcam.isOpened()):

        ret, frame = webcam.read()  # Read a frame from the webcam
        gaze.refresh(frame)
        frame = gaze.annotated_frame()  # Mark pupil for frame

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=3,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)  # face structure

        # Get point from pupil
        if gaze.is_blinking():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_right():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_left():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        elif gaze.is_center():
            yellocard = yellocard - 1
            yellocard = notnegative(yellocard)
        else:
            yellocard = yellocard + 2

        # Red-card escalation
        if yellocard > 50:
            yellocard = 0
            tempval = tempval + 1
            redcard = redcard + 1

        # After the first red card, give an aural and text warning (repeats)
        if tempval == 1:
            text1 = "WARNING"
            cv2.putText(frame, text1, (10, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                        (0, 0, 255), 2)
            my_thread = threading.Thread(target=Sound)
            my_thread.start()
            tempval = 0

    # If you are not in a GPU environment, comment out this block ------------
    # After the second red card, show a picture warning (once)
        if redcard == 2:
            warn_img = cv2.imread("Warning/warning.png", cv2.IMREAD_COLOR)
            cv2.imshow('Warning', warn_img)
            cv2.waitKey(1)
            redcard = 2.1
    # -----------------------------------------------------------------------
    # Log continuously
        print("<< *의심수준:", yellocard, " || ", "*경고횟수:", redcard, " >>")  # suspicion level / warning count

        #Detect head position
        if isVideo:
            frame, angles = hpd.process_image(frame)
            if frame is None:
                break
            else:
                out.write(frame)
        else:
            frame, angles = hpd.process_image(frame)

            if angles is None:
                pass
            else:  #angles = [x,y,z] , get point from headposition
                if angles[0] > 15 or angles[0] < -15 or angles[
                        1] > 15 or angles[1] < -15 or angles[2] > 15 or angles[
                            2] < -15:
                    yellocard = yellocard + 2
                else:
                    yellocard = yellocard - 1
                    yellocard = notnegative(yellocard)

        yellocard = yellocard + hpd.yello(frame)
        if yellocard < 0:
            yellocard = notnegative(yellocard)

    # Draw a rectangle around the faces and predict the face name
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # take the face pixels from the frame
            crop_frame = frame[y:y + h, x:x + w]
            # turn the face pixels back into an image and resize it to meet
            # the size requirement of facenet
            new_crop = Image.fromarray(crop_frame)
            new_crop = new_crop.resize((160, 160))
            # turn the image back into a tensor
            crop_frame = np.asarray(new_crop)
            # get the face embedding using the facenet model
            face_embed = get_embedding(model, crop_frame)
            # it is a 1d array; reshape it into a 2d tensor for the SVM
            face_embed = face_embed.reshape(-1, face_embed.shape[0])
            # predict using our SVM model; pred_prob has probabilities of each class
            pred = svm.predict(face_embed)
            pred_prob = svm.predict_proba(face_embed)

            # get name
            class_index = pred[0]
            class_probability = pred_prob[0, class_index] * 100
            predict_names = out_encoder.inverse_transform(pred)
            text = 'Predicted: %s (%.3f%%)' % (predict_names[0],
                                               class_probability)

            #add the name to frame but only if the pred is above a certain threshold
            if (class_probability > 70):
                cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                            (0, 0, 255), 2)

        # Display the resulting frame
            cv2.imshow('POCAS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("관리자에 의해 시험이 강제 종료 되었습니다")
            PrintResult(yellocard, redcard)
            Fail(timee, redcard)
            break
        elif time.time() > max_time_end:
            print(timee, "분의 시험이 종료되었습니다.")  # "The {timee}-minute exam has ended."
            PrintResult(yellocard, redcard)
            Fail(timee, redcard)
            break

    # When everything done, release the webcam
    webcam.release()
    if isVideo:
        out.release()
        cv2.destroyAllWindows()
Example No. 26
def calculate_cog_load():
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    pupil_position = Pupil_position('center', 0)

    t = dt.datetime.now()
    start_time = dt.datetime.now()
    blink_count = 0
    saccades = 0
    pupil_dilation_x = []
    pupil_dilation_y = []
    fixations = [0]
    minute = 0
    blink_rate = 0
    saccades_rate = 0
    pup_dil_x = 0
    pup_dil_y = 0
    fixation_avg = 0
    cogload = 0

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        response = requests.get(
            "https://api.fitbit.com/1/user/7QCRW3/activities/heart/date/today/today.json",
            headers=header).json()

        frame = gaze.annotated_frame()
        text = ""

        horizontal_ratio = gaze.horizontal_ratio()
        vertical_ratio = gaze.vertical_ratio()

        if horizontal_ratio is not None:
            pupil_dilation_x.append(horizontal_ratio)

        if vertical_ratio is not None:
            pupil_dilation_y.append(vertical_ratio)

        if gaze.is_blinking():
            text = "Blinking"
            blink_count = blink_count + 1

        elif gaze.is_right():
            delta = dt.datetime.now() - t
            position = Pupil_position('right', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking right"

        elif gaze.is_left():
            delta = dt.datetime.now() - t
            position = Pupil_position('left', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking left"

        elif gaze.is_center():
            delta = dt.datetime.now() - t
            position = Pupil_position('center', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking center"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()

        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Blink Rate: " + str(blink_rate), (90, 195),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Saccades Rate: " + str(saccades_rate), (90, 225),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation x: " + str(pup_dil_x), (90, 255),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation y: " + str(pup_dil_y), (90, 285),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Fixation: " + str(fixation_avg), (90, 315),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Cognitive Load: " + str(cogload), (90, 345),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        delta = dt.datetime.now() - t

        elapsed_time = dt.datetime.now() - start_time
        elapsed_time_second = elapsed_time.seconds

        cv2.putText(frame, "Elapsed Time: " + str(elapsed_time_second),
                    (90, 375), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.imshow("Demo", frame)

        if delta.seconds >= 10:
            minute = minute + 1
            blink_rate = blink_count / 10
            saccades_rate = saccades / 10

            Not_none_values_x = filter(None.__ne__, pupil_dilation_x)
            Not_none_values_y = filter(None.__ne__, pupil_dilation_y)

            pupil_dilation_x = list(Not_none_values_x)
            pupil_dilation_y = list(Not_none_values_y)

            pup_dil_x = sum(pupil_dilation_x) / len(pupil_dilation_x)
            pup_dil_y = sum(pupil_dilation_y) / len(pupil_dilation_y)

            fixation_avg = sum(fixations) / len(fixations)

            blink_count = 0
            saccades = 0

            pupil_position = Pupil_position('center', 0)

            t = dt.datetime.now()

            pupil_dilation_x = []
            pupil_dilation_y = []
            fixations = [0]

            print(
                response['activities-heart-intraday']['dataset'][-1]['value'])

            cogload = blink_rate   \
                + math.sqrt(pup_dil_x * pup_dil_x + pup_dil_y * pup_dil_y) \
                + saccades_rate \
                - fixation_avg

            print(blink_rate)
            print(pup_dil_x)
            print(pup_dil_y)
            print(saccades_rate)
            print(fixation_avg)
            print(cogload)
            write_csv('data.csv', minute, blink_rate, pup_dil_x, pup_dil_y,
                      fixation_avg, saccades_rate, cogload)

        if cv2.waitKey(33) == 27:
            break
        time.sleep(0.25)
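
The cognitive-load score computed every 10 seconds above combines blink rate, pupil-dilation magnitude, saccade rate and average fixation; restated as a small helper (same formula, only pulled out for clarity):

import math


def cognitive_load(blink_rate, pup_dil_x, pup_dil_y, saccades_rate, fixation_avg):
    # blink rate + pupil-dilation magnitude + saccade rate - average fixation
    return (blink_rate
            + math.sqrt(pup_dil_x ** 2 + pup_dil_y ** 2)
            + saccades_rate
            - fixation_avg)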
Example No. 27
    'blink_threshold', 'bf_timestep'
])

### Initialize headpose-estimator
face_d = FaceDetector()

sess = onnxruntime.InferenceSession(
    f'headpose/pretrained/fsanet-1x1-iter-688590.onnx')

sess2 = onnxruntime.InferenceSession(
    f'headpose/pretrained/fsanet-var-iter-688590.onnx')

print("ONNX models loaded")

# initialize gaze tracking
gaze = GazeTracking(BLINK_THRESHOLD)

# capture video from file
# cap = cv2.VideoCapture(f'{MEDIA_PATH}{VIDEO_NAME}')
cap = cv2.VideoCapture(f'{VIDEO_PATH}')

# get fps of video file
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'XVID')

# We need to set resolutions.
# so, convert them from float to integer.
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)
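
The snippet stops after gathering the writer parameters; the natural continuation (with an assumed output file name) is to create the VideoWriter and feed annotated frames into it:

out = cv2.VideoWriter('annotated_output.avi', fourcc, fps, size)

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    gaze.refresh(frame)
    out.write(gaze.annotated_frame())

cap.release()
out.release()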
Example No. 28
def eyeGaze():
    gaze = GazeTracking()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        help='Path to input image or video file. Skip this argument to capture frames from a camera.'
    )

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "pretrained_model/shape_predictor_68_face_landmarks.dat")

    args = parser.parse_args()
    cap = cv2.VideoCapture(args.i if args.i else 0)

    while cv2.waitKey(1) < 0:
        t = time.time()
        ret, frame = cap.read()
        if not ret:
            cv2.waitKey()
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = detector(gray)
        if faces is not None:
            i = np.zeros(shape=(frame.shape), dtype=np.uint8)
        for face in faces:
            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Blinking"
            elif gaze.is_right():
                text = "Looking right"
            elif gaze.is_left():
                text = "Looking left"
            elif gaze.is_center():
                text = "Looking center"
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(frame, (left, top), (right, bottom), (147, 58, 31),
                          2)
            cv2.rectangle(frame, (left, bottom - 10), (right, bottom),
                          (147, 58, 31), cv2.FILLED)
            cv2.putText(frame, text, (left + 2, bottom - 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        #cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1, (147, 58, 31), 2)
        '''left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.7, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.7, (147, 58, 31), 1)'''

        cv2.imshow("Demo", frame)

        print("Time : {:.3f}".format(time.time() - t))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('[INFO] Stopping System')
    cap.release()
    cv2.destroyAllWindows()
Example No. 29
import cv2.cv2 as cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking(1)


def analyze(frame):

    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""
    eye_position = 0

    if gaze.is_blinking():
        text = "Blinking"
        eye_position = 0
    elif gaze.is_left():
        text = "Looking left"
        eye_position = 1
    elif gaze.is_center():
        text = "Looking center"
        eye_position = 2
    elif gaze.is_right():
        text = "Looking right"
        eye_position = 3
    elif gaze.not_found_face():
        text = "Not found face"
        eye_position = -1

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                (147, 58, 31), 2)
Example No. 30
def gaze():

    gaze = GazeTracking()
    # webcam = cv2.VideoCapture(0)
    distraction_point = 0
    tm = time.localtime()
    photo_block = []
    # loop state (without these, the first pass falls straight into the except clause below)
    curr_location = [0, 0]
    prev_second2 = -1
    distance = []

    while True:
        # We get a new frame from the webcam
        # _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        # gaze.refresh(frame)

        # frame = gaze.annotated_frame()
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            # print("blinking")

        if gaze.is_right():
            text = "Looking right"
            # print("right")

        elif gaze.is_left():
            text = "Looking left"
            # print("left")

        elif gaze.is_center():
            text = "Looking center"
            # print("center")

        ## Modified section starts here

        hori_ratio = gaze.horizontal_ratio()
        verti_ratio = gaze.vertical_ratio()

        try:

            if curr_location == [0, 0]:
                curr_location = [hori_ratio, verti_ratio]
                print(curr_location)
            else:
                prev_location = curr_location
                curr_location = [hori_ratio, verti_ratio]
                hori_diff = curr_location[0] - prev_location[0]
                verti_diff = curr_location[1] - prev_location[1]

                if prev_second2 == -1:
                    prev_second2 = tm.tm_sec
                    print(prev_second2)
                else:
                    curr_second2 = tm.tm_sec
                    if curr_second2 - prev_second2 == 1 or curr_second2 - prev_second2 < 0:
                        distance.append((hori_diff**2) + (verti_diff**2))
                        prev_second2 = curr_second2

                        if len(photo_block) < 3:
                            photo_block.append((hori_diff**2))

                # the len(distance) and sum(distance) thresholds are arbitrary values
                if len(distance) > 59:
                    if sum(distance) > 1:
                        print('주의 산만')  # "distracted"
                        distraction_point += 1
                        distance = distance[1:]

        except:
            curr_location = [0.5, 0.5]