Esempio n. 1
0
	def __init__(self):
		"""Calibrate by locating the user's eye center while they look at
		the center of the screen.

		Opens the default webcam, computes the camera-image center, parks
		the mouse at the screen center, then loops until GazeTracking
		reports both pupils; their average position is stored in
		``self.coarse_eyeCenter``.  Press Esc to abort.

		Sets:
			self.imageCenter: (width/2, height/2) of the camera image.
			self.coarse_eyeCenter: averaged (x, y) of the two pupils
				(only set when calibration succeeds).
		"""
		print("Calibrating: look at center of screen")
		time.sleep(2)  # give the user a moment to fixate on the center
		print("Calibrating...")
		gaze = GazeTracking()
		webcam = cv2.VideoCapture(0)

		imageWidth = webcam.get(3)   # CAP_PROP_FRAME_WIDTH (float)
		imageHeight = webcam.get(4)  # CAP_PROP_FRAME_HEIGHT (float)

		self.imageCenter = (imageWidth / 2, imageHeight / 2)
		print("image center", self.imageCenter)

		mouse = Controller()
		# NOTE(review): screen size is hard-coded -- assumes a 2560x1080 display.
		screenCenter = [2560 / 2, 1080 / 2]
		mouse.position = tuple(screenCenter)

		try:
			while True:
				# Grab a frame and let GazeTracking locate the pupils.
				_, frame = webcam.read()
				gaze.refresh(frame)

				left_pupil = gaze.pupil_left_coords()
				right_pupil = gaze.pupil_right_coords()

				if left_pupil is not None and right_pupil is not None:
					# Midpoint of the two pupils = coarse eye center.
					self.coarse_eyeCenter = np.average(
						[left_pupil, right_pupil], axis=0)
					print(self.coarse_eyeCenter)
					break

				if cv2.waitKey(1) == 27:  # Esc aborts calibration
					break
		finally:
			# Fix: release the camera so later VideoCapture calls can reopen it.
			webcam.release()
Esempio n. 2
0
    def __init__(self):
        """Calibrate by locating the user's eye center while they look at
        the center of the screen.

        Opens the default webcam at a requested 2560x1080 resolution and
        loops until GazeTracking reports both pupils; their average
        position is stored in ``self.eyeCenter``.  Press Esc to abort.
        """
        print("Calibrating: look at center of screen")
        time.sleep(2)  # give the user a moment to fixate on the center
        print("Calibrating...")
        gaze = GazeTracking()
        webcam = cv2.VideoCapture(0)
        webcam.set(3, 2560)  # CAP_PROP_FRAME_WIDTH
        webcam.set(4, 1080)  # CAP_PROP_FRAME_HEIGHT

        try:
            while True:
                # Grab a frame and let GazeTracking locate the pupils.
                _, frame = webcam.read()
                gaze.refresh(frame)

                left_pupil = gaze.pupil_left_coords()
                right_pupil = gaze.pupil_right_coords()

                if left_pupil is not None and right_pupil is not None:
                    # Midpoint of the two pupils = calibrated eye center.
                    self.eyeCenter = np.average([left_pupil, right_pupil],
                                                axis=0)
                    print(self.eyeCenter)
                    break

                if cv2.waitKey(1) == 27:  # Esc aborts calibration
                    break
        finally:
            # Fix: release the camera so later VideoCapture calls can reopen it.
            webcam.release()
Esempio n. 3
0
def main():
    """Watch the webcam for gaze presence changes and toggle Zoom cameras.

    Keeps two sliding windows of recent pupil samples.  When the user's
    eyes transition between fully visible and fully absent across the two
    windows, zoom is activated, the active camera is swapped, and zoom is
    deactivated again.
    """
    camera = Cameras.EXTERNAL
    bufferSize = 20  # larger => longer delay before a camera switch (~10 s now)
    counter = 0
    mod = 3  # only analyze every 3rd frame

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(Cameras.EXTERNAL.value)

    # Two sliding windows of (left, right) pupil samples, seeded non-None.
    prev_pupils = [(1.0, 1.0)] * bufferSize
    curr_pupils = [(1.0, 1.0)] * bufferSize

    while True:
        _, frame = webcam.read()

        if counter % mod == 0:
            gaze.refresh(frame)
            sample = (gaze.pupil_left_coords(), gaze.pupil_right_coords())

            # Shift the oldest "current" sample into the "previous" window,
            # then append the fresh sample.
            prev_pupils.pop(0)
            prev_pupils.append(curr_pupils.pop(0))
            curr_pupils.append(sample)

            went_absent = listAllNone(curr_pupils) and listNotNone(prev_pupils)
            went_present = listNotNone(curr_pupils) and listAllNone(prev_pupils)

            if went_absent or went_present:
                activateZoom()
                camera = (Cameras.BUILT_IN if camera == Cameras.EXTERNAL
                          else Cameras.EXTERNAL)
                toggleCamera(camera)
                deactivateZoom()

        counter += 1
Esempio n. 4
0
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import math
import queue

import cv2
from GazeTracking.gaze_tracking import GazeTracking
from Worker import Worker

gaze = GazeTracking()

# settings
"""
video_path = "videos/zoom_fail.mp4"
frame_width = 320
frame_height = 200
y_padding = 80
frames_positions = [(0, 1), (0, 3), (2, 0), (2, 3)]
"""

video_path = "videos/checkout.mp4"
frame_width = 960
frame_height = 540
y_padding = 0
frames_positions = [(0, 0), (0, 1), (1, 0), (1, 1)]

capture = cv2.VideoCapture(video_path)

# create and start threads
Esempio n. 5
0
    def MoveMouse(self):
        """Drive the mouse pointer from the tracked eye position.

        Each frame blends a PID correction (based on how far the pointer
        drifted from the last commanded change) with the eye offset from
        the calibrated ``self.imageCenter``.  Stops when the pointer would
        leave the (hard-coded 2560x1080) screen or when Esc is pressed.

        Requires ``self.imageCenter`` to have been set by calibration.
        """
        webcam = cv2.VideoCapture(0)
        mouse = Controller()
        # NOTE(review): screen size is hard-coded -- assumes a 2560x1080 display.
        screenCenter = [2560 / 2, 1080 / 2]
        mouse.position = tuple(screenCenter)
        # Fix: use one PID controller per axis.  The original shared a
        # single instance for X and Y, so each call mixed both axes'
        # error histories into one integral/derivative state.
        pid_x = PID(.5, .5, 0.05, setpoint=1)
        pid_y = PID(.5, .5, 0.05, setpoint=1)

        scaledChange = [0, 0]  # last commanded (dx, dy) from screen center

        while True:
            # PID terms: pointer drift relative to the last commanded change.
            controlChangeX = pid_x((mouse.position[0] - screenCenter[0]) -
                                   scaledChange[0])
            controlChangeY = pid_y((screenCenter[1] - mouse.position[1]) -
                                   scaledChange[1])
            # Grab a frame and mirror it so eye motion matches pointer motion.
            _, frame = webcam.read()
            frame = cv2.flip(frame, 1)

            eye_coord = EyeTracker().trackeyes(frame)

            if eye_coord is not None:
                # Average the two detected eye points into one gaze point.
                newCoord = np.average([eye_coord[0], eye_coord[1]], axis=0)
                changeX = newCoord[0] - self.imageCenter[0]
                changeY = newCoord[1] - self.imageCenter[1]

                # Blend PID output with the axis-scaled raw eye offset.
                scaledChange = np.average(
                    [[controlChangeX, controlChangeY],
                     [changeX * 40, changeY * 10]],
                    axis=0)

                newPos = np.add(screenCenter, scaledChange)

                if 0 < newPos[0] < 2560 and 0 < newPos[1] < 1080:
                    mouse.position = newPos
                else:
                    break  # target left the screen: stop tracking

            print(mouse.position)

            if cv2.waitKey(1) == 27:  # Esc quits
                break
import cv2
from GazeTracking.gaze_tracking import GazeTracking
from i3if import i3focus
import numpy as np

# Module-level tracker and default webcam shared by main() below.
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)


def main():
    focus = i3focus()
    state = 'left'
    counter = 0

    while True:
        counter += 1
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()
        text = ""

        look = gaze.horizontal_ratio()
        if look is not None:
            if look > 0.68 and state != "left":
                focus.on_eye_change(1)
                state = 'left'
            if look < 0.61 and state != 'right':
Esempio n. 7
0
import cv2
from GazeTracking.gaze_tracking import GazeTracking

# Minimal demo: show the raw webcam feed and the gaze-annotated feed
# side by side until Esc is pressed.
gaze = GazeTracking()
cap = cv2.VideoCapture(0)

if not cap.isOpened():
    raise IOError("Cannot open webcam")

while True:
    ret, frame = cap.read()
    # Fix: stop cleanly when the camera fails to deliver a frame instead
    # of passing None to imshow()/refresh() and crashing.
    if not ret:
        break
    cv2.imshow('Input', frame)

    gaze.refresh(frame)
    new_frame = gaze.annotated_frame()
    cv2.imshow("Demo", new_frame)

    if cv2.waitKey(1) == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
Esempio n. 8
0
    def MoveMouse(self):
        """Follow the user's pupils with the mouse pointer.

        Reads frames from the default webcam, averages the two pupil
        positions reported by GazeTracking, scales them toward a
        2560x1080 desktop, and nudges the result with a per-axis PID
        correction.  Shows an annotated preview window; Esc quits.
        """
        gaze = GazeTracking()
        webcam = cv2.VideoCapture(0)
        webcam.set(3, 2560)  # CAP_PROP_FRAME_WIDTH
        webcam.set(4, 1080)  # CAP_PROP_FRAME_HEIGHT
        mouse = Controller()
        # Start at the screen center (2560x1080 display assumed).
        newPos = [2560 / 2, 1080 / 2]
        mouse.position = tuple(newPos)
        # Fix: one PID controller per axis.  The original shared a single
        # instance for X and Y, mixing both axes' error histories in its
        # integral/derivative state.
        pid_x = PID(1, 0.1, 0.05, setpoint=1)
        pid_y = PID(1, 0.1, 0.05, setpoint=1)
        # Fix: removed the blink-history list, which grew without bound
        # (memory leak) and whose average was never used.

        while True:
            # Axis-weighted PID correction toward the last target position.
            controlX = 4 * pid_x(mouse.position[0] - newPos[0])
            controlY = 1.8 * pid_y(mouse.position[1] - newPos[1])

            # Grab a frame and mirror it so motion matches the pointer.
            _, frame = webcam.read()
            frame = cv2.flip(frame, 1)

            gaze.refresh(frame)
            frame = gaze.annotated_frame()

            left_pupil = gaze.pupil_left_coords()
            right_pupil = gaze.pupil_right_coords()
            cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
            cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                        cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

            cv2.imshow("Demo", frame)

            if left_pupil is not None and right_pupil is not None:
                # Average pupil position, scaled from camera to screen space.
                newCoord = np.average([left_pupil, right_pupil], axis=0)
                newCoord[0] *= 4
                newCoord[1] *= 1.8

                changeX = controlX * 10
                changeY = controlY * 10
                print(changeX, changeY)

                newPos = (newCoord[0] + changeX, newCoord[1] + changeY)
                mouse.position = newPos

            if cv2.waitKey(1) == 27:  # Esc quits
                break
Esempio n. 9
0
	def MoveMouse(self):
		"""Move the mouse pointer to follow the user's gaze.

		Each frame: a PID correction pulls the pointer back toward the
		screen center, the pupil offset from the calibrated
		``self.imageCenter`` pushes it toward the gaze point, and a
		sustained blink triggers a left click.  Esc quits.
		NOTE(review): the screen size is hard-coded to 2560x1080 --
		confirm against the actual display.
		"""
		gaze = GazeTracking()
		webcam = cv2.VideoCapture(0)
		# webcam.set(3, 2560);
		# webcam.set(4, 1080);
		mouse = Controller()
		screenCenter = [2560/2, 1080/2]
		# mouse.position = tuple(screenCenter)
		scaleFactor = 1.2  # NOTE(review): unused in this method
		# NOTE(review): one PID instance serves both axes, so its internal
		# state mixes X and Y errors -- confirm this is intended.
		pid = PID(.2, .2, 0.01, setpoint=1)
		eyeStateLIST = []  # recent blink samples (1 = blinking, 0 = open)

		scaledChange = [0,0]  # last commanded (dx, dy) from screen center

		while True:
			# print(changeX)
			# Feed the PID the pointer's drift relative to the last command.
			controlChangeX = pid((mouse.position[0] - screenCenter[0]) - scaledChange[0])
			controlChangeY = pid((screenCenter[1] - mouse.position[1]) - scaledChange[1])
			# We get a new frame from the webcam
			_, webcam_frame = webcam.read()

			# NOTE(review): two FaceTracker objects are constructed each
			# frame; the first is discarded and face_center is never used.
			FaceTracker()
			face_center = FaceTracker().trackface(webcam_frame)
			# print(face_center)
			# FaceTracker()
			# face_frame = FaceTracker().trackeyes(webcam_frame)
			# face_frame = FaceTracker().get_face_frame()
			frame = cv2.flip(webcam_frame, 1)  # mirror so motion feels natural

			# We send this frame to GazeTracking to analyze it
			gaze.refresh(frame)

			frame = gaze.annotated_frame()
			text = ""
			eyeState = ""
			# Record whether this frame counts as a blink.
			if gaze.is_blinking():
				eyeState = "Blinking"
				eyeStateNum = 1
			else:
				eyeStateNum = 0

			eyeStateLIST.append(eyeStateNum)
			# Smooth blink detection over the last few frames so a single
			# noisy frame does not trigger a click.
			if len(eyeStateLIST) > 6:
				eyeStateAvg = np.rint(np.mean(eyeStateLIST[-5:-1]))
				del eyeStateLIST[0]
			else:
				eyeStateAvg = 0

			# elif gaze.is_right():
			# 	text = "Looking right"
			# elif gaze.is_left():
			# 	text = "Looking left"
			# elif gaze.is_center():
			# 	text = "Looking center"
			# print(eyeStateLIST)
			# print(eyeStateAvg)
			cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

			left_pupil = gaze.pupil_left_coords()
			right_pupil = gaze.pupil_right_coords()
			cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
			cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

			cv2.imshow("Demo", frame)

			# Take whichever pupil is visible; the right one wins when both are.
			if left_pupil is not None:
				coarse_newCoord = left_pupil
			if right_pupil is not None:
				coarse_newCoord = right_pupil

			if left_pupil is not None or right_pupil is not None:
				# coarse_newCoord = np.average([left_pupil, right_pupil], axis=0)
				# Offset of the observed pupil from the calibrated image center.
				changeX = coarse_newCoord[0]-self.imageCenter[0]
				changeY = coarse_newCoord[1]-self.imageCenter[1]

				# if changex > changeBuffer or changey > changeBuffer:
				change = [changeX, changeY]
				# else:
				# Blend the PID output with the axis-scaled raw eye offset.
				scaledChange = np.average([[controlChangeX, controlChangeY], [change[0]*25, change[1]*10]], axis=0)

				newPos = np.add(screenCenter, np.multiply(scaledChange,1))

				# print(newPos)
				# Only move while the target stays inside the 2560x1080 screen.
				if newPos[0] > 0 and newPos[0] < 2560 and newPos[1] > 0 and newPos[1] < 1080:
					mouse.position = newPos
				else:
					# break
					pass

				# Sustained blink (smoothed above) => left click.
				if eyeStateAvg == 1:
					mouse.click(Button.left, 1)
				print(mouse.position)
			else:
				# --- fine-control pupil follower (not yet implemented) ---
				pass
				# EyeTracker()
				# fine_newCoord = EyeTracker().trackeyes(frame)
				# print(fine_newCoord)

			if cv2.waitKey(1) == 27:
				break
Esempio n. 10
0
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""

import cv2
from GazeTracking.gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    # We get a new frame from the webcam
    _, frame = webcam.read()
    frame = cv2.flip(frame, flipCode=-1)

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"