Example No. 1
    def __init__(self, testMode, rotation):
        self.blinked = False
        self.captured = False
        self.rotation = rotation
        self.photoList = dict()
        self.genderList = list()
        self.ageList = list()
        self.timeList = list()

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        self.W = None
        self.H = None

        # Model
        self.face_cascade = cv2.CascadeClassifier()
        if not self.face_cascade.load('haarcascade_frontalface_alt.xml'):
            print("face detector model not loaded")
        self.age_net = cv2.dnn.readNetFromCaffe(
            model_directory + '/deploy_age.prototxt',
            model_directory + '/age_net.caffemodel')
        self.gender_net = cv2.dnn.readNetFromCaffe(
            model_directory + '/deploy_gender.prototxt',
            model_directory + '/gender_net.caffemodel')

        # Edit this for development
        print("Running on " + platform.system())
        # platform.system() returns 'Linux' here, never 'linux2' (that is a sys.platform value)
        if platform.system() == 'Linux':
            self.cam = cv2.VideoCapture(-1)
        else:
            self.cam = cv2.VideoCapture(1)
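        # Property ids 3 and 4 correspond to CAP_PROP_FRAME_WIDTH and CAP_PROP_FRAME_HEIGHT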
        self.cam.set(3, 1920)
        self.cam.set(4, 1920)
        self.gaze = GazeTracking(testMode)
Example No. 2
 def __init__(self):
     self.gaze = GazeTracking()
     self.webcam = cv2.VideoCapture(0)
     self.hori = []
     self.verti = []
     self.circle = []
     self.i = 0
     self.window1 = Tk()
Example No. 3
 def __init__(self):
     self.cv_bridge = CvBridge()
     self.gaze = GazeTracking()
     self.publish_annotated_frame = rospy.get_param(
         "~publish_annotated_frame", True)
     if self.publish_annotated_frame:
         self.annotated_frame_publisher = rospy.Publisher(
             'image_annotated_raw', Image, queue_size=10)
     self.gaze_publisher = rospy.Publisher('gaze_state',
                                           GazeState,
                                           queue_size=10)
Example No. 4
    def __init__(self):
        user32 = ctypes.windll.user32
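        # GetSystemMetrics(0) is SM_CXSCREEN: the width of the primary display in pixels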

        self.img_width = user32.GetSystemMetrics(0) // 5
        self.img_height = self.img_width // 2

        self.act = ""
        self.acts = []
        self.act_time = 0
        self.act_started = False
        self.act_ended = True
        self.act_start_time = time.time()
        self.act_stop_time = time.time()
        self.current_gesture = ""
        self.gesture_text = ""
        self.end_of_gesture = False
        self.gesture_end_start_time = time.time()
        self.gesture_end_stop_time = time.time()
        self.gests = dict()
        self.detection_of_end = False
        self.list_of_acts = {"Closed left": 0, "Closed right": 0, "Neither": 0}
        self.counter = 0
        self.time_of_output = 0
        self.end_of_display_image = True
        self.current_act = ""

        self.read_settings()

        self.gaze = GazeTracking()

        self.webcam = cv2.VideoCapture(self.webcam_number)
        self.webcam.set(3, self.webcam_width)
        self.webcam.set(4, self.webcam_height)

        self.screen_width = int(self.webcam.get(3))
        self.screen_height = int(self.webcam.get(4))

        window_name = "Aurelium"
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        self.dim = (self.img_width, self.img_height)

        self.load_graphics()

        self.run()

        cv2.destroyAllWindows()
Example No. 5
def main():
    # Initialize webcam
    webcam = cv2.VideoCapture(0)

    # Start GazeTracking
    gaze = GazeTracking()
    thread_gaze = Thread(target=gaze.run, args=(webcam, ))
    thread_gaze.name = "ThreadGaze"
    #thread_gaze.daemon = True
    thread_gaze.start()

    # Calibrate
    size = pyautogui.size()
    # changed manually to test only on my primary monitor
    size = (1920, 1080)
    calibration = Calibration(gaze, size)

    # Show annotated camera
    annotated_frame = AnnotatedFrame(gaze)
    thread_annotated = Thread(target=annotated_frame.show)
    thread_annotated.name = "ThreadAnnotated"
    #thread_annotated.daemon = True
    thread_annotated.start()

    # Initialize mouse control
    mouse_control = MouseControl(gaze, calibration)
    mouse_control.run()
Example No. 6
	def __init__(self):
		# Load the parameters
		self.conf = config()
		# initialize dlib's face detector (HOG-based) and then create the
		# facial landmark predictor
		print("[INFO] loading facial landmark predictor...")
		self.detector = dlib.get_frontal_face_detector()
		self.predictor = dlib.shape_predictor(self.conf.shape_predictor_path)
		
		# grab the indexes of the facial landmarks for the left and
		# right eye, respectively
		(self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
		(self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
		
		# initialize the video stream and sleep for a bit, allowing the
		# camera sensor to warm up
		self.cap = cv2.VideoCapture(0)
		if self.conf.vedio_path == 0:
			self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
		_, sample_frame = self.cap.read()
		
		# Introduce mark_detector to detect landmarks.
		self.mark_detector = MarkDetector()
		
		# Setup process and queues for multiprocessing.
		self.img_queue = Queue()
		self.box_queue = Queue()
		self.img_queue.put(sample_frame)
		self.box_process = Process(target=get_face, args=(
			self.mark_detector, self.img_queue, self.box_queue,))
		self.box_process.start()
		
		# Introduce pose estimator to solve pose. Get one frame to setup the
		# estimator according to the image size.
		self.height, self.width = sample_frame.shape[:2]
		self.pose_estimator = PoseEstimator(img_size=(self.height, self.width))
		
		# Introduce scalar stabilizers for pose.
		self.pose_stabilizers = [Stabilizer(
			state_num=2,
			measure_num=1,
			cov_process=0.1,
			cov_measure=0.1) for _ in range(6)]
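		# six stabilizers, presumably one per pose scalar (3 rotation + 3 translation values)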
		
		self.tm = cv2.TickMeter()
		# Gaze tracking
		self.gaze = GazeTracking()
Example No. 7
def read_gaze(dirPath):
    # Reading nested directories and files recursively is left for later;
    # for now only the single given directory is processed
    print("[O] Read left eye gaze is running...")
    gaze = GazeTracking()
    left = []
    target_dir = dirPath
    fileList = os.listdir(target_dir)
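    # Collect the left-pupil coordinates for every image (None when the pupil is not detected)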
    for file in fileList:
        image = Image.open(target_dir + "\\" + file)
        data = np.asarray(image)
        # print(data.shape)     # (720 ,1280, 3)
        gaze.refresh(data)
        left_pupil = gaze.pupil_left_coords()
        left.append(left_pupil)
    print(left)
    print("[=] Read left eye gaze is closing...")
    return left
Example No. 8
def gaze():

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    face_detect = 1

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        if face_detect:
            phonix_tracking.detect_video()

        frame = gaze.annotated_frame()
        text = ""

        # if gaze.is_blinking():
        #     text = "Blinking"
        #     print("blinking")

        if gaze.is_right():
            text = "Looking right"
            print("right")

        elif gaze.is_left():
            text = "Looking left"
            print("left")

        # elif gaze.is_center():
        #     text = "Looking center"
        #     print("center")


        # cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        # cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        # cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        # cv2.imshow("Demo", frame)

        # Exit the program when ESC (key code 27) is pressed
        if cv2.waitKey(1) == 27:
            break
Example No. 9
def getEyeResults():
    gaze = GazeTracking()
    frame = cv2.imread("./images/analysis/proctor.png")
    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"
    print(text)
    return text
Example No. 10
    def __init__(self):
        self.model = model_face()
        self.model.load_weights('model/model.h5')

        self.gaze = GazeTracking()

        # cv2.ocl.setUseOpenCL(True)

        self.emotion_dict = {
            0: "Angry",
            1: "Disgusted",
            2: "Fearful",
            3: "Happy",
            4: "Neutral",
            5: "Sad",
            6: "Surprised"
        }

        self.cap = cv2.VideoCapture(0)
        self.facecasc = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
Example No. 11
def GazeYourEye(video, student):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(video)
    result = []
    while True:
        value, frame = webcam.read()
        if not value: break
        gaze.refresh(frame)

        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            result.append('B')
        elif gaze.is_right():
            result.append('R')
        elif gaze.is_left():
            result.append('L')
        elif gaze.is_center():
            result.append('C')

        if cv2.waitKey(1) == 27:
            break
    whole = len(result)
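    # Share of frames spent looking center, blinking, looking left and looking right (percent)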

    ret = [
        round(result.count('C') / whole * 100, 2),
        round(result.count('B') / whole * 100, 2),
        round(result.count('L') / whole * 100, 2),
        round(result.count('R') / whole * 100, 2)
    ]

    student = Students.query.filter(
        Students.student_number == student.student_number)
    student.update({
        'eye_ratio_center': ret[0],
        'eye_ratio_blink': ret[1],
        'eye_ratio_left': ret[2],
        'eye_ratio_right': ret[3]
    })

    data = np.array([[ret[0], ret[1], ret[2], ret[3]]])
    [result] = load_model.predict(data)
    student.update({'eye_result': bool(result)})
    db.session.commit()
Example No. 12
def eye_tracking(image_path):
    gaze = GazeTracking()
    frame = cv2.imread(image_path)
    gaze.refresh(frame)

    frame = gaze.annotated_frame()

    if gaze.is_right():
        value = 0.5
    elif gaze.is_left():
        value = 0.5
    elif gaze.is_center():
        value = 1
    else:
        value = 0
    return value
Example No. 13
def imagecov(photoname, relative_eye_size=1.5):
    global count
    '''
    Keep the image in the folder source_image and 
    put in the name of image in photoname
    '''
    sourcename = DIRNAME + '/source_img/' + photoname
    finalname = DIRNAME + '/static/' + str(count) + ".jpg"
    '''
    You can change the relative eye size to optimize the image further
    '''
    # relative_eye_size = 1.5

    gaze = GazeTracking()
    frame = cv2.imread(sourcename)

    # cv2.imshow("Demo1", frame)

    gaze.refresh(frame)
    frame = gaze.annotated_frame()

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
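    # Squared distance between the pupils; if either pupil was not detected its
    # coordinates are None and indexing raises TypeError, handled by the except below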
    try:
        distance = (left_pupil[0] - right_pupil[0]) * (
            left_pupil[0] - right_pupil[0]
        ) + (left_pupil[1] - right_pupil[1]) * (left_pupil[1] - right_pupil[1])
    except:
        return False
    distance = np.sqrt(distance)
    print(distance)
    face_image = Image.open(sourcename)
    eye_image = Image.open(DIRNAME + '/source_img/redeye.png')

    eye_image = eye_image.resize((int(distance * 2 * relative_eye_size),
                                  int(distance * relative_eye_size)))
    eye_image = eye_image.rotate(15)

    Image.Image.paste(face_image, eye_image,
                      (left_pupil[0] - int(distance * relative_eye_size),
                       left_pupil[1] - int(distance * relative_eye_size / 2)),
                      eye_image)
    Image.Image.paste(face_image, eye_image,
                      (right_pupil[0] - int(distance * relative_eye_size),
                       right_pupil[1] - int(distance * relative_eye_size / 2)),
                      eye_image)
    count += 1
    # face_image.show()
    face_image.save(finalname)
    # eye_image.show()
    return True
Example No. 14
def startCam():
    import cv2
    from gaze_tracking import GazeTracking
    import time

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    startTime = time.time()
    totalFrames = 0
    framesDistracted = 0
    framesFocused = 0

    while True:
        _, frame = webcam.read()
        totalFrames += 1
        gaze.refresh(frame)
        frame = gaze.annotated_frame()

        if gaze.is_blinking():
            framesDistracted += 1
        elif gaze.is_right():
            framesDistracted += 1
        elif gaze.is_left():
            framesDistracted += 1
        elif gaze.is_center():
            framesFocused += 1
        else:
            framesDistracted += 1

        cv2.imshow("Camera", frame)

        if cv2.waitKey(1) == ord('q'):
            break

    webcam.release()
    cv2.destroyAllWindows()

    totalTime = truncate(time.time() - startTime, 2)
    percentFocused = truncate((framesFocused / totalFrames) * 100, 2)
    percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)

    return totalTime, percentFocused, percentDistracted
Example No. 15
def adhdRes():

    #app3 code
    # p = multiprocessing.Process(target = ValuePredictor, name="valuePredictor", args=())
    # p.start()
    # time.sleep(10)
    # p.terminate()

    sns.set(style="ticks", context="talk")
    plt.style.use("dark_background")

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        if (left_pupil == (0, 0) or right_pupil == (0, 0)):
            pass
        else:
            plt.plot(left_pupil, right_pupil)

        cv2.imshow("Demo", frame)

        if cv2.waitKey(1) == ord(' '):
            break

    plt.savefig('2.png')

    img = open_image('./2.png')
    result = ValuePredictor(img)
    if result == 'ASD':
        prediction = "ADHD"
    else:
        prediction = "No ADHD"
    return render_template("./test/adhd.html", prediction=prediction)
Example No. 16
def get_data():
    x = []
    y = []
    gaze = GazeTracking()
    for files in os.listdir('output/mouse_old3/'):
        if 'jpg' in files:
            continue
        obj = []
        txt = open(os.path.join('output/mouse_old3/',
                                files)).read().split('\n')
        center = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_center.jpg'))
        left = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_1.jpg'))
        right = cv2.imread(
            os.path.join('output/mouse_old3/', files[:-4] + '_2.jpg'))
        stop = False
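        # For each of the three captures (center, left, right) collect 8 features:
        # eye-center (x, y) and pupil (x, y) for both eyes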
        try:
            for im in [center, left, right]:
                gaze.refresh(im)
                if gaze.pupil_left_coords() is None or gaze.pupil_right_coords() is None:
                    stop = True
                    break
                obj.extend([
                    gaze.eye_left.center[0],
                    gaze.eye_left.center[1],
                    gaze.eye_right.center[0],
                    gaze.eye_right.center[1],
                    gaze.pupil_left_coords()[0],
                    gaze.pupil_left_coords()[1],
                    gaze.pupil_right_coords()[0],
                    gaze.pupil_right_coords()[1],
                ])
            if stop:
                continue
            for line in txt:
                if len(line.split(',')) == 2:
                    a, b = line.split(',')
                    y.append(np.array((float(a) / 1920, float(b) / 1080)))
            x.append(np.array(obj))
        except cv2.error:
            pass
    return np.array(x, dtype='float32'), np.array(y)
Example No. 17
    def eyeTrack(self):

        gaze = GazeTracking()
        blinkCount = 0

        while True:

            # Grab a single frame of video
            ret, frame = self.video_capture.read()

            # We send this frame to GazeTracking to analyze it
            gaze.refresh(frame)

            frame = gaze.annotated_frame()
            text = ""

            if gaze.is_blinking():
                text = "Goz Kirpildi"  # Turkish: "Eye blinked"
                blinkCount += 1
            elif gaze.is_right():
                text = "Saga Bakildi"  # Turkish: "Looked right"
            elif gaze.is_left():
                text = "Sola Bakildi"  # Turkish: "Looked left"
            elif gaze.is_center():
                text = "Merkeze Bakildi"  # Turkish: "Looked at the center"

            cv2.putText(frame, text, (0, 30), cv2.FONT_HERSHEY_DUPLEX, 1,
                        (147, 58, 31), 2)

            # Display the resulting image
            cv2.imshow('Video', frame)
            print("Goz Kırpma: " + str(blinkCount))

            if blinkCount >= 3:
                return 1

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example No. 18
        #           gaze2.pupil_left_coords()[0], gaze2.pupil_left_coords()[1], gaze2.pupil_right_coords()[0], gaze2.pupil_right_coords()[1],
        #           gazecenter.eye_left.center[0], gazecenter.eye_left.center[1], gazecenter.eye_right.center[0], gazecenter.eye_right.center[1],
        #           gazecenter.pupil_left_coords()[0], gazecenter.pupil_left_coords()[1], gazecenter.pupil_right_coords()[0], gazecenter.pupil_right_coords()[1],
        #           ]
        # samples.append(np.array(sample, dtype='float32'))
        # targets.append(np.array([x / 1920, y / 1080], dtype='float32'))
        return


if __name__ == '__main__':
    raw_show = np.zeros((1080, 1920, 3), dtype='uint8')
    cv2.namedWindow("d", cv2.WND_PROP_FULLSCREEN)
    cv2.setMouseCallback('d', draw_events)
    cv2.setWindowProperty("d", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow('d', raw_show)
    gaze1 = GazeTracking()
    gaze2 = GazeTracking()
    gazecenter = GazeTracking()
    webcam1 = cam.WebcamThread(0, "Face detector 1").start()

    # webcam1.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    # webcam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    n = len(os.listdir('/home/palm/PycharmProjects/true/output/mouse')) // 3
    samples = []
    targets = []
    trained = False
    while True:
        # We get a new frame from the webcam
        t = time.time()
        try:
            _, frame1 = webcam1.read()
Example No. 19
        image = cv2.resize(image, (320, 540))
        images.append(image)

    model = LinearRegression()
    model.fit(x, y)
    raw_show = np.zeros((1080, 1920, 3), dtype='uint8')
    for i in range(6):
        for j in range(2):
            image = images[np.random.randint(0, len(images) - 1)]
            x = (i * 320, (i + 1) * 320)
            y = (j * 540, (j + 1) * 540)
            raw_show[y[0]:y[1], x[0]:x[1], :] = image
    webcam = cv2.VideoCapture(0)
    webcam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    gaze = GazeTracking()
    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()
        raw = frame.copy()
        image = frame.copy()
        gaze.refresh(frame)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()

        blue = frame[..., 0]
        green = frame[..., 1]
        red = frame[..., 2]
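        # rough colour mask: true where the green channel exceeds 1.7x the red channel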
        mask1 = green > red * 1.7
        idx = (mask1 == 0)
Example No. 20
import cv2.cv2 as cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking(0)


def analyze(frame):

    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""
    eye_position = 0

    if gaze.is_blinking():
        text = "Blinking"
        eye_position = 0
    elif gaze.is_left():
        text = "Looking left"
        eye_position = 1
    elif gaze.is_center():
        text = "Looking center"
        eye_position = 2
    elif gaze.is_right():
        text = "Looking right"
        eye_position = 3
    elif gaze.not_found_face():
        text = "Not found face"
        eye_position = -1

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
Example No. 21
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    _, frame = webcam.read()

    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking(): text = " Blinking"
    elif gaze.is_right(): text = " Looking right"
    elif gaze.is_left(): text = " Looking left"
    elif gaze.is_center(): text = " Looking center"

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    cv2.putText(frame, "Left Eye Coords :  " + str(left_pupil), (90, 130),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Right Eye Coords: " + str(right_pupil), (90, 165),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    cv2.imshow("Demo", frame)
Example No. 22

RECORD_VIDEO = False
USE_GESTURES = True

LEFT_LOOK = None
LEFT_LOOK_THRESHOLD = 3

RIGHT_LOOK = None
RIGHT_LOOK_THRESHOLD = 5
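# LEFT_LOOK / RIGHT_LOOK store the timestamp of the last look in that direction
# and are cleared once the corresponding threshold (in seconds) has elapsed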

BLINKING_COUNT = 0

# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
gaze = GazeTracking()

# drone = CoDrone.CoDrone()
# drone.connect()

#Capture video from the Wifi Connection to FPV module
#RTSP =(Real Time Streaming Protocol)
# cap = cv2.VideoCapture('rtsp://192.168.100.1/cam1/mpeg4')
i = 0
print('start')
while True:
    current_time = time.time()
    if(LEFT_LOOK != None):
        if(current_time > (LEFT_LOOK + LEFT_LOOK_THRESHOLD)):
            LEFT_LOOK = None
    if(RIGHT_LOOK != None):
Example No. 23
import cv2
from gaze_tracking import GazeTracking
from page import Page
from sklearn.svm import SVC
import pandas as pd
import numpy as np
#
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
outn = "init.avi"

fps = webcam.get(cv2.CAP_PROP_FPS)
print(fps)
_, frame = webcam.read()
(hgt, wid, dep) = frame.shape
frc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter(outn, frc, 25.0, (wid * 2, hgt), 1)

page_initialization = Page(-1, gaze, webcam, out)
gaze.set_clf(page_initialization.initialization())
# x = []
# y = []
#
# data_size = 150
#
# horizontal = pd.read_csv('trained_models/dict_horizontal.csv', sep=',', header=None).values
# vertical = pd.read_csv('trained_models/dict_vertical.csv', sep=',', header=None).values
# for i in range(6):
#     x = horizontal[i][1:]
#     y = vertical[i][1:]
#     xy = np.concatenate((np.array(x).reshape(data_size, 1), np.array(y).reshape(data_size, 1)), axis = 1)
Example No. 24
from string import Template
from pathlib import Path
import csv
import numpy as np
import PIL.Image as Image
import base64
import torch
import torchvision.transforms as transforms
import imageio
from aip import AipFace
import cv2
from gaze_tracking import GazeTracking

import datetime
from code.model import GazeLSTM
gaze = GazeTracking()

# from drive.gaze360.code.My_hdf5 import *
# import My_hdf5  -- My_hdf5 is not needed for the initial tests
WIDTH, HEIGHT = 960, 720
total = set()


# Loading the model
def load_model(model_path, on_gpu):
    model_v = GazeLSTM()
    model = torch.nn.DataParallel(model_v)  #.cuda()
    if not on_gpu:
        checkpoint = torch.load(model_path, map_location='cpu')
    else:
        checkpoint = torch.load(model_path)
Example No. 25
def get_data():
    l = []
    center_left = []
    center_right = []
    l_x = []
    l_y = []
    r_x = []
    r_y = []
    d = {'Time': [], 'Left eye': [], 'Right eye': []}
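    # d collects one entry per frame: timestamp plus left/right pupil coordinates (0 when not detected)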

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        frame = gaze.annotated_frame()
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            l.append(datetime.datetime.now())
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking right"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        try:
            l_x.append(gaze.pupil_left_coords()[0])
            l_y.append(gaze.pupil_left_coords()[1])
            r_x.append(gaze.pupil_right_coords()[0])
            r_y.append(gaze.pupil_right_coords()[1])
        except:
            l_x.append(0)
            l_y.append(0)
            r_x.append(0)
            r_y.append(0)
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        #print((left_pupil,right_pupil))
        try:
            d['Left eye'].append((left_pupil[0], left_pupil[1]))
            d['Right eye'].append((right_pupil[0], right_pupil[1]))
            d['Time'].append(datetime.datetime.now())
        except:
            d['Left eye'].append(0)
            d['Right eye'].append(0)
            d['Time'].append(datetime.datetime.now())

        cv2.imshow("Frame", frame)

        if cv2.waitKey(1) == 27:
            break

    eye_coordinates = pd.DataFrame(d)
    eye_coordinates.columns = ['Time', 'Left eye', 'Right eye']
    eye_blinking = pd.Series(l)
    return eye_coordinates  #,eye_blinking,center_right,center_left,l_x,l_y,r_x,r_y
Example No. 26
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""

import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture('test_video.mov')

while True:
    # We get a new frame from the video file
    _, frame = webcam.read()

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
Example No. 27
import cv2
import serial
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
#ser = serial.Serial('COM3', 9600)

while True:
    # We get a new frame from the webcam
    _, frame = webcam.read()

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Down"
        #ser.write(b'B')
    elif gaze.is_right():
        text = "Right"
        #ser.write(b'R')
    elif gaze.is_left():
        text = "Left"
        #ser.write(b'L')
    elif gaze.is_up():
        text = "Up"
        #ser.write(b'F')
    elif gaze.is_center():
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
from __future__ import division
import os
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
# webcam = cv2.VideoCapture(0)
video_root = '/home/himanshu/Downloads'
video_name = 'P45.avi'

if not os.path.exists(os.path.join(video_root, 'MIT_images', video_name[:-4])):
    os.mkdir(os.path.join(video_root, 'MIT_images', video_name[:-4]))
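    # Extract frames from the source video at 30 fps into MIT_images/<video name>/ as numbered PNGs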
    os.system("ffmpeg -i {0}/{2} -vf fps=30 {0}/MIT_images/{1}/output%06d.png".
              format(video_root, video_name[:-4], video_name))
    # os.system("ffmpeg -i {0}/P45.avi -vf fps=30 {0}/MIT_images/{1}/output%06d.png".format(video_root, video_name[:-4]))

# while True:
# We get a new frame from the webcam
img_root = '/home/himanshu/Downloads/MIT_images/P45'
left = 0
right = 0
center = 0
blinking = 0
for fname in os.listdir(img_root):
    frame = cv2.imread(os.path.join(img_root, fname))
    frame = gaze.perspective_transform(frame, angle_x=65, angle_y=50)
    # print(frame.shape)
Example No. 29
import cv2.cv2 as cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking(1)


def analyze(frame):

    gaze.refresh(frame)
    frame = gaze.annotated_frame()
    text = ""
    eye_position = 0

    if gaze.is_blinking():
        text = "Blinking"
        eye_position = 0
    elif gaze.is_left():
        text = "Looking left"
        eye_position = 1
    elif gaze.is_center():
        text = "Looking center"
        eye_position = 2
    elif gaze.is_right():
        text = "Looking right"
        eye_position = 3
    elif gaze.not_found_face():
        text = "Not found face"
        eye_position = -1

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                (147, 58, 31), 2)
Example No. 30
    'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}


def get_video_type(filename):
    # os.path.splitext keeps the leading dot on the extension, so strip it
    # before looking it up in VIDEO_TYPE (whose keys have no dot)
    filename, ext = os.path.splitext(filename)
    ext = ext.lstrip('.')
    if ext in VIDEO_TYPE:
        return VIDEO_TYPE[ext]
    return VIDEO_TYPE['avi']


#
# Gaze tracking code
#

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
filename = 'video.avi'
frames_per_second = 10
res = '480p'
out = cv2.VideoWriter(filename, get_video_type(filename),
                      frames_per_second + 1, get_dims(webcam, res))

pupils_lost = False
activity_threshold = 20
continuous_detected = 0
continuous_undetected = 0
blink_count = 0
left_look_count = 0
right_look_count = 0
frame_count = 0