Example #1
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f",
                "--face",
                required=True,
                help="path to where the face cascade resides")
ap.add_argument("-e",
                "--eye",
                required=True,
                help="path to where the eye cascade resides")
ap.add_argument("-v", "--video", help="path to wthe (optional) video file")
args = vars(ap.parse_args())

et = EyeTracker(args["face"], args["eye"])

if not args.get("video", False):
    camera = cv2.VideoCapture(0)

else:
    camera = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = camera.read()

    if args.get("video") and not grabbed:
        break

    frame = image_utils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Example #2
link = "100.1.1.1"

# Initialize QT application
app = QtGui.QApplication(sys.argv)

# Initialize Tracker
eyetracker = EyeTracker(link=link,
                        dummy_mode=False,
                        sprate=1000,
                        thresvel=35,
                        thresacc=9500,
                        illumi=2,
                        caltype='HV5',
                        dodrift=False,
                        trackedeye='right',
                        display_type=display_type,
                        ptw=app,
                        bgcol=(127, 127, 127),
                        distance=550,
                        resolution=resolution,
                        winsize=(400, 300),
                        inner_tgcol=(127, 127, 127),
                        outer_tgcol=(255, 255, 255),
                        targetsize_out=1.5,
                        targetsize_in=0.5)

app.lastWindowClosed.connect(QtCore.QCoreApplication.instance().quit)

# Show application
eyetracker.display.gui.show()
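The snippet above stops before the Qt event loop is started, so the GUI would close immediately. Assuming the usual PyQt4/PySide imports implied here (sys, QtGui, QtCore), the script would typically end with something like:

# Hypothetical closing line, not part of the original snippet: start the Qt event loop
sys.exit(app.exec_())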
Example #3
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 19:21:53 2019

@author: David
"""
import cv2
from eyetracker import EyeTracker

camera = cv2.VideoCapture(0)
et = EyeTracker("cascades\\haarcascade_frontalface_default.xml",
                "cascades\\haarcascade_eye.xml")
while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = et.track(gray)
    for rect in rects:
        cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]),
                      (0, 255, 0), 2)
    cv2.imshow("Tracking", frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
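The EyeTracker class used in Examples #1, #3 and #6 is not shown in these snippets. Judging from its usage (a constructor taking face and eye cascade paths, and track(gray) returning corner-style rectangles), a minimal sketch might look like the following; the class body and the detectMultiScale parameters are assumptions, not the original author's code.

import cv2


class EyeTracker:
    # Minimal sketch: wraps OpenCV Haar cascades for face and eye detection
    def __init__(self, faceCascadePath, eyeCascadePath):
        self.faceCascade = cv2.CascadeClassifier(faceCascadePath)
        self.eyeCascade = cv2.CascadeClassifier(eyeCascadePath)

    def track(self, image):
        rects = []
        # detect faces first, then look for eyes inside each face region
        faceRects = self.faceCascade.detectMultiScale(
            image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        for (fx, fy, fw, fh) in faceRects:
            rects.append((fx, fy, fx + fw, fy + fh))
            faceROI = image[fy:fy + fh, fx:fx + fw]
            eyeRects = self.eyeCascade.detectMultiScale(
                faceROI, scaleFactor=1.1, minNeighbors=10, minSize=(20, 20))
            for (ex, ey, ew, eh) in eyeRects:
                # eye coordinates are relative to the face ROI, so offset them
                rects.append((fx + ex, fy + ey, fx + ex + ew, fy + ey + eh))
        return rects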
Example #4
#faces = []
facesPath = []
currFaces = 0

num_of_images = len(args["images"])

for i, im in enumerate(args["images"]):
    if any(x == os.path.splitext(im)[1][1:] for x in ('jpg', 'jpeg', 'png', 'bmp')):
        # Read image
        image = cv2.imread(im)
        #imagecopy = image.copy()
        imageName = os.path.dirname(im)[7:]
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Create eyetracker object
        et = EyeTracker(args["faces"], args["eyes"])

        # Optimize image size and classify accordingly
        image = imutils.optimize(image, lowerLim, upperLim)
        if image.shape[0] == lowerLim or image.shape[1] == lowerLim:
            allRects = et.track(image, FscaleFactor=1.04, FminNeighbors=3, FminSize=(2, 2))
        else:
            allRects = et.track(image)

        # Cycle through all faces and eyes; allRects apparently comes back in
        # groups of three (face, then two eyes), so every third rect is a face
        for (j, rect) in enumerate(allRects):
            if j % 3 == 0:
                (Fx, Fy, Fw, Fh) = rect
                #cv2.rectangle(imagecopy, (Fx, Fy), (Fx + Fw, Fy + Fh), (0, 0, 255), 1)
                face = image[Fy:Fy + Fh, Fx:Fx + Fw]
                e2c = e1c = (0, 0)
Example #5
                core.wait(0.1)

            # wait for sometime
            core.wait(waittime)

            # check for keypresses
            if len(event.getKeys()) > 0:
                experiment_cleanup(win)
                break

            event.clearEvents()

    def on_data(self, x, y):
        eye = visual.Circle(self.win,
                            pos=(x, y),
                            fillColor=[0.5, 0.5, 0.5],
                            size=0.05,
                            lineWidth=1.5)
        eye.draw()

    def experiment_cleanup(self, win):

        # cleanup
        win.close()
        core.quit()


exp = Experiment()
et = EyeTracker(exp.initialize_exp, exp.on_data)
#initialize_exp()
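In this last example the EyeTracker constructor apparently takes two callbacks: an experiment-initialization function and a per-sample on_data(x, y) handler. The real class presumably wraps an actual eye-tracker backend; purely as a hypothetical sketch of that callback pattern:

class EyeTracker:
    # Hypothetical callback-driven wrapper: run the setup callback once,
    # then forward gaze samples to the on_data handler
    def __init__(self, on_init, on_data):
        self.on_data = on_data
        on_init()  # let the experiment build its window and stimuli first

    def feed(self, samples):
        # samples: an iterable of (x, y) gaze positions from whatever backend is used
        for (x, y) in samples:
            self.on_data(x, y)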
Example #6
import argparse
import cv2
import imutils

ap = argparse.ArgumentParser()
ap.add_argument('-f',
                '--face',
                required=True,
                help='path to where the face cascade resides')
ap.add_argument('-e',
                '--eye',
                required=True,
                help='Path to where the eye cascade resides')
ap.add_argument('-v', '--video', help='path to the video')
args = vars(ap.parse_args())

et = EyeTracker(args['face'], args['eye'])

if not args.get('video', False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args['video'])

while True:
    (grabbed, frame) = camera.read()

    if args.get('video') and not grabbed:
        break

    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Example #7
else:
    raise Exception(
        'Oops. If you want to try out the Qt version, use example_Qt.py instead.'
    )

# From this point on, the EyeTracker wrapper class works the same whether we are using a pygame or a psychopy window
eyetracker = EyeTracker(link=link,
                        dummy_mode=use_dummy,
                        sprate=1000,
                        thresvel=35,
                        thresacc=9500,
                        illumi=2,
                        caltype='HV9',
                        dodrift=False,
                        trackedeye='right',
                        display_type=display_type,
                        ptw=win,
                        bgcol=(0, 0, 0),
                        distance=distance,
                        resolution=resolution,
                        winsize=screen_size,
                        inner_tgcol=(0, 0, 0),
                        outer_tgcol=(1, 1, 1),
                        targetsize_out=1.0,
                        targetsize_in=0.25)

# Checking methods (fixationtest, checkfixation)
checking = Checking(eyetracker, eyetracker.display, radius=2)

# Run Eyelink and do a calibration
eyetracker.run()
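The comment above notes that the wrapper accepts either a pygame or a psychopy window through display_type and ptw. Purely as an illustration of the PsychoPy branch (the window parameters below are assumptions, not values from this repository), the object passed as ptw=win could be created along these lines, with display_type set to whatever identifier the wrapper expects for PsychoPy:

from psychopy import visual

# Assumed screen size; the original script defines screen_size and distance elsewhere
screen_size = (1920, 1080)

# Fullscreen PsychoPy window, later handed to the EyeTracker wrapper as ptw=win
win = visual.Window(size=screen_size, color=(0, 0, 0), units='pix', fullscr=True)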
ap.add_argument("-f", "--face", required=True,
                help="path to where the face cascade resides")
ap.add_argument("-e", "--eye", required=True,
                help="path to where the eye cascade resides")
ap.add_argument("-i", "--images", required=True,
                help="path to file containing list of images to process")
ap.add_argument("-a", "--aspect", required=False,
                help="Define aspect ratio (note this doesn't squash or stretch the image)")
args = vars(ap.parse_args())

# Aspect ratio
target_aspect = args["aspect"]

# Setup face finder and eye tracker
# TODO - Rework to just use Face Detection?
et = EyeTracker(args["face"], args["eye"])

# Get list of images
fileList = open(args["images"]).readlines()
for imageInfo in fileList:
    file = imageInfo.split(";")[0]
    label = imageInfo.split(";")[1].strip()

    # Filename processing
    print "Processing file:" + file
    dirname, filename = os.path.split(os.path.relpath(file))

    # load the image then resize and convert it to grayscale
    image = cv2.imread(file)
    frame = imgutils.resize(image, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)