Example #1
	def connect(self, camnr=0, mode='RGB', **kwargs):
		
		"""Use this function to implement the initialisation of a specific
		type of eye tracking.
		
		camnr			-	Integer that indicates what webcam should be
						used. Default = 0.
		
		mode			-	String that indicates how the captured frame
						should be processed before it's returned.
						'R' returns the red component of the frame,
						'G' returns the green component of the frame,
						'B' returns the blue component of the frame,
						'RGB' returns the greyscale version of the
						frame (converted by OpenCV). Default = 'RGB'.
		"""
		
		# Only initialise if it hasn't been done yet.
		if not self._connected:
			
			# Set mode and camera number.
			self._camnr = camnr
			self._mode = mode

			# DEBUG message.
			_message(u'debug', u'webcam.WebCamTracker.connect', \
				u"Connecting to webcam %d." % (self._camnr))
		
			# Initialise the webcam.
			self._vidcap = cv2.VideoCapture(self._camnr)
			self._connected = True

			# DEBUG message.
			_message(u'debug', u'webcam.WebCamTracker.connect', \
				u"Successfully connected to webcam %d!" % (self._camnr))
Example #2
	def _get_frame(self):
		
		"""Use this function to implement how the EyeTracker should obtain
		a frame. A frame is supposed to be a two-dimensional image, usually
		an image from a webcam or a video that was converted to greyscale.
		Note that a frame should be a numpy.ndarray with shape=(w,h), and
		dtype='uint8'. In addition to the frame, a success Boolean should
		be returned by this function. It tells the functions that call
		_get_frame whether a new frame was available. (See below for
		exactly what the returned values should be.)
		
		IMPORTANT: This function should not have any keyword arguments.
		Any settings should be handled through properties of self.
		
		Returns
		
		success, frame	-	success is a Boolean that indicates whether
						a frame could be obtained.
						frame is a numpy.ndarray with unsigned,
						8-bit integers that reflect the greyscale
						values of the image. If no frame could be
						obtained, None will be returned.
		"""
		
		# Obtain a frame.
		_message(u'message', u'generic.EyeTracker._get_frame', \
			u"Implement your own _get_frame functionality")
		
		return False, None
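To make the contract concrete, here is a minimal hypothetical subclass whose _get_frame serves synthetic noise frames. The import path is assumed from the module comment in Example #11, and a real subclass would also implement connect and _close (see the skeleton after that example).

import numpy

from pygazetracker.generic import EyeTracker	# assumed module path

class NoiseTracker(EyeTracker):
	
	"""Hypothetical tracker that serves random-noise frames, purely to
	demonstrate the (success, frame) contract of _get_frame."""
	
	def _get_frame(self):
		
		# A frame must be a two-dimensional numpy.ndarray with
		# dtype='uint8'; the Boolean tells callers a frame was available.
		frame = numpy.random.randint(0, 256, (480,640)).astype(numpy.uint8)
		return True, frame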
Example #3
	def _close(self):
		
		"""Doesn't really do anything, but is implemented for consistency's
		sake.
		"""

		# DEBUG message.
		_message(u'debug', u'images.ImageTracker.close', \
			u"Closed connection.")
Example #4
	def _close(self, **kwargs):
		
		"""Use this function to implement the specifics of closing a
		connection in your eye-tracking implementation. You could, for
		example, use it to close the connection to a webcam. This function
		is automatically called when the close() method is invoked, and
		this setup allows you to pass your own keyword arguments to close
		(which will then be passed on to _close).
		"""
		
		# CUSTOM IMPLEMENTATION HERE
		_message(u'message', u'generic.EyeTracker._close', \
			u"Implement your own _close functionality")
Example #5
	def _close(self):
		
		"""Closes the connection to the OpenCV VideoCapture.
		"""

		# DEBUG message.
		_message(u'debug', u'webcam.WebCamTracker.close', \
			u"Disconnecting from webcam.")

		# Release the video capture from the current webcam.
		self._vidcap.release()

		# DEBUG message.
		_message(u'debug', u'webcam.WebCamTracker.close', \
			u"Successfully disconnected from webcam.")
Example #6
	def start_recording(self):
		
		"""Starts the writing of samples to the log file.
		"""

		# Only start recording if it isn't currently active.
		if not self._recording:
			_message(u'debug', u'generic.EyeTracker.start_recording', \
				u"Starting recording, and re-opening logfile '%s'." \
				% (self._logfilename))
			# Signal the recording thread to start.
			self._recording = True
			# Re-open the logfile.
			self._loglock.acquire(True)
			self._logfile = open(self._logfilename, u'a')
			self._loglock.release()
Example #7
	def stop_recording(self):
		
		"""Pauses the writing of samples to the log file.
		"""

		# Only pause recording if recording is currently active.
		if self._recording:
			# Signal the recording Thread to stop what it's doing.
			self._recording = False
			# Wait for a bit, to allow the emptying of the local queue.
			time.sleep(0.2)
			# Close the logfile.
			self._loglock.acquire(True)
			self._logfile.close()
			self._loglock.release()
			_message(u'debug', u'generic.EyeTracker.stop_recording', \
				u"Stopped recording, and closed logfile '%s'." \
				% (self._logfilename))
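start_recording and stop_recording are meant to bracket the period in which samples are written; a minimal usage sketch, where tracker stands for any connected EyeTracker subclass:

import time

tracker.start_recording()	# re-opens the log file in append mode
time.sleep(5.0)	# samples are logged while _recording is True
tracker.stop_recording()	# empties the queue, then closes the log file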
Example #8
	def _get_frame(self):
		
		"""Reads the next frame from the active OpenCV VideoCapture.
		
		Returns
		
		success, frame	-	success is a Boolean that indicates whether
						a frame could be obtained.
						frame is a numpy.ndarray with unsigned,
						8-bit integers that reflect the greyscale
						values of the image. If no frame could be
						obtained, None will be returned.
		"""
		
		# Take a photo with the webcam.
		# (ret is the return value: True if everything went ok, False if
		# there was a problem. frame is the image taken from the webcam as
		# a NumPy ndarray, in which the image is coded in BGR order.)
		ret, frame = self._vidcap.read()
		
		# If a new frame was available, proceed to process and return it.		
		if ret:
			# Return the red component of the obtained frame.
			if self._mode == 'R':
				return ret, frame[:,:,2]
			# Return the green component of the obtained frame.
			elif self._mode == 'G':
				return ret, frame[:,:,1]
			# Return the blue component of the obtained frame.
			elif self._mode == 'B':
				return ret, frame[:,:,0]
			# Convert to grey.
			elif self._mode == 'RGB':
				return ret, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			# Throw an exception if the mode can't be recognised.
			else:
				_message(u'error', u'webcam.WebCamTracker._get_frame', \
					u"Mode '%s' not recognised. Supported modes: 'R', 'G', 'B', or 'RGB'." \
					% (self._mode))
		
		# If a new frame wasn't available, return None.
		else:
			return ret, None
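OpenCV stores frames in BGR channel order, which is why mode 'R' takes channel 2 and mode 'B' takes channel 0 above. A self-contained check of that indexing on a synthetic one-pixel image:

import numpy

# One pixel with B=10, G=20, R=30, in OpenCV's BGR layout.
frame = numpy.array([[[10, 20, 30]]], dtype=numpy.uint8)
assert frame[:,:,2][0,0] == 30	# mode 'R': red channel
assert frame[:,:,1][0,0] == 20	# mode 'G': green channel
assert frame[:,:,0][0,0] == 10	# mode 'B': blue channel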
Example #9
	def connect(self, imgdir=None, mode=u'RGB', **kwargs):
		
		"""Use this function to implement the initialisation of a specific
		type of eye tracking.
		
		imgdir		-	String that is a path to the directory that
						contains the images of eyes. The names in
						the frame directory will be sorted, and are
						thus expected to be in a sortable format;
						for example '00000001.png', '00000002.png',
						etc. If None is passed, an Exception will be
						raised. Default = None.
		
		mode			-	String that indicates how the captured frame
						should be processed before it's returned.
						'R' returns the red component of the frame,
						'G' returns the green component of the frame,
						'B' returns the blue component of the frame,
						'RGB' returns the greyscale version of the
						frame (converted by OpenCV). Default = 'RGB'.
		"""
		
		# Only initialise if it hasn't been done yet.
		if not self._connected:
			
			# Set mode and image directory.
			self._imgdir = imgdir
			self._mode = mode

			# DEBUG message.
			_message(u'debug', u'images.ImageTracker.connect', \
				u"Checking directory %s." % (self._imgdir))
		
			# Check whether the directory exists.
			if self._imgdir is None:
				_message(u'error', u'images.ImageTracker.connect', \
					u"No directory specified; use the imgdir keyword to pass the path to a directory with eye images.")
			if not os.path.isdir(self._imgdir):
				_message(u'error', u'images.ImageTracker.connect', \
					u"Image directory does not exist ('%s')" \
					% (self._imgdir))
			self._framenames = os.listdir(self._imgdir)
			self._framenames.sort()
			self._framenr = 0
			self._nframes = len(self._framenames)
			self._connected = True

			# DEBUG message.
			_message(u'debug', u'images.ImageTracker.connect', \
				u"Successfully connected to directory %s!" % (self._imgdir))
Example #10
	def __init__(self, logfile=u'default', facedetect=True, eyedetect=True, \
		pupthreshold=50, glintthreshold=200, glintdetect=True, \
		pupsizemode=u'diameter', minfacesize=(30,30), Lexpect=(0.7,0.4), \
		Rexpect=(0.3,0.4), maxpupdist=0.2, maxpupsize=0.3, maxcpu=6, \
		**kwargs):
		
		"""Initialises an EyeTracker class.
		
		Keyword Arguments
		
		logfile		-	A string that indicates the path to the log
						file. An extension will be added
						automatically. Default = 'default'.
		
		facedetect		-	A Boolean that indicates whether face
						detection should be attempted before further
						processing (eye detection, and pupil/glint
						detection). Set this to False if you will
						be using the EyeTracker from close to an
						eye, in which case faces need not (and
						could not) be detected. Default = True.
		
		eyedetect		-	A Boolean that indicates whether eye
						detection should be attempted before pupil
						and glint detection. Default = True.
		
		pupthreshold	-	An integer that indicates what the highest
						luminance value is that is still considered
						to be part of the pupil. This value needs to
						be between 0 and 255. Default = 50.
		
		glintthreshold	-	An integer that indicates what the lowest
						luminance value is that is still considered
						to be part of the glint. This value needs to
						be between 0 and 255. Default = 200.
		
		glintdetect		-	A Boolean that indicates whether the glint
						(the corneal reflection) should also be
						detected. Default = True.
		
		pupsizemode		-	A string that indicates how the pupil size
						should be reported.
						'diameter' reports the width of the rect in
						which the thresholded pupil fits.
						'surface' reports the number of thresholded
						pixels that are assumed to be the pupil.
						Default = 'diameter'.
		
		minfacesize		-	A (w,h) tuple that indicates what size a
						face should minimally be. Default = (30,30)
		
		Lexpect		-	A (x,y) tuple that indicates where the left
						eye is expected to be. Note that the
						coordinates are in relative space, where
						(0,0) is the top-left of the image, (0,1)
						is the bottom-left, and (1,1) is the
						bottom-right. Also note that the left eye is
						likely to be on the right side of the image,
						and the right eye is likely to be in the
						left part of the image. Default = (0.7,0.4)
		
		Rexpect		-	A (x,y) tuple that indicates where the right
						eye is expected to be. Note that the
						coordinates are in relative space, where
						(0,0) is the top-left of the image, (0,1)
						is the bottom-left, and (1,1) is the
						bottom-right. Also note that the left eye is
						likely to be on the right side of the image,
						and the right eye is likely to be in the
						left part of the image. Default = (0.3,0.4)
		
		maxpupdist		-	A float that indicates what the maximal
						allowable distance is between the expected
						eye position, and the position of detected
						potential eye. The maximal distance is
						defined as a proportion of the image height.
						It can also be set to None. Default = 0.2.
		
		maxpupsize		-	A float that indicates what the maximal
						allowable width is of the detected eyes. The
						maximal size is defined as a proportion of
						the image width. It can also be set to None.
						Default = 0.3.

		maxcpu		-	Integer indicating the maximum number of
						parallel processes that will be doing all
						of the image processing. This happens in
						parallel to speed things up; the processing
						time on one CPU can't keep up with the
						camera's sampling rate. Default = 6.
		"""

		# DEBUG message.
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Initialising a new EyeTracker.")
		
		# GENERAL SETTINGS
		# Face detection yes/no, and from what size.
		self._facedetect = facedetect
		self._minfacesize = minfacesize
		# Eye detection yes/no.
		self._eyedetect = eyedetect
		# Eye detection settings. These are relative positions of where
		# each eye is expected to be in a frame, how far away detected eyes
		# are allowed to be from the expected locations, and how large the
		# detected eyes are allowed to be. (All defined as proportions of
		# the frame's width and height.)
		self._Lexpect = Lexpect
		self._Rexpect = Rexpect
		self._maxpupdist = maxpupdist
		self._maxpupsize = maxpupsize
		# Pupil detection thresholds (dark for pupil, bright for glint),
		# and additional options that determine whether glints should be
		# detected, and how the pupil size should be reported.
		self._pupt = pupthreshold
		self._glit = glintthreshold
		self._glintdetect = glintdetect
		self._pupsizemode = pupsizemode
		
		# ALIVE EVENT
		# This event signals whether the tracker is still alive. It should
		# only be cleared when closing the connection to the tracker!
		self._alive = Event()
		self._alive.set()
		
		# FRAME OBTAINING THREAD
		# Boolean that turns to True when a connection with the source of
		# frames has been established.
		self._connected = False
		# We need a Queue for frames that are generated in the obtainer
		# Thread. The Queue is read out by the parallel processes.
		self._framequeue = Queue()
		# We need a lock to prevent potential simultaneous attempts to
		# access the image source at the same time. This shouldn't actually
		# be possible in the current implementation, but may be added in
		# the future.
		self._sourcelock = Lock()
		# Start the frame obtaining Thread
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting a Thread to obtain frames.")
		self._frame_obtainer_thread = Thread(target=self._frame_obtainer, \
			args=[self._alive, self._framequeue])
		self._frame_obtainer_thread.name = u'frame_obtainer'
		self._frame_obtainer_thread.daemon = True
		self._frame_obtainer_thread.start()

		# PARALLEL PROCESSING
		# We need a Queue for samples that are generated in the parallel
		# processes that are simultaneously processing new frames.
		self._samplequeue = Queue()
		# Check how many CPUs we can use.
		cpus = cpu_count()
		if cpus > maxcpu:
			cpus = maxcpu
		# Start parallel processes to do image processing.
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting %d parallel processes to process frames into samples." \
			% (cpus-1))
		self._frame_processes = []
		for i in range(1, cpus):
			p = Process(target=_frame_processer, \
				args=[self._alive, self._framequeue, self._samplequeue, \
				self._pupt, self._glit, self._facedetect, self._eyedetect, \
				self._minfacesize, self._Lexpect, self._Rexpect, \
				self._maxpupdist, self._maxpupsize, self._glintdetect, \
				self._pupsizemode])
			p.name = u'frame_processor_%d' % (i)
			p.daemon = True
			p.start()
			self._frame_processes.append(p)
		
		# SAMPLE WRITING
		# Variable that keeps track of the latest sample.
		self._latest_sample = [0, numpy.zeros((2,5))*numpy.NaN]
		# Boolean that signals whether the recording Thread should be
		# active or not.
		self._recording = False
		# Lock to prevent simultaneous access to the log file.
		self._loglock = Lock()
		# The log file is an open text file. It will be opened when
		# self._start_recording is called, and it will be closed when
		# self._stop_recording is called. Between calling those two
		# functions, samples will be appended to the log. To prevent
		# samples from being appended to an existing log file, here we
		# open a new logfile with in 'w' mode, thereby erasing any existing
		# content of a previous log file. This means users need to be
		# careful when naming their files, to prevent overwriting.
		self._logfilename = u'%s.tsv' % (logfile)
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Creating new logfile '%s'." \
			% (self._logfilename))
		# Create a header for the log file.
		l = [u'time']
		l.extend([u'Lpx', u'Lpy', u'Lps', u'Lgx', u'Lgy'])
		l.extend([u'Rpx', u'Rpy', u'Rps', u'Rgx', u'Rgy'])
		line = u'\t'.join(map(unicode, l)) + u'\n'
		# Create a new log file.
		self._loglock.acquire(True)
		self._logfile = open(self._logfilename, u'w')
		self._logfile.write(line)
		self._logfile.close()
		self._loglock.release()

		# Start the sample logging Thread
		_message(u'debug', u'generic.EyeTracker.__init__', \
			u"Starting a Thread to log samples to file '%s'." \
			% (self._logfilename))
		self._sample_logging_thread = Thread(target=self._sample_logger, \
			args=[self._alive, self._samplequeue])
		self._sample_logging_thread.name = u'sample_logger'
		self._sample_logging_thread.daemon = True
		self._sample_logging_thread.start()
		
		# CUSTOM INITIALISATION
		# Run the custom initialisation procedure.
		self.connect(**kwargs)
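The constructor wires up a small pipeline: one thread feeds _framequeue, several worker processes turn frames into samples on _samplequeue, and a logging thread drains that queue into the file. Stripped of the eye-tracking specifics, the pattern is roughly this sketch (the worker body is a placeholder, not the module's real _frame_processer):

from multiprocessing import Event, Process, Queue

def worker(alive, inqueue, outqueue):
	# Placeholder processing: consume items and produce results for as
	# long as the shared alive Event is set.
	while alive.is_set():
		outqueue.put(inqueue.get() * 2)

if __name__ == '__main__':
	alive = Event()
	alive.set()
	framequeue, samplequeue = Queue(), Queue()
	p = Process(target=worker, args=(alive, framequeue, samplequeue))
	p.daemon = True
	p.start()
	framequeue.put(21)
	print(samplequeue.get())	# 42
	alive.clear()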
Example #11
from __init__ import _message, _DEBUG, _DEBUGDIR, _EYECASCADE, _FACECASCADE

import time
from threading import Thread
from multiprocessing import cpu_count, Event, Lock, Process, Queue

import cv2
import numpy
from scipy import ndimage

# # # # #
# DEBUG MODE
# In DEBUG mode, create a Matplotlib figure.
if _DEBUG:
	_message(u'debug', u'generic', \
		u"DEBUG mode active; creating plots of each frame's processing steps.")
	import os
	from matplotlib import patches, pyplot
	global _FIG, _AX
	_FIG, _AX = pyplot.subplots(nrows=2, ncols=3)


# # # # # # # # # # # # # # #
# GENERIC EYE TRACKER CLASS #
# # # # # # # # # # # # # # #
#
# This class is intended to act as a parent class to specific implementations
# of eye tracking through different image sources. The routines are generic
# image processing routines that take care of the eye-tracking part of things,
# but the input of images needs to be implemented in a sub-class. For an
# example of such a sub-class, see pygazetracker.webcam.WebCamTracker.
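Putting the hooks together, a sub-class only needs to provide connect, _get_frame, and _close; a minimal hypothetical skeleton that follows the contracts documented in the stub examples above:

class MyTracker(EyeTracker):
	
	def connect(self, **kwargs):
		
		# Open your image source here, then flag the connection.
		self._connected = True
	
	def _get_frame(self):
		
		# Return (True, frame) with frame a two-dimensional uint8
		# numpy.ndarray, or (False, None) when no new frame is available.
		return False, None
	
	def _close(self, **kwargs):
		
		# Release your image source here.
		self._connected = False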
Example #12
	def _get_frame(self):
		
		"""Reads the next frame from the image directory.
		
		Returns
		
		success, frame	-	success is a Boolean that indicates whether
						a frame could be obtained.
						frame is a numpy.ndarray with unsigned,
						8-bit integers that reflect the greyscale
						values of the image. If no frame could be
						obtained, None will be returned.
		"""
		
		# Check if there is a next image. If there isn't, disconnect.
		if self._framenr >= self._nframes:
			ret = False
			self._connected = False
		# Load the next image.
		else:
			# Construct the path to the current image.
			framepath = os.path.join(self._imgdir, \
				self._framenames[self._framenr])
			# Use OpenCV to load the image. This will return a numerical
			# representation of the image in BGR format. It can also
			# return None if imread fails.
			frame = cv2.imread(framepath)
			# If no image was read, set the return value to False. If
			# an image was loaded, the return value should be True, which
			# will allow further processing.
			if frame is None:
				ret = False
			else:
				ret = True
			# Increase the frame counter by one, so that next time this
			# function is called, the next image will be loaded.
			self._framenr += 1
		
		# If a new frame was available, proceed to process and return it.
		if ret:
			# Return the red component of the obtained frame.
			if self._mode == 'R':
				return ret, frame[:,:,2]
			# Return the green component of the obtained frame.
			elif self._mode == 'G':
				return ret, frame[:,:,1]
			# Return the blue component of the obtained frame.
			elif self._mode == 'B':
				return ret, frame[:,:,0]
			# Convert to grey.
			elif self._mode == 'RGB':
				return ret, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			# Raise an error if the mode can't be recognised.
			else:
				_message(u'error', u'images.ImageTracker._get_frame', \
					u"Mode '%s' not recognised. Supported modes: 'R', 'G', 'B', or 'RGB'." \
					% (self._mode))
		
		# If a new frame wasn't available, return None.
		else:
			return ret, None