def __init__(self, path='data', ext='.asc', startTrialKey='start_trial',
		endTrialKey='stop_trial', variableKey='var', dtype='|S128', maxN=None,
		maxTrialId=None, requireEndTrial=True, traceFolder='traces',
		offlineDriftCorr=False, skipList=[], blinkReconstruct=False, only=None,
		acceptNonMatchingColumns=True):

		"""
		Constructor. Reads all Eyelink ASCII files from a specific folder.

		Keyword arguments:
		path			--	the folder containing the ASCII files (default='data')
		ext				--	the extension of the ASCII files (default='.asc')
		startTrialKey	-- 	the start trial keyword (default='start_trial')
		endTrialKey		--	the stop trial keyword (default='stop_trial')
		variableKey		--	the variable keyword (default='var')
		dtype			--	the numpy dtype to be used (default='|S128')
		maxN			--	the maximum number of files (i.e. subjects) to
							process (default=None)
		maxTrialId		--	the maximum number of trials to process
							(default=None)
		requireEndTrial	--	indicates whether an exception should be raised if a
							trial hasn't been neatly closed. Otherwise the trial
							is simply disregarded. (default=True)
		traceFolder		--	the folder to save the gaze traces. Traces are saved
							as 3d numpy arrays (x, y, pupil size) in .npy
							format. To start collecting traces, set
							`self.tracePhase` to a value. Use the value
							'__baseline__' to use an automatic baseline.
							(default='traces')
		offlineDriftCorr	--	Indicates whether coordinates should be
								corrected based on the drift-correction check,
								in case 'active' drift correction is disabled
								(as on the Eyelink 1000). (default=False)
		skipList		--	A list of trialIDs that should not be processed.
							(default=[])
		blinkReconstruct	--	Indicates whether pupil size should be
								interpolated during blinks. (default=False)
		only			--	A list of files that should be analyzed, or None
							to analyze all files. Mostly useful for debugging
							purposes. (default=None)
		acceptNonMatchingColumns	--	Indicates whether the current dm and a
										to-be-added dm may have non-identical
										column headers. If True, a warning is
										printed when the headers differ and the
										intersection of the column headers is
										used. If False, an exception is raised
										when the headers differ.
										(default=True)
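
		Example (a minimal sketch; the class name EyelinkAscFolderReader is
		assumed here and may differ in your copy of the source):

			>>> er = EyelinkAscFolderReader(path='data', ext='.asc', maxN=2)
			>>> print er.dm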
		"""

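		# Store parsing options and initialize trace-related state.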
		self.startTrialKey = startTrialKey
		self.endTrialKey = endTrialKey
		self.variableKey = variableKey
		self.dtype = dtype
		self.requireEndTrial = requireEndTrial
		self.maxTrialId = maxTrialId
		self.tracePhase = None
		self.traceFolder = traceFolder
		self.traceSmoothParams = None
		self.offlineDriftCorr = offlineDriftCorr
		self.driftAdjust = 0,0
		self.skipList = skipList
		self.blinkReconstruct = blinkReconstruct
		self.acceptNonMatchingColumns = acceptNonMatchingColumns
		self.traceImg = '--traceimg' in sys.argv
		self.tracePlot = '--traceplot' in sys.argv

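		# Walk through the data folder, parse each file with a matching
		# extension, and merge the resulting DataMatrix objects into self.dm.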
		print '\nScanning \'%s\'' % path
		self.dm = None
		nFile = 0
		for fname in os.listdir(path):
			if only is not None and fname not in only:
				print 'Skipping %s ...' % fname
				continue
			if os.path.splitext(fname)[1] == ext:
				sys.stdout.write('Reading %s ...' % fname)
				sys.stdout.flush()
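				# Parse the file into an array and wrap it in a DataMatrix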
				a = self.parseFile(os.path.join(path, fname))
				dm = DataMatrix(a)

				if self.dm is None:
					self.dm = dm
				else:

					# If column headers are not identical:
					if self.dm.columns() != dm.columns():

						# Determine warning message:
						warningMsg = "The column headers are not identical. Difference:\n%s"\
						% "\n".join(list(set(self.dm.columns()).\
							symmetric_difference(set(dm.columns()))))

						# Either print the warning or raise an exception,
						# depending on acceptNonMatchingColumns:
						if acceptNonMatchingColumns:
							print warningMsg
						else:
							raise Exception(warningMsg)

					self.dm += dm

				print '(%d rows)' % len(dm)
				nFile += 1
			if maxN is not None and nFile >= maxN:
				break
		print '%d files\n' % nFile