def start(self):
    """Begin collecting responses; stamp this ResponseCollector's onset time."""
    # Record when collection began and clear any stale response.
    self['onset_time'] = pylink.currentTime()
    self['resp'] = None
    # Register with the experiment so events get routed to this collector.
    experiment = getExperiment()
    experiment.response_collectors.append(self)
    self.running = True
def setDataViewerBG(self, display, screen_image_file=None, interest_area_file=None):
    """Register *display* as this trial's Data Viewer background.

    Writes the screenshot and interest-area files for the display (with
    auto-generated names when none are supplied) and bumps the shared
    background counter.
    """
    self.dataViewerBG = display
    root = getExperiment()['data_file_root_name']
    if screen_image_file:
        self.bgImageFile = screen_image_file
    else:
        self.bgImageFile = os.path.join(root + "_screenimages",
                                        "image_%s%sjpg" % (Trial.bgNumber, os.extsep))
    if interest_area_file:
        self.bgIAfile = interest_area_file
    else:
        self.bgIAfile = os.path.join(root + "_interest_areas",
                                     "image_%s%sias" % (Trial.bgNumber, os.extsep))
    data_dir = getExperiment()['data_directory']
    display.write_screen_image_file(os.path.join(data_dir, self.bgImageFile))
    display.write_interest_area_file(os.path.join(data_dir, self.bgIAfile))
    Trial.bgNumber += 1
def stopRecording():
    """Stop eyetracker recording (no-op when not currently recording)."""
    if not getExperiment().recording:
        return
    pylink.endRealTimeMode()
    # Brief pause so the tracker captures the tail end of the trial.
    pylink.msecDelay(100)
    getTracker().stopRecording()
    getExperiment().recording = False
def __init__(self, pattern="continuous", color=None, bgcolor=None):
    """Set up a pupil-calibration trial with the given target pattern and colors."""
    experiment = getExperiment()
    self.metadata = {"experiment": "pupil_calibration", "pattern": pattern}
    self.pattern = pattern
    # Fall back to the experiment-wide colors when none are given.
    self.color = color if color else experiment['color']
    self.bgcolor = bgcolor if bgcolor else experiment['bgcolor']
    self.target = VisionEgg.MoreStimuli.Target2D(size=(6.0, 6.0), color=self.color)
    self.targetVP = VisionEgg.Core.Viewport(screen=experiment.screen,
                                            stimuli=[self.target])
    self.rtPeriod = ("END_FILLER", "END_RT")
def stop(self):
    """Log the subject's response, and stop monitoring for responses."""
    if not self.running:
        return
    getExperiment().response_collectors.remove(self)
    if self.params.get('cresp', False) != False:
        # A correct response is defined, so score accuracy as 0/1.
        self.params['acc'] = int(self.params['resp'] == self.params['cresp'])
        print(" Response: %(resp)s (%(cresp)s)" % self.params)
    self.log()
    self.running = False
def __init__(self, pattern="continuous", color=None, bgcolor=None):
    """Initialize a pupil-calibration trial.

    Arguments:
    pattern: label for the target movement pattern, stored in the metadata.
    color: target color (RGB 3-tuple); defaults to the experiment 'color' parameter.
    bgcolor: background color; defaults to the experiment 'bgcolor' parameter.
    """
    self.metadata = {"experiment": "pupil_calibration", "pattern": pattern}
    self.pattern = pattern
    # Fall back to experiment-wide colors when none are supplied.
    self.color = color or getExperiment()['color']
    self.bgcolor = bgcolor or getExperiment()['bgcolor']
    self.target = VisionEgg.MoreStimuli.Target2D(size=(6.0, 6.0), color=self.color)
    self.targetVP = VisionEgg.Core.Viewport(screen=getExperiment().screen, stimuli=[self.target])
    # Messages delimiting the reaction-time period sent to the tracker.
    self.rtPeriod = ("END_FILLER", "END_RT")
def __init__(self):
    """Open the serial connection to the button box and reset its RT timer.

    Raises:
        RuntimeError: if the ``serial`` module is unavailable, or the serial
            port cannot be opened.
    """
    if not serial:
        # Original raised a string, which is a TypeError in Python >= 2.6;
        # raise a real exception instead.
        raise RuntimeError("serial module must be installed to use button box.")
    try:
        # pyserial numbers COM ports from 0, hence the -1.
        self.port = serial.Serial(getExperiment()['buttonbox_com'] - 1,
                                  getExperiment()['buttonbox_baud'],
                                  timeout=0)
    except serial.SerialException:
        raise RuntimeError("Error trying to connect to button box at com %s, baud %s"
                           % (getExperiment()['buttonbox_com'],
                              getExperiment()['buttonbox_baud']))
    self.resetTime = pylink.currentTime()
    self.port.write("e5")  # Reset the button box's rt timer
    self.buffer = []
def record(self, *args, **keywords):
    """Runs the trial displays while communicating with the eyetracker.

    TO-DO: currently contains a hack for stopping audio when trial is aborted.
    This needs to be done in a more general and implementation-independent way.
    """
    # NOTE(review): indentation reconstructed from a flattened source line; the
    # per-key delay inside the setTrialVar loops is read as pacing the link
    # messages -- confirm against the original layout.
    while 1:
        # Discard any stale key/button events before the trial starts.
        getTracker().flushKeybuttons(1)
        Trial.trialNumber += 1
        # Open a fresh logging scope for this trial.
        getLog().push()
        getLog().logAttributes(trialNumber=Trial.trialNumber)
        getLog().logAttributes(getattr(self, 'metadata', {}))
        # Tell the tracker (and, via the data file, Data Viewer) which trial is starting.
        getTracker().sendMessage('TRIALID %s' % (Trial.trialNumber))
        getTracker().drawText("Trial_%s\n" % (Trial.trialNumber), pos=(1, 20))
        getTracker().sendCommand("record_status_message 'TRIALID %s'" % (Trial.trialNumber))
        self.sendDataViewerBG()
        self.sendRTperiod()
        try:
            result = self.run(*args, **keywords)
        except TrialAbort, abort:
            # Shut down any response collectors still running.
            for rc in getExperiment().response_collectors:
                rc.stop()
            # HACK!!!
            # Need to find a good implementation-independent way of ensuring
            # that sound streams get stopped.
            pygame.mixer.stop()
            getExperiment().recording = False
            pylink.endRealTimeMode()
            getLog().logAttributes(trial_abort=abort.abortAction)
            # Mirror the logged data to tracker trial variables.
            for key, value in getLog().currentData().iteritems():
                setTrialVar(key, value)
                pygame.time.delay(1)
            getLog().pop()
            getTracker().sendMessage('TRIAL_RESULT %d' % (abort.abortAction))
            if abort.abortAction == pylink.REPEAT_TRIAL:
                pass  # fall through: the while loop reruns the trial
            elif abort.abortAction == pylink.TRIAL_ERROR:
                # Recalibrate, then rerun the trial via the while loop.
                calibrateTracker()
            elif abort.abortAction == pylink.SKIP_TRIAL:
                return None
            else:
                raise
        else:
            # Normal completion: log, mirror to trial variables, and report success.
            getLog().logAttributes(trial_abort=0)
            for key, value in getLog().currentData().iteritems():
                setTrialVar(key, value)
                pygame.time.delay(1)
            getLog().pop()
            getTracker().sendMessage('TRIAL_RESULT 0')
            return result
def stop(self):
    """Log the subject's response, and stop monitoring for responses"""
    if self.running:
        # Deregister from the experiment's active collectors.
        getExperiment().response_collectors.remove(self)
        if self.params.get('cresp', False) != False:
            # There is a correct response, so log accuracy
            self.params['acc'] = int(self.params['resp'] == self.params['cresp'])
            print " Response: %(resp)s (%(cresp)s)" % self.params
        self.log()
        self.running = False
def record(self, *args, **keywords):
    """Runs the trial displays while communicating with the eyetracker.

    TO-DO: currently contains a hack for stopping audio when trial is aborted.
    This needs to be done in a more general and implementation-independent way.
    """
    # NOTE(review): indentation reconstructed from a flattened source line; the
    # per-key delay inside the setTrialVar loops is read as pacing the link
    # messages -- confirm against the original layout.
    while 1:
        # Discard any stale key/button events before the trial starts.
        getTracker().flushKeybuttons(1)
        Trial.trialNumber += 1
        # Open a fresh logging scope for this trial.
        getLog().push()
        getLog().logAttributes(trialNumber=Trial.trialNumber)
        getLog().logAttributes(getattr(self, 'metadata', {}))
        # Announce the trial to the tracker for the data file / Data Viewer.
        getTracker().sendMessage('TRIALID %s' % (Trial.trialNumber))
        getTracker().drawText("Trial_%s\n" % (Trial.trialNumber), pos=(1, 20))
        getTracker().sendCommand("record_status_message 'TRIALID %s'" % (Trial.trialNumber))
        self.sendDataViewerBG()
        self.sendRTperiod()
        try:
            result = self.run(*args, **keywords)
        except TrialAbort, abort:
            # Shut down any response collectors still running.
            for rc in getExperiment().response_collectors:
                rc.stop()
            # HACK!!!
            # Need to find a good implementation-independent way of ensuring
            # that sound streams get stopped.
            pygame.mixer.stop()
            getExperiment().recording = False
            pylink.endRealTimeMode()
            getLog().logAttributes(trial_abort=abort.abortAction)
            # Mirror the logged data to tracker trial variables.
            for key, value in getLog().currentData().iteritems():
                setTrialVar(key, value)
                pygame.time.delay(1)
            getLog().pop()
            getTracker().sendMessage('TRIAL_RESULT %d' % (abort.abortAction))
            if abort.abortAction == pylink.REPEAT_TRIAL:
                pass  # fall through: the while loop reruns the trial
            elif abort.abortAction == pylink.TRIAL_ERROR:
                # Recalibrate, then rerun the trial via the while loop.
                calibrateTracker()
            elif abort.abortAction == pylink.SKIP_TRIAL:
                return None
            else:
                raise
        else:
            # Normal completion: log, mirror to trial variables, and report success.
            getLog().logAttributes(trial_abort=0)
            for key, value in getLog().currentData().iteritems():
                setTrialVar(key, value)
                pygame.time.delay(1)
            getLog().pop()
            getTracker().sendMessage('TRIAL_RESULT 0')
            return result
def __init__(self):
    """Open the serial connection to the button box and reset its RT timer.

    Raises:
        RuntimeError: if the ``serial`` module is unavailable, or the serial
            port cannot be opened.
    """
    if not serial:
        # Original raised a string, which is a TypeError in Python >= 2.6;
        # raise a real exception instead.
        raise RuntimeError("serial module must be installed to use button box.")
    try:
        # pyserial numbers COM ports from 0, hence the -1.
        self.port = serial.Serial(getExperiment()['buttonbox_com'] - 1,
                                  getExperiment()['buttonbox_baud'],
                                  timeout=0)
    except serial.SerialException:
        raise RuntimeError("Error trying to connect to button box at com %s, baud %s"
                           % (getExperiment()['buttonbox_com'],
                              getExperiment()['buttonbox_baud']))
    self.resetTime = pylink.currentTime()
    self.port.write("e5")  # Reset the button box's rt timer
    self.buffer = []
def drawToBuffer(self):
    """helper method, not directly called in EyeScript scripts in general.

    Draw the display into the back buffer. The buffer may then be swapped to
    the screen to show the display, or saved as a screenshot.
    """
    # (Removed dead profiling locals t1/t2 -- they were assigned from
    # pygame.time.get_ticks() but never used.)
    # Fill the back buffer with this display's background color.
    getExperiment().screen.parameters.bgcolor = self['bgcolor']
    getExperiment().screen.clear()
    # Draw the stimuli on top of the cleared background.
    self.viewport.draw()
def prepareStimulus(self, imagefile):
    """helper method, not directly called in EyeScript scripts in general.

    Takes a filename or file-like object and builds the viewport containing a
    TextureStimulus for the image, positioned per the 'align' and 'margins'
    parameters.
    """
    tex = Texture(pygame.image.load(imagefile))
    # Fraction of the free space placed before the image for each alignment.
    xfrac = {'left': 0, 'center': .5, 'right': 1}[self['align'][0]]
    yfrac = {'top': 0, 'center': .5, 'bottom': 1}[self['align'][1]]
    margins = self['margins']
    screen = getExperiment()['screen_size']
    xpos = xfrac * (screen[0] - tex.size[0] - margins[0] - margins[2]) + margins[0]
    ypos = yfrac * (screen[1] - tex.size[1] - margins[1] - margins[3]) + margins[1]
    self.viewport = fullViewport(
        [TextureStimulus(texture=tex, mipmaps_enabled=False, position=(xpos, ypos))]
    )
def setDataViewerBG(self, display, screen_image_file=None, interest_area_file=None):
    """Register *display* as this trial's EyeLink Data Viewer background.

    Writes the display's screenshot and interest-area files (auto-named from
    the shared background counter when no filenames are given).
    """
    self.dataViewerBG = display
    # Default filenames live under <data_file_root_name>_screenimages / _interest_areas.
    self.bgImageFile = screen_image_file or os.path.join(
        getExperiment()['data_file_root_name'] + "_screenimages",
        "image_%s%sjpg" % (Trial.bgNumber, os.extsep))
    self.bgIAfile = interest_area_file or os.path.join(
        getExperiment()['data_file_root_name'] + "_interest_areas",
        "image_%s%sias" % (Trial.bgNumber, os.extsep))
    display.write_screen_image_file(
        os.path.join(getExperiment()['data_directory'], self.bgImageFile))
    display.write_interest_area_file(
        os.path.join(getExperiment()['data_directory'], self.bgIAfile))
    # Bump the class-wide counter so the next background gets a unique name.
    Trial.bgNumber += 1
def __init__(self, logging=None, **params):
    """Set parameters for the ResponseCollector, and initialize the input
    device if it hasn't already been initialized.

    Possible parameters:
    cresp: the correct response. Default is False (no correct response).
        cresp=None means the correct response is to not respond; any response
        will be considered incorrect.
    possible_resp: list of responses accepted by the ResponseCollector; any
        responses not in possible_resp are ignored. Set
        possible_resp=['any'] to accept any response from the device.
    logging: list of strings naming parameters and variables to record in
        the log files; may include 'rt', 'acc', 'resp', 'cresp',
        'onset_time', or any parameters of the object. Default: [] if
        cresp == False (no correct response), or ['rt','acc','resp','cresp']
        if there is a correct response.
    name: the name identifying this ResponseCollector in the log files,
        e.g. name="tone_response" logs tone_response.rt, tone_response.resp, ...
    duration: time limit in ms for the subject to respond, 'infinite' for no
        limit, or 'stimulus' to accept responses only while the associated
        Display is running ('stimulus' equals 'infinite' when the collector
        belongs to no display).
    min_rt: time before a response will be accepted (e.g. to prevent
        accidental rapid key repeats being accepted).
    """
    self.params = getExperiment().params.copy()
    self.update(params)
    self.setdefault('name', self.__class__.__name__)
    self.setdefault('cresp', False)
    self.setdefault('duration', 'stimulus')
    # Use an identity check for None, and a real conditional expression in
    # place of the fragile ``and/or`` ternary idiom.
    if logging is None:
        self.logging = ['rt', 'acc', 'resp', 'cresp'] if self['cresp'] != False else []
    else:
        self.logging = logging
    self['rt'] = self['resp'] = self['acc'] = None
    # Initialize the matching device class (e.g. KeyboardDevice) if one exists.
    defaultDeviceName = self.__class__.__name__ + "Device"
    if hasattr(devices, defaultDeviceName):
        setUpDevice(getattr(devices, defaultDeviceName))
def __init__(self, logging=None, **params):
    """Set parameters for the ResponseCollector, and initialize the input
    device if it hasn't already been initialized.

    Possible parameters:
    cresp: the correct response. Default is False (no correct response).
        cresp=None means the correct response is to not respond; any response
        will be considered incorrect.
    possible_resp: list of responses accepted by the ResponseCollector; any
        responses not in possible_resp are ignored. Set
        possible_resp=['any'] to accept any response from the device.
    logging: list of strings naming parameters and variables to record in
        the log files; may include 'rt', 'acc', 'resp', 'cresp',
        'onset_time', or any parameters of the object. Default: [] if
        cresp == False (no correct response), or ['rt','acc','resp','cresp']
        if there is a correct response.
    name: the name identifying this ResponseCollector in the log files,
        e.g. name="tone_response" logs tone_response.rt, tone_response.resp, ...
    duration: time limit in ms for the subject to respond, 'infinite' for no
        limit, or 'stimulus' to accept responses only while the associated
        Display is running ('stimulus' equals 'infinite' when the collector
        belongs to no display).
    min_rt: time before a response will be accepted (e.g. to prevent
        accidental rapid key repeats being accepted).
    """
    # Inherit the experiment-wide parameters, then overlay the keywords.
    self.params = getExperiment().params.copy()
    self.update(params)
    self.setdefault('name', self.__class__.__name__)
    self.setdefault('cresp', False)
    self.setdefault('duration', 'stimulus')
    if logging == None:
        # Default: log response fields only when a correct response is defined.
        self.logging = self['cresp'] != False and ['rt', 'acc', 'resp', 'cresp'] or []
    else:
        self.logging = logging
    self['rt'] = self['resp'] = self['acc'] = None
    # Initialize the matching device class (e.g. KeyboardDevice) if one exists.
    defaultDeviceName = self.__class__.__name__ + "Device"
    if hasattr(devices, defaultDeviceName):
        setUpDevice(getattr(devices, defaultDeviceName))
def start(self):
    """Execute operations necessary when response collection is started.

    Checks that the eyetracker is recording, determines the eye being
    recorded from, and initializes the fixatedArea attribute to None.

    Raises:
        RuntimeError: if the eyetracker is not currently recording.
        TrialAbort: if neither eye is available.
    """
    ResponseCollector.start(self)
    if not getExperiment().recording:
        # Original raised a string, which is a TypeError in Python >= 2.6;
        # raise a real exception instead.
        raise RuntimeError("Must be recording to monitor gaze position!")
    self.eyeUsed = getTracker().eyeAvailable()
    if self.eyeUsed == 2:  # binocular recording
        # Fall back to the configured eye (default 1).
        self.eyeUsed = getExperiment().params.get('eye_used', 1)
    elif self.eyeUsed == -1:  # neither eye available!
        raise TrialAbort(pylink.TRIAL_ERROR)
    self.fixatedArea = None
def driftCorrect(color=None, bgcolor=None, target=None):
    """Draw a target and perform drift correction

    Arguments:
    color: An RGB 3-tuple specifying the foreground color for the drift
        correction screen. Default is the experiment 'color' parameter.
        E.g. color = (0,0,0) is black, color = (1,1,1) is white.
    bgcolor: An RGB 3-tuple specifying the background color. Default is the
        experiment 'bgcolor' parameter.
    target: A 2-tuple (x,y) specifying the coordinates of the fixation target
        to be displayed. Default is the center of the screen.
    """
    if getExperiment().recording:
        raise EyetrackerError(
            "Attempt to drift correct while recording in progress")
    # If no target is specified, the target is the center of the screen
    target = target or (getExperiment().params['screen_size'][0] / 2,
                        getExperiment().params['screen_size'][1] / 2)
    color = color or getExperiment().params['color']
    bgcolor = bgcolor or getExperiment().params['bgcolor']
    getExperiment().eyelinkGraphics.setCalibrationColors(color, bgcolor)
    # Hide the mouse during drift correction; restore its prior state after.
    mouseVisibility = pygame.mouse.set_visible(False)
    while 1:
        try:
            error = getTracker().doDriftCorrect(target[0], target[1], 1, 1)
            if error == 27:
                # ESC pressed: recalibrate, then retry the drift correction.
                calibrateTracker((color, bgcolor))
            else:
                # Any other code (including success) ends the loop.
                print "drift correct error %s" % error
                break
        except RuntimeError:
            # Tracker hiccup: keep retrying.
            pass
    pygame.mouse.set_visible(mouseVisibility)
def driftCorrect(color=None, bgcolor=None, target=None):
    """Display a fixation target and run the tracker's drift correction.

    Arguments:
    color: RGB 3-tuple foreground color; defaults to the experiment 'color'
        parameter. E.g. (0,0,0) is black, (1,1,1) is white.
    bgcolor: RGB 3-tuple background color; defaults to the experiment
        'bgcolor' parameter.
    target: (x, y) coordinates of the fixation target; defaults to the
        center of the screen.
    """
    experiment = getExperiment()
    if experiment.recording:
        raise EyetrackerError("Attempt to drift correct while recording in progress")
    if not target:
        # Default target: the center of the screen.
        target = (experiment.params['screen_size'][0] / 2,
                  experiment.params['screen_size'][1] / 2)
    color = color or experiment.params['color']
    bgcolor = bgcolor or experiment.params['bgcolor']
    experiment.eyelinkGraphics.setCalibrationColors(color, bgcolor)
    mouseVisibility = pygame.mouse.set_visible(False)
    finished = False
    while not finished:
        try:
            error = getTracker().doDriftCorrect(target[0], target[1], 1, 1)
            if error == 27:
                # ESC pressed: recalibrate and try again.
                calibrateTracker((color, bgcolor))
            else:
                print("drift correct error %s" % error)
                finished = True
        except RuntimeError:
            pass
    pygame.mouse.set_visible(mouseVisibility)
def start(self):
    """Execute operations necessary when response collection is started.

    Checks that the eyetracker is recording, determines the eye being
    recorded from, and initializes the fixatedArea attribute to None.

    Raises:
        RuntimeError: if the eyetracker is not currently recording.
        TrialAbort: if neither eye is available.
    """
    ResponseCollector.start(self)
    if not getExperiment().recording:
        # Original raised a string, which is a TypeError in Python >= 2.6;
        # raise a real exception instead.
        raise RuntimeError("Must be recording to monitor gaze position!")
    self.eyeUsed = getTracker().eyeAvailable()
    if self.eyeUsed == 2:  # binocular recording
        # Fall back to the configured eye (default 1).
        self.eyeUsed = getExperiment().params.get('eye_used', 1)
    elif self.eyeUsed == -1:  # neither eye available!
        raise TrialAbort(pylink.TRIAL_ERROR)
    self.fixatedArea = None
def write_screen_image_file(self, filename):
    """helper method, not directly called in EyeScript scripts in general.

    Render the display into the buffer and save it as an image file,
    creating the destination directory if necessary.

    Argument: the filename for the screenshot
    """
    self.drawToBuffer()
    screenshot = getExperiment().screen.get_framebuffer_as_image()
    target_dir = os.path.dirname(filename)
    if target_dir and not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    screenshot.save(filename)
def startRecording():
    """Commence eyetracker recording and verify that it's working."""
    getTracker().resetData()
    # NOTE(review): the four flags presumably enable file/link samples and
    # events -- confirm against the pylink startRecording documentation.
    getTracker().startRecording(1, 1, 1, 1)
    getExperiment().recording = True
    # Enter realtime mode for accurate timing (100 ms grace period).
    pylink.beginRealTimeMode(100)
    try:
        if not getTracker().waitForBlockStart(1000, 1, 1):
            raise Exception("waitForBlockStart failed")
    except Exception:
        # No data arriving over the link: notify, wind down, abort the trial.
        getTracker().drawText("LINK DATA NOT RECEIVED!", pos=(1, 20))
        pylink.endRealTimeMode()
        pylink.msecDelay(2000)
        getTracker().stopRecording()
        print "LINK DATA NOT RECEIVED!"
        raise TrialAbort(pylink.TRIAL_ERROR)
def startRecording():
    """Begin eyetracker recording and confirm data is arriving over the link."""
    getTracker().resetData()
    getTracker().startRecording(1, 1, 1, 1)
    getExperiment().recording = True
    pylink.beginRealTimeMode(100)
    try:
        blockStarted = getTracker().waitForBlockStart(1000, 1, 1)
        if not blockStarted:
            raise Exception("waitForBlockStart failed")
    except Exception:
        # No data over the link: notify the operator, wind down recording,
        # and abort the trial.
        getTracker().drawText("LINK DATA NOT RECEIVED!", pos=(1, 20))
        pylink.endRealTimeMode()
        pylink.msecDelay(2000)
        getTracker().stopRecording()
        print("LINK DATA NOT RECEIVED!")
        raise TrialAbort(pylink.TRIAL_ERROR)
def handleEvent(self, event):
    """Poll the tracker for gaze data, or simulate a response when no
    tracker is attached."""
    if not getExperiment().recording:
        self.stop()
        return False
    if not getTracker():
        # No eyetracker attached: so the experiment can be tested without
        # one, fabricate a response 2000 ms after onset.
        if pylink.currentTime() > self['onset_time'] + 2000:
            self['resp'] = self['possible_resp'] and self['possible_resp'][0]
            self['rt'] = 2000
            self['rt_time'] = self['onset_time'] + 2000
            self.stop()
            return True
        return None
    # Tracker present: abort if it stopped recording, else poll gaze events.
    status = getTracker().isRecording()
    if status != pylink.TRIAL_OK:
        raise TrialAbort(status)
    return self.checkEyeLink()
def handleEvent(self, event):
    """Check the tracker for gaze data (or fake a response without one)."""
    if not getExperiment().recording:
        self.stop()
        return False
    if getTracker():
        # Abort the trial if the tracker has stopped recording.
        action = getTracker().isRecording()
        if action != pylink.TRIAL_OK:
            raise TrialAbort(action)
        return self.checkEyeLink()
    else:
        # So that the experiment can be tested without the eyetracker,
        # just fake a response after 2000 milliseconds.
        # NOTE(review): indentation reconstructed -- `return True` is read as
        # firing only once the fake response has been recorded.
        if pylink.currentTime() > self['onset_time'] + 2000:
            self['resp'] = self['possible_resp'] and self['possible_resp'][0]
            self['rt'] = 2000
            self['rt_time'] = self['onset_time'] + 2000
            self.stop()
            return True
def __init__(self, stimulus=[], logging=None, **params):
    """Set up the VisionEgg viewport for eventual display, and set this
    object's parameters.

    Arguments (all except 'stimulus' are optional keyword arguments):
    stimulus: a list of VisionEgg stimuli to display (may be a different
        type for subclasses).
    bgcolor: background color, over which the stimuli will be drawn; an RGB 3-tuple.
    cresp: the correct response. Default is False (no correct response).
        cresp=None means the correct response is to not respond.
    possible_resp: list of responses accepted by the ResponseCollector; any
        others are ignored. Set possible_resp=['any'] to accept anything.
    logging: list of strings naming parameters/variables to record in the
        log files ('rt','acc','resp','cresp','onset_time','swap_time', or
        any parameter of the object). Default: ['rt','acc','resp','cresp']
        if there is a correct response, else [].
    name: the name identifying this display in the log files.
    duration: maximum time to show the display (ms), or 'infinite'. The
        stimulus remains on screen after the display terminates, until the
        next display is run.
    min_rt: time before a response will be accepted (e.g. to prevent
        accidental rapid key repeats being accepted).
    interest_areas: list of IA objects for Data Viewer analysis and
        gaze-contingent experiments; set to True to calculate them
        automatically (usually for text displays).
    background_for: Trial object for which this display defines the
        background image and interest areas in the EyeLink Data Viewer.
    interest_area_file: filename (within data_directory) for saving the
        interest areas. Default: a unique 'image_'-prefixed ID if
        background_for is set; otherwise no file is written.
    screen_image_file: filename (within data_directory) for saving the
        screenshot. Default: a unique 'image_'-prefixed ID if background_for
        is set; otherwise no file is written.
    response_collectors: list of ResponseCollector objects handling the
        subject's responses, started the instant this display is shown; the
        display stops when the first collector (if any) returns a response.
    response_device: name of a class in response_collectors.py (e.g.
        Keyboard, ButtonBox) used to build a single response collector when
        response_collectors is not given. That collector receives the
        keywords passed here, does not log on its own, and the display
        inherits its data (rt, resp, acc, ...) except where the display has
        a same-named parameter (e.g. onset_time).
    """
    # Parameters not set with keywords to __init__ will default to the experiment
    # object's parameters (which default to the values in defaults.py)
    self.params = getExperiment().params.copy()
    self.update(params)
    self.setdefault('name', self.__class__.__name__)
    # If the response_collectors keyword wasn't set, then create a response
    # collector of the class specified by response_device, with the keywords
    # passed to __init__
    if self.get('response_collectors', None) == None:
        self['response_collectors'] = [self['response_device'](**params.copy())]
        # Remember that we automatically created the response collector.
        # If auto_response_collector == True then the display will inherit the
        # response collector's data (see the __getitem__ method below)
        self.auto_response_collector = True
        # The display can log the data it inherits from the response collector
        # so having the response collector do logging would be redundant
        self['response_collectors'][0].logging = []
    else:
        self.auto_response_collector = False
    self.params.setdefault('duration', "infinite")
    if logging == None:
        # Defaults for logging: if cresp was set and we can log data inherited
        # from an automatically created response collector, then log rt, acc,
        # resp, and cresp; otherwise no logging.
        self.logging = (self.get('cresp', False) != False and self.auto_response_collector) and ['rt', 'acc', 'resp', 'cresp'] or []
    else:
        self.logging = logging
    # prepareStimulus is defined differently for each child class
    self.prepareStimulus(stimulus)
    if self.get('background_for', None):
        # If this display is going to be the background image for a trial in
        # the Data Viewer, then create unique names for the screenshot and
        # interest area files if the names were not already set through keywords
        Display.bgNumber += 1
        self.setdefault('screen_image_file',
                        os.path.join(getExperiment()['data_file_root_name'] + "_screenimages",
                                     "image_%s%sjpg" % (Display.bgNumber, os.extsep)))
        if self.get('interest_areas', None):
            self.setdefault('interest_area_file',
                            os.path.join(getExperiment()['data_file_root_name'] + "_interest_areas",
                                         "image_%s%sias" % (Display.bgNumber, os.extsep)))
        # Let the trial know that this display will be its background image
        self['background_for'].dataViewerBG = self
    if self.get('screen_image_file', None):
        self.write_screen_image_file(os.path.join(getExperiment()['data_directory'], self['screen_image_file']))
    if self.get('interest_area_file', None):
        self.write_interest_area_file(os.path.join(getExperiment()['data_directory'], self['interest_area_file']))
def prepareStimulus(self, text):
    """helper method, not directly called in EyeScript scripts in general.

    Takes a string and returns a list of VisionEgg Text objects, wrapped as
    necessary. prepareStimulus also calculates interest areas if the
    interest_area_file parameter has been set (and if interest areas have
    not already been set).
    """
    # NOTE(review): indentation reconstructed from a flattened source line --
    # confirm loop/branch nesting against the original layout.
    self.text = text
    margins = self['margins']
    align = self['align']
    #font_desc_string = (pygame.font.match_font(self['font_name'], self['bold'], self['italic']) or self['font_name']) + " " + str(self['font_size'])
    font_desc_string = str(self['font_name']) + " " + str(self['font_size'])
    textparams = {'font_descr_string': font_desc_string, 'color': self['color']}
    # height will be the total height of all the lines of text
    height = 0
    stimulus = []
    # Blank lines become a single space so they still occupy a text line.
    lines = [line.rstrip('\n') or " " for line in text.split('\n')]
    # Reversed so lines can be consumed (and wrapped remainders re-pushed) with pop().
    lines.reverse()
    self.wrap = False
    screenwidth = getExperiment()['screen_size'][0] - margins[0] - margins[2]
    wordwidths = []    # per line: width of each word
    rightedges = []    # per line: right edge of the text up to each word
    # Interest areas are computed when requested explicitly or implied by an
    # interest_area_file (and not already supplied).
    calculateIAs = self.get('interest_areas', False) == True or (self.get('interest_area_file', False) and self.get('interest_areas', True) == True)
    while lines:
        line = lines.pop()
        linelen = len(line.split())
        wordwidths.append([])
        rightedges.append([])
        lineText = None
        if calculateIAs:
            # Exact (slow) path: measure word-by-word to get per-word edges.
            for i in range(linelen):
                # Find the width of the first i+1 words
                try:
                    firstwords = PangoText(text=line.rsplit(None, linelen - i - 1)[0], **textparams)
                except TextureTooLargeError:
                    if i == 0:
                        raise WordTooLargeError(line.split()[0])
                    break
                rightedge = firstwords.parameters.size[0]
                if rightedge > screenwidth:
                    if i == 0:
                        raise WordTooLargeError(line.split()[0])
                    # Wrap: push the words that didn't fit back as a new line.
                    lines.append(line.split(None, i)[-1])
                    break
                wordwidths[-1].append(PangoText(text=line.split()[i], **textparams).parameters.size[0])
                rightedges[-1].append(rightedge)
                lineText = firstwords
        else:
            # If we're not calculating interest areas then try to estimate
            # where the line breaks should be from the width of the text.
            # This is generally much faster than the method above.
            testString = line
            wordsFitting = ""
            wordsFittingWidth = 0
            wordsFittingCount = 0
            wordsNotFittingCount = 1 + len(testString.split())
            while True:
                try:
                    testText = PangoText(text=testString, **textparams)
                except TextureTooLargeError:
                    wordsNotFitting = testString
                    wordsNotFittingWidth = None
                    wordsNotFittingCount = len(testString.split())
                    if wordsNotFittingCount == 1 + wordsFittingCount:
                        break
                    # Width unknown: bisect the character range instead.
                    runOffPoint = (len(wordsNotFitting) + len(wordsFitting)) / 2
                else:
                    testSize = testText.parameters.size[0]
                    if testSize > screenwidth:
                        # wordsNotFitting holds the smallest part of the line so
                        # far that we've found doesn't fit.
                        wordsNotFitting = testString
                        wordsNotFittingWidth = testSize
                        wordsNotFittingCount = len(testString.split())
                    else:
                        # wordsFitting holds the largest part of the line so far
                        # that we've found fits on the screen.
                        wordsFitting = testString
                        wordsFittingWidth = testSize
                        wordsFittingCount = len(testString.split())
                        lineText = testText
                    if wordsNotFittingCount <= 1 + wordsFittingCount:
                        # We've found the largest part of the line that will fit
                        # on the screen -- if we add one more word it won't fit.
                        # wordsNotFittingCount could be equal to wordsFittingCount
                        # if we stripped whitespace off the end of a line to make
                        # it fit.
                        break
                    # Interpolate between wordsNotFitting and wordsFitting to
                    # estimate the index of the first character that runs off
                    # the screen.
                    if wordsNotFittingWidth:
                        # If we know the width of wordsNotFitting then
                        # interpolate based on the widths of wordsFitting and
                        # wordsNotFitting.
                        runOffPoint = int((len(wordsNotFitting) - len(wordsFitting)) * (screenwidth - wordsFittingWidth) / (wordsNotFittingWidth - wordsFittingWidth)) + len(wordsFitting)
                    else:
                        # If we don't know the width of wordsNotFitting (because
                        # of a TextureTooLargeError) then extrapolate based on
                        # the width of wordsFitting.
                        runOffPoint = min(int(len(wordsFitting) * screenwidth / wordsFittingWidth), len(wordsNotFitting))
                # Find the word containing our estimate of the first character
                # running off the screen.
                try:
                    runOffWord = line[runOffPoint:].split()[0]
                except IndexError:
                    # The line ends in whitespace so the split yields the empty string
                    runOffWord = ""
                testString = line[:runOffPoint] + runOffWord
                # Have we already tested a string of this length?
                if len(testString) >= len(wordsNotFitting):
                    # Then test the words up to the first word known not to fit.
                    testString = line.rsplit(None, linelen - (wordsNotFittingCount - 1))[0]
            if wordsFitting == "" and line:
                raise WordTooLargeError(line.split()[0])
            if wordsFitting != line:
                # Add the part that didn't fit as a new line
                lines.append(line.split(None, wordsFittingCount)[-1])
        stimulus.append(lineText or PangoText(text=" ", **textparams))
    height = sum([lineText.parameters.size[1] for lineText in stimulus]) + self['vertical_spacing'] * (len(stimulus) - 1)
    self['singleline_height'] = stimulus[0].parameters.size[1]
    # ypos tracks the top edge of the current line, in VisionEgg's
    # bottom-origin coordinates.
    if align[1] == 'top':
        ypos = getExperiment()['screen_size'][1] - margins[1]
    elif align[1] == 'bottom':
        ypos = margins[3] + height
    elif align[1] == 'center':
        ypos = (getExperiment()['screen_size'][1] + margins[3] - margins[1] + height) / 2
    else:
        # NOTE(review): string exception -- invalid in modern Python; should be
        # a real Exception subclass.
        raise "TextDisplay %s: invalid argument for align: %s" % (self['name'], align[1])
    self.setdefault('interest_area_labels', text.split())
    self['interest_areas'] = []
    for linenum, textline in enumerate(stimulus):
        ypos -= textline.parameters.size[1]
        linewidth = textline.parameters.size[0]
        # Calculate the position of the line on the screen.
        # xpos is the x coordinate of the left edge of the line.
        if align[0] == 'left':
            xpos = margins[0]
        elif align[0] == 'right':
            xpos = getExperiment()['screen_size'][0] - margins[2] - linewidth
        elif align[0] == 'center':
            xpos = (getExperiment()['screen_size'][0] - margins[2] + margins[0] - linewidth) / 2
        textline.set(position=(xpos, ypos))
        # Calculate the coordinates of the interest areas
        if calculateIAs:
            for wordnum, rightedge in enumerate(rightedges[linenum]):
                rightGazeError = xpos + rightedge + self['buffer_size']
                if wordnum == len(rightedges[linenum]) - 1:
                    iaRight = rightGazeError
                else:
                    # Midpoint of the gap to the next word.
                    rightMidspace = xpos + (rightedges[linenum][wordnum + 1] - wordwidths[linenum][wordnum + 1] + rightedge) / 2
                    iaRight = (self['ia_fill'] and [rightMidspace] or [min(rightMidspace, rightGazeError)])[0]
                leftGazeError = xpos + rightedge - wordwidths[linenum][wordnum] - self['buffer_size']
                if wordnum == 0:
                    iaLeft = leftGazeError
                else:
                    leftMidspace = xpos + (rightedge - wordwidths[linenum][wordnum] + rightedges[linenum][wordnum - 1]) / 2
                    iaLeft = (self['ia_fill'] and [leftMidspace] or [max(leftMidspace, leftGazeError)])[0]
                # Vertical extents (interest areas use top-origin coordinates).
                topGazeError = getExperiment()['screen_size'][1] - ypos - textline.parameters.size[1] - self['buffer_size']
                if linenum == 0:
                    iaTop = topGazeError
                else:
                    topMidspace = getExperiment()['screen_size'][1] - ypos - textline.parameters.size[1] - self['vertical_spacing'] / 2
                    iaTop = (self['ia_fill'] and [topMidspace] or [max(topMidspace, topGazeError)])[0]
                bottomGazeError = getExperiment()['screen_size'][1] - ypos + self['buffer_size']
                if linenum == len(stimulus) - 1:
                    iaBottom = bottomGazeError
                else:
                    bottomMidspace = getExperiment()['screen_size'][1] - ypos + self['vertical_spacing'] / 2
                    iaBottom = (self['ia_fill'] and [bottomMidspace] or [min(bottomMidspace, bottomGazeError)])[0]
                if self.get('interest_area_labels', None):
                    try:
                        iaLabel = self['interest_area_labels'][len(self['interest_areas'])]
                    except IndexError:
                        raise InterestAreaLabelError("Not enough interest area labels provided for text: %s" % text)
                else:
                    iaLabel = text.split()[len(self['interest_areas'])]
                self['interest_areas'].append(InterestArea(Rectangle((iaLeft, iaTop, iaRight - iaLeft, iaBottom - iaTop)), label=iaLabel))
        ypos -= self['vertical_spacing']
    self.viewport = fullViewport(stimulus)
def fullViewport(stimuli):
    """Build a VisionEgg Viewport that spans the entire experiment screen.

    Helper function; not normally called directly from EyeScript scripts.

    stimuli -- list of VisionEgg stimulus objects to render in the viewport
    """
    screen = getExperiment().screen
    return VisionEgg.Core.Viewport(screen=screen, size=screen.size, stimuli=stimuli)
def __init__(self, stimuli=None, order='random', list_number=None):
    """Initialize the list, choosing which counterbalancing list to use.

    stimuli -- stimuli to populate the list with (default: empty list)
    order -- presentation order, passed through to StimList (default 'random')
    list_number -- which list this subject gets; if None, the experiment's
                   'subject' parameter is used instead

    Fixes: the original used a mutable default argument (stimuli=[]), which
    is shared across all calls and could be mutated downstream; and compared
    to None with == instead of `is`.
    """
    if stimuli is None:
        stimuli = []  # fresh list per call; a shared [] default could be mutated downstream
    if list_number is None:
        # Default the list number to the subject number — presumably for
        # counterbalancing across subjects; confirm against callers.
        self.list_number = getExperiment()['subject']
    else:
        self.list_number = list_number
    StimList.__init__(self, stimuli=stimuli, order=order)
def gcFixation(target=None, color=None, bgcolor=None, duration=None, buffer_size=None):
    """Display a fixation point and block until the subject has fixated on it.

    Waits until gaze has remained within a square region around ``target``
    for at least ``duration`` milliseconds.  The eyetracker must already be
    recording when this is called.

    Arguments (each falls back to the corresponding experiment parameter):
    target -- (x, y) screen coordinates of the fixation point
              (default: 'fixation_target' param, else screen center)
    color, bgcolor -- calibration target and background colors
    duration -- minimum fixation time in ms ('min_fixation' param, else 800)
    buffer_size -- half-width in pixels of the square gaze region
                   ('gcbuffer_size' param)

    Raises EyetrackerError if not recording, and TrialAbort if recording
    stops or no eye is available.
    """
    if buffer_size is None:
        buffer_size = getExperiment()['gcbuffer_size']
    if not getExperiment().recording:
        raise EyetrackerError("Must be recording when gcFixation is called!")
    target = target or getExperiment().params.get(
        'fixation_target',
        (getExperiment().params['screen_size'][0] / 2,
         getExperiment().params['screen_size'][1] / 2))
    color = color or getExperiment().params['color']
    bgcolor = bgcolor or getExperiment().params['bgcolor']
    duration = duration or getExperiment().params.get('min_fixation', 800)
    getExperiment().eyelinkGraphics.setCalibrationColors(color, bgcolor)
    getExperiment().eyelinkGraphics.draw_cal_target(target[0], target[1])
    if getTracker():
        eyeUsed = getTracker().eyeAvailable()
        if eyeUsed == 2:  # binocular recording: fall back to the configured eye
            eyeUsed = getExperiment().params.get('eye_used', 1)
        elif eyeUsed == -1:  # neither eye available!
            raise TrialAbort(pylink.TRIAL_ERROR)
        getTracker().resetData()
        # Square region around the target within which gaze counts as fixating
        fixarea = pygame.Rect(target[0] - buffer_size, target[1] - buffer_size,
                              2 * buffer_size, 2 * buffer_size)
        infixarea = False
        while 1:
            action = getTracker().isRecording()
            if action != pylink.TRIAL_OK:
                raise TrialAbort(action)
            if infixarea:
                # A fixation has started inside the region; poll samples until
                # it has lasted `duration` ms or gaze leaves the region.
                sample = getTracker().getNewestSample()
                sampledata = sample and (
                    (eyeUsed == 1 and sample.isRightSample() and sample.getRightEye()) or
                    (eyeUsed == 0 and sample.isLeftSample() and sample.getLeftEye()) or
                    False)
                if sampledata:
                    if fixarea.collidepoint(sampledata.getGaze()):
                        if sample.getTime() - fixtime > duration:
                            break
                    else:
                        # Gaze wandered out of the region; start over
                        infixarea = False
                        getTracker().resetData()
            else:
                # Wait for a fixation event whose gaze lands inside the region
                eventType = getTracker().getNextData()
                if eventType == pylink.STARTFIX or eventType == pylink.FIXUPDATE or eventType == pylink.ENDFIX:
                    event = getTracker().getFloatData()
                    if ((event.getType() == pylink.STARTFIX and
                         event.getEye() == eyeUsed and
                         fixarea.collidepoint(event.getStartGaze())) or
                        ((event.getType() == pylink.FIXUPDATE or event.getType() == pylink.ENDFIX) and
                         event.getEye() == eyeUsed and
                         fixarea.collidepoint(event.getAverageGaze()))):
                        # fixtime is set before infixarea, so the sample-polling
                        # branch above can never see it unbound.
                        fixtime = event.getStartTime()
                        infixarea = True
    else:
        # No tracker (e.g. dummy mode): just show the target for `duration` ms
        pygame.time.delay(duration)
    getExperiment().eyelinkGraphics.erase_cal_target()
def run(self):
    """Run the pupil calibration: record while the subject tracks a target.

    'continuous' pattern: the target sweeps a smooth Lissajous-like path;
    the first quarter-cycle is warm-up filler (END_FILLER marks its end).
    'discrete' pattern: the target jumps among 13 grid points, 1500 ms each;
    the first point is repeated at the end so it can be discarded.

    Raises ValueError for any other pattern.  (The original raised a string,
    which is a TypeError at runtime on Python 2.6+.)
    """
    startRecording()
    starttime = pylink.currentTime()
    getExperiment().screen.parameters.bgcolor = self.bgcolor
    getTracker().sendMessage("SYNCTIME")
    if self.pattern == "continuous":
        iteration = 0
        filler = False
        while iteration <= 1.25:
            if not filler and iteration >= 0.25:
                # This is the point where we're actually going to use the data.
                # Before this was just to get the subject warmed up.
                filler = True
                getTracker().sendMessage("END_FILLER")
            checkForResponse()
            t = (pylink.currentTime() - starttime) * 0.00012
            t = t - sin(8 * t) / 64
            iteration = t / (2 * pi)
            getExperiment().eyelinkGraphics.draw_cal_target(
                getExperiment()['screen_size'][0] / 2 + 153 * sin(t) + 204 * sin(9 * t),
                getExperiment()['screen_size'][1] / 2 + 153 * cos(t) + 204 * cos(9 * t))
    elif self.pattern == "discrete":
        getExperiment().eyelinkGraphics.setCalibrationColors(self.color, self.bgcolor)
        targets = []
        # Centers of a 3x3 grid of screen cells...
        for i in range(3):
            for j in range(3):
                targets.append([(i + 0.5) * getExperiment()['screen_size'][0] / 3,
                                (j + 0.5) * getExperiment()['screen_size'][1] / 3])
        # ...plus the 4 interior grid intersections
        for i in range(1, 3):
            for j in range(1, 3):
                targets.append([i * getExperiment()['screen_size'][0] / 3,
                                j * getExperiment()['screen_size'][1] / 3])
        random.shuffle(targets)
        # Redo the first fixation point at the end so we can discard the first one
        targets.append(targets[0])
        for i, target in enumerate(targets):
            if i == 1:
                getTracker().sendMessage("END_FILLER")
            getExperiment().eyelinkGraphics.draw_cal_target(*target)
            starttime = pylink.currentTime()
            while pylink.currentTime() < 1500 + starttime:
                checkForResponse()
    else:
        raise ValueError("PupilCalibrationTrial: bad argument to pattern: %s" % self.pattern)
    getTracker().sendMessage("END_RT")
    stopRecording()
def run(self):
    """Run the pupil calibration: record while the subject tracks a target.

    'continuous' pattern: the target sweeps a smooth Lissajous-like path;
    the first quarter-cycle is warm-up filler (END_FILLER marks its end).
    'discrete' pattern: the target jumps among 13 grid points, 1500 ms each;
    the first point is repeated at the end so it can be discarded.

    Raises ValueError for any other pattern.  (The original raised a string,
    which is a TypeError at runtime on Python 2.6+.)
    """
    startRecording()
    starttime = pylink.currentTime()
    getExperiment().screen.parameters.bgcolor = self.bgcolor
    getTracker().sendMessage("SYNCTIME")
    if self.pattern == "continuous":
        iteration = 0
        filler = False
        while iteration <= 1.25:
            if not filler and iteration >= 0.25:
                # This is the point where we're actually going to use the data.
                # Before this was just to get the subject warmed up.
                filler = True
                getTracker().sendMessage("END_FILLER")
            checkForResponse()
            t = (pylink.currentTime() - starttime) * 0.00012
            t = t - sin(8 * t) / 64
            iteration = t / (2 * pi)
            getExperiment().eyelinkGraphics.draw_cal_target(
                getExperiment()['screen_size'][0] / 2 + 153 * sin(t) + 204 * sin(9 * t),
                getExperiment()['screen_size'][1] / 2 + 153 * cos(t) + 204 * cos(9 * t))
    elif self.pattern == "discrete":
        getExperiment().eyelinkGraphics.setCalibrationColors(self.color, self.bgcolor)
        targets = []
        # Centers of a 3x3 grid of screen cells...
        for i in range(3):
            for j in range(3):
                targets.append([(i + 0.5) * getExperiment()['screen_size'][0] / 3,
                                (j + 0.5) * getExperiment()['screen_size'][1] / 3])
        # ...plus the 4 interior grid intersections
        for i in range(1, 3):
            for j in range(1, 3):
                targets.append([i * getExperiment()['screen_size'][0] / 3,
                                j * getExperiment()['screen_size'][1] / 3])
        random.shuffle(targets)
        # Redo the first fixation point at the end so we can discard the first one
        targets.append(targets[0])
        for i, target in enumerate(targets):
            if i == 1:
                getTracker().sendMessage("END_FILLER")
            getExperiment().eyelinkGraphics.draw_cal_target(*target)
            starttime = pylink.currentTime()
            while pylink.currentTime() < 1500 + starttime:
                checkForResponse()
    else:
        raise ValueError("PupilCalibrationTrial: bad argument to pattern: %s" % self.pattern)
    getTracker().sendMessage("END_RT")
    stopRecording()
def __init__(self, stimuli=None, order='random', list_number=None):
    """Initialize the list, choosing which counterbalancing list to use.

    stimuli -- stimuli to populate the list with (default: empty list)
    order -- presentation order, passed through to StimList (default 'random')
    list_number -- which list this subject gets; if None, the experiment's
                   'subject' parameter is used instead

    Fixes: the original used a mutable default argument (stimuli=[]), which
    is shared across all calls and could be mutated downstream; and compared
    to None with == instead of `is`.
    """
    if stimuli is None:
        stimuli = []  # fresh list per call; a shared [] default could be mutated downstream
    if list_number is None:
        # Default the list number to the subject number — presumably for
        # counterbalancing across subjects; confirm against callers.
        self.list_number = getExperiment()['subject']
    else:
        self.list_number = list_number
    StimList.__init__(self, stimuli=stimuli, order=order)
def gcFixation(target=None, color=None, bgcolor=None, duration=None, buffer_size=None):
    """Display a fixation point and block until the subject has fixated on it.

    Waits until gaze has remained within a square region around ``target``
    for at least ``duration`` milliseconds.  The eyetracker must already be
    recording when this is called.

    Arguments (each falls back to the corresponding experiment parameter):
    target -- (x, y) screen coordinates of the fixation point
              (default: 'fixation_target' param, else screen center)
    color, bgcolor -- calibration target and background colors
    duration -- minimum fixation time in ms ('min_fixation' param, else 800)
    buffer_size -- half-width in pixels of the square gaze region
                   ('gcbuffer_size' param)

    Raises EyetrackerError if not recording, and TrialAbort if recording
    stops or no eye is available.
    """
    if buffer_size is None:
        buffer_size = getExperiment()['gcbuffer_size']
    if not getExperiment().recording:
        raise EyetrackerError("Must be recording when gcFixation is called!")
    target = target or getExperiment().params.get(
        'fixation_target',
        (getExperiment().params['screen_size'][0] / 2,
         getExperiment().params['screen_size'][1] / 2))
    color = color or getExperiment().params['color']
    bgcolor = bgcolor or getExperiment().params['bgcolor']
    duration = duration or getExperiment().params.get('min_fixation', 800)
    getExperiment().eyelinkGraphics.setCalibrationColors(color, bgcolor)
    getExperiment().eyelinkGraphics.draw_cal_target(target[0], target[1])
    if getTracker():
        eyeUsed = getTracker().eyeAvailable()
        if eyeUsed == 2:  # binocular recording: fall back to the configured eye
            eyeUsed = getExperiment().params.get('eye_used', 1)
        elif eyeUsed == -1:  # neither eye available!
            raise TrialAbort(pylink.TRIAL_ERROR)
        getTracker().resetData()
        # Square region around the target within which gaze counts as fixating
        fixarea = pygame.Rect(target[0] - buffer_size, target[1] - buffer_size,
                              2 * buffer_size, 2 * buffer_size)
        infixarea = False
        while 1:
            action = getTracker().isRecording()
            if action != pylink.TRIAL_OK:
                raise TrialAbort(action)
            if infixarea:
                # A fixation has started inside the region; poll samples until
                # it has lasted `duration` ms or gaze leaves the region.
                sample = getTracker().getNewestSample()
                sampledata = sample and (
                    (eyeUsed == 1 and sample.isRightSample() and sample.getRightEye()) or
                    (eyeUsed == 0 and sample.isLeftSample() and sample.getLeftEye()) or
                    False)
                if sampledata:
                    if fixarea.collidepoint(sampledata.getGaze()):
                        if sample.getTime() - fixtime > duration:
                            break
                    else:
                        # Gaze wandered out of the region; start over
                        infixarea = False
                        getTracker().resetData()
            else:
                # Wait for a fixation event whose gaze lands inside the region
                eventType = getTracker().getNextData()
                if eventType == pylink.STARTFIX or eventType == pylink.FIXUPDATE or eventType == pylink.ENDFIX:
                    event = getTracker().getFloatData()
                    if ((event.getType() == pylink.STARTFIX and
                         event.getEye() == eyeUsed and
                         fixarea.collidepoint(event.getStartGaze())) or
                        ((event.getType() == pylink.FIXUPDATE or event.getType() == pylink.ENDFIX) and
                         event.getEye() == eyeUsed and
                         fixarea.collidepoint(event.getAverageGaze()))):
                        # fixtime is set before infixarea, so the sample-polling
                        # branch above can never see it unbound.
                        fixtime = event.getStartTime()
                        infixarea = True
    else:
        # No tracker (e.g. dummy mode): just show the target for `duration` ms
        pygame.time.delay(duration)
    getExperiment().eyelinkGraphics.erase_cal_target()