Example #1
File: run.py Project: peircej/ioHub
    def createPsychoGraphicsWindow(self):
        #create a window
        self.psychoWindow = FullScreenWindow(self.display)
        
        currentPosition=self.mouse.setPosition((0,0))
        self.mouse.setSystemCursorVisibility(False)

        self.instructionText2Pattern='%d'

        self.psychoStim['grating'] = visual.PatchStim(self.psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        self.psychoStim['fixation'] = visual.PatchStim(self.psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        self.psychoStim['title'] = visual.TextStim(win=self.psychoWindow, text="ioHub getEvents Delay Test", pos = [0,125], height=36, color=[1,.5,0], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions'] = visual.TextStim(win=self.psychoWindow, text='Move the mouse around, press keyboard keys and mouse buttons', pos = [0,-125], height=32, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions2'] = visual.TextStim(win=self.psychoWindow, text=self.instructionText2Pattern%(self.totalEventRequestsForTest,), pos = [0,-250],  color=[-1,-1,-1], height=32, colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['keytext'] = visual.TextStim(win=self.psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='left',wrapWidth=800.0)
        self.psychoStim['mouseDot'] = visual.GratingStim(win=self.psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
class EyeLinkCoreGraphicsIOHubPsychopy(EyeLinkCustomDisplay):
    IOHUB_HEARTBEAT_INTERVAL=0.050   # seconds between forced run through of
                                     # micro threads, since one is blocking
                                     # on camera setup.

    IOHUB2PYLINK_KB_MAPPING={
            KeyboardConstants._virtualKeyCodes.VK_F1: pylink.F1_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F2: pylink.F2_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F3: pylink.F3_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F4: pylink.F4_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F5: pylink.F5_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F6: pylink.F6_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F7: pylink.F7_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F8: pylink.F8_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F9: pylink.F9_KEY,
            KeyboardConstants._virtualKeyCodes.VK_F10: pylink.F10_KEY,
            KeyboardConstants._virtualKeyCodes.VK_PAGE_UP: pylink.PAGE_UP,
            KeyboardConstants._virtualKeyCodes.VK_PAGE_DOWN: pylink.PAGE_DOWN,
            KeyboardConstants._virtualKeyCodes.VK_UP: pylink.CURS_UP,
            KeyboardConstants._virtualKeyCodes.VK_DOWN: pylink.CURS_DOWN,
            KeyboardConstants._virtualKeyCodes.VK_LEFT: pylink.CURS_LEFT,
            KeyboardConstants._virtualKeyCodes.VK_RIGHT: pylink.CURS_RIGHT,
            KeyboardConstants._asciiKeyCodes.BACKSPACE: '\b',
            KeyboardConstants._asciiKeyCodes.RETURN: pylink.ENTER_KEY,
            KeyboardConstants._asciiKeyCodes.ESCAPE: pylink.ESC_KEY,
            }                                 
    
#    SOUND_MAPPINGS={
#                    -1 : 'wav/error.wav', #cal error beep
#                    -2 : 'wav/error.wav', # DC error beep
#                    0 : 'wav/qbeep.wav', # cal. good beep
#                    1: 'wav/type.wav', # cal target beep
#                    2:  'wav/qbeep.wav', # DC good beep
#                    3 : 'wav/type.wav'  # dc target beep
#                    #'wav/error.wav': None, # file name to Nsound buffer mapping
#                    #'wav/qbeep.wav': None, # file name to Nsound buffer mapping
#                    #'wav/type.wav': None, # file name to Nsound buffer mapping
#                  }

    WINDOW_BACKGROUND_COLOR=(128,128,128)
    CALIBRATION_POINT_OUTER_RADIUS=15.0,15.0
    CALIBRATION_POINT_OUTER_EDGE_COUNT=64
    CALIBRATION_POINT_OUTER_COLOR=(255,255,255)
    CALIBRATION_POINT_INNER_RADIUS=3.0,3.0
    CALIBRATION_POINT_INNER_EDGE_COUNT=32
    CALIBRATION_POINT_INNER_COLOR=(25,25,25)

    def __init__(self, eyetrackerInterface, targetForegroundColor=None, 
                 targetBackgroundColor=None, screenColor=None, targetOuterDiameter=None, 
                 targetInnerDiameter=None, dc_sounds=["","",""], cal_sounds=["","",""]):
        EyeLinkCustomDisplay.__init__(self)

        self._eyetrackerinterface=eyetrackerInterface
        self.tracker = eyetrackerInterface._eyelink
        self._ioKeyboard=None
        self._ioMouse=None
        
        self.img_size=None
 
        self.imagebuffer = array.array('I')
        self.pal = None

        self.screenSize = self._eyetrackerinterface._display_device.getPixelResolution()
        self.width=self.screenSize[0]
        self.height=self.screenSize[1]

        self.keys=[]
        self.pos = []
        self.state = 0
        
        if sys.byteorder == 'little':
            self.byteorder = 1
        else:
            self.byteorder = 0

        EyeLinkCoreGraphicsIOHubPsychopy.CALIBRATION_POINT_OUTER_COLOR=targetForegroundColor
        EyeLinkCoreGraphicsIOHubPsychopy.CALIBRATION_POINT_INNER_COLOR=targetBackgroundColor
        EyeLinkCoreGraphicsIOHubPsychopy.WINDOW_BACKGROUND_COLOR=screenColor
        EyeLinkCoreGraphicsIOHubPsychopy.CALIBRATION_POINT_OUTER_RADIUS=targetOuterDiameter/2.0,targetOuterDiameter/2.0
        EyeLinkCoreGraphicsIOHubPsychopy.CALIBRATION_POINT_INNER_RADIUS=targetInnerDiameter/2.0,targetInnerDiameter/2.0
 
        self.tracker.setOfflineMode()

        self.window = FullScreenWindow(self._eyetrackerinterface._display_device)
        self.window.setColor(color=self.WINDOW_BACKGROUND_COLOR,colorSpace='rgb255')        
        self.window.flip(clearBuffer=True)
        
        self._createStim()
        
        self._registerEventMonitors()
        self._ioMouse.setSystemCursorVisibility(False)
        self._lastMsgPumpTime=currentSecTime()
        
        self.clearAllEventBuffers()

    def clearAllEventBuffers(self):
        pylink.flushGetkeyQueue()
        self.tracker.resetData()
        self._iohub_server.eventBuffer.clear()
        for d in self._iohub_server.devices:
            d.clearEvents()
            
    def _registerEventMonitors(self):
        self._iohub_server=self._eyetrackerinterface._iohub_server

        kbDevice=None
        mouseDevice=None
        if self._iohub_server:
            for dev in self._iohub_server.devices:
                #ioHub.print2err("dev: ",dev.__class__.__name__)
                if dev.__class__.__name__ == 'Keyboard':
                    kbDevice=dev
                elif dev.__class__.__name__ == 'Mouse':
                    mouseDevice=dev

        if kbDevice:
            eventIDs=[]
            for event_class_name in kbDevice.__class__.EVENT_CLASS_NAMES:
                eventIDs.append(getattr(EventConstants,convertCamelToSnake(event_class_name[:-5],False)))

            self._ioKeyboard=kbDevice
            self._ioKeyboard._addEventListener(self,eventIDs)
        else:
            ioHub.print2err("Warning: elCG could not connect to Keyboard device for events.")

        if mouseDevice:
            eventIDs=[]
            for event_class_name in mouseDevice.__class__.EVENT_CLASS_NAMES:
                eventIDs.append(getattr(EventConstants,convertCamelToSnake(event_class_name[:-5],False)))

            self._ioMouse=mouseDevice
            self._ioMouse._addEventListener(self,eventIDs)
        else:
            ioHub.print2err("Warning: elCG could not connect to Mouse device for events.")

    def _unregisterEventMonitors(self):
#        ioHub.print2err('_unregisterEventMonitors')
        if self._ioKeyboard:
            self._ioKeyboard._removeEventListener(self)
        if self._ioMouse:
            self._ioMouse._removeEventListener(self)
     
    def _handleEvent(self,ioe):
        #ioHub.print2err("Got Event: ",ioe)
        event_type_index=DeviceEvent.EVENT_TYPE_ID_INDEX
        if ioe[event_type_index] == EventConstants.KEYBOARD_PRESS:
            #ioHub.print2err('Should handle keyboard event for: ', ioe[-4], ' key_id: ',ioe[-5],' key_mods: ',ioe[-2]) #key pressed
#            ioHub.print2err('** KEY: ', ioe[-4]," ,key_id: ",ioe[-5]," ,ascii_code: ",ioe[-6]," ,scan_code: ",ioe[-7])               
            self.translate_key_message((ioe[-5],ioe[-2]))
                
        elif ioe[event_type_index] == EventConstants.MOUSE_BUTTON_PRESS:
#            ioHub.print2err('Should handle mouse pressed event for button id: ', ioe[-7])
            self.state=1
        elif ioe[event_type_index] == EventConstants.MOUSE_BUTTON_RELEASE:
#            ioHub.print2err('Should handle mouse release event for button id: ', ioe[-7])
            self.state=0
            
        elif ioe[event_type_index] == EventConstants.MOUSE_MOVE:
            self.pos=self._ioMouse.getPosition()

#    def _printKeyMapping(self):
#        ioHub.print2err("===========================================")
#        for iokey, pylkey in self.IOHUB2PYLINK_KB_MAPPING.iteritems():
#            ioHub.print2err(iokey,' : ',pylkey)
#        ioHub.print2err("===========================================")
        
    def translate_key_message(self,event):
        key = 0
        mod = 0
        #ioHub.print2err('translate_key_message:' ,event)
        if len(event) > 0:
            key = event[0]
            
            self.keys.append(pylink.KeyInput(key,mod))

        return key

    def get_input_key(self):
        #keep the psychopy window happy ;)
        if currentSecTime()-self._lastMsgPumpTime>self.IOHUB_HEARTBEAT_INTERVAL:                
            # try to keep ioHub, being blocked. ;(
            if self._iohub_server:
                for dm in self._iohub_server.deviceMonitors:
                    dm.device._poll()
                self._iohub_server._processDeviceEventIteration()


            self._lastMsgPumpTime=currentSecTime()
                
        if len(self.keys) > 0:
            k= self.keys
            self.keys=[]
            #ioHub.print2err('KEY get_input_key: ',k)
            return k
        else:
            return None


    def _createStim(self):        

        class StimSet(object):
            def __setattr__(self, item, value):
                if item in self.__dict__:
                    i=self.__dict__['_stimNameList'].index(item)
                    self.__dict__['_stimValueList'][i]=value
                else:
                    if '_stimNameList' not in self.__dict__:
                        self.__dict__['_stimNameList']=[]
                        self.__dict__['_stimValueList']=[]
                    self.__dict__['_stimNameList'].append(item)
                    self.__dict__['_stimValueList'].append(value)
                self.__dict__[item]=value
            
            def updateStim(self,name,**kwargs):
                astim=getattr(self,name)
                if isinstance(astim,OrderedDict):
                    for stimpart in astim.itervalues():
                        for argName,argValue in kwargs.iteritems():
                            a=getattr(stimpart,argName)
                            if callable(a):
                                a(argValue)
                            else:    
                                setattr(stimpart,argName,argValue)
                else:
                    for argName,argValue in kwargs.iteritems():
                        a=getattr(astim,argName)
                        if callable(a):
                            a(argValue)
                        else:    
                            setattr(astim,argName,argValue)

            def draw(self):
                for s in self._stimValueList:                    
                    if isinstance(s,OrderedDict):
                        for stimpart in s.itervalues():
                            stimpart.draw()
                    else:
                        s.draw()
                        
        self.calStim=StimSet()
                
        self.calStim.calibrationPoint=OrderedDict()
        self.calStim.calibrationPoint['OUTER'] = visual.Circle(self.window,pos=(0,0),
                    lineWidth=1.0, lineColor=self.CALIBRATION_POINT_OUTER_COLOR, lineColorSpace='rgb255',
                    fillColor=self.CALIBRATION_POINT_OUTER_COLOR, fillColorSpace='rgb255',
                    radius=self.CALIBRATION_POINT_OUTER_RADIUS,                    
                    name='CP_OUTER', units='pix',opacity=1.0, interpolate=False)

        self.calStim.calibrationPoint['INNER'] = visual.Circle(self.window,
                    pos=(0,0),lineWidth=1.0,lineColor=self.CALIBRATION_POINT_INNER_COLOR, lineColorSpace='rgb255',
                    fillColor=self.CALIBRATION_POINT_INNER_COLOR, fillColorSpace='rgb255', 
                    radius=self.CALIBRATION_POINT_INNER_RADIUS,
                    name='CP_INNER',units='pix',opacity=1.0, interpolate=False)

        self.imageStim=StimSet()
        self.imageStim.imageTitle = visual.TextStim(self.window, text = "EL CAL", pos=(0,0), units='pix', alignHoriz='center')        
        
    def setup_cal_display(self):
        #ioHub.print2err('setup_cal_display entered')
        self.window.flip(clearBuffer=True)
        #ioHub.print2err('setup_cal_display exiting')

    def exit_cal_display(self):
        #ioHub.print2err('exit_cal_display entered')
        self.window.flip(clearBuffer=True)
        #ioHub.print2err('exit_cal_display exiting')

    def record_abort_hide(self):
        pass

    def clear_cal_display(self):
        self.window.flip(clearBuffer=True)
        
    def erase_cal_target(self):
        self.window.flip(clearBuffer=True)
        
    def draw_cal_target(self, x, y):
        self.calStim.updateStim('calibrationPoint',setPos=(x,y))  
        self.calStim.draw()
        self.window.flip(clearBuffer=True)
        
    def play_beep(self, beepid):
        pass
        #if (Nsound is not None) and (beepid in self.SOUND_MAPPINGS):
        #    sname=self.SOUND_MAPPINGS[beepid]
        #    audiobuffer,audioplayer=self.SOUND_MAPPINGS[sname]
        #    audiobuffer >> audioplayer
        #elif Nsound is None:
        #    pass
        #else:
        #    ioHub.print2err('play_beep ERROR: Unsupported beep id: %d.'%(beepid))
                    
    def exit_image_display(self):
        #ioHub.print2err('exit_image_display entered')
        self.window.flip(clearBuffer=True)
        #ioHub.print2err('exit_image_display exiting')

    def alert_printf(self,msg):
        ioHub.print2err('**************************************************')
        ioHub.print2err('EYELINK CG ERROR: %s'%(msg))
        ioHub.print2err('**************************************************')
        
    def image_title(self, text):
        #ioHub.print2err('image_title entered')
        self.imageStim.updateStim('imageTitle',setText=text)
        self.imageStim.draw()        
        self.window.flip(clearBuffer=True)
        #ioHub.print2err('image_title exiting')

############# From Pyglet Custom Graphics #####################################
#
## NOT YET CONVERTED
#
#
#
###############################################################################
#
#
#   pyglet impl.
    def get_mouse_state(self):
        ioHub.print2err('get_mouse_state entered')
        if len(self.pos) > 0 :
            l = (int)(self.width*0.5-self.width*0.5*0.75)
            r = (int)(self.width*0.5+self.width*0.5*0.75)
            b = (int)(self.height*0.5-self.height*0.5*0.75)
            t = (int)(self.height*0.5+self.height*0.5*0.75)

            mx, my = 0,0
            if self.pos[0]<l:
                mx = l
            elif self.pos[0] >r:
                mx = r
            else:
                mx = self.pos[0]

            if self.pos[1]<b:
                my = b
            elif self.pos[1]>t:
                my = t
            else:
                my = self.pos[1]

            mx = (int)((mx-l)*self.img_size[0]//(r-l))
            my = self.img_size[1] - (int)((my-b)*self.img_size[1]//(t-b))
            ioHub.print2err('get_mouse_state exiting')
            return ((mx, my),self.state)
        else:
            ioHub.print2err('get_mouse_state exiting')
            return((0,0), 0)

###############################################################################
#
#
#   PYGLET IMP.
    def setup_image_display(self, width, height):
        ioHub.print2err('setup_image_display entered')
        self.img_size = (width,height)
        self.window.clearBuffer()
        self.window.flip(clearBuffer=True)
        ioHub.print2err('setup_image_display exiting')
      
###############################################################################
#
#   PYGLET Imp.
    def draw_image_line(self, width, line, totlines,buff):
        pass
#        ioHub.print2err('draw_image_line entered')
#        i =0
#        while i <width:
#            if buff[i]>=len(self.pal):
#                buff[i]=len(self.pal)-1
#            self.imagebuffer.append(self.pal[buff[i]&0x000000FF])
#            i = i+1
#        if line == totlines:
#            #asp = ((float)(self.size[1]))/((float)(self.size[0]))
#            asp = 1
#            r = (float)(self.width*0.5-self.width*0.5*0.75)
#            l = (float)(self.width*0.5+self.width*0.5*0.75)
#            t = (float)(self.height*0.5+self.height*0.5*asp*0.75)
#            b = (float)(self.height*0.5-self.height*0.5*asp*0.75)
#
#            self.window.clearBuffer()
#            
#            tx = (int)(self.width*0.5)
#            ty = b - 30
#            self.stim.drawStim('imageTitle',{'setPos':(tx,ty)})            
#
#            self.draw_cross_hair()
#            glEnable(GL_TEXTURE_RECTANGLE_ARB)
#            glBindTexture(GL_TEXTURE_RECTANGLE_ARB, self.texid.value)
#            glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
#            glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
#            glTexEnvi( GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE, GL_REPLACE )
#            glTexImage2D( GL_TEXTURE_RECTANGLE_ARB, 0,GL_RGBA8, width, totlines, 0, GL_RGBA, GL_UNSIGNED_BYTE, self.imagebuffer.tostring())
#
#            glBegin(GL_QUADS)
#            glTexCoord2i(0, 0)
#            glVertex2f(r,t)
#            glTexCoord2i(0, self.img_size[1])
#            glVertex2f(r, b)
#            glTexCoord2i(self.img_size[0],self.img_size[1])
#            glVertex2f(l, b)
#            glTexCoord2i(self.img_size[1],0)
#            glVertex2f(l, t)
#            glEnd()
#            glDisable(GL_TEXTURE_RECTANGLE_ARB)
#            self.draw_cross_hair()
#
#            self.window.flip(clearBuffer=True)
#            
#            self.imagebuffer = array.array('I')
#        ioHub.print2err('draw_image_line exiting')


###############################################################################
#
#   Pyglet impl.
    def draw_line(self,x1,y1,x2,y2,colorindex):
        pass
#    
#        ioHub.print2err('draw_line entered')
#        if colorindex   ==  pylink.CR_HAIR_COLOR:          color = (1.0,1.0,1.0,1.0)
#        elif colorindex ==  pylink.PUPIL_HAIR_COLOR:       color = (1.0,1.0,1.0,1.0)
#        elif colorindex ==  pylink.PUPIL_BOX_COLOR:        color = (0.0,1.0,0.0,1.0)
#        elif colorindex ==  pylink.SEARCH_LIMIT_BOX_COLOR: color = (1.0,0.0,0.0,1.0)
#        elif colorindex ==  pylink.MOUSE_CURSOR_COLOR:     color = (1.0,0.0,0.0,1.0)
#        else: color =(0.0,0.0,0.0,0.0)
#
#        #asp = ((float)(self.size[1]))/((float)(self.size[0]))
#        asp = 1
#        r = (float)(self.width*0.5-self.width*0.5*0.75)
#        l = (float)(self.width*0.5+self.width*0.5*0.75)
#        t = (float)(self.height*0.5+self.height*0.5*asp*0.75)
#        b = (float)(self.height*0.5-self.height*0.5*asp*0.75)
#
#        x11= float(float(x1)*(l-r)/float(self.img_size[0]) + r)
#        x22= float(float(x2)*(l-r)/float(self.img_size[0]) + r)
#        y11= float(float(y1)*(b-t)/float(self.img_size[1]) + t)
#        y22= float(float(y2)*(b-t)/float(self.img_size[1]) + t)
#
##        glBegin(GL_LINES)
##        glColor4f(color[0],color[1],color[2],color[3] )
##        glVertex2f(x11,y11)
##        glVertex2f(x22,y22)
##        glEnd()
#        ioHub.print2err('draw_line exiting')
#        

###############################################################################
#
#   Pyglet Implementation
    def draw_lozenge(self,x,y,width,height,colorindex):
        pass
#        ioHub.print2err('draw_lozenge entered')
#        if colorindex   ==  pylink.CR_HAIR_COLOR:          color = (1.0,1.0,1.0,1.0)
#        elif colorindex ==  pylink.PUPIL_HAIR_COLOR:       color = (1.0,1.0,1.0,1.0)
#        elif colorindex ==  pylink.PUPIL_BOX_COLOR:        color = (0.0,1.0,0.0,1.0)
#        elif colorindex ==  pylink.SEARCH_LIMIT_BOX_COLOR: color = (1.0,0.0,0.0,1.0)
#        elif colorindex ==  pylink.MOUSE_CURSOR_COLOR:     color = (1.0,0.0,0.0,1.0)
#        else: color =(0.0,0.0,0.0,0.0)
#
#        width=int((float(width)/float(self.img_size[0]))*self.img_size[0])
#        height=int((float(height)/float(self.img_size[1]))*self.img_size[1])
#
#        #asp = ((float)(self.size[1]))/((float)(self.size[0]))
#        asp = 1
#        r = (float)(self.width*0.5-self.width*0.5*0.75)
#        l = (float)(self.width*0.5+self.width*0.5*0.75)
#        t = (float)(self.height*0.5+self.height*0.5*asp*0.75)
#        b = (float)(self.height*0.5-self.height*0.5*asp*0.75)
#
#        x11= float(float(x)*(l-r)/float(self.img_size[0]) + r)
#        x22= float(float(x+width)*(l-r)/float(self.img_size[0]) + r)
#        y11= float(float(y)*(b-t)/float(self.img_size[1]) + t)
#        y22= float(float(y+height)*(b-t)/float(self.img_size[1]) + t)
#
#        r=x11
#        l=x22
#        b=y11
#        t=y22
#
#        #glColor4f(color[0],color[1],color[2],color[3])
#
#        xw = math.fabs(float(l-r))
#        yw = math.fabs(float(b-t))
#        sh = min(xw,yw)
#        rad = float(sh*0.5)
#
#        x = float(min(l,r)+rad)
#        y = float(min(t,b)+rad)
#
#        if xw==sh:
#            st = 180
#        else:
#            st = 90
#        glBegin(GL_LINE_LOOP)
#        i=st
#        degInRad = (float)(float(i)*(3.14159/180.0))
#
#        for i in range (st, st+180):
#            degInRad = (float)(float(i)*(3.14159/180.0))
#            glVertex2f((float)(float(x)+math.cos(degInRad)*rad),float(y)+(float)(math.sin(degInRad)*rad))
#
#        if xw == sh:    #short horizontally
#            y = (float)(max(t,b)-rad)
#        else:  		  # short vertically
#            x = (float)(max(l,r)-rad)
#
#        i = st+180
#        for i in range (st+180, st+360):
#            degInRad = (float)(float(i)*(3.14159/180.0))
#            glVertex2f((float)(float(x)+math.cos(degInRad)*rad),float(y)+(float)(math.sin(degInRad)*rad))
#
#        glEnd()
#        ioHub.print2err('draw_lozenge exiting')

###############################################################################
#
#   PYGLET Imp.
    def set_image_palette(self, r,g,b):
        ioHub.print2err('set_image_palette entered')
        self.imagebuffer = array.array('I')
        self.clear_cal_display()
        sz = len(r)
        i =0
        self.pal = []
        while i < sz:
            rf = int(r[i])
            gf = int(g[i])
            bf = int(b[i])
            if self.byteorder:
                self.pal.append(0xff<<24|(bf<<16)|(gf<<8)|(rf))
            else:
                self.pal.append((rf<<24)|(gf<<16)|(bf<<8)|0xff)
            i = i+1
        ioHub.print2err('set_image_palette exiting')
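
A note on set_image_palette above: each palette entry packs an 8-bit R, G, B triple plus an opaque alpha byte into one 32-bit unsigned int, with the component order depending on the host byte order. A minimal stand-alone sketch (not part of the project code) of that packing:

import array
import sys

def pack_palette_entry(r, g, b, little_endian=(sys.byteorder == 'little')):
    # On a little-endian host the alpha byte goes into the high bits, so the
    # in-memory byte order is R,G,B,A; on a big-endian host the same order is
    # produced by putting alpha in the low byte, matching the code above.
    if little_endian:
        return 0xff << 24 | (b << 16) | (g << 8) | r
    return (r << 24) | (g << 16) | (b << 8) | 0xff

pal = [pack_palette_entry(255, 0, 0), pack_palette_entry(0, 255, 0)]
imagebuffer = array.array('I', pal)   # same 'I' (unsigned int) typecode used above
print "0x%08X 0x%08X" % (imagebuffer[0], imagebuffer[1])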
Example #4
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is the equivalent of
        the main psychopy experiment script.py file in a standard psychopy
        experiment setup. That is all there is to it, really.
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED.
        
        #create a window to draw in
        mouse=self.devices.mouse
        display=self.devices.display
        keyboard=self.devices.keyboard
        gamepad=self.devices.gamepad
        computer=self.devices.computer

        # Read the current resolution of the display's screen in pixels.
        # We will set our window size to match the current screen resolution
        # and make it a full screen, borderless window.
        screen_resolution= display.getPixelResolution()

        # Create a psychopy window, full screen resolution, full screen mode,
        # pix units, with no border.
        myWin = FullScreenWindow(display)
            
        # Hide the 'system mouse cursor'
        mouse.setSystemCursorVisibility(False)

        gamepad.updateBatteryInformation()
        bat=gamepad.getLastReadBatteryInfo()
        print "Battery Info: ",bat

        gamepad.updateCapabilitiesInformation()
        caps=gamepad.getLastReadCapabilitiesInfo()
        print "Capabilities: ",caps
    
        fixSpot = visual.PatchStim(myWin,tex="none", mask="gauss",pos=(0,0), 
                            size=(30,30),color='black')
        
        grating = visual.PatchStim(myWin,pos=(0,0), tex="sin",mask="gauss",
                            color='white',size=(200,200), sf=(0.01,0))

        msgText='Left Stick = Spot Pos; Right Stick = Grating Pos;\nLeft Trig = SF; Right Trig = Ori;\n"r" key = Rumble; "q" = Quit\n'
        message = visual.TextStim(myWin,pos=(0,-200),
                            text=msgText,
                            alignHoriz='center',alignVert='center',height=24,
                            wrapWidth=screen_resolution[0]*.9)
    
        END_DEMO=False
        
        while not END_DEMO:
            

            #update stim from joystick
            x,y,mag=gamepad.getThumbSticks()['RightStick'] # sticks are 3 item lists (x,y,magnitude)
            xx=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
            yy=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
            grating.setPos((xx, yy))
            
            x,y,mag=gamepad.getThumbSticks()['LeftStick'] # sticks are 3 item lists (x,y,magnitude)
            xx=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
            yy=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
            fixSpot.setPos((xx, yy))

            # change sf
            sf=gamepad.getTriggers()['LeftTrigger']
            
            grating.setSF((sf/display.getPixelsPerDegree()[0])*2+0.01) #so should be in the range 0:4

            #change ori
            ori=gamepad.getTriggers()['RightTrigger']
            grating.setOri(ori*360.0) 

            #if any button is pressed then make the stimulus coloured
            if gamepad.getPressedButtonList():
                grating.setColor('red')
            else:
                grating.setColor('white')
                    
            #drift the grating
            t=computer.getTime()
            grating.setPhase(t*2)
            grating.draw()
            
            fixSpot.draw()
            message.draw()
            myWin.flip()#redraw the buffer

            #print joy.getAllAxes()#to see what your axes are doing!
            
            for event in keyboard.getEvents():
                if event.key in ['q',]:                
                    END_DEMO=True
                elif event.key in ['r',]:
                    # rumble the pad , 50% low frequency motor,
                    # 25% high frequency motor, for 1 second.
                    r=gamepad.setRumble(50.0,25.0,1.0)                    
                
            self.hub.clearEvents()#do this each frame to avoid getting clogged with mouse events
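
The normalizedValue2Pixel helper called above is not shown in this snippet. A hypothetical sketch (an assumption about its behavior, not the project's actual implementation) that maps a normalized thumbstick value to the center-origin pixel space described in the comments:

    def normalizedValue2Pixel(self, normed_value, screen_dim, minimum_norm_value):
        # Hypothetical helper: map a normalized axis value to pixels on a display
        # whose origin is the screen center (so +/- half the screen dimension).
        if minimum_norm_value == -1:
            # value already spans -1..1; scale by half the screen dimension
            pixel_value = normed_value * (screen_dim / 2.0)
        else:
            # value spans 0..1; rescale to the full dimension, then re-center
            pixel_value = normed_value * screen_dim - (screen_dim / 2.0)
        return int(pixel_value)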
Example #5
File: run.py Project: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is the equivalent of the main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it, really.
        """

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        calibrationOK = tracker.runSetupProcedure()
        if calibrationOK is False:
            print "NOTE: Exiting application due to failed calibration."
            return

        # Create a psychopy window, full screen resolution, full screen mode...
        self.window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they were added, so you can use it to reference stim by name or by 'zorder'.
        image_name = "./images/party.png"
        imageStim = visual.ImageStim(self.window, image=image_name, name="image_stim")
        gaze_dot = visual.GratingStim(
            self.window, tex=None, mask="gauss", pos=(-2000, -2000), size=(100, 100), color="green"
        )

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen = ClearScreen(self)
        self.clearScreen.setScreenColor((128, 128, 128))

        self.clearScreen.flip(text="EXPERIMENT_INIT")

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.clearScreen.sendMessage(
            "Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID)
        )
        self.clearScreen.sendMessage(
            "Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
                display.getIndex(), display.getPixelResolution(), display.getCoordinateType()
            )
        )
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # Screen for showing text and waiting for a keyboard response or something
        instuction_text = "Press Space Key".center(32) + "\n" + "to Start Experiment.".center(32)
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_CHAR, {"key": "SPACE"})
        timeout = 5 * 60.0
        self.instructionScreen = InstructionScreen(self, instuction_text, dtrigger, timeout)
        self.instructionScreen.setScreenColor((128, 128, 128))
        # flip_time,time_since_flip,event=self.instructionScreen.switchTo("CALIBRATION_WAIT")

        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("START_EXPERIMENT_WAIT")

        tracker.setRecordingState(True)
        self.clearScreen.flip()
        self.hub.wait(0.050)

        # Clear all events from the global event buffer,
        # and from the all device level event buffers.
        self.hub.clearEvents("all")

        # Loop until we get a keyboard event
        while not kb.getEvents():
            gpos = tracker.getLastGazePosition()
            if gpos:
                gaze_dot.setPos(gpos)
                imageStim.draw()
                gaze_dot.draw()
            else:
                imageStim.draw()

            self.window.flip()
            flip_time = Computer.currentSec()
            self.hub.sendMessageEvent("SYNCTIME %s" % (image_name,), sec_time=flip_time)

        self.hub.clearEvents("all")

        # A key was pressed so exit experiment.
        # Wait 250 msec before ending the experiment
        # (makes it feel less abrupt after you press the key to quit IMO)
        self.hub.wait(0.250)

        tracker.setRecordingState(False)
        tracker.setConnectionState(False)

        self.clearScreen.flip(text="EXPERIMENT_COMPLETE")
        instuction_text = (
            "Experiment Finished".center(32)
            + "\n"
            + "Press 'SPACE' to Quit.".center(32)
            + "\n"
            + "Thank You.".center(32)
        )
        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("EXPERIMENT_COMPLETE_WAIT")
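
The InstructionScreen above waits for a matching keyboard event or a timeout. A rough sketch (an assumption about what such a wait amounts to, built only from device calls already used in these examples, not the ScreenState implementation) of waiting for a SPACE key press with a timeout:

        start_time = Computer.currentSec()
        triggering_event = None
        while triggering_event is None and (Computer.currentSec() - start_time) < timeout:
            for ev in kb.getEvents(EventConstants.KEYBOARD_CHAR):
                if ev.key == 'SPACE':
                    triggering_event = ev
                    break
            self.hub.wait(0.01)   # yield a little time between polls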
Example #6
File: run.py Project: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is the equivalent of the main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it, really.
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON.

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.

        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        tracker.runSetupProcedure()
        self.hub.clearEvents("all")
        self.hub.wait(0.050)

        current_gaze = [0, 0]

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the
        # monitor profile name 'test monitor', which is created on the fly by the script.
        window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they were added, so you can use it to reference stim by name or by 'zorder'.
        psychoStim = OrderedDict()
        psychoStim["grating"] = visual.PatchStim(window, mask="circle", size=75, pos=[-100, 0], sf=0.075)
        psychoStim["fixation"] = visual.PatchStim(
            window, size=25, pos=[0, 0], sf=0, color=[-1, -1, -1], colorSpace="rgb"
        )
        psychoStim["gazePosText"] = visual.TextStim(
            window,
            text=str(current_gaze),
            pos=[100, 0],
            height=48,
            color=[-1, -1, -1],
            colorSpace="rgb",
            alignHoriz="left",
            wrapWidth=300,
        )
        psychoStim["gazePos"] = visual.GratingStim(
            window, tex=None, mask="gauss", pos=current_gaze, size=(50, 50), color="purple"
        )

        [psychoStim[stimName].draw() for stimName in psychoStim]

        Computer.enableHighPriority(True)
        # self.setProcessAffinities([0,1],[2,3])

        tracker.setRecordingState(True)
        self.hub.wait(0.050)

        # Clear all events from the ioHub event buffers.
        self.hub.clearEvents("all")

        # Loop until we get a keyboard event
        while len(kb.getEvents()) == 0:

            # for each loop, update the grating phase
            psychoStim["grating"].setPhase(0.05, "+")  # advance phase by 0.05 of a cycle

            # and update the gaze contingent gaussian based on the current gaze location

            current_gaze = tracker.getLastGazePosition()
            current_gaze = int(current_gaze[0]), int(current_gaze[1])

            psychoStim["gazePos"].setPos(current_gaze)
            psychoStim["gazePosText"].setText(str(current_gaze))

            # this is shorthand for looping through the psychopy stim list and redrawing each one.
            # it is efficient, though it may not be as user friendly as:
            # for stim in psychoStim.itervalues():
            #    stim.draw()
            # which does the same thing and is probably just as efficient.
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            flip_time = window.flip()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s" % (str(current_gaze),), sec_time=flip_time)

        # a key was pressed so the loop was exited. We are clearing the event buffers to avoid an event overflow ( currently known issue)
        self.hub.clearEvents("all")

        tracker.setRecordingState(False)

        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        self.hub.wait(0.250)
        tracker.setConnectionState(False)

        # close necessary files / objects, disable high priority.
        window.close()
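
Note that getLastGazePosition() may not return a usable position when no valid gaze sample is available; the other examples guard with "if gpos:" before using it. A defensive version of the gaze update in the loop above might look like this sketch:

            gpos = tracker.getLastGazePosition()
            if gpos:
                current_gaze = int(gpos[0]), int(gpos[1])
                psychoStim["gazePos"].setPos(current_gaze)
                psychoStim["gazePosText"].setText(str(current_gaze))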
Example #7
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is the equivalent of the main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it, really.
        """

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.kb
        mouse=self.hub.devices.mouse

        result=tracker.runSetupProcedure()
        if isinstance(result,dict):
            print "Validation Accuracy Results: ", result
        elif result != EyeTrackerConstants.EYETRACKER_OK:
            print "An error occurred during eye tracker user setup: ",EyeTrackerConstants.getName(result)
            
        # Create a psychopy window, full screen resolution, full screen mode...
        self.window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they were added, so you can use it to reference stim by name or by 'zorder'.
        image_name='./images/party.png'
        imageStim = visual.ImageStim(self.window, image=image_name, name='image_stim')
        gaze_dot =visual.GratingStim(self.window,tex=None, mask="gauss", pos=(-2000,-2000),size=(100,100),color='green')

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen=ClearScreen(self)
        self.clearScreen.setScreenColor((128,128,128))

        self.clearScreen.flip(text='EXPERIMENT_INIT')

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.clearScreen.sendMessage("Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.clearScreen.sendMessage("Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))        
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # Screen for showing text and waiting for a keyboard response or something
        instuction_text="Press Space Key".center(32)+'\n'+"to Start Experiment.".center(32)
        dtrigger=DeviceEventTrigger(kb,EventConstants.KEYBOARD_CHAR,{'key':'SPACE'})
        timeout=5*60.0
        self.instructionScreen=InstructionScreen(self,instuction_text,dtrigger,timeout)
        self.instructionScreen.setScreenColor((128,128,128))
        #flip_time,time_since_flip,event=self.instructionScreen.switchTo("CALIBRATION_WAIT")

        self.instructionScreen.setText(instuction_text)        
        self.instructionScreen.switchTo("START_EXPERIMENT_WAIT")
        
        for t in range(5): 
            self.hub.clearEvents('all')
            instuction_text="Press Space Key To Start Trial %d"%t
            self.instructionScreen.setText(instuction_text)        
            self.instructionScreen.switchTo("START_TRIAL")

            tracker.setRecordingState(True)
            self.clearScreen.flip()
            self.hub.clearEvents('all')
    
            # Loop until we get a keyboard event
            runtrial=True
            while runtrial:
                gpos=tracker.getLastGazePosition()
                if gpos:
                    gaze_dot.setPos(gpos)
                    imageStim.draw()
                    gaze_dot.draw()
                else:
                    imageStim.draw()
                    
                flip_time=self.window.flip()          
                self.hub.sendMessageEvent("SYNCTIME %s"%(image_name,),sec_time=flip_time)
                
                keys=kb.getEvents(EventConstants.KEYBOARD_CHAR)
                for key in keys:
                    if key.key == 'SPACE':
                       runtrial=False
                       break
                   
            self.clearScreen.flip(text='TRIAL_%d_DONE'%t)
            tracker.setRecordingState(False)

        self.clearScreen.flip(text='EXPERIMENT_COMPLETE')
        instuction_text="Experiment Finished".center(32)+'\n'+"Press 'SPACE' to Quit.".center(32)+'\n'+"Thank You.".center(32)
        self.instructionScreen.setText(instuction_text)        
        self.instructionScreen.switchTo("EXPERIMENT_COMPLETE_WAIT")

        # A key was pressed so exit experiment.
        # Wait 250 msec before ending the experiment 
        # (makes it feel less abrupt after you press the key to quit IMO)
        self.hub.wait(0.250)

        tracker.setConnectionState(False)
Example #8
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        ENABLE_NOISY_MOUSE=True
     
        
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb

        #Computer.enableHighPriority()
        
        # Create a psychopy window, using settings from Display device config
        psychoWindow =  FullScreenWindow(display)#,res=(500,500),fullscr=False,allowGUI=True)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        #mouse.setSystemCursorVisibility(False)
        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))
        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        mouse.lockMouseToDisplayID(display.getIndex())
        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they were added, so you can use it to reference stim by name or by 'zorder'.
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['keytext'] = visual.TextStim(psychoWindow, text=u'?', pos = [100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['ucodetext'] = visual.TextStim(psychoWindow, text=u'?', pos = [-100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mods'] = visual.TextStim(psychoWindow, text=u'?', pos = [0,-200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until we get a keyboard event with the space, Enter (Return), or Escape key is pressed.
        while QUIT_EXP is False:

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # and update the mouse contingent gaussian based on the current mouse location
            mx,my=mouse.getPosition()
            if ENABLE_NOISY_MOUSE:
                mx=np.random.random_integers(mx-10,mx+10)
                my=np.random.random_integers(my-10,my+10)
            psychoStim['mouseDot'].setPos((mx,my))


            # redraw the stim
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            psychoWindow.flip()
            # it is on this side of the call that you know the changes have been displayed, so you can
            # make a call to the ioHub time method and get the time of the flip, as the built in
            # time methods represent both experiment process and ioHub server process time.
            # Most times in ioHub are represented sec.msec format to match that of Psychopy.
            flip_time=Computer.currentSec()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)

            # get any new keyboard char events from the keyboard device


            # for each new keyboard character event, check if it matches one of the end example keys.
            for k in kb.getEvents():
                if k.key.upper() in ['ESCAPE', ] and k.type==EventConstants.KEYBOARD_CHAR:
                    print 'Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                    QUIT_EXP=True
                print u'{0}: time: {1}\t\tord: {2}.\t\tKey: [{3}]\t\tMods: {4}'.format(k.time,EventConstants.getName(k.type),k.ucode,k.key,k.modifiers)
                psychoStim['keytext'].setText(k.key)
                psychoStim['ucodetext'].setText(unichr(k.ucode))
                psychoStim['mods'].setText(str(k.modifiers))
                

            #for e in mouse.getEvents():
            #    print 'Event: ',e
                
            self.hub.clearEvents('all')
        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # for fun, test getting a bunch of events at once, likely causing a mutlipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()
        
        if events is None:
            events=[]

        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()
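
The screen-origin comment at the top of this example means positions are expressed relative to the screen center, with +y pointing up. A small stand-alone sketch (pure arithmetic, not project code) converting a conventional top-left-origin pixel position into that center-origin space:

def top_left_to_center_origin(x, y, screen_width, screen_height):
    # shift the origin to the screen center and flip y so +y points up
    cx = x - screen_width / 2.0
    cy = (screen_height / 2.0) - y
    return cx, cy

print top_left_to_center_origin(0, 0, 1920, 1080)      # (-960.0, 540.0): top-left corner
print top_left_to_center_origin(960, 540, 1920, 1080)  # (0.0, 0.0): screen center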
Example #9
File: run.py Project: peircej/ioHub
class ExperimentRuntime(ioHubExperimentRuntime):
    def __init__(self,configFileDirectory, configFile):
        ioHubExperimentRuntime.__init__(self,configFileDirectory,configFile)
        self.initAttributes()

    def initAttributes(self):
        """

        """
        self.psychoStim = OrderedDict()
        self.totalEventRequestsForTest=1000
        self.numEventRequests=0
        self.psychoWindow=None
        self.lastFlipTime=0.0
        self.events=None

    def run(self,*args,**kwargs):
        """
        The psychopy code is taken from an example script in the psychopy Coder documentation.
        """

        #report process affinities
        print "Current process affinities (experiment proc, ioHub proc):", Computer.getProcessAffinities()

        # create 'shortcuts' to the devices of interest for this experiment
        self.mouse=self.hub.devices.mouse
        self.kb=self.hub.devices.kb
        self.expRuntime=self.hub.devices.experimentRuntime
        self.display=self.hub.devices.display


        # let's print out the public method names for each device type for fun.
        #print "ExperimentPCkeyboard methods:",self.kb.getDeviceInterface()
        #print "ExperimentPCmouse methods:",self.mouse.getDeviceInterface()
        #print "ExperimentRuntime methods:",self.expRuntime.getDeviceInterface()
        #print "Display methods:",self.display.getDeviceInterface()

        # create fullscreen pyglet window at current resolution, as well as required resources / drawings
        self.createPsychoGraphicsWindow()

        # create stats numpy arrays, set experiment process to high priority.
        self.initStats()

        # enable high priority mode for the experiment process
        Computer.enableHighPriority()

        #draw and flip to the updated graphics state.
        ifi=self.drawAndFlipPsychoWindow()

        # START TEST LOOP >>>>>>>>>>>>>>>>>>>>>>>>>>

        while self.numEventRequests < self.totalEventRequestsForTest:
            # send an Experiment Event to the ioHub server process
            self.hub.sendMessageEvent("This is a test message %.3f"%self.flipTime)

            # check for any new events from any of the devices, and return the events list and the time it took to
            # request the events and receive the reply
            self.events,callDuration=self.checkForEvents()
            if self.events:
                # events were available
                self.updateStats(self.events, callDuration, ifi)
                #draw and flip to the updated graphics state.

            ifi=self.drawAndFlipPsychoWindow()

        # END TEST LOOP <<<<<<<<<<<<<<<<<<<<<<<<<<

        # close necessary files / objects, disable high priority.
        self.spinDownTest()

        # plot collected delay and retrace detection results.
        self.plotResults()

    def createPsychoGraphicsWindow(self):
        #create a window
        self.psychoWindow = FullScreenWindow(self.display)
        
        currentPosition=self.mouse.setPosition((0,0))
        self.mouse.setSystemCursorVisibility(False)

        self.instructionText2Pattern='%d'

        self.psychoStim['grating'] = visual.PatchStim(self.psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        self.psychoStim['fixation'] = visual.PatchStim(self.psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        self.psychoStim['title'] = visual.TextStim(win=self.psychoWindow, text="ioHub getEvents Delay Test", pos = [0,125], height=36, color=[1,.5,0], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions'] = visual.TextStim(win=self.psychoWindow, text='Move the mouse around, press keyboard keys and mouse buttons', pos = [0,-125], height=32, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions2'] = visual.TextStim(win=self.psychoWindow, text=self.instructionText2Pattern%(self.totalEventRequestsForTest,), pos = [0,-250],  color=[-1,-1,-1], height=32, colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['keytext'] = visual.TextStim(win=self.psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='left',wrapWidth=800.0)
        self.psychoStim['mouseDot'] = visual.GratingStim(win=self.psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')


    def drawAndFlipPsychoWindow(self):
        self.psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
        currentPosition,currentDisplayIndex=self.mouse.getPosition(return_display_index=True)
        
        if currentDisplayIndex == self.display.getIndex():       
            currentPosition=(float(currentPosition[0]),float(currentPosition[1]))
            self.psychoStim['mouseDot'].setPos(currentPosition)


        if self.events:
            self.psychoStim['instructions2'].setText(self.instructionText2Pattern%(self.totalEventRequestsForTest-self.numEventRequests,))

            for r in self.events:
                if r.type == EventConstants.KEYBOARD_PRESS: # keypress event
                    self.psychoStim['keytext'].setText(r.key.decode('utf-8'))

            self.events=None

        [self.psychoStim[skey].draw() for skey in self.psychoStim]

        self.flipTime=self.psychoWindow.flip()
        d=self.flipTime-self.lastFlipTime
        self.lastFlipTime=self.flipTime
        return d

    def checkForEvents(self):
        # get the time we request events from the ioHub
        stime=Computer.currentTime()
        r = self.hub.getEvents()
        if r and len(r) > 0:
            # so there were events returned in the request, so include this getEvent request in the tally
            etime=Computer.currentTime()
            dur=etime-stime
            return r, dur*1000.0
        return None,None


    def initStats(self):
        if self.hub is None:
            print "Error: ioHub must be enabled to run the testEventRetrievalTiming test."
            return

        # Init Results numpy array
        self.results= zeros((self.totalEventRequestsForTest,3),dtype='f4')

        self.numEventRequests=0
        self.flipTime=0.0
        self.lastFlipTime=0.0

        # clear the ioHub event Buffer before starting the test.
        # This is VERY IMPORTANT, given an existing bug in ioHub.
        # You would want to do this before each trial started until the bug is fixed.
        self.hub.clearEvents('all')

    def updateStats(self, events, duration, ifi):
        self.results[self.numEventRequests][0]=duration     # time it took to get events from ioHub (msec)
        self.results[self.numEventRequests][1]=len(events)  # number of events returned
        self.results[self.numEventRequests][2]=ifi*1000.0   # inter-flip interval (msec)
        self.numEventRequests+=1                            # increment the tally counter


    def spinDownTest(self):
        # The requested number of getEvents() calls returning > 0 events has been collected,
        # so close the PsychoPy window.
        self.psychoWindow.close()

        # disable high priority in both processes
        Computer.disableHighPriority()


    def plotResults(self):
        #### calculate stats on collected data and draw some plots ####
        import matplotlib.mlab as mlab
        from matplotlib.pyplot import axis, title, xlabel, hist, grid, show, ylabel, plot
        import pylab

        results= self.results

        durations=results[:,0]
        flips=results[1:,2]

        dmin=durations.min()
        dmax=durations.max()
        dmean=durations.mean()
        dstd=durations.std()

        fmean=flips.mean()
        fstd=flips.std()

        pylab.figure(figsize=[30,10])
        pylab.subplot(1,3,1)

        # the histogram of the delay data
        n, bins, patches = hist(durations, 50, normed=True, facecolor='blue', alpha=0.75)
        # add a 'best fit' line
        y = mlab.normpdf( bins, dmean, dstd)
        plot(bins, y, 'r--', linewidth=1)
        xlabel('ioHub getEvents Delay')
        ylabel('Percentage')
        title(r'$\mathrm{{Histogram\ of\ Delay:}}\ \min={0:.3f},\ \max={1:.3f},\ \mu={2:.3f},\ \sigma={3:.3f}$'.format(
                dmin, dmax, dmean, dstd))
        axis([0, dmax+1.0, 0, 25.0])
        grid(True)


        # graphs of the retrace data (taken from the retrace example in the psychopy demos folder)
        intervalsMS = flips
        m=fmean
        sd=fstd
        distString= "Mean={0:.1f}ms,    s.d.={1:.1f},    99%CI={2:.1f}-{3:.1f}".format(m, sd, m - 3 * sd, m + 3 * sd)
        nTotal=len(intervalsMS)
        nDropped=sum(intervalsMS>(1.5*m))
        droppedString = "Dropped/Frames = {0:d}/{1:d} = {2:.2f}%".format(int(nDropped), nTotal, 100.0 * nDropped / float(nTotal))

        pylab.subplot(1,3,2)

        #plot the frameintervals
        pylab.plot(intervalsMS, '-')
        pylab.ylabel('t (ms)')
        pylab.xlabel('frame N')
        pylab.title(droppedString)

        pylab.subplot(1,3,3)
        pylab.hist(intervalsMS, 50, normed=0, histtype='stepfilled')
        pylab.xlabel('t (ms)')
        pylab.ylabel('n frames')
        pylab.title(distString)

        show()
Example #11
0
class ExperimentRuntime(ioHubExperimentRuntime):
    HORZ_SCALING = 0.9
    VERT_SCALING = 0.9
    HORZ_POS_COUNT = 7
    VERT_POS_COUNT = 7
    RANDOMIZE_TRIALS = True

    def __init__(self, configFileDirectory, configFile):
        ioHubExperimentRuntime.__init__(self, configFileDirectory, configFile)

    def run(self, *args, **kwargs):
        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED.
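        #
        # For illustration only (not part of the original example): a point given in
        # conventional top-left-origin pixel coordinates could be converted to this
        # center-origin space roughly as follows, where pixel_width and pixel_height
        # are the screen resolution:
        #   center_x = left_x - pixel_width / 2.0
        #   center_y = (pixel_height / 2.0) - top_y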

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        # using getDevice() returns None if the device is not found,
        tracker = self.hub.getDevice("tracker")

        display = self.devices.display
        kb = self.devices.kb
        mouse = self.devices.mouse

        if tracker is None:
            print "EyeTracker Device cdid not load."
            # return 0

        # get the experiment condition variable excel file to use.
        fdialog = FileDialog(
            message="Select a Condition Variable File",
            defaultDir=self.paths.CONDITION_FILES.getPath(),
            defaultFile="",
            openFile=True,
            allowMultipleSelections=False,
            allowChangingDirectories=True,
            fileTypes=(FileDialog.EXCEL_FILES, FileDialog.ALL_FILES),
            display_index=display.getIndex(),
        )

        result, conditionVariablesFile = fdialog.show()
        fdialog.destroy()

        if result != FileDialog.OK_RESULT:
            print "User cancelled Condition Variable Selection... Exiting Experiment."
            return

        if conditionVariablesFile:
            conditionVariablesFile = conditionVariablesFile[0]

        # create a condition set provider
        self.conditionVariablesProvider = ExperimentVariableProvider(
            conditionVariablesFile,
            "BLOCK_LABEL",
            practiceBlockValues="PRACTICE",
            randomizeBlocks=False,
            randomizeTrials=True,
        )

        # initialize (or create) a table in the ioDataStore to hold the condition variable data
        self.hub.initializeConditionVariableTable(self.conditionVariablesProvider)

        # Hide the 'system mouse cursor' so it does not bother us.
        mouse.setSystemCursorVisibility(False)

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'test monitor', which is created on the fly right now by the script
        self.window = FullScreenWindow(display)

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen = ClearScreen(self)
        self.clearScreen.flip(text="EXPERIMENT_INIT")

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(ioHub.util.getCurrentDateTimeString()))
        self.clearScreen.sendMessage(
            "Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID)
        )
        self.clearScreen.sendMessage(
            "Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
                display.getIndex(), display.getPixelResolution(), display.getCoordinateType()
            )
        )
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # screen for showing text and waiting for a keyboard response or something
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
        self.instructionScreen = InstructionScreen(
            self, "Press Space Key when Ready to Start Experiment.", dtrigger, 5 * 60
        )

        # screen state used during the data collection / runtime of the experiment to move the
        # target from one point to another.
        self.targetScreen = TargetScreen(self)

        xyEventTrigs = [
            DeviceEventTrigger(
                kb, EventConstants.KEYBOARD_PRESS, {"key": "F1"}, self.targetScreen.toggleDynamicStimVisibility
            )
        ]
        if tracker:
            self.targetScreen.dynamicStimPositionFuncPtr = tracker.getLastGazePosition
            msampleTrig = DeviceEventTrigger(
                tracker, EventConstants.MONOCULAR_EYE_SAMPLE, {}, self.targetScreen.setDynamicStimPosition
            )
            bsampleTrig = DeviceEventTrigger(
                tracker, EventConstants.BINOCULAR_EYE_SAMPLE, {}, self.targetScreen.setDynamicStimPosition
            )
            xyEventTrigs.extend([msampleTrig, bsampleTrig])
        else:
            self.targetScreen.dynamicStimPositionFuncPtr = mouse.getPosition
            msampleTrig = DeviceEventTrigger(
                mouse, EventConstants.MOUSE_MOVE, {}, self.targetScreen.setDynamicStimPosition
            )
            xyEventTrigs.append(msampleTrig)

        # setup keyboard event hook on target screen state
        # to catch any press space bar events for responses to color changes.

        dtrigger = DeviceEventTrigger(
            kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"}, self._spaceKeyPressedDuringTargetState
        )
        xyEventTrigs.append(dtrigger)
        self.targetScreen.setEventTriggers(xyEventTrigs)

        # set all screen states background color to the first screen background color in the Excel file
        # i.e. the SCREEN_COLOR column
        displayColor = tuple(self.conditionVariablesProvider.getData()[0]["SCREEN_COLOR"])
        self.clearScreen.setScreenColor(displayColor)
        self.instructionScreen.setScreenColor(displayColor)
        self.targetScreen.setScreenColor(displayColor)

        # clear the display a few times to be sure front and back buffers are clean.
        self.clearScreen.flip()

        self.hub.clearEvents("all")

        # show the opening instruction screen, clearing events so that events generated before
        # the new screen state is displayed are not picked up by the event monitoring. This is
        # the default, so you can just call .switchTo() if you want all events cleared right
        # after the flip returns. If you do not want events cleared, use .switchTo(False).
        #
        flip_time, time_since_flip, event = self.instructionScreen.switchTo(clearEvents=True, msg="EXPERIMENT_START")

        self.clearScreen.flip(text="PRACTICE_BLOCKS_START")
        # Run Practice Blocks
        self.runBlockSet(self.conditionVariablesProvider.getPracticeBlocks())
        self.clearScreen.flip(text="PRACTICE_BLOCKS_END")

        # Run Experiment Blocks
        self.clearScreen.flip(text="EXPERIMENT_BLOCKS_START")
        self.runBlockSet(self.conditionVariablesProvider.getExperimentBlocks())
        self.clearScreen.flip(text="EXPERIMENT_BLOCKS_END")

        # show the 'thanks for participating screen'
        self.instructionScreen.setText("Experiment Complete. Thank you for Participating.")
        self.instructionScreen.setTimeout(10 * 60)  # 10 minute timeout
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
        self.instructionScreen.setEventTriggers(dtrigger)
        flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="EXPERIMENT_END")

        # close the psychopy window
        self.window.close()

        # Experiment done; close the tracker connection if it is open.

        if tracker:
            tracker.setConnectionState(False)

        ### End of experiment logic

    # Called by the run() method to perform a sequence of blocks in the experiment.
    # So this method has the guts of the experiment logic.
    # This method is called once to run any practice blocks, and once to run the experimental blocks.
    #
    def runBlockSet(self, blockSet):
        # using getDevice() returns None if the device is not found,
        tracker = self.hub.getDevice("tracker")

        daq = self.hub.getDevice("daq")

        # using self.devices.xxxxx raises an exception if the
        # device is not present
        kb = self.devices.kb
        display = self.devices.display

        # for each block in the group of blocks.....
        for trialSet in blockSet.getNextConditionSet():
            # if an eye tracker is connected,
            if tracker:
                self.instructionScreen.setTimeout(30 * 60.0)  # 30 minute timeout, long enough for a break if needed.
                dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": ["RETURN", "ESCAPE"]})
                self.instructionScreen.setEventTriggers(dtrigger)
                self.instructionScreen.setText(
                    "Press 'Enter' to go to eye tracker Calibration mode.\n\nTo skip calibration and start Data Recording press 'Escape'"
                )
                flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="CALIBRATION_SELECT")
                if event and event.key == "RETURN":
                    runEyeTrackerSetupAndCalibration(tracker, self.window)
                elif event and event.key == "ESCAPE":
                    print "** Calibration stage skipped for block ", blockSet.getCurrentConditionSetIteration()
                else:
                    print "** Time out occurred. Entering calibration mode to play it safe. ;)"
                    runEyeTrackerSetupAndCalibration(tracker, self.window)

            dres = display.getPixelResolution()
            # right now, target positions are automatically generated based on point grid size, screen size, and a scaling factor (a gain).
            TARGET_POSITIONS = generatedPointGrid(
                dres[0], dres[1], self.HORZ_SCALING, self.VERT_SCALING, self.HORZ_POS_COUNT, self.VERT_POS_COUNT
            )

            # indexes determining the order the target positions are displayed in; they start out non-randomized.
            RAND_INDEXES = np.arange(TARGET_POSITIONS.shape[0])

            # if conditionVariablesProvider was told to randomize trials, then randomize trial index access list.
            if self.conditionVariablesProvider.randomizeTrials is True:
                self.hub.sendMessageEvent(
                    "RAND SEED = {0}".format(ExperimentVariableProvider._randomGeneratorSeed),
                    sec_time=ExperimentVariableProvider._randomGeneratorSeed / 1000.0,
                )
                np.random.shuffle(RAND_INDEXES)

            dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
            self.instructionScreen.setEventTriggers(dtrigger)
            self.instructionScreen.setText(
                "Press 'Space' key when Ready to Start Block %d" % (blockSet.getCurrentConditionSetIteration())
            )
            flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="BLOCK_START")

            # enable high priority for the experiment process only. Not sure this is necessary, or even a good idea,
            # based on tests so far; running at standard priority usually seems to be just fine.
            Computer.enableRealTimePriority(True)

            # if we have a tracker, start recording.......
            if tracker:
                tracker.setRecordingState(True)

            # delay a short time to let the data start flowing in
            self.hub.wait(0.050)

            # In this paradigm, each 'trial' is the movement from one target location to another.
            # Recording of eye data is on for the whole block of XxY target positions within the block.
            # A rough outline of the runtime / data collection portion of a block is as follows:
            #      a) Start each block with the target at screen center.
            #      b) Wait sec.msec duration after showing the target [ column PRE_POS_CHANGE_INTERVAL ] in excel file
            #      c) Then schedule move of target to next target position at the time of the next retrace.
            #      d) Once the Target has moved to the 2nd position for the trial, wait PRE_COLOR_CHANGE_INTERVAL
            #         sec.msec before 'possibly changing the color of the center of the target. The new color is
            #         determined by the FP_INNER_COLOR2 column. If no color change is wanted, simply make this color
            #         equal to the color of the target center in column FP_INNER_COLOR for that row of the spreadsheet.
            #      e) Once the target has been redrawn (either with or without a color change), it stays in position for
            #         another POST_COLOR_CHANGE_INTERVAL sec.msec. Since ioHub is being used, all keyboard activity
            #         is being recorded to the ioDataStore file, so there is really no need to 'monitor' for
            #         the participant's key presses, since we do not use them for feedback. They can be retrieved from the
            #         data file for analysis post hoc.
            #      f) After the POST_COLOR_CHANGE_INTERVAL, the current 'trial' officially ends, and the next trial
            #         starts, with the target remaining in the position it was at in the end of the last trial, but
            #         with the target center color switching to FP_INNER_COLOR.
            #      g) Then the sequence from b) starts again for the number of target positions in the block
            #        (49 currently).
            #
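            # For illustration only (hypothetical values, not taken from the original
            # condition file), one row of the spreadsheet might contain:
            #   BLOCK_LABEL=BLOCK_1, SCREEN_COLOR=(128,128,128),
            #   FP_OUTER_COLOR=(255,255,255), FP_INNER_COLOR=(0,0,0), FP_INNER_COLOR2=(255,0,0),
            #   FP_OUTER_RADIUS=0.5, FP_INNER_RADIUS=0.1,
            #   PRE_POS_CHANGE_INTERVAL=1.0, PRE_COLOR_CHANGE_INTERVAL=0.75, POST_COLOR_CHANGE_INTERVAL=0.75
            #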

            self.hub.clearEvents("all")

            self._TRIAL_STATE = None
            self.targetScreen.nextAreaOfInterest = None

            for trial in trialSet.getNextConditionSet():
                currentTrialIndex = trialSet.getCurrentConditionSetIndex()

                nextTargetPosition = TARGET_POSITIONS[currentTrialIndex]
                trial["FP_X"] = nextTargetPosition[0]
                trial["FP_Y"] = nextTargetPosition[1]

                ppd_x, ppd_y = self.devices.display.getPixelsPerDegree()
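                # pixels-per-degree is used below to convert the FP_*_RADIUS columns
                # (assumed to be specified in degrees of visual angle) into pixel sizes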

                fp_outer_radius = int(trial["FP_OUTER_RADIUS"] * ppd_x), int(trial["FP_OUTER_RADIUS"] * ppd_y)
                fp_inner_radius = int(trial["FP_INNER_RADIUS"] * ppd_x), int(trial["FP_INNER_RADIUS"] * ppd_y)

                self.targetScreen.setScreenColor(tuple(trial["SCREEN_COLOR"]))
                self.targetScreen.setTargetOuterColor(tuple(trial["FP_OUTER_COLOR"]))
                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR"]))
                self.targetScreen.setTargetOuterSize(fp_outer_radius)
                self.targetScreen.setTargetInnerSize(fp_inner_radius)

                self.hub.clearEvents("kb")

                self.targetScreen.setTimeout(trial["PRE_POS_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_PRE_POS_CHANGE_KEY"
                target_pos1_color1_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_INITIAL_COLOR"
                )

                self.targetScreen.setTargetPosition(nextTargetPosition)
                self.targetScreen.setTimeout(trial["PRE_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_POS_CHANGE_KEY"

                # create a 3 degree circular region (1.5 degree radius) around the next target position
                # for use as our invisible boundary
                self.targetScreen.nextAreaOfInterest = Point(*nextTargetPosition).buffer(((ppd_x + ppd_y) / 2.0) * 1.5)

                target_pos2_color1_time, time_since_flip, event = self.targetScreen.switchTo(msg="TRIAL_TARGET_MOVE")

                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR2"]))
                self.targetScreen.setTimeout(trial["POST_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_COLOR_CHANGE_KEY"
                target_pos2_color2_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_COLOR_TWO"
                )

                # end of 'trial sequence'
                # send condition variables used / populated to ioDataStore
                toSend = [self.hub.experimentSessionID, trialSet.getCurrentConditionSetIteration()]
                trial["TSTART_TIME"] = target_pos1_color1_time
                trial["APPROX_TEND_TIME"] = target_pos2_color2_time + time_since_flip
                trial["target_pos1_color1_time"] = target_pos1_color1_time
                trial["target_pos2_color1_time"] = target_pos2_color1_time
                trial["target_pos2_color2_time"] = target_pos2_color2_time

                if self.targetScreen.aoiTriggeredID:
                    trial["VOG_SAMPLE_ID_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredID
                    trial["VOG_SAMPLE_TIME_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredTime
                if self.targetScreen.aoiBestGaze:
                    trial["BEST_GAZE_X"] = self.targetScreen.aoiBestGaze[0]
                    trial["BEST_GAZE_Y"] = self.targetScreen.aoiBestGaze[1]

                self._TRIAL_STATE = None
                if self.targetScreen.nextAreaOfInterest:
                    del self.targetScreen.nextAreaOfInterest
                    self.targetScreen.nextAreaOfInterest = None

                toSend.extend(trial.tolist())
                self.hub.addRowToConditionVariableTable(toSend)

            # end of block of trials, clear screen
            self.clearScreen.flip(text="BLOCK_END")

            self._TRIAL_STATE = None

            # if tracking eye position, turn off eye tracking.
            if tracker:
                tracker.setRecordingState(False)
            if daq:
                daq.enableEventReporting(False)

            # turn off high priority so python GC can clean up if it needs to.
            Computer.disableHighPriority()

            # give a 100 msec delay before starting next block
            self.hub.wait(0.100)

        # end of block set, return from method.
        self.clearScreen.flip(text="BLOCK_SET_END")
        return True

    def _spaceKeyPressedDuringTargetState(self, flipTime, stateDuration, event):
        if self._TRIAL_STATE:
            trial, column_name = self._TRIAL_STATE
            if trial[column_name] <= -100:  # no RT has been registered yet
                trial[column_name] = event.time - flipTime
        return False
Example #13
0
class TobiiPsychopyCalibrationGraphics(object):
    IOHUB_HEARTBEAT_INTERVAL=0.050   # seconds between forced run through of
                                     # micro threads, since one is blocking
                                     # on camera setup.
    WINDOW_BACKGROUND_COLOR=(128,128,128)
    CALIBRATION_POINT_OUTER_RADIUS=15.0,15.0
    CALIBRATION_POINT_OUTER_EDGE_COUNT=64
    CALIBRATION_POINT_OUTER_COLOR=(255,255,255)
    CALIBRATION_POINT_INNER_RADIUS=3.0,3.0
    CALIBRATION_POINT_INNER_EDGE_COUNT=32
    CALIBRATION_POINT_INNER_COLOR=(25,25,25)
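    # calibration points are expressed in normalized (0.0 - 1.0) screen coordinates with
    # (0,0) at the top-left; they are converted to center-origin pixel positions at draw time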
    CALIBRATION_POINT_LIST=[(0.5, 0.5),(0.1, 0.1),(0.9, 0.1),(0.9, 0.9),(0.1, 0.9),(0.5, 0.5)]

    TEXT_POS=[0,0]
    TEXT_COLOR=[0,0,0]
    TEXT_HEIGHT=48
    
    def __init__(self, eyetrackerInterface, targetForegroundColor=None, 
                 targetBackgroundColor=None, screenColor=None, 
                 targetOuterDiameter=None, targetInnerDiameter=None,
                 calibrationPointList=None):
        self._eyetrackerinterface=eyetrackerInterface
        self._tobii = eyetrackerInterface._tobii._eyetracker
        self.screenSize = eyetrackerInterface._display_device.getPixelResolution()
        self.width=self.screenSize[0]
        self.height=self.screenSize[1]
        self._ioKeyboard=None

        self._msg_queue=Queue.Queue()
        self._lastCalibrationOK=False
        self._lastCalibrationReturnCode=0
        self._lastCalibration=None
        
        TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_OUTER_COLOR=targetForegroundColor
        TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_INNER_COLOR=targetBackgroundColor
        TobiiPsychopyCalibrationGraphics.WINDOW_BACKGROUND_COLOR=screenColor
        TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_OUTER_RADIUS=targetOuterDiameter/2.0,targetOuterDiameter/2.0
        TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_INNER_RADIUS=targetInnerDiameter/2.0,targetInnerDiameter/2.0


        if calibrationPointList is not None:
            TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_LIST=calibrationPointList

        calibration_methods = dict(THREE_POINTS=3,
                                   FIVE_POINTS=5, 
                                   NINE_POINTS=9, 
                                   THIRTEEN_POINTS=13)

        cal_type=self._eyetrackerinterface.getConfiguration()['calibration']['type']

        if cal_type in calibration_methods:
            num_points=calibration_methods[cal_type]
            
            if num_points == 3:
                TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_LIST=[(0.5,0.1),
                                                                         (0.1,0.9),
                                                                         (0.9,0.9),
                                                                         (0.5,0.1)]
            elif num_points == 9:
                TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_LIST=[(0.5, 0.5),
                                                                         (0.1, 0.5),
                                                                         (0.9, 0.5),
                                                                         (0.1, 0.1),
                                                                         (0.5, 0.1),
                                                                         (0.9, 0.1),
                                                                         (0.9, 0.9),
                                                                         (0.5, 0.9),
                                                                         (0.1, 0.9),
                                                                         (0.5, 0.5)]
#            elif num_points == 13:
#                TobiiPsychopyCalibrationGraphics.CALIBRATION_POINT_LIST=[(x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y),
#                                                                         (x,y)]

        self.window = FullScreenWindow(self._eyetrackerinterface._display_device)
        self.window.setColor(self.WINDOW_BACKGROUND_COLOR,'rgb255')        
        self.window.flip(clearBuffer=True)
        
        self._createStim()        
        self._registerEventMonitors()
        self._lastMsgPumpTime=currentTime()
        
        self.clearAllEventBuffers()

    def clearAllEventBuffers(self):
        self._eyetrackerinterface._iohub_server.eventBuffer.clear()
        for d in self._eyetrackerinterface._iohub_server.devices:
            d.clearEvents()

    def _registerEventMonitors(self):
        kbDevice=None
        if self._eyetrackerinterface._iohub_server:
            for dev in self._eyetrackerinterface._iohub_server.devices:
                #ioHub.print2err("dev: ",dev.__class__.__name__)
                if dev.__class__.__name__ == 'Keyboard':
                    kbDevice=dev

        if kbDevice:
            eventIDs=[]
            for event_class_name in kbDevice.__class__.EVENT_CLASS_NAMES:
                eventIDs.append(getattr(EventConstants,convertCamelToSnake(event_class_name[:-5],False)))

            self._ioKeyboard=kbDevice
            self._ioKeyboard._addEventListener(self,eventIDs)
        else:
            ioHub.print2err("Warning: Tobii Cal GFX could not connect to Keyboard device for events.")

    def _unregisterEventMonitors(self):
        if self._ioKeyboard:
            self._ioKeyboard._removeEventListener(self)
     
    def _handleEvent(self,ioe):
        event_type_index=ioHub.devices.DeviceEvent.EVENT_TYPE_ID_INDEX
        if ioe[event_type_index] == EventConstants.KEYBOARD_CHAR:
            if ioe[-5] == 'SPACE':
                self._msg_queue.put("SPACE_KEY_ACTION")
                self.clearAllEventBuffers()
            if ioe[-5] == 'ESCAPE':
                self._msg_queue.put("QUIT")
                self.clearAllEventBuffers()

    def MsgPump(self):
        #keep the psychopy window happy ;)
        if currentTime()-self._lastMsgPumpTime>self.IOHUB_HEARTBEAT_INTERVAL:                
            # try to keep ioHub from being blocked. ;(
            if self._eyetrackerinterface._iohub_server:
                for dm in self._eyetrackerinterface._iohub_server.deviceMonitors:
                    dm.device._poll()
                self._eyetrackerinterface._iohub_server._processDeviceEventIteration()
            self._lastMsgPumpTime=currentTime()

    def getNextMsg(self):
        try:
            msg=self._msg_queue.get(block=True,timeout=0.02)
            self._msg_queue.task_done()
            return msg
        except Queue.Empty:
            pass
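            # no message arrived within the timeout; fall through and implicitly return None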

    def _createStim(self):                
        self.calibrationPointOUTER = visual.Circle(self.window,pos=(0,0) ,lineWidth=0.0,
                                                   radius=self.CALIBRATION_POINT_OUTER_RADIUS,
                                                   name='CP_OUTER', units='pix',opacity=1.0, 
                                                   interpolate=False)
        self.calibrationPointINNER = visual.Circle(self.window,pos=(0,0),
                                                   lineWidth=0.0, 
                                                   radius=self.CALIBRATION_POINT_INNER_RADIUS,
                                                   name='CP_INNER',units='pix',
                                                   opacity=1.0, interpolate=False)
        
        self.calibrationPointOUTER.setFillColor(self.CALIBRATION_POINT_OUTER_COLOR,'rgb255')
        self.calibrationPointOUTER.setLineColor(None,'rgb255')
        self.calibrationPointINNER.setFillColor(self.CALIBRATION_POINT_INNER_COLOR,'rgb255')
        self.calibrationPointINNER.setLineColor(None,'rgb255')

        instuction_text="Press Space Key to Start Eye Tracker Calibration."
        self.startCalibrationTextScreen=visual.TextStim(self.window, 
                                                        text=instruction_text, 
                                                        pos = self.TEXT_POS, 
                                                        height=self.TEXT_HEIGHT, 
                                                        color=self.TEXT_COLOR, 
                                                        colorSpace='rgb255',
                                                        alignHoriz='center',
                                                        alignVert='center',
                                                        wrapWidth=self.width*0.9)
        
    def runCalibration(self):
        """
        Performs a simple calibration routine. 
        
        Args: 
            None
        
        Result:
            bool: True if calibration was successful. False if not, in which case exit the application.            
        """
        import tobii

        self._lastCalibrationOK=False
        self._lastCalibrationReturnCode=0
        self._lastCalibration=None
        
        calibration_sequence_completed=False
        quit_calibration_notified=False
        

        instuction_text="Press Space Key to Start Eye Tracker Calibration."
        self.startCalibrationTextScreen.setText(instuction_text)
        
        self.startCalibrationTextScreen.draw()
        self.window.flip()
        
        self.clearAllEventBuffers()
 
        stime=currentTime()
        while currentTime()-stime<60*5.0:
            msg=self.getNextMsg()
            if msg == 'SPACE_KEY_ACTION':
                break

            self.MsgPump()

        self.clearAllEventBuffers()


        auto_pace=self._eyetrackerinterface.getConfiguration()['calibration']['auto_pace']
        pacing_speed=self._eyetrackerinterface.getConfiguration()['calibration']['pacing_speed']

        randomize_points=self._eyetrackerinterface.getConfiguration()['calibration']['randomize']

        cal_target_list=self.CALIBRATION_POINT_LIST[1:-1]
        if randomize_points is True:
            import random
            random.seed(None)
            random.shuffle(cal_target_list)
            
        cal_target_list.insert(0,self.CALIBRATION_POINT_LIST[0])
        cal_target_list.append(self.CALIBRATION_POINT_LIST[-1])
        
        self._tobii.StartCalibration(self.on_start_calibration)   

        i=0
        for pt in cal_target_list:
            w,h=self.screenSize
            #ioHub.print2err("Screen Size: ",w," ",h)
            self.clearAllEventBuffers()
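            # map the normalized (0.0 - 1.0, origin top-left) calibration point to pixel
            # coordinates with the window center as the origin (y axis flipped)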
            pix_pt=int(w*pt[0]-w/2),int(h*(1.0-pt[1])-h/2)
            #ioHub.print2err( "Cal point Mapping: ",pt," == ",pix_pt)
            self.drawCalibrationTarget(pix_pt)
            self.clearAllEventBuffers()
            stime=currentTime()
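            # by default the loop below waits indefinitely for a SPACE key (manual pacing);
            # when auto_pace is enabled, the redefined function ends the wait after pacing_speed seconds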
            
            def waitingForNextTargetTime():
                return True
            
            if auto_pace is True:
                def waitingForNextTargetTime():
                    return currentTime()-stime<float(pacing_speed)
                
            while waitingForNextTargetTime():
                msg=self.getNextMsg()
                if msg == 'SPACE_KEY_ACTION':
                    break
                elif msg == 'QUIT':
                    quit_calibration_notified=True
                    
                self.MsgPump()
            
            if quit_calibration_notified:
                break
            
            pt2D=tobii.sdk.types.Point2D(pt[0],pt[1])
            self._tobii.AddCalibrationPoint(pt2D,self.on_add_calibration_point)
            time.sleep(0.5)            
            self.clearCalibrationWindow()
            self.clearAllEventBuffers()

            i+=1
            if i == len(cal_target_list):
                calibration_sequence_completed=True
        
        if calibration_sequence_completed:
            self._tobii.ComputeCalibration(self.on_compute_calibration)
 
            msg=1
            while msg not in ["CALIBRATION_COMPUTATION_COMPLETE","CALIBRATION_COMPUTATION_FAILED"]:        
                msg=self.getNextMsg()
            
        self._tobii.StopCalibration(self.on_stop_calibration)  
        msg=1
        while msg is not "CALIBRATION_FINISHED":        
            msg=self.getNextMsg()

        if self._lastCalibrationOK is True:
            self._tobii.GetCalibration(self.on_calibration_result)

            msg=1
            while msg is not "CALIBRATION_RESULT_RECEIVED":        
                msg=self.getNextMsg()
            
            cal_data_dict={}

            import math

            for cal_point_result in self._lastCalibration.plot_data:
                left_eye_data=cal_point_result.left.map_point
                left_eye_data=(left_eye_data.x*self.width,left_eye_data.y*self.height),cal_point_result.left.validity
                
                right_eye_data=cal_point_result.right.map_point
                right_eye_data=(right_eye_data.x*self.width,right_eye_data.y*self.height),cal_point_result.right.validity
                
                target_pos=cal_point_result.true_point.x*self.width,cal_point_result.true_point.y*self.height
                
                if target_pos not in cal_data_dict:
                    cal_data_dict[target_pos]=[]
                cal_data_dict[target_pos].append((left_eye_data,right_eye_data))

            cal_stats=dict()
            for (targ_x,targ_y),eye_cal_result_list in cal_data_dict.iteritems():
                left_stats=dict(pos_sample_count=0,invalid_sample_count=0,avg_err=0.0,min_err=100000.0,max_err=0.0)
                right_stats=dict(pos_sample_count=0,invalid_sample_count=0,avg_err=0.0,min_err=100000.0,max_err=0.0)
                
                for ((left_x,left_y),left_validity),((right_x,right_y),right_validity) in eye_cal_result_list:
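                    # per-eye error = Euclidean distance (in pixels) between the target position
                    # and the eye's mapped calibration point; invalid samples are tallied separately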
                    left_stats['pos_sample_count']+=1.0
                    right_stats['pos_sample_count']+=1.0
                    
                    if left_validity==1:
                        x_err=targ_x-left_x
                        y_err=targ_y-left_y
                        left_err=math.sqrt(x_err*x_err+y_err*y_err)
                        if left_err<left_stats['min_err']:
                            left_stats['min_err']=left_err
                        if left_err>left_stats['max_err']:
                            left_stats['max_err']=left_err
                        left_stats['avg_err']+=left_err
                    else:
                        left_stats['invalid_sample_count']+=1.0

                        
                    if right_validity==1:
                        x_err=targ_x-right_x
                        y_err=targ_y-right_y                        
                        right_err=math.sqrt(x_err*x_err+y_err*y_err)
                        if right_err<right_stats['min_err']:
                            right_stats['min_err']=right_err
                        if right_err>right_stats['max_err']:
                            right_stats['max_err']=right_err
                        right_stats['avg_err']+=right_err
                    else:
                        right_stats['invalid_sample_count']+=1.0
                    
                if right_stats['invalid_sample_count']==0:
                    right_stats['valid_sample_percentage']=100.0
                else:
                    right_stats['valid_sample_percentage']=(1.0-right_stats['invalid_sample_count']/right_stats['pos_sample_count'])*100.0
                
                if left_stats['invalid_sample_count']==0:
                    left_stats['valid_sample_percentage']=100.0
                else:
                    left_stats['valid_sample_percentage']=(1.0-left_stats['invalid_sample_count']/left_stats['pos_sample_count'])*100.0
             
                if int(right_stats['pos_sample_count']-right_stats['invalid_sample_count'])>0:
                    right_stats['avg_err']=right_stats['avg_err']/(right_stats['pos_sample_count']-right_stats['invalid_sample_count'])
                else:
                    right_stats['avg_err']=-1.0
                    
                if int(left_stats['pos_sample_count']-left_stats['invalid_sample_count'])>0:
                    left_stats['avg_err']=left_stats['avg_err']/(left_stats['pos_sample_count']-left_stats['invalid_sample_count'])
                else:
                    left_stats['avg_err']=-1.0
               
                cal_stats[(targ_x,targ_y)]=dict(left=left_stats,right=right_stats)
            
            # TODO Use calibration stats to show graphical results of calibration
            
            instuction_text="Calibration Passed. PRESS 'SPACE' KEY TO CONTINUE."     
            self.startCalibrationTextScreen.setText(instuction_text)
            self.startCalibrationTextScreen.draw()
            self.window.flip()
            self.clearAllEventBuffers()
        
            while 1:
                msg=self.getNextMsg()
                if msg == 'SPACE_KEY_ACTION':
                    return True
                    
                self.MsgPump()

        if self._lastCalibrationOK is False:
            instuction_text="Calibration Failed. Options: SPACE: Re-run Calibration; ESCAPE: Exit Program"            
            self.startCalibrationTextScreen.setText(instuction_text)
            self.startCalibrationTextScreen.draw()
            self.window.flip()
            self.clearAllEventBuffers()
        
            while 1:
                msg=self.getNextMsg()
                if msg == 'SPACE_KEY_ACTION':
                    return self.runCalibration()
                elif msg == 'QUIT':
                    return False
                    
                self.MsgPump()
        
        return True
            
    def clearCalibrationWindow(self):
        self.window.flip(clearBuffer=True)
        
    def drawCalibrationTarget(self,tp):        
        self.calibrationPointOUTER.setPos(tp)            
        self.calibrationPointINNER.setPos(tp)            
        self.calibrationPointOUTER.draw()          
        self.calibrationPointINNER.draw()            
        self.window.flip(clearBuffer=True)
           
    def on_start_calibration(self,*args,**kwargs):
        #ioHub.print2err('on_start_calibration: ',args,kwargs)
        pass
    
    def on_add_calibration_point(self,*args,**kwargs):
        #ioHub.print2err('on_add_calibration_point: ',args,kwargs)
        self._msg_queue.put('DRAW_NEXT')

    def on_stop_calibration(self,*args,**kwargs):
        #ioHub.print2err('on_stop_calibration: ',args,kwargs)
        self._msg_queue.put("CALIBRATION_FINISHED")
        
    def on_compute_calibration(self,*args,**kwargs):
        self._lastCalibrationReturnCode=args[0]
        if self._lastCalibrationReturnCode!=0:
            ioHub.print2err("ERROR: Tobii Calibration Calculation Failed. Error code: {0}".format(self._lastCalibrationReturnCode))
            self._lastCalibrationOK=False
            self._msg_queue.put("CALIBRATION_COMPUTATION_FAILED")
            
        else:
            self._msg_queue.put("CALIBRATION_COMPUTATION_COMPLETE")
            self._lastCalibrationOK=True

    def on_calibration_result(self,*args,**kwargs):
        self._lastCalibration=args[1]
        self._msg_queue.put("CALIBRATION_RESULT_RECEIVED")
Example #14
0
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it, really.
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        ain=self.devices.ain
        
        # get the number of trials entered in the session dialog
        user_params=self.getSavedUserDefinedParameters()
        print 'user_params: ', user_params
        trial_count=int(user_params.get('trial_count',5))
           
        #Computer.enableHighPriority()

        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Create a psychopy window, full screen resolution, full screen mode
        psychoWindow = FullScreenWindow(display)
        
        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by name or by 'z-order'.
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=150,pos=[0,0], sf=.075)

        psychoStim['title'] = visual.TextStim(win=psychoWindow, 
                              text="Analog Input Test. Trial 1 of %d"%(trial_count),
                              pos = [0,200], height=36, color=[1,.5,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        ai_values_string_proto="AI_0: %.3f\tAI_1: %.3f\tAI_2: %.3f\tAI_3: %.3f\t\nAI_4: %.3f\tAI_5: %.3f\tAI_6: %.3f\tAI_7: %.3f"
        ai_values=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
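        # text stim displaying the most recent value of each of the 8 analog input channels (AI_0 - AI_7)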
        psychoStim['analog_input_values'] = visual.TextStim(win=psychoWindow, 
                              text=ai_values_string_proto%ai_values,
                              pos = [0,-200], height=24, color=[1,1,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        psychoStim['instruction'] = visual.TextStim(win=psychoWindow, 
                              text="Press ESCAPE Key for Next Trial",
                              pos = [0,-300], height=36, color=[1,1,0.5], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        
        # Run trial_count trials; each trial loops until the Escape key is pressed.
        for i in range(trial_count):        
            # Clear all events from the global and device level event buffers.
            psychoStim['title'].setText("Analog Input Test. Trial %d of %d"%(i+1,trial_count))
            self.hub.clearEvents('all')
            
            # start streaming AnalogInput data
            ain.enableEventReporting(True)
            
            QUIT_TRIAL=False
            
            while QUIT_TRIAL is False:
    
                # for each loop, update the grating phase
                psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
    
                # update analog input values to display
                analog_input_events=ain.getEvents()
                if analog_input_events:
                    event_count=len(analog_input_events)
                    event=analog_input_events[-1]
                    ai_values=(event.AI_0,event.AI_1,event.AI_2,event.AI_3,
                               event.AI_4,event.AI_5,event.AI_6,event.AI_7)
                    psychoStim['analog_input_values'].setText(ai_values_string_proto%ai_values)
    
                # redraw the stim
                [psychoStim[stimName].draw() for stimName in psychoStim]
    
                # flip the psychopy window buffers, so the stim changes you just made get displayed.
                psychoWindow.flip()
                # it is on this side of the call that you know the changes have been displayed, so you can
                # make a call to the ioHub time method and get the time of the flip, as the built-in
                # time methods represent both experiment process and ioHub server process time.
                # Most times in ioHub are represented in sec.msec format to match that of Psychopy.
                flip_time=Computer.currentSec()
    
                # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
                # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
                self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)
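                # Illustrative sketch (not part of the original example): the same flip
                # timestamp can also be used to track the interval between successive
                # flips, e.g. to spot likely dropped frames on a ~60 Hz display
                # (anything well above ~16.7 msec). '_prev_flip_time' is a helper
                # attribute introduced only for this sketch.
                prev_flip=getattr(self,'_prev_flip_time',None)
                if prev_flip is not None and (flip_time-prev_flip)>0.020:
                    print 'possible dropped frame, interval (msec): ',(flip_time-prev_flip)*1000.0
                self._prev_flip_time=flip_time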
        
                # for each new keyboard char event, check if it matches one of the end example keys.
                for k in kb.getEvents(EventConstants.KEYBOARD_CHAR):
                    if k.key in ['ESCAPE', ]:
                        print 'Trial Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                        QUIT_TRIAL=True

            
            # clear the screen
            psychoWindow.flip()
 
            # stop analog input recording
            ain.enableEventReporting(False)
                    
            # delay 1/4 second before next trial
            actualDelay=self.hub.delay(0.250)
    
        # wait 250 msec before ending the experiment
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # for fun, test getting a bunch of events at once, likely causing a multipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()
Example #15
0
File: run.py Project: peircej/ioHub
DOT_COUNT=1000

# Example where ioHub does not use yaml config files specified by user.

from psychopy import visual
from ioHub import quickStartHubServer
from ioHub.util.experiment import FullScreenWindow
import random
io=quickStartHubServer("exp_code","sess_%d"%(random.randint(1,10000)))

# By default, keyboard, mouse, and display devices are created if you
# do not pass any config info to the ioHubConnection class above.
display=io.devices.display
keyboard=io.devices.keyboard

# Create a psychopy window, full screen resolution, full screen mode, pix units,
# with no border, using the monitor default profile name used by ioHub,
# which is created on the fly right now by the script. (ioHubDefault)
myWin= FullScreenWindow(display)

#INITIALISE SOME STIMULI
dotPatch =visual.DotStim(myWin,
                        color=(1.0,1.0,1.0),
                        dir=270,
                        nDots=DOT_COUNT,
                        fieldShape='circle',
                        fieldPos=(0.0,0.0),
                        fieldSize=display.getPixelResolution(),
                        dotLife=5, #number of frames for each dot to be drawn
                        signalDots='same', #are the signal dots the 'same' on each frame? (see Scase et al)
                        noiseDots='direction', #do the noise dots follow random- 'walk', 'direction', or 'position'
                        speed=3.0,
                        coherence=90.0
                        )
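# Minimal sketch (not part of the original excerpt, which ends here): one way the
# dot field could be animated until a key is pressed, using the objects created above.
while not keyboard.getEvents():
    dotPatch.draw()
    myWin.flip()
myWin.close()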
Example #16
0
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        print "THIS DEMO REQUIRES A CONNECTED (WIRED OR WIRELESS) XBOX 360"
        print "GAMEPAD OR OTHER XINPUT COMPATIBLE DEVICE. DEVICE ALSO NEEDS TO "
        print " BE TURNED ON. ;) "

        print ""
        print "\tPRESS 'ESCAPE' KEY TO EXIT."
        print "\tPRESS 'b' KEY TO PRINT BATTERY INFO TO STDOUT."
        print "\tPRESS 'u' KEY TO PRINT CAPABILITIES INFO TO STDOUT."
        print "\tPRESS ANY OTHER KEY TO MAKE GAMEPAD *RUMBLE* FOR 1 SEC."


        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        gamepad=self.devices.gamepad


        # Read the current resolution of the monitor's screen in pixels.
        # We will set our window size to match the current screen resolution and make it a full screen borderless window.
        screen_resolution= display.getPixelResolution()


        # Create psychopy full screen window using the display device config.
        psychoWindow = FullScreenWindow(display)
        
        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by a name or by 'zorder'
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
        psychoStim['text'] = visual.TextStim(psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)


        # Clear all events from the global event buffer, and from the keyboard event buffer.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until we get a keyboard event where the Escape key was pressed.
        while QUIT_EXP is False:

            # read gamepad events and take the last one if any exist
            gpevents=gamepad.getEvents()
            if len(gpevents)>0:
                gpevents=gpevents[-1]

                ## Display pressed buttons
                #
                psychoStim['text'].setText(str([k for k,v in gpevents.buttons.iteritems() if v is True]))
                #
                ###

                # Use the 2 finger triggers for the fixation square position (so it will be at the bottom left hand
                # corner of the screen when the triggers are not pressed)
                #
                fixationX=self.normalizedValue2Pixel(gpevents.leftTrigger,screen_resolution[0], 0)
                fixationY=self.normalizedValue2Pixel(gpevents.rightTrigger,screen_resolution[1], 0)
                psychoStim['fixation'].setPos((fixationX,fixationY))
                #
                #####

                # Use the Right Thumb Stick for the purple gaussian  spot position
                #

                x,y,mag=gpevents.rightThumbStick # sticks are 3 item lists (x,y,magnitude)
                currentPosition[0]=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
                currentPosition[1]=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
                psychoStim['mouseDot'].setPos(currentPosition)
                #
                ###

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # redraw stim
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            psychoWindow.flip()
            # it is on this side of the call that you know the changes have been displayed, so you can
            # make a call to one of the built-in time methods and get the event time of the flip, as the
            # built-in time methods represent both experiment process and ioHub server process time.
            # Most times in ioHub are represented in sec.msec format to match that of Psychopy.
            flip_time=Computer.currentSec()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)


            # for each new keyboard event, check if it matches one of the end example keys.
            for k in kb.getEvents():
                # key: the string representation of the key pressed, A-Z if a-zA-Z pressed, 0-9 if 0-9 pressed etc.
                #      To get the mapping from a key_id to a key string, use
                #
                #      key_string=EventConstants.IDToName(key_event['key_id'])
                #
                # char: the ascii char for the key pressed. This field factors in if shift was also pressed or not
                #       when the char was typed, so typing a 's' == char field of 's', while typing SHIFT+s == char
                #       field of 'S'. This is in contrast to the key field, which always returns upper case values
                #       regardless of shift value. If the character pressed is not an ascii printable character,
                #       this field will print junk, hex, or who knows what else at this point.
                if k.key in ['ESCAPE',]:
                    print 'Quit key pressed: ',k.key
                    QUIT_EXP=True
                else:
                    if k.type == EventConstants.KEYBOARD_PRESS:
                        if k.key in['B','b']:
                            bat=gamepad.updateBatteryInformation()
                            print "Bat Update: ",bat
                            bat=gamepad.getLastReadBatteryInfo()
                            print "Bat Last Read: ",bat
                        elif k.key in['U','u']:
                            bat=gamepad.updateCapabilitiesInformation()
                            print "Cap Update: ",bat
                            bat=gamepad.getLastReadCapabilitiesInfo()
                            print "Cap Last Read: ",bat
                        else:
                            # rumble the pad, 50% low frequency motor,
                            # 25% high frequency motor, for 1 second.
                            r=gamepad.setRumble(50.0,25.0,1.0)

        # wait 250 msec before ending the experiment (makes it feel less
        # abrupt after you press the key)
        self.hub.wait(0.250)

        # for fun, test getting a bunch of events at once,
        # likely causing a multipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime= Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()
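    # Hedged sketch (not in the original excerpt): normalizedValue2Pixel is a helper
    # defined elsewhere in this project. A plausible implementation, consistent with
    # how it is called above (a 'minimum' of 0 for the 0..1 triggers, -1 for the
    # -1..1 thumb stick axes), might look like this; the real method may differ.
    def normalizedValue2Pixel_sketch(self, value, axis_pixel_size, minimum):
        # rescale value from [minimum, 1] to [-0.5, 0.5], then to pixels, so the
        # midpoint of the input range maps to the screen center (origin 0,0).
        normalized=(value-minimum)/(1.0-minimum)-0.5
        return normalized*axis_pixel_size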
Example #17
0
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON.

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.kb
        mouse=self.hub.devices.mouse

        tracker.runSetupProcedure()

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'test monitor', which is created on the fly right now by the script
        window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create the image stimulus that will be displayed and used for the
        # fixation detection demo below.
        image_name='./images/party.png'
        imageStim = visual.ImageStim(window, image=image_name, name='image_stim')

        imageStim.draw()

        print "Detected Fixation Message Format:"
        print "FIX_DETECTED fix_end_time fix_x fix_y fdur fix_rt"

        tracker.setRecordingState(True)
        self.hub.wait(0.050)

        flip_time=window.flip()
        self.hub.clearEvents('all')
        self.hub.sendMessageEvent("SYNCTIME %s"%(image_name,),sec_time=flip_time)
        # Clear all events from the global event buffer, and from the keyboard and eyetracker event buffer.
        # This 'mess' of calls is needed right now because clearing the global event buffer does not
        # clear device level event buffers, and each device buffer is independent. Not sure this is a 'good'
        # thing as it stands, but until there is feedback, it will stay as is.

        fixationCount=0
        dwellTime=0.0
        # Loop until we get a keyboard event
        while len(kb.getEvents())==0:
            for ee in tracker.getEvents(EventConstants.FIXATION_END):
                if EventConstants.FIXATION_END == ee.type:
                    etime=ee.time
                    eeye=ee.eye
                    ex=ee.average_gaze_x
                    ey=ee.average_gaze_y
                    edur=ee.duration
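                    # fixation 'reaction time': time from the image onset flip
                    # (SYNCTIME message above) to the end of this fixation.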
                    ert=etime-flip_time
                    print 'FIX %.3f\t%d\t%.3f\t%.3f\t%.3f\t%.3f'%(etime,eeye,ex,ey,edur,ert)
                    fixationCount+=1
                    dwellTime+=edur
                    self.hub.sendMessageEvent("FIX_DETECTED %.6f %.3f %.3f %.6f %.6f"%(etime,ex,ey,edur,ert))

            imageStim.draw()
            window.flip()

        print "-------------"
        print " Number Fixations Made: ",fixationCount
        print " Total Dwell Time: ",dwellTime
        if fixationCount:
            print " Average Dwell Time / Fixation: ",dwellTime/fixationCount

        # a key was pressed so the loop was exited. We are clearing the event buffers to avoid an event overflow (currently a known issue)
        self.hub.clearEvents('all')
        tracker.setRecordingState(False)


        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        self.hub.wait(0.250)

        tracker.setConnectionState(False)

        # close necessary files / objects, disable high priority.
        window.close()
Example #18
0
File: run.py Project: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        keyboard = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        trial_count = self.getExperimentConfiguration()["trial_count"]
        image_names = self.getExperimentConfiguration()["image_names"]

        # eye trackers, like other devices, should be connected to when the
        # ioHub Server starts, so this next call is not needed, but should not
        # hurt anything either:
        tracker.setConnectionState(True)

        # run the eye tracker calibration routine before starting trials
        tracker.runSetupProcedure()

        # Create a psychopy window, full screen resolution, full screen mode, pix units.
        self.window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        image_cache = dict()
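        # The loop below draws each image once to the back buffer; presumably this is
        # done so the image textures are loaded onto the graphics card before the
        # trials start, after which the back buffer is cleared again.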
        for i in image_names:
            iname = "./images/{0}".format(i)
            image_cache[i] = visual.ImageStim(self.window, image=iname, name=iname)
            image_cache[i].draw()
        image_count = len(image_cache)
        self.window.clearBuffer()

        gaze_dot = visual.GratingStim(
            self.window, tex=None, mask="gauss", pos=(-2000, -2000), size=(100, 100), color="green"
        )

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen = ClearScreen(self)
        self.clearScreen.setScreenColor((128, 128, 128))

        self.clearScreen.flip(text="EXPERIMENT_INIT")

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.clearScreen.sendMessage(
            "Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID)
        )
        self.clearScreen.sendMessage(
            "Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
                display.getIndex(), display.getPixelResolution(), display.getCoordinateType()
            )
        )
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # Screen for showing text and waiting for a keyboard response (or a timeout)
        instuction_text = "Press Space Key".center(32) + "\n" + "to Start Experiment.".center(32)
        dtrigger = DeviceEventTrigger(keyboard, EventConstants.KEYBOARD_CHAR, {"key": "SPACE"})
        timeout = 5 * 60.0
        self.instructionScreen = InstructionScreen(self, instuction_text, dtrigger, timeout)
        self.instructionScreen.setScreenColor((128, 128, 128))
        # flip_time,time_since_flip,event=self.instructionScreen.switchTo("CALIBRATION_WAIT")

        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("START_EXPERIMENT_WAIT")

        self.experiment_running = True

        for t in range(trial_count):
            self.hub.clearEvents("all")
            instuction_text = "Press Space Key To Start Trial %d" % t
            self.instructionScreen.setText(instuction_text)
            self.instructionScreen.switchTo("START_TRIAL")

            tracker.setRecordingState(True)
            self.clearScreen.flip()
            self.hub.clearEvents("all")

            # Select the image to show on this trial (cycling through the available
            # images), then loop until we get a keyboard event.
            trial_image = image_cache[image_names[t % image_count]]
            runtrial = True
            while runtrial:
                gpos = tracker.getLastGazePosition()
                if gpos:
                    gaze_dot.setPos(gpos)
                    trial_image.draw()
                    gaze_dot.draw()
                else:
                    trial_image.draw()

                flip_time = self.window.flip()
                self.hub.sendMessageEvent("SYNCTIME %s" % (trial_image.name,), sec_time=flip_time)

                keys = keyboard.getEvents(EventConstants.KEYBOARD_CHAR)
                for key in keys:
                    if key.key == "SPACE":
                        runtrial = False
                        break

            self.clearScreen.flip(text="TRIAL_%d_DONE" % t)
            tracker.setRecordingState(False)

        self.clearScreen.flip(text="EXPERIMENT_COMPLETE")
        instuction_text = (
            "Experiment Finished".center(32)
            + "\n"
            + "Press 'SPACE' to Quit.".center(32)
            + "\n"
            + "Thank You.".center(32)
        )
        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("EXPERIMENT_COMPLETE_WAIT")

        # A key was pressed so exit experiment.
        # Wait 250 msec before ending the experiment
        # (makes it feel less abrupt after you press the key to quit IMO)
        self.hub.wait(0.250)

        tracker.setConnectionState(False)
Example #19
0
from psychopy import visual, core
from ioHub import quickStartHubServer
from ioHub.constants import EventConstants
from ioHub.util.experiment import FullScreenWindow
import sys
import random
io=quickStartHubServer("exp_code","sess_%d"%(random.randint(1,10000)))

# get 'shortcut' handles to the devices you will be using in the experiment:
myMouse=io.devices.mouse
display=io.devices.display
myKeyboard=io.devices.keyboard

myMouse.setSystemCursorVisibility(False)

myWin = FullScreenWindow(display)

screen_resolution=display.getPixelResolution()

display_index=display.getIndex()

#INITIALISE SOME STIMULI
fixSpot = visual.PatchStim(myWin,tex="none", mask="gauss",
        pos=(0,0), size=(30,30),color='black', autoLog=False)
grating = visual.PatchStim(myWin,pos=(300,0),
                           tex="sin",mask="gauss",
                           color=[1.0,0.5,-1.0],
                           size=(150.0,150.0), sf=(0.01,0.0),
                           autoLog=False)#this stim changes too much for autologging to be useful

message = visual.TextStim(myWin,pos=(0.0,-250),alignHoriz='center',