Example #1
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***
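
        # A minimal sketch of the convention described above (a hypothetical helper,
        # not part of the ioHub API): map a top-left-origin pixel coordinate into
        # this center-origin space, given the display's pixel resolution.
        def topleft_to_center(x, y, res_width, res_height):
            # shift the origin from the top-left corner to the screen center and
            # flip y so that +y points up
            return x - res_width / 2.0, (res_height / 2.0) - y
        # e.g. topleft_to_center(0, 0, 1920, 1080) == (-960.0, 540.0), the top-left corner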

        ENABLE_NOISY_MOUSE=True
     
        
        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb

        #Computer.enableHighPriority()
        
        # Create a psychopy window, using settings from Display device config
        psychoWindow =  FullScreenWindow(display)#,res=(500,500),fullscr=False,allowGUI=True)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        #mouse.setSystemCursorVisibility(False)
        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))
        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        mouse.lockMouseToDisplayID(display.getIndex())
        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by a name or by 'zorder'
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['keytext'] = visual.TextStim(psychoWindow, text=u'?', pos = [100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['ucodetext'] = visual.TextStim(psychoWindow, text=u'?', pos = [-100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mods'] = visual.TextStim(psychoWindow, text=u'?', pos = [0,-200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
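
        # A quick aside on the OrderedDict point above (standard Python behaviour, no
        # ioHub assumptions): keys come back in insertion order, so the last stim added
        # ('mouseDot') is also the last one drawn each frame, i.e. it renders on top.
        assert list(psychoStim.keys())[-1] == 'mouseDot'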

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until a keyboard event for the Escape key is received.
        while QUIT_EXP is False:

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # and update the mouse contingent gaussian based on the current mouse location
            mx,my=mouse.getPosition()
            if ENABLE_NOISY_MOUSE:
                mx=np.random.random_integers(mx-10,mx+10)
                my=np.random.random_integers(my-10,my+10)
            psychoStim['mouseDot'].setPos((mx,my))


            # redraw the stim
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            psychoWindow.flip()
            # it is on this side of the call that you know the changes have been displayed, so you can
            # make a call to the ioHub time method and get the time of the flip, as the built in
            # time methods represent both experiment process and ioHub server process time.
            # Most times in ioHub are represented in sec.msec format to match that of Psychopy.
            flip_time=Computer.currentSec()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s"%(str((mx,my)),), sec_time=flip_time)

            # get any new keyboard character events from the keyboard device and, for each one,
            # check if it matches one of the end-of-example keys.
            for k in kb.getEvents():
                if k.key.upper() in ['ESCAPE', ] and k.type==EventConstants.KEYBOARD_CHAR:
                    print 'Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                    QUIT_EXP=True
                print u'{0}: time: {1}\t\tord: {2}\t\tKey: [{3}]\t\tMods: {4}'.format(EventConstants.getName(k.type),k.time,k.ucode,k.key,k.modifiers)
                psychoStim['keytext'].setText(k.key)
                psychoStim['ucodetext'].setText(unichr(k.ucode))
                psychoStim['mods'].setText(str(k.modifiers))
                

            #for e in mouse.getEvents():
            #    print 'Event: ',e
                
            self.hub.clearEvents('all')
        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # for fun, test getting a bunch of events at once, likely causing a multipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()
        
        if events is None:
            events=[]

        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()
Example #2
File: run.py Project: peircej/ioHub
[myWin.flip() for i in range(10)]
lastFlipTime=Computer.getTime()
myWin.fps()
exit=False

myWin.setRecordFrameIntervals(True)

while not exit and endTime>Computer.currentTime():
    dotPatch.draw()
    message.draw()
    myWin.flip()#redraw the buffer
    flipTime=Computer.getTime()
    IFI=flipTime-lastFlipTime
    lastFlipTime=flipTime
    fcounter+=1

    if IFI > reportedRefreshInterval*1.5:
        print "Frame {0} dropped: IFI of {1}".format(fcounter,IFI)
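        # e.g. on a 60 Hz display reportedRefreshInterval is roughly 0.0167 sec, so any
        # inter-flip interval longer than about 0.025 sec is reported as a dropped frame
        # (illustrative numbers; reportedRefreshInterval is set earlier in the full
        # script, outside this excerpt).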

    #handle key presses each frame
    for event in keyboard.getEvents():
        if event.key in ['ESCAPE','Q','q']:
            exit=True
            break

Computer.disableHighPriority()
myWin.close()

io.quit()  ### End of experiment logic

core.quit()
Example #3
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        print "THIS DEMO REQUIRES A CONNECTED (WIRED OR WIRELESS) XBOX 360"
        print "GAMEPAD OR OTHER XINPUT COMPATIBLE DEVICE. DEVICE ALSO NEEDS TO "
        print " BE TURNED ON. ;) "

        print ""
        print "\tPRESS 'ESCAPE' KEY TO EXIT."
        print "\tPRESS 'b' KEY TO PRINT BATTERY INFO TO STDOUT."
        print "\tPRESS 'u' KEY TO PRINT CAPABILITIES INFO TO STDOUT."
        print "\tPRESS ANY OTHER KEY TO MAKE GAMEPAD *RUMBLE* FOR 1 SEC."


        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        gamepad=self.devices.gamepad


        # Read the current resolution of the monitor's screen in pixels.
        # We will set our window size to match the current screen resolution and make it a full screen borderless window.
        screen_resolution= display.getPixelResolution()


        # Create psychopy full screen window using the display device config.
        psychoWindow = FullScreenWindow(display)
        
        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by a name or by 'zorder'
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
        psychoStim['text'] = visual.TextStim(psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)


        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until a keyboard event for the Escape key is received.
        while QUIT_EXP is False:

            # read gamepad events and take the last one if any exist
            gpevents=gamepad.getEvents()
            if len(gpevents)>0:
                gpevents=gpevents[-1]

                ## Display pressed buttons
                #
                psychoStim['text'].setText(str([k for k,v in gpevents.buttons.iteritems() if v is True]))
                #
                ###

                # Use the 2 finger triggers for the fixation square position (so it will be at the bottom left
                # hand corner of the screen when the triggers are not pressed)
                #
                fixationX=self.normalizedValue2Pixel(gpevents.leftTrigger,screen_resolution[0], 0)
                fixationY=self.normalizedValue2Pixel(gpevents.rightTrigger,screen_resolution[1], 0)
                psychoStim['fixation'].setPos((fixationX,fixationY))
                #
                #####

                # Use the Right Thumb Stick for the purple gaussian  spot position
                #

                x,y,mag=gpevents.rightThumbStick # sticks are 3 item lists (x,y,magnitude)
                currentPosition[0]=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
                currentPosition[1]=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
                psychoStim['mouseDot'].setPos(currentPosition)
                #
                ###
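
                # normalizedValue2Pixel is a helper defined elsewhere in this class. A
                # plausible sketch of the mapping it performs (an assumption, not the
                # project's actual code): rescale a value in [min_val, 1] onto
                # [-screen_dim/2, +screen_dim/2], e.g.
                #
                #     def normalizedValue2Pixel(self, nv, screen_dim, min_val):
                #         return ((nv - min_val) / (1.0 - min_val) - 0.5) * screen_dim
                #
                # so an unpressed trigger (nv=0, min_val=0) maps to -screen_dim/2 and a
                # centered stick (nv=0, min_val=-1) maps to 0, the screen center.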

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # redraw stim
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            psychoWindow.flip()
            # it is on this side of the call that you know the changes have been displayed, so you can
            # make a call to one of the built-in time methods and get the event time of the flip, as the built in
            # time methods represent both experiment process and ioHub server process time.
            # Most times in ioHub are represented in sec.msec format to match that of Psychopy.
            flip_time=Computer.currentSec()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)


            # for each new keyboard event, check if it matches one of the end example keys.
            for k in kb.getEvents():
                # key: the string representation of the key pressed, A-Z if a-zA-Z pressed, 0-9 if 0-9 pressed, etc.
                #      To get the mapping from a key_id to a key string, use
                #
                #      key_string=EventConstants.IDToName(key_event['key_id'])
                #
                # char: the ascii char for the key pressed. This field factors in if shift was also pressed or not
                #       when the char was typed, so typing a 's' == char field of 's', while typing SHIFT+s == char
                #       field of 'S'. This is in contrast to the key field, which always returns upper case values
                #       regardless of shift value. If the character pressed is not an ascii printable character,
                #       this field will print junk, hex, or who knows what else at this point.
                if k.key in ['ESCAPE',]:
                    print 'Quit key pressed: ',k.key
                    QUIT_EXP=True
                else:
                    if k.type == EventConstants.KEYBOARD_PRESS:
                        if k.key in['B','b']:
                            bat=gamepad.updateBatteryInformation()
                            print "Bat Update: ",bat
                            bat=gamepad.getLastReadBatteryInfo()
                            print "Bat Last Read: ",bat
                        elif k.key in['U','u']:
                            bat=gamepad.updateCapabilitiesInformation()
                            print "Cap Update: ",bat
                            bat=gamepad.getLastReadCapabilitiesInfo()
                            print "Cap Last Read: ",bat
                        else:
                            # rumble the pad , 50% low frequency motor,
                            # 25% high frequency motor, for 1 second.
                            r=gamepad.setRumble(50.0,25.0,1.0)

        # wait 250 msec before ending the experiment (makes it feel less
        # abrupt after you press the key)
        self.hub.wait(0.250)

        # for fun, test getting a bunch of events at once,
        # likely causing a multipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime= Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()
Example #4
File: run.py Project: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON.

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.

        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        tracker.runSetupProcedure()
        self.hub.clearEvents("all")
        self.hub.wait(0.050)

        current_gaze = [0, 0]

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'test monitor', which is created on the fly right now by the script
        window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by a name or by 'zorder'
        psychoStim = OrderedDict()
        psychoStim["grating"] = visual.PatchStim(window, mask="circle", size=75, pos=[-100, 0], sf=0.075)
        psychoStim["fixation"] = visual.PatchStim(
            window, size=25, pos=[0, 0], sf=0, color=[-1, -1, -1], colorSpace="rgb"
        )
        psychoStim["gazePosText"] = visual.TextStim(
            window,
            text=str(current_gaze),
            pos=[100, 0],
            height=48,
            color=[-1, -1, -1],
            colorSpace="rgb",
            alignHoriz="left",
            wrapWidth=300,
        )
        psychoStim["gazePos"] = visual.GratingStim(
            window, tex=None, mask="gauss", pos=current_gaze, size=(50, 50), color="purple"
        )

        [psychoStim[stimName].draw() for stimName in psychoStim]

        Computer.enableHighPriority(True)
        # self.setProcessAffinities([0,1],[2,3])

        tracker.setRecordingState(True)
        self.hub.wait(0.050)

        # Clear all events from the ioHub event buffers.
        self.hub.clearEvents("all")

        # Loop until we get a keyboard event
        while len(kb.getEvents()) == 0:

            # for each loop, update the grating phase
            psychoStim["grating"].setPhase(0.05, "+")  # advance phase by 0.05 of a cycle

            # and update the gaze contingent gaussian based on the current gaze location

            current_gaze = tracker.getLastGazePosition()
            # getLastGazePosition may return None if no valid gaze data has been
            # received yet, so only update the gaze contingent stim when a position
            # is actually available.
            if current_gaze:
                current_gaze = int(current_gaze[0]), int(current_gaze[1])

                psychoStim["gazePos"].setPos(current_gaze)
                psychoStim["gazePosText"].setText(str(current_gaze))

            # this is shorthand for looping through the psychopy stim list and redrawing each one.
            # it is also efficient, though it may not be as user friendly as:
            #     for stim in psychoStim.itervalues():
            #         stim.draw()
            # which does the same thing and is probably just as efficient.
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            flip_time = window.flip()

            # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
            # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
            self.hub.sendMessageEvent("Flip %s" % (str(current_gaze),), sec_time=flip_time)

        # a key was pressed so the loop was exited. We are clearing the event buffers to avoid an event overflow (a currently known issue).
        self.hub.clearEvents("all")

        tracker.setRecordingState(False)

        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        self.hub.wait(0.250)
        tracker.setConnectionState(False)

        # close necessary files / objects, disable high priority.
        window.close()
Example #5
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON.

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.kb
        mouse=self.hub.devices.mouse

        tracker.runSetupProcedure()

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'test monitor', which is created on the fly right now by the script
        window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Load the image stimulus that will be displayed while the eye tracker records fixation data.
        image_name='./images/party.png'
        imageStim = visual.ImageStim(window, image=image_name, name='image_stim')

        imageStim.draw()

        print "Detected Fixation Message Format:"
        print "FIX_DETECTED fix_end_time fix_x fix_y fdur fix_rt"

        tracker.setRecordingState(True)
        self.hub.wait(0.050)

        flip_time=window.flip()
        self.hub.clearEvents('all')
        self.hub.sendMessageEvent("SYNCTIME %s"%(image_name,),sec_time=flip_time)
        # Clear all events from the global event buffer, and from the keyboard and eyetracker event buffer.
        # This 'mess' of calls is needed right now because clearing the global event buffer does not
        # clear device level event buffers, and each device buffer is independent. Not sure this is a 'good'
        # thing as it stands, but until there is feedback, it will stay as is.

        fixationCount=0
        dwellTime=0.0
        # Loop until we get a keyboard event
        while len(kb.getEvents())==0:
            for ee in tracker.getEvents(EventConstants.FIXATION_END):
                if EventConstants.FIXATION_END == ee.type:
                    etime=ee.time
                    eeye=ee.eye
                    ex=ee.average_gaze_x
                    ey=ee.average_gaze_y
                    edur=ee.duration
                    ert=etime-flip_time
                    print 'FIX %.3f\t%d\t%.3f\t%.3f\t%.3f\t%.3f'%(etime,eeye,ex,ey,edur,ert)
                    fixationCount+=1
                    dwellTime+=edur
                    self.hub.sendMessageEvent("FIX_DETECTED %.6f %.3f %.3f %.6f %.6f"%(etime,ex,ey,edur,ert))

            imageStim.draw()
            window.flip()

        print "-------------"
        print " Number Fixations Made: ",fixationCount
        print " Total Dwell Time: ",dwellTime
        if fixationCount:
            print " Average Dwell Time / Fixation: ",dwellTime/fixationCount

        # a key was pressed so the loop was exited. We are clearing the event buffers to avoid an event overflow (a currently known issue).
        self.hub.clearEvents('all')
        tracker.setRecordingState(False)


        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        self.hub.wait(0.250)

        tracker.setConnectionState(False)

        # close necessary files / objects, disable high priority.
        window.close()
Example #6
File: run.py Project: peircej/ioHub
class ExperimentRuntime(ioHubExperimentRuntime):
    def __init__(self,configFileDirectory, configFile):
        ioHubExperimentRuntime.__init__(self,configFileDirectory,configFile)
        self.initAttributes()

    def initAttributes(self):
        """

        """
        self.psychoStim = OrderedDict()
        self.totalEventRequestsForTest=1000
        self.numEventRequests=0
        self.psychoWindow=None
        self.lastFlipTime=0.0
        self.events=None

    def run(self,*args,**kwargs):
        """
        psychopy code is taken from an example psychopy script in the coder documentation.
        """

        #report process affinities
        print "Current process affinities (experiment proc, ioHub proc):", Computer.getProcessAffinities()

        # create 'shortcuts' to the devices of interest for this experiment
        self.mouse=self.hub.devices.mouse
        self.kb=self.hub.devices.kb
        self.expRuntime=self.hub.devices.experimentRuntime
        self.display=self.hub.devices.display


        # let's print out the public method names for each device type for fun.
        #print "ExperimentPCkeyboard methods:",self.kb.getDeviceInterface()
        #print "ExperimentPCmouse methods:",self.mouse.getDeviceInterface()
        #print "ExperimentRuntime methods:",self.expRuntime.getDeviceInterface()
        #print "Display methods:",self.display.getDeviceInterface()

        # create fullscreen pyglet window at current resolution, as well as required resources / drawings
        self.createPsychoGraphicsWindow()

        # create stats numpy arrays, set experiment process to high priority.
        self.initStats()

        # enable high priority mode for the experiment process
        Computer.enableHighPriority()

        #draw and flip to the updated graphics state.
        ifi=self.drawAndFlipPsychoWindow()

        # START TEST LOOP >>>>>>>>>>>>>>>>>>>>>>>>>>

        while self.numEventRequests < self.totalEventRequestsForTest:
            # send an Experiment Event to the ioHub server process
            self.hub.sendMessageEvent("This is a test message %.3f"%self.flipTime)

            # check for any new events from any of the devices, and return the events list and the time it took to
            # request the events and receive the reply
            self.events,callDuration=self.checkForEvents()
            if self.events:
                # events were available
                self.updateStats(self.events, callDuration, ifi)

            # draw and flip to the updated graphics state.
            ifi=self.drawAndFlipPsychoWindow()

        # END TEST LOOP <<<<<<<<<<<<<<<<<<<<<<<<<<

        # close necessary files / objects, disable high priority.
        self.spinDownTest()

        # plot collected delay and retrace detection results.
        self.plotResults()

    def createPsychoGraphicsWindow(self):
        #create a window
        self.psychoWindow = FullScreenWindow(self.display)
        
        currentPosition=self.mouse.setPosition((0,0))
        self.mouse.setSystemCursorVisibility(False)

        self.instructionText2Pattern='%d'

        self.psychoStim['grating'] = visual.PatchStim(self.psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        self.psychoStim['fixation'] = visual.PatchStim(self.psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        self.psychoStim['title'] = visual.TextStim(win=self.psychoWindow, text="ioHub getEvents Delay Test", pos = [0,125], height=36, color=[1,.5,0], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions'] = visual.TextStim(win=self.psychoWindow, text='Move the mouse around, press keyboard keys and mouse buttons', pos = [0,-125], height=32, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['instructions2'] = visual.TextStim(win=self.psychoWindow, text=self.instructionText2Pattern%(self.totalEventRequestsForTest,), pos = [0,-250],  color=[-1,-1,-1], height=32, colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
        self.psychoStim['keytext'] = visual.TextStim(win=self.psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='left',wrapWidth=800.0)
        self.psychoStim['mouseDot'] = visual.GratingStim(win=self.psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')


    def drawAndFlipPsychoWindow(self):
        self.psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
        currentPosition,currentDisplayIndex=self.mouse.getPosition(return_display_index=True)
        
        if currentDisplayIndex == self.display.getIndex():       
            currentPosition=(float(currentPosition[0]),float(currentPosition[1]))
            self.psychoStim['mouseDot'].setPos(currentPosition)


        if self.events:
            self.psychoStim['instructions2'].setText(self.instructionText2Pattern%(self.totalEventRequestsForTest-self.numEventRequests,))

            for r in self.events:
                if r.type == EventConstants.KEYBOARD_PRESS: # keypress event
                    self.psychoStim['keytext'].setText(r.key.decode('utf-8'))

            self.events=None

        [self.psychoStim[skey].draw() for skey in self.psychoStim]

        self.flipTime=self.psychoWindow.flip()
        d=self.flipTime-self.lastFlipTime
        self.lastFlipTime=self.flipTime
        return d

    def checkForEvents(self):
        # get the time we request events from the ioHub
        stime=Computer.currentTime()
        r = self.hub.getEvents()
        if r and len(r) > 0:
            # events were returned by the request, so include this getEvents call in the tally
            etime=Computer.currentTime()
            dur=etime-stime
            return r, dur*1000.0
        return None,None


    def initStats(self):
        if self.hub is None:
            print "Error: ioHub must be enabled to run the testEventRetrievalTiming test."
            return

        # Init Results numpy array
        self.results= zeros((self.totalEventRequestsForTest,3),dtype='f4')

        self.numEventRequests=0
        self.flipTime=0.0
        self.lastFlipTime=0.0

        # clear the ioHub event Buffer before starting the test.
        # This is VERY IMPORTANT, given an existing bug in ioHub.
        # You would want to do this before each trial started until the bug is fixed.
        self.hub.clearEvents('all')

    def updateStats(self, events, duration, ifi):
        self.results[self.numEventRequests][0]=duration     # time it took to get events from ioHub (msec)
        self.results[self.numEventRequests][1]=len(events)  # number of events returned
        self.results[self.numEventRequests][2]=ifi*1000.0   # inter flip interval in msec
        self.numEventRequests+=1                            # increment the request tally counter


    def spinDownTest(self):
        # OK, we have collected the requested number of getEvents calls that returned > 0 events,
        # so close the psychopy window
        self.psychoWindow.close()

        # disable high priority in both processes
        Computer.disableHighPriority()


    def plotResults(self):
        #### calculate stats on collected data and draw some plots ####
        import matplotlib.mlab as mlab
        from matplotlib.pyplot import axis, title, xlabel, hist, grid, show, ylabel, plot
        import pylab

        results= self.results

        durations=results[:,0]
        flips=results[1:,2]

        dmin=durations.min()
        dmax=durations.max()
        dmean=durations.mean()
        dstd=durations.std()

        fmean=flips.mean()
        fstd=flips.std()

        pylab.figure(figsize=[30,10])
        pylab.subplot(1,3,1)

        # the histogram of the delay data
        n, bins, patches = hist(durations, 50, normed=True, facecolor='blue', alpha=0.75)
        # add a 'best fit' line
        y = mlab.normpdf( bins, dmean, dstd)
        plot(bins, y, 'r--', linewidth=1)
        xlabel('ioHub getEvents Delay')
        ylabel('Percentage')
        title('$\mathrm{{Histogram\ of\ Delay:}}\ \min={0},\ \max={1},\ \mu={2},\ \sigma={3}$'.format(
                dmin, dmax, dmean, dstd))
        axis([0, dmax+1.0, 0, 25.0])
        grid(True)


        # graphs of the retrace data ( taken from retrace example in psychopy demos folder)
        intervalsMS = flips
        m=fmean
        sd=fstd
        distString= "Mean={0:.1f}ms,    s.d.={1:.1f},    99%CI={2:.1f}-{3:.1f}".format(m, sd, m - 3 * sd, m + 3 * sd)
        nTotal=len(intervalsMS)
        nDropped=sum(intervalsMS>(1.5*m))
        droppedString = "Dropped/Frames = {0:d}/{1:d} = {2:.2f}%".format(nDropped, nTotal, 100.0 * int(nDropped) / float(nTotal))

        pylab.subplot(1,3,2)

        #plot the frameintervals
        pylab.plot(intervalsMS, '-')
        pylab.ylabel('t (ms)')
        pylab.xlabel('frame N')
        pylab.title(droppedString)

        pylab.subplot(1,3,3)
        pylab.hist(intervalsMS, 50, normed=0, histtype='stepfilled')
        pylab.xlabel('t (ms)')
        pylab.ylabel('n frames')
        pylab.title(distString)

        show()
Example #7
class ExperimentRuntime(ioHubExperimentRuntime):
    HORZ_SCALING = 0.9
    VERT_SCALING = 0.9
    HORZ_POS_COUNT = 7
    VERT_POS_COUNT = 7
    RANDOMIZE_TRIALS = True

    def __init__(self, configFileDirectory, configFile):
        ioHubExperimentRuntime.__init__(self, configFileDirectory, configFile)

    def run(self, *args, **kwargs):
        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED.

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        # using getDevice() returns None if the device is not found,
        tracker = self.hub.getDevice("tracker")

        display = self.devices.display
        kb = self.devices.kb
        mouse = self.devices.mouse

        if tracker is None:
            print "EyeTracker Device did not load."
            # return 0

        # get the experiment condition variable excel file to use.
        fdialog = FileDialog(
            message="Select a Condition Variable File",
            defaultDir=self.paths.CONDITION_FILES.getPath(),
            defaultFile="",
            openFile=True,
            allowMultipleSelections=False,
            allowChangingDirectories=True,
            fileTypes=(FileDialog.EXCEL_FILES, FileDialog.ALL_FILES),
            display_index=display.getIndex(),
        )

        result, conditionVariablesFile = fdialog.show()
        fdialog.destroy()

        if result != FileDialog.OK_RESULT:
            print "User cancelled Condition Variable Selection... Exiting Experiment."
            return

        if conditionVariablesFile:
            conditionVariablesFile = conditionVariablesFile[0]

        # create a condition set provider
        self.conditionVariablesProvider = ExperimentVariableProvider(
            conditionVariablesFile,
            "BLOCK_LABEL",
            practiceBlockValues="PRACTICE",
            randomizeBlocks=False,
            randomizeTrials=True,
        )

        # initialize (or create) a table in the ioDataStore to hold the condition variable data
        self.hub.initializeConditionVariableTable(self.conditionVariablesProvider)

        # Hide the 'system mouse cursor' so it does not bother us.
        mouse.setSystemCursorVisibility(False)

        # Create a psychopy window, full screen resolution, full screen mode, pix units, with no border, using the monitor
        # profile name 'test monitor', which is created on the fly right now by the script
        self.window = FullScreenWindow(display)

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen = ClearScreen(self)
        self.clearScreen.flip(text="EXPERIMENT_INIT")

        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(ioHub.util.getCurrentDateTimeString()))
        self.clearScreen.sendMessage(
            "Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID)
        )
        self.clearScreen.sendMessage(
            "Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
                display.getIndex(), display.getPixelResolution(), display.getCoordinateType()
            )
        )
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # screen for showing text and waiting for a keyboard response or something
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
        self.instructionScreen = InstructionScreen(
            self, "Press Space Key when Ready to Start Experiment.", dtrigger, 5 * 60
        )

        # screen state used during the data collection / runtime of the experiment to move the
        # target from one point to another.
        self.targetScreen = TargetScreen(self)

        xyEventTrigs = [
            DeviceEventTrigger(
                kb, EventConstants.KEYBOARD_PRESS, {"key": "F1"}, self.targetScreen.toggleDynamicStimVisibility
            )
        ]
        if tracker:
            self.targetScreen.dynamicStimPositionFuncPtr = tracker.getLastGazePosition
            msampleTrig = DeviceEventTrigger(
                tracker, EventConstants.MONOCULAR_EYE_SAMPLE, {}, self.targetScreen.setDynamicStimPosition
            )
            bsampleTrig = DeviceEventTrigger(
                tracker, EventConstants.BINOCULAR_EYE_SAMPLE, {}, self.targetScreen.setDynamicStimPosition
            )
            xyEventTrigs.extend([msampleTrig, bsampleTrig])
        else:
            self.targetScreen.dynamicStimPositionFuncPtr = mouse.getPosition
            msampleTrig = DeviceEventTrigger(
                mouse, EventConstants.MOUSE_MOVE, {}, self.targetScreen.setDynamicStimPosition
            )
            xyEventTrigs.append(msampleTrig)

        # setup keyboard event hook on target screen state
        # to catch any press space bar events for responses to color changes.

        dtrigger = DeviceEventTrigger(
            kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"}, self._spaceKeyPressedDuringTargetState
        )
        xyEventTrigs.append(dtrigger)
        self.targetScreen.setEventTriggers(xyEventTrigs)

        # set all screen states background color to the first screen background color in the Excel file
        # i.e. the SCREEN_COLOR column
        displayColor = tuple(self.conditionVariablesProvider.getData()[0]["SCREEN_COLOR"])
        self.clearScreen.setScreenColor(displayColor)
        self.instructionScreen.setScreenColor(displayColor)
        self.targetScreen.setScreenColor(displayColor)

        # clear the display a few times to be sure front and back buffers are clean.
        self.clearScreen.flip()

        self.hub.clearEvents("all")

        # show the opening instruction screen, clearing events so that events generated before the
        # screen state change is displayed are not picked up by the event monitoring. This is the default,
        # so you can just call .switchTo() if you want all events cleared right after the flip
        # returns. If you 'do not' want events cleared, use .switchTo(False)
        #
        flip_time, time_since_flip, event = self.instructionScreen.switchTo(clearEvents=True, msg="EXPERIMENT_START")

        self.clearScreen.flip(text="PRACTICE_BLOCKS_START")
        # Run Practice Blocks
        self.runBlockSet(self.conditionVariablesProvider.getPracticeBlocks())
        self.clearScreen.flip(text="PRACTICE_BLOCKS_END")

        # Run Experiment Blocks
        self.clearScreen.flip(text="EXPERIMENT_BLOCKS_START")
        self.runBlockSet(self.conditionVariablesProvider.getExperimentBlocks())
        self.clearScreen.flip(text="EXPERIMENT_BLOCKS_END")

        # show the 'thanks for participating screen'
        self.instructionScreen.setText("Experiment Complete. Thank you for Participating.")
        self.instructionScreen.setTimeout(10 * 60)  # 10 minute timeout
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
        self.instructionScreen.setEventTriggers(dtrigger)
        flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="EXPERIMENT_END")

        # close the psychopy window
        self.window.close()

        # Done Experiment close the tracker connection if it is open.

        if tracker:
            tracker.setConnectionState(False)

        ### End of experiment logic

    # Called by the run() method to perform a sequence of blocks in the experiment.
    # So this method has the guts of the experiment logic.
    # This method is called once to run any practice blocks, and once to run the experimental blocks.
    #
    def runBlockSet(self, blockSet):
        # using getDevice() returns None if the device is not found,
        tracker = self.hub.getDevice("tracker")

        daq = self.hub.getDevice("daq")

        # using self.devices.xxxxx raises an exception if the
        # device is not present
        kb = self.devices.kb
        display = self.devices.display

        # for each block in the group of blocks.....
        for trialSet in blockSet.getNextConditionSet():
            # if an eye tracker is connected,
            if tracker:
                self.instructionScreen.setTimeout(30 * 60.0)  # 30 minute timeout, long enough for a break if needed.
                dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": ["RETURN", "ESCAPE"]})
                self.instructionScreen.setEventTriggers(dtrigger)
                self.instructionScreen.setText(
                    "Press 'Enter' to go to eye tracker Calibration mode.\n\nTo skip calibration and start Data Recording press 'Escape'"
                )
                flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="CALIBRATION_SELECT")
                if event and event.key == "RETURN":
                    runEyeTrackerSetupAndCalibration(tracker, self.window)
                elif event and event.key == "ESCAPE":
                    print "** Calibration stage skipped for block ", blockSet.getCurrentConditionSetIteration()
                else:
                    print "** Time out occurred. Entering calibration mode to play it safe. ;)"
                    runEyeTrackerSetupAndCalibration(tracker, self.window)

            dres = display.getPixelResolution()
            # right now, target positions are automatically generated based on point grid size, screen size, and a scaling factor (a gain).
            TARGET_POSITIONS = generatedPointGrid(
                dres[0], dres[1], self.HORZ_SCALING, self.VERT_SCALING, self.HORZ_POS_COUNT, self.VERT_POS_COUNT
            )

            # indexes used to access the target positions; they start out non-randomized.
            RAND_INDEXES = np.arange(TARGET_POSITIONS.shape[0])

            # if conditionVariablesProvider was told to randomize trials, then randomize trial index access list.
            if self.conditionVariablesProvider.randomizeTrials is True:
                self.hub.sendMessageEvent(
                    "RAND SEED = {0}".format(ExperimentVariableProvider._randomGeneratorSeed),
                    sec_time=ExperimentVariableProvider._randomGeneratorSeed / 1000.0,
                )
                np.random.shuffle(RAND_INDEXES)

            dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
            self.instructionScreen.setEventTriggers(dtrigger)
            self.instructionScreen.setText(
                "Press 'Space' key when Ready to Start Block %d" % (blockSet.getCurrentConditionSetIteration())
            )
            flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="BLOCK_START")

            # enable high priority for the experiment process only. Not sure this is necessary, or a good idea,
            # based on tests so far frankly. Running at std priority seems to usually be just fine.
            Computer.enableRealTimePriority(True)

            # if we have a tracker, start recording.......
            if tracker:
                tracker.setRecordingState(True)

            # delay a short time to let " the data start flow'in "
            self.hub.wait(0.050)

            # In this paradigm, each 'trial' is the movement from one target location to another.
            # Recording of eye data is on for the whole block of XxY target positions within the block.
            # A rough outline of the runtime / data collection portion of a block is as follows:
            #      a) Start each block with the target at screen center.
            #      b) Wait sec.msec duration after showing the target [ column PRE_POS_CHANGE_INTERVAL ] in excel file
            #      c) Then schedule move of target to next target position at the time of the next retrace.
            #      d) Once the Target has moved to the 2nd position for the trial, wait PRE_COLOR_CHANGE_INTERVAL
            #         sec.msec before possibly changing the color of the center of the target. The new color is
            #         determined by the FP_INNER_COLOR2 column. If no color change is wanted, simply make this color
            #         equal to the color of the target center in column FP_INNER_COLOR for that row of the spreadsheet.
            #      e) Once the target has been redrawn (either with or without a color change), it stays in position for
            #         another POST_COLOR_CHANGE_INTERVAL sec.msec. Since ioHub is being used, all keyboard activity
            #         is being recorded to the ioDataStore file, so there is no need really to 'monitor' for
            #         the participant's key presses, since we do not use them for feedback. They can be retrieved from the
            #         data file for analysis post hoc.
            #      f) After the POST_COLOR_CHANGE_INTERVAL, the current 'trial' officially ends, and the next trial
            #         starts, with the target remaining in the position it was at in the end of the last trial, but
            #         with the target center color switching to FP_INNER_COLOR.
            #      g) Then the sequence from b) starts again for the number of target positions in the block
            #        (49 currently).
            #

            self.hub.clearEvents("all")

            self._TRIAL_STATE = None
            self.targetScreen.nextAreaOfInterest = None

            for trial in trialSet.getNextConditionSet():
                currentTrialIndex = trialSet.getCurrentConditionSetIndex()

                nextTargetPosition = TARGET_POSITIONS[currentTrialIndex]
                trial["FP_X"] = nextTargetPosition[0]
                trial["FP_Y"] = nextTargetPosition[1]

                ppd_x, ppd_y = self.devices.display.getPixelsPerDegree()

                fp_outer_radius = int(trial["FP_OUTER_RADIUS"] * ppd_x), int(trial["FP_OUTER_RADIUS"] * ppd_y)
                fp_inner_radius = int(trial["FP_INNER_RADIUS"] * ppd_x), int(trial["FP_INNER_RADIUS"] * ppd_y)

                self.targetScreen.setScreenColor(tuple(trial["SCREEN_COLOR"]))
                self.targetScreen.setTargetOuterColor(tuple(trial["FP_OUTER_COLOR"]))
                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR"]))
                self.targetScreen.setTargetOuterSize(fp_outer_radius)
                self.targetScreen.setTargetInnerSize(fp_inner_radius)

                self.hub.clearEvents("kb")

                self.targetScreen.setTimeout(trial["PRE_POS_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_PRE_POS_CHANGE_KEY"
                target_pos1_color1_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_INITIAL_COLOR"
                )

                self.targetScreen.setTargetPosition(nextTargetPosition)
                self.targetScreen.setTimeout(trial["PRE_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_POS_CHANGE_KEY"

                # create a 3 degree circular region (1.5 degree radius) around the next target position
                # for use as our invisible boundary
                self.targetScreen.nextAreaOfInterest = Point(*nextTargetPosition).buffer(((ppd_x + ppd_y) / 2.0) * 1.5)
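                # Worked example of the radius arithmetic above (illustrative numbers, not
                # from the source): with roughly 30 pixels per degree on both axes,
                # ((30 + 30) / 2.0) * 1.5 = 45, i.e. a circle of 45 pixel radius
                # (3 degrees in diameter) centered on the upcoming target position.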

                target_pos2_color1_time, time_since_flip, event = self.targetScreen.switchTo(msg="TRIAL_TARGET_MOVE")

                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR2"]))
                self.targetScreen.setTimeout(trial["POST_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_COLOR_CHANGE_KEY"
                target_pos2_color2_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_COLOR_TWO"
                )

                # end of 'trial sequence'
                # send condition variables used / populated to ioDataStore
                toSend = [self.hub.experimentSessionID, trialSet.getCurrentConditionSetIteration()]
                trial["TSTART_TIME"] = target_pos1_color1_time
                trial["APPROX_TEND_TIME"] = target_pos2_color2_time + time_since_flip
                trial["target_pos1_color1_time"] = target_pos1_color1_time
                trial["target_pos2_color1_time"] = target_pos2_color1_time
                trial["target_pos2_color2_time"] = target_pos2_color2_time

                if self.targetScreen.aoiTriggeredID:
                    trial["VOG_SAMPLE_ID_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredID
                    trial["VOG_SAMPLE_TIME_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredTime
                if self.targetScreen.aoiBestGaze:
                    trial["BEST_GAZE_X"] = self.targetScreen.aoiBestGaze[0]
                    trial["BEST_GAZE_Y"] = self.targetScreen.aoiBestGaze[1]

                self._TRIAL_STATE = None
                if self.targetScreen.nextAreaOfInterest:
                    del self.targetScreen.nextAreaOfInterest
                    self.targetScreen.nextAreaOfInterest = None

                toSend.extend(trial.tolist())
                self.hub.addRowToConditionVariableTable(toSend)

            # end of block of trials, clear screen
            self.clearScreen.flip(text="BLOCK_END")

            self._TRIAL_STATE = None

            # if tracking eye position, turn off eye tracking.
            if tracker:
                tracker.setRecordingState(False)
            if daq:
                daq.enableEventReporting(False)

            # turn off high priority so python GC can clean up if it needs to.
            Computer.disableHighPriority()

            # give a 100 msec delay before starting next block
            self.hub.wait(0.100)

        # end of block set, return from method.
        self.clearScreen.flip(text="BLOCK_SET_END")
        return True

    def _spaceKeyPressedDuringTargetState(self, flipTime, stateDuration, event):
        if self._TRIAL_STATE:
            trial, column_name = self._TRIAL_STATE
            if trial[column_name] <= -100:  # no RT has been registered yet
                trial[column_name] = event.time - flipTime
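                # e.g. if the target flip happened at flipTime = 10.250 sec and the space
                # press arrived at event.time = 10.725 sec, the stored reaction time is
                # 0.475 sec (times are illustrative, not from the source).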
        return False
Example #8
File: run.py Project: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        ain=self.devices.ain
        
        # get the number of trials entered in the session dialog
        user_params=self.getSavedUserDefinedParameters()
        print 'user_params: ', user_params
        trial_count=int(user_params.get('trial_count',5))
           
        #Computer.enableHighPriority()

        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Create a psychopy window, full screen resolution, full screen mode
        psychoWindow = FullScreenWindow(display)
        
        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, so you can use it to reference stim by a name or by 'zorder'
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=150,pos=[0,0], sf=.075)

        psychoStim['title'] = visual.TextStim(win=psychoWindow, 
                              text="Analog Input Test. Trial 1 of %d"%(trial_count),
                              pos = [0,200], height=36, color=[1,.5,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        ai_values_string_proto="AI_0: %.3f\tAI_1: %.3f\tAI_2: %.3f\tAI_3: %.3f\t\nAI_4: %.3f\tAI_5: %.3f\tAI_6: %.3f\tAI_7: %.3f"
        ai_values=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
        psychoStim['analog_input_values'] = visual.TextStim(win=psychoWindow, 
                              text=ai_values_string_proto%ai_values,
                              pos = [0,-200], height=24, color=[1,1,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        psychoStim['instruction'] = visual.TextStim(win=psychoWindow, 
                              text="Press ESCAPE Key for Next Trial",
                              pos = [0,-300], height=36, color=[1,1,0.5], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        
        # Run each trial; the inner loop for a trial runs until the Escape key is pressed.
        for i in range(trial_count):        
            # Clear all events from the global and device level event buffers.
            psychoStim['title'].setText("Analog Input Test. Trial %d of %d"%(i+1,trial_count))
            self.hub.clearEvents('all')
            
            # start streaming AnalogInput data
            ain.enableEventReporting(True)
            
            QUIT_TRIAL=False
            
            while QUIT_TRIAL is False:
    
                # for each loop, update the grating phase
                psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
    
                # update analog input values to display
                analog_input_events=ain.getEvents()
                if analog_input_events:
                    event_count=len(analog_input_events)
                    event=analog_input_events[-1]
                    ai_values=(event.AI_0,event.AI_1,event.AI_2,event.AI_3,
                               event.AI_4,event.AI_5,event.AI_6,event.AI_7)
                    psychoStim['analog_input_values'].setText(ai_values_string_proto%ai_values)
    
                # redraw the stim
                [psychoStim[stimName].draw() for stimName in psychoStim]
    
                # flip the psychopy window buffers, so the stim changes you just made get displayed.
                psychoWindow.flip()
                # it is on this side of the call that you know the changes have been displayed, so you can
                # make a call to the ioHub time method and get the time of the flip, as the built in
                # time methods represent both experiment process and ioHub server process time.
                # Most times in ioHub are represented in sec.msec format to match that of Psychopy.
                flip_time=Computer.currentSec()
    
                # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
                # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
                self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)
        
                # for each new keyboard char event, check if it matches one of the end example keys.
                for k in kb.getEvents(EventConstants.KEYBOARD_CHAR):
                    if k.key in ['ESCAPE', ]:
                        print 'Trial Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                        QUIT_TRIAL=True

            
            # clear the screen
            psychoWindow.flip()
 
            # stop analog input recording
            ain.enableEventReporting(False)
                    
            # delay 1/4 second before next trial
            actualDelay=self.hub.delay(0.250)
    
        # wait 250 msec before ending the experiment
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # for fun, test getting a bunch of events at once, likely causing a multipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # close necessary files / objects, disable high priority.
        psychoWindow.close()