コード例 #1
0
ファイル: run.py プロジェクト: peircej/ioHub
    def spinDownTest(self):
        """Tear down the test: close the PsychoPy window and drop priority."""
        # The requested number of non-empty getEvents() calls has completed,
        # so release the display resources first.
        self.psychoWindow.close()

        # Return both the experiment and ioHub processes to normal priority.
        Computer.disableHighPriority()
コード例 #2
0
ファイル: client.py プロジェクト: sckim/ioHub
    def _calculateClientServerTimeOffset(self, sampleSize=100):
        """
        Measure the offset between the local (experiment) process clock and
        the ioHub Server process clock.

        Repeatedly (sampleSize times) reads the local time, requests the
        server time over IPC, then reads the local time again.  Returns a
        (sampleSize, 2) float32 numpy array where row i holds:

            [i][0] = server_time - local_time   (clock offset estimate)
            [i][1] = round-trip duration of the server time request

        On Windows, where a direct QPC implementation is used for both
        clocks, the offset should match the delay to within ~100 usec.
        """
        samples = N.zeros((sampleSize, 2), dtype='f4')
        for idx in xrange(sampleSize):
            local_before = Computer.currentMsec()  # local clock, read 1
            server_time = self.currentMsec()       # RPC round trip to the ioHub server
            local_after = Computer.currentMsec()   # local clock, read 2
            samples[idx][0] = server_time - local_before  # clock offset
            samples[idx][1] = local_after - local_before  # end-to-end call delay
            time.sleep(0.001)  # brief pause between samples
        return samples
コード例 #3
0
ファイル: psychopyIOHubRuntime.py プロジェクト: peircej/ioHub
 def _setInitialProcessAffinities(self,ioHubInfo):
     """
     Apply process affinity settings from the experiment configuration and
     the ioHub server configuration.

     An empty (or missing) affinity list means 'use all processing units'.
     Affinities are only actually changed when the configured list is a
     strict subset of the available processing units.

     BUG FIX: the original pre-assigned `cpus` to all three affinity
     variables before unconditionally overwriting them from the configs;
     those dead stores have been removed.
     """
     # Default affinity: every available processing unit.
     cpus=range(Computer.processingUnitCount)

     experiment_process_affinity=self.configuration.get('process_affinity',[])
     if len(experiment_process_affinity) == 0:
         experiment_process_affinity=cpus

     other_process_affinity=self.configuration.get('remaining_processes_affinity',[])
     if len(other_process_affinity) == 0:
         other_process_affinity=cpus

     iohub_process_affinity=ioHubInfo.get('process_affinity',[])
     if len(iohub_process_affinity) == 0:
         iohub_process_affinity=cpus

     # Only restrict the experiment / ioHub processes when both have been
     # narrowed; otherwise leave OS scheduling untouched.
     if len(experiment_process_affinity) < len(cpus) and len(iohub_process_affinity) < len(cpus):
         Computer.setProcessAffinities(experiment_process_affinity,iohub_process_affinity)

     # Restrict all remaining system processes, excluding the experiment
     # and ioHub server processes themselves.
     if len(other_process_affinity) < len(cpus):
         ignore=[Computer.currentProcessID,Computer.ioHubServerProcessID]
         Computer.setAllOtherProcessesAffinity(other_process_affinity,ignore)
コード例 #4
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        psychopy code is taken from an example psychopy script in the coder documentation.
        """

        #report process affinities
        print "Current process affinities (experiment proc, ioHub proc):", Computer.getProcessAffinities()

        # create 'shortcuts' to the devices of interest for this experiment
        self.mouse=self.hub.devices.mouse
        self.kb=self.hub.devices.kb
        self.expRuntime=self.hub.devices.experimentRuntime
        self.display=self.hub.devices.display


        # let's print out the public method names for each device type for fun.
        #print "ExperimentPCkeyboard methods:",self.kb.getDeviceInterface()
        #print "ExperimentPCmouse methods:",self.mouse.getDeviceInterface()
        #print "ExperimentRuntime methods:",self.expRuntime.getDeviceInterface()
        #print "Display methods:",self.display.getDeviceInterface()

        # create fullscreen pyglet window at current resolution, as well as required resources / drawings
        self.createPsychoGraphicsWindow()

        # create stats numpy arrays, set experiment process to high priority.
        self.initStats()

        # enable high priority mode for the experiment process
        Computer.enableHighPriority()

        #draw and flip to the updated graphics state.
        ifi=self.drawAndFlipPsychoWindow()

        # START TEST LOOP >>>>>>>>>>>>>>>>>>>>>>>>>>

        while self.numEventRequests < self.totalEventRequestsForTest:
            # send an Experiment Event to the ioHub server process
            self.hub.sendMessageEvent("This is a test message %.3f"%self.flipTime)

            # check for any new events from any of the devices, and return the events list and the time it took to
            # request the events and receive the reply
            self.events,callDuration=self.checkForEvents()
            if self.events:
                # events were available
                self.updateStats(self.events, callDuration, ifi)
                #draw and flip to the updated graphics state.

            ifi=self.drawAndFlipPsychoWindow()

        # END TEST LOOP <<<<<<<<<<<<<<<<<<<<<<<<<<

        # close necessary files / objects, disable high priority.
        self.spinDownTest()

        # plot collected delay and retrace detection results.
        self.plotResults()
コード例 #5
0
ファイル: run.py プロジェクト: peircej/ioHub
 def checkForEvents(self):
     """
     Request any new device events from the ioHub.

     Returns (events, duration_msec) when events were available, otherwise
     (None, None).  duration_msec is how long the getEvents() round trip
     took, in milliseconds.
     """
     request_start=Computer.currentTime()
     events=self.hub.getEvents()
     if not events:
         return None,None
     request_end=Computer.currentTime()
     return events, (request_end-request_start)*1000.0
コード例 #6
0
ファイル: linux2.py プロジェクト: peircej/ioHub
    def _nativeEventCallback(self,event):
        """
        Handle a native mouse event: stamp it with an ioHub event id, map its
        pixel position into display coordinates, and queue it in the device's
        native event buffer.
        """
        try:
            if self.isReportingEvents():
                logged_time=currentSec()

                event_array=event[0]
                event_array[3]=Computer._getNextEventID()

                # Map raw pixel coords (indices 15/16) into the coordinate
                # space of the active display.
                display_index=self._display_device.getIndex()
                x,y=self._display_device.pixel2DisplayCoord(event_array[15],event_array[16],display_index)
                event_array[15]=x
                event_array[16]=y

                # Track current / previous position and display index.
                self._lastPosition=self._position
                self._position=x,y
                self._last_display_index=self._display_index
                self._display_index=display_index

                self._addNativeEventToBuffer(event_array)

                self._last_callback_time=logged_time
        except:
            # Best effort: never let an exception escape the native callback.
            ioHub.printExceptionDetailsToStdErr()

        # A truthy value must be returned or mouse events stop propagating.
        return 1
コード例 #7
0
ファイル: client.py プロジェクト: awood3/ioHub
    def _calculateClientServerTimeOffset(self, sampleSize=100):
        """
        Sample the offset between this process's clock and the ioHub Server
        process's clock.

        For each of 'sampleSize' iterations the local time is read, the
        server time is fetched via an RPC to the ioHub server, and the local
        time is read again.  The result is a (sampleSize, 2) float32 array:

            result[i][0] = server_time - local_time   (offset)
            result[i][1] = round-trip delay of the server time request

        On Windows both clocks use a direct QPC implementation, so the offset
        should equal the delay to within ~100 usec.
        """
        offsets_and_delays = N.zeros((sampleSize, 2), dtype='f4')
        for i in xrange(sampleSize):
            t_local_1 = Computer.currentMsec()   # local clock before the RPC
            t_server = self.currentMsec()        # RPC to the ioHub server
            t_local_2 = Computer.currentMsec()   # local clock after the RPC
            offsets_and_delays[i][0] = t_server - t_local_1    # offset
            offsets_and_delays[i][1] = t_local_2 - t_local_1   # delay
            time.sleep(0.001)   # small gap between samples
        return offsets_and_delays
コード例 #8
0
ファイル: tobiiclasses.py プロジェクト: peircej/ioHub
 def findDevice(cls, model=None, product_id=None, timeout=10.0):
     """
     Return the tracker_info of the first detected Tobii tracker matching
     the given model / product_id filters, or None when no match is found
     before 'timeout' seconds elapse.
     """
     # First check trackers that have already been detected.
     for candidate in cls.getDetectedTrackerList():
         if cls._checkForMatch(candidate, model, product_id) is True:
             return candidate

     # Otherwise watch browser events until a match appears, the event
     # stream dries up, or time runs out.
     deadline = Computer.getTime() + timeout
     while Computer.getTime() < deadline:
         try:
             tb_event = TobiiTrackerBrowser.getNextEvent(timeout=0.05)
             if tb_event is None:
                 break
             if isinstance(tb_event, TrackerFoundEvent):
                 candidate = tb_event.tracker_info
                 if TobiiTrackerBrowser._checkForMatch(candidate, model, product_id) is True:
                     return candidate
         except Queue.Empty:
             pass
コード例 #9
0
ファイル: run.py プロジェクト: peircej/ioHub
 def printApplicationStatus(self):
     """Print a status summary for the headless ioHub app to stdout.

     Eyetracker-specific lines are only printed when a tracker device is
     present (self.eyetracker is truthy).
     """
     print ''
     print 'Headless ioHub Status:'
     if self.eyetracker:
         # Elapsed time since self.app_start_time was recorded at startup.
         print '\tRunning Time: %.3f seconds.'%(Computer.getTime()-self.app_start_time)
         print '\tRecording Eye Data: ',self.eyetracker.isRecordingEnabled()
         # The print_* flags are console-output toggles held on self.
         print '\tPrinting Eye Events: ',self.print_eye_event_stream
         print '\tPrinting Mouse Events: ',self.print_mouse_event_stream
         print '\tPrinting Keyboard Events: ',self.print_keyboard_event_stream
         print '\tPrinting Gaze Position: ',self.print_current_gaze_pos
     print ''
コード例 #10
0
ファイル: visualStimUtil.py プロジェクト: peircej/ioHub
    def getPos(self):
        """
        Return the stimulus (x, y) position for the next estimated retrace.

        The elapsed time t is projected forward from the last reported flip
        time in whole retrace intervals, so it refers to the next flip that
        has not yet occurred; the position follows a cos/sin trajectory.
        """
        t=0.0
        if self.lastPositionTime:
            # Advance the flip-time estimate past 'now' one retrace interval
            # at a time.
            flip_estimate=self.lastPositionTime+self.reportedRetraceInterval
            while flip_estimate < Computer.getTime():
                flip_estimate+=self.reportedRetraceInterval
            self.nextFlipTimeEstimate=flip_estimate

            t=flip_estimate-self.startTime

        self.pos=(self.amplX*cos(self.wX*t + self.phaseX),
                  self.amplY*sin(self.wY*t + self.phaseY))

        return self.pos
コード例 #11
0
ファイル: win32.py プロジェクト: peircej/ioHub
    def _getIOHubEventObject(self,native_event_data):
        """
        Convert a native Windows mouse event (plus the time it was logged)
        into the ioHub mouse event list format.
        """
        logged_time, event = native_event_data
        px, py = event.Position

        bstate, etype, bnum = self._mouse_event_mapper[event.Message]

        # A WM_MOUSEMOVE while any button is held down is a drag, not a move.
        if event.Message == self.WM_MOUSEMOVE and event.ActiveButtons > 0:
            etype = EventConstants.MOUSE_DRAG

        # From MSDN: http://msdn.microsoft.com/en-us/library/windows/desktop/ms644939(v=vs.85).aspx
        # event.Time is the elapsed time in ms from system start to when the
        # message was placed in the thread's message queue; it may wrap to
        # zero when the long-integer timer count overflows.  Convert to sec.
        device_time = event.Time / 1000.0

        hub_time = logged_time
        confidence_interval = 0.0
        delay = 0.0

        return [0,
                0,
                0,                          # device id
                Computer._getNextEventID(),
                etype,
                device_time,
                logged_time,
                hub_time,
                confidence_interval,
                delay,
                0,
                event.DisplayIndex,
                bstate,
                bnum,
                event.ActiveButtons,
                px,
                py,
                0,                          # scroll_dx not supported
                0,                          # scroll_x not supported
                event.Wheel,
                event.WheelAbsolute,
                event.Window]
コード例 #12
0
ファイル: screenState.py プロジェクト: peircej/ioHub
 def sendMessage(self, text, mtime=None):
     if mtime is None:
         mtime=Computer.currentSec()
     mtext=text
     try:
         tracker=self.experimentRuntime().getDevice('tracker')
         if tracker is not None and tracker.isConnected() is True:
             mtext="%s : tracker_time [%.6f]"%(mtext, tracker.trackerSec())
             tracker.sendMessage(mtext)
         else:
             print '----------------------'
             print 'Warning: eyetracker is not connected.'
             print 'Msg not sent to eyetracker datafile: '
             print mtext
             print '----------------------'
     except:
         pass
     self.experimentRuntime().hub.sendMessageEvent(mtext,sec_time=mtime)
コード例 #13
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        Event loop for the headless demo: poll the eye tracker, keyboard
        and mouse event handlers until one of them returns True, then shut
        the tracker connection down and clear the event buffers.
        """
        # device shortcuts
        self.keyboard=self.devices.kb
        self.display=self.devices.display
        self.mouse=self.devices.mouse

        self.eyetracker=self.devices.tracker

        # console-output toggles (flipped by keyboard commands elsewhere)
        self.print_eye_event_stream=False
        self.print_current_gaze_pos=False
        self.print_keyboard_event_stream=False
        self.print_mouse_event_stream=False

        self.app_start_time=Computer.getTime()

        self.printCommandOptions()
        self.printApplicationStatus()

        # Loop until a keyboard event with the space, Enter (Return), or
        # Escape key causes an event handler to return True.
        while 1:
            # if an event handler returns True, quit the program
            if self.handleEyeTrackerEvents():
                break
            if self.handleKeyboardEvents():
                break
            if self.handleMouseEvents():
                break

            if self.eyetracker:
                self.printGazePosition()

            # discard any events we do not need from the online event queues
            self.hub.clearEvents('all')

            # realtime access to events does not matter in this demo, so
            # sleep for 20 msec every loop
            time.sleep(0.02)

        # BUG FIX: only close the tracker connection when a tracker device
        # actually exists -- the loop body already guards on self.eyetracker,
        # but the original called setConnectionState unconditionally here.
        if self.eyetracker:
            self.eyetracker.setConnectionState(False)
        self.hub.clearEvents('all')
コード例 #14
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is to it really.
        """

        # PLEASE REMEMBER, THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON.

        # Short-cuts to the devices used in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        # Run the tracker setup / calibration procedure before starting.
        tracker.runSetupProcedure()
        self.hub.clearEvents("all")
        self.hub.wait(0.050)

        current_gaze = [0, 0]

        # Create a psychopy window, full screen resolution, full screen mode,
        # using the ioHub display configuration.
        window = FullScreenWindow(display)

        # Hide the system mouse cursor; a gaussian patch acts as the cursor.
        mouse.setSystemCursorVisibility(False)

        # Ordered dictionary of psychopy stimuli, so stim can be referenced
        # by name and drawn in insertion (z) order.
        psychoStim = OrderedDict()
        psychoStim["grating"] = visual.PatchStim(window, mask="circle", size=75, pos=[-100, 0], sf=0.075)
        psychoStim["fixation"] = visual.PatchStim(
            window, size=25, pos=[0, 0], sf=0, color=[-1, -1, -1], colorSpace="rgb"
        )
        psychoStim["gazePosText"] = visual.TextStim(
            window,
            text=str(current_gaze),
            pos=[100, 0],
            height=48,
            color=[-1, -1, -1],
            colorSpace="rgb",
            alignHoriz="left",
            wrapWidth=300,
        )
        psychoStim["gazePos"] = visual.GratingStim(
            window, tex=None, mask="gauss", pos=current_gaze, size=(50, 50), color="purple"
        )

        [psychoStim[stimName].draw() for stimName in psychoStim]

        Computer.enableHighPriority(True)

        tracker.setRecordingState(True)
        self.hub.wait(0.050)

        # Clear all events from the ioHub event buffers.
        self.hub.clearEvents("all")

        # Loop until we get a keyboard event
        while len(kb.getEvents()) == 0:

            # for each loop, update the grating phase
            psychoStim["grating"].setPhase(0.05, "+")  # advance phase by 0.05 of a cycle

            # and update the gaze contingent gaussian based on the current gaze location.
            # BUG FIX: before the first valid sample (or during track loss) the
            # tracker may not return a usable (x, y) pair; keep the previous
            # gaze position in that case instead of raising on indexing.
            new_gaze = tracker.getLastGazePosition()
            if isinstance(new_gaze, (tuple, list)) and len(new_gaze) == 2:
                current_gaze = int(new_gaze[0]), int(new_gaze[1])

            psychoStim["gazePos"].setPos(current_gaze)
            psychoStim["gazePosText"].setText(str(current_gaze))

            # redraw each stim, in z order
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes you just made get displayed.
            flip_time = window.flip()

            # message the ioHub that a flip occurred; since the ioHub server
            # time of the flip is known it is set directly in the event.
            self.hub.sendMessageEvent("Flip %s" % (str(current_gaze),), sec_time=flip_time)

        # A key was pressed so the loop was exited. Clear the event buffers
        # to avoid an event overflow (currently a known issue).
        self.hub.clearEvents("all")

        tracker.setRecordingState(False)

        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        self.hub.wait(0.250)
        tracker.setConnectionState(False)

        # close necessary files / objects.
        window.close()
コード例 #15
0
ファイル: server.py プロジェクト: peircej/ioHub
 def setProcessAffinity(self, processorList):
     """Restrict the ioHub server process to the given processing unit
     indices, delegating to Computer.setCurrentProcessAffinity."""
     result = Computer.setCurrentProcessAffinity(processorList)
     return result
コード例 #16
0
ファイル: darwin.py プロジェクト: peircej/ioHub
    def _nativeEventCallback(self, *args):
        """Quartz event-tap callback for native OSX mouse events.

        Converts the CGEvent into the ioHub mouse event list format and adds
        a copy to the device's native event buffer.  The original event
        object must always be returned, otherwise mouse events never reach
        the OS (see the final return).
        """
        try:
            proxy, etype, event, refcon = args
            if self.isReportingEvents():
                logged_time = currentSec()

                # The OS disables a tap whose callback is too slow; log and
                # re-enable it, passing the untouched event straight through.
                if etype == kCGEventTapDisabledByTimeout:
                    ioHub.print2err("** WARNING: Mouse Tap Disabled due to timeout. Re-enabling....: ", etype)
                    CGEventTapEnable(self._tap, True)
                    return event
                else:
                    confidence_interval = 0.0
                    delay = 0.0
                    iohub_time = logged_time
                    device_time = CGEventGetTimestamp(event) * self.DEVICE_TIME_TO_SECONDS
                    ioe_type = EventConstants.UNDEFINED
                    px, py = CGEventGetLocation(event)
                    multi_click_count = CGEventGetIntegerValueField(event, kCGMouseEventClickState)
                    mouse_event = NSEvent.eventWithCGEvent_(event)
                    window_handle = mouse_event.windowNumber()

                    # TO DO: Implement multimonitor location based on mouse location support.
                    # Currently always uses monitor index 0

                    # Resolve which display the event position falls on;
                    # fall back to the previous display, else skip the event.
                    display_index = self.getDisplayIndexForMousePosition((px, py))
                    if display_index == -1:
                        if self._last_display_index is not None:
                            display_index = self._last_display_index
                        else:
                            ioHub.print2err(
                                "!!! _nativeEventCallback error: mouse event pos {0} not in any display bounds!!!".format(
                                    event.Position
                                )
                            )
                            ioHub.print2err("!!!  -> SKIPPING EVENT")
                            ioHub.print2err("===============")
                            return event

                    # _validateMousePosition returns True when the position is
                    # in bounds, otherwise an adjusted (x, y) to move to.
                    result = self._validateMousePosition((px, py), display_index)
                    if result != True:
                        ioHub.print2err(
                            "!!! _validateMousePosition made ajustment: {0} to {1}".format((px, py), result)
                        )
                        nx, ny = result
                        display_index = self.getDisplayIndexForMousePosition((nx, ny))
                        ioHub.print2err(
                            "Going to Update mousePosition: {0} => {1} on D {2}".format(
                                (px, py), (ny, ny), display_index
                            )
                        )
                        px, py = nx, ny
                        self._nativeSetMousePos(px, py)

                    # Convert pixel coords to display coords and update the
                    # current / previous position and display-index state.
                    px, py = self._display_device.pixel2DisplayCoord(px, py, display_index)
                    self._lastPosition = self._position
                    self._position = px, py
                    self._last_display_index = self._display_index
                    self._display_index = display_index

                    # TO DO: Supported reporting scroll x info for OSX.
                    # This also suggests not having scoll up and down events and
                    # just having the one scroll event type, regardless of direction / dimension
                    scroll_dx = 0
                    scroll_dy = 0
                    button_state = 0
                    # Classify the native event type into an ioHub event type
                    # and button state.
                    if etype in pressID:
                        button_state = MouseConstants.MOUSE_BUTTON_STATE_PRESSED
                        if multi_click_count > 1:
                            ioe_type = EventConstants.MOUSE_MULTI_CLICK
                        else:
                            ioe_type = EventConstants.MOUSE_BUTTON_PRESS
                    elif etype in releaseID:
                        button_state = MouseConstants.MOUSE_BUTTON_STATE_RELEASED
                        ioe_type = EventConstants.MOUSE_BUTTON_RELEASE
                    elif etype in dragID:
                        ioe_type = EventConstants.MOUSE_DRAG
                    elif etype == kCGEventMouseMoved:
                        ioe_type = EventConstants.MOUSE_MOVE
                    elif etype == kCGEventScrollWheel:
                        ioe_type = EventConstants.MOUSE_SCROLL
                        scroll_dy = CGEventGetIntegerValueField(event, kCGScrollWheelEventPointDeltaAxis1)
                        scroll_dx = CGEventGetIntegerValueField(event, kCGScrollWheelEventPointDeltaAxis2)
                        self._scrollPositionX += scroll_dx
                        self._scrollPositionY += scroll_dy

                    iohub_button_id = self._IOHUB_BUTTON_ID_MAPPINGS.get(etype, 0)

                    # Record pressed (1) / released (0) for this button, then
                    # encode all currently-pressed buttons as a bitmask-style
                    # sum of id * state.
                    if iohub_button_id in self.activeButtons:
                        self.activeButtons[iohub_button_id] = int(
                            button_state == MouseConstants.MOUSE_BUTTON_STATE_PRESSED
                        )

                    pressed_buttons = 0
                    for k, v in self.activeButtons.iteritems():
                        pressed_buttons += k * v

                    # Create Event List
                    # index 0 and 1 are session and exp. ID's
                    # index 2 is (yet to be used) device_id
                    ioe = self._EVENT_TEMPLATE_LIST
                    ioe[3] = Computer._getNextEventID()
                    ioe[4] = ioe_type  # event type code
                    ioe[5] = device_time
                    ioe[6] = logged_time
                    ioe[7] = iohub_time
                    ioe[8] = confidence_interval
                    ioe[9] = delay
                    # index 10 is filter id, not used at this time
                    ioe[11] = display_index
                    ioe[12] = button_state
                    ioe[13] = iohub_button_id
                    ioe[14] = pressed_buttons
                    ioe[15] = px
                    ioe[16] = py
                    ioe[17] = int(scroll_dx)
                    ioe[18] = int(self._scrollPositionX)
                    ioe[19] = int(scroll_dy)
                    ioe[20] = int(self._scrollPositionY)
                    ioe[21] = window_handle

                    # The template list is reused, so buffer a copy.
                    self._addNativeEventToBuffer(copy(ioe))

                self._last_callback_time = logged_time
        except:
            # On any error, log it and disable the tap so a broken callback
            # does not keep firing.
            ioHub.printExceptionDetailsToStdErr()
            CGEventTapEnable(self._tap, False)

        # Must return original event or no mouse events will get to OSX!
        return event
コード例 #17
0
ファイル: streamTest.py プロジェクト: peircej/ioHub
def runTest():
    dataFile = open(os.path.join(ioHub.IO_HUB_DIRECTORY,"examples","labjacktest.dat"), mode = 'w')

    Computer.enableHighPriority(False)

    # MAX_REQUESTS is the number of packets to be read.
    MAX_REQUESTS = 100
    #NUM_CHANNELS is the number of channels to read from
    NUM_CHANNELS=4

    #SCAN_RATE = Hz to scan all channels at. So NUM_CHANNELS * SCAN_RATE == SAMPLING_FREQ
    SCAN_RATES=[500,1000,1500,2000,4000,8000,10000,12000,14000]
    SCAN_RATE_GAIN=1.01
    SETTLING_FACTORS=[0,1]

    RESOLUTION_INDEX=[1,]

    d = u6.U6()
    #
    ## For applying the proper calibration to readings.
    d.getCalibrationData()
    #
    print "configuring U6 stream"
    #

    dataFile.write("scan_rate\tsettling_factor\tres_index\tread_time\tAIN_1V_E\tAIN_5V_I\tAIN_9V_E\tAIN_GND_I\n")

    for scan_rate in SCAN_RATES:
        for settling_factor in SETTLING_FACTORS:
            for res_index in RESOLUTION_INDEX:
                try:
                    d.streamConfig( NumChannels = NUM_CHANNELS, ChannelNumbers = range(NUM_CHANNELS),
                        ChannelOptions = [ 0, 0, 0 , 0 ], SettlingFactor = settling_factor,
                        ResolutionIndex = res_index, ScanFrequency = scan_rate*SCAN_RATE_GAIN )

                    output = cStringIO.StringIO()

                    print "started stream with scan_rate %d, settling_factor %d, res_index %d for %d packets."%(scan_rate,
                                                                                    settling_factor,res_index,MAX_REQUESTS)
                    missed = 0
                    dataCount = 0
                    packetCount = 0
                    stop=0

                    d.streamStart()
                    start = default_timer()
                    print "Start Time: ", start

                    for r in d.streamData():
                        read_time=default_timer()
                        if r is not None:
                            # Our stop condition
                            if dataCount >= MAX_REQUESTS:
                                d.streamStop()
                                print "stream stopped."
                                break

                            if r['errors'] != 0:
                                print "Error: %s ; " % r['errors'], default_timer()

                            if r['numPackets'] != d.packetsPerRequest:
                                print "----- UNDERFLOW : %s : " % r['numPackets'], ()

                            if r['missed'] != 0:
                                missed += r['missed']
                                print "+++ Missed ", r['missed']

                            try:
                                for ia in xrange(len(r['AIN0'])):
                                    output.write("%d\t%d\t%d\t%.6f\t%.9f\t%.9f\t%.9f\t%.9f\n"%(scan_rate,settling_factor,res_index,read_time,r['AIN0'][ia],r['AIN1'][ia],r['AIN2'][ia],r['AIN3'][ia]))
                            except:
                                print 'ERROR SAVING DATA:', len(r['AIN1'])
                                print "".join(i for i in traceback.format_exc())

                            #print "Average of" , len(r['AIN0']), "AIN0," , len(r['AIN1']) , "AIN1 reading(s):", len(r['AIN2']) , "AIN2 reading(s):",  len(r['AIN3']) , "AIN3 reading(s):",
                            #print sum(r['AIN0'])/len(r['AIN0']) , "," , sum(r['AIN1'])/len(r['AIN1']), "," , sum(r['AIN2'])/len(r['AIN2']), "," , sum(r['AIN3'])/len(r['AIN3'])

                            dataCount += 1
                            packetCount += r['numPackets']
                        else:
                            # Got no data back from our read.
                            # This only happens if your stream isn't faster than the
                            # the USB read timeout, ~1 sec.
                            print "No data", default_timer()
                except:
                    print "".join(i for i in traceback.format_exc())
                finally:
                    stop = default_timer()
                    runTime = (stop-start)

                    dataFile.write(output.getvalue())
                    output.close()

                    sampleTotal = packetCount * d.streamSamplesPerPacket
                    scanTotal = sampleTotal / NUM_CHANNELS #sampleTotal / NumChannels

                    print "%s requests with %s packets per request with %s samples per packet = %s samples total." % ( dataCount, (float(packetCount) / dataCount), d.streamSamplesPerPacket, sampleTotal )
                    print "%s samples were lost due to errors." % missed
                    sampleTotal -= missed
                    print "Adjusted number of samples = %s" % sampleTotal

                    print "Scan Rate : %s scans / %s seconds = %s Hz" % ( scanTotal, runTime, float(scanTotal)/runTime )
                    print "Sample Rate : %s samples / %s seconds = %s Hz" % ( sampleTotal, runTime, float(sampleTotal)/runTime )

                    print "The condition took %s seconds." % runTime
                    print '----------------------------------------------------'
    d.close()
    dataFile.close()
コード例 #18
0
ファイル: tobiiclasses.py プロジェクト: peircej/ioHub
    
    print '###################################'
    print ''
    
    print "Test Creating a connected TobiiTracker class, using first available Tobii:"

    tobii_tracker=TobiiTracker()
    print "\tCreated a Connected Tobii Tracker OK."
    print "\tDetails:"
    for k,v in tobii_tracker.getTrackerDetails().iteritems():
        print "\t\t{0}  {1}".format(k,v)
    
    print ''
    print 'Tracker Name: ',tobii_tracker.getName()
    print 'Set Tracker Name (to "tracker [time]") ...'
    tobii_tracker.setName('tracker %.6f'%Computer.getTime())
    print 'Tracker Name now: ',tobii_tracker.getName()

    print ''
    print 'Tracker Head Movement Box: ',tobii_tracker.getHeadBox()

    print ''
    print 'Tracker Physical Placement: ',tobii_tracker.getEyeTrackerPhysicalPlacement()

    print ''
    print 'Tracker Enabled Extensions: ',tobii_tracker.getEnabledExtensions()

    print ''
    print 'Tracker Available Extensions: ',tobii_tracker.getAvailableExtensions()

    print ''
コード例 #19
0
    def runBlockSet(self, blockSet):
        """Run one complete set of experiment blocks.

        blockSet: a condition-set provider; iterating it yields one
        trialSet per block, and iterating a trialSet yields the per-trial
        condition variable rows.

        For every block: optionally run eye tracker calibration, build the
        grid of target positions, run each trial (target position change
        followed by a possible target-center color change), write the
        populated condition variables to the ioDataStore, and stop the
        recording devices at the end of the block.

        Returns True once every block in the set has been run.
        """
        # using getDevice() returns None if the device is not found,
        tracker = self.hub.getDevice("tracker")

        daq = self.hub.getDevice("daq")

        # using self.devices.xxxxx raises an exception if the
        # device is not present
        kb = self.devices.kb
        display = self.devices.display

        # for each block in the group of blocks.....
        for trialSet in blockSet.getNextConditionSet():
            # if an eye tracker is connected,
            if tracker:
                self.instructionScreen.setTimeout(30 * 60.0)  # 30 minute timeout, long enough for a break if needed.
                dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": ["RETURN", "ESCAPE"]})
                self.instructionScreen.setEventTriggers(dtrigger)
                self.instructionScreen.setText(
                    "Press 'Enter' to go to eye tracker Calibration mode.\n\nTo skip calibration and start Data Recording press 'Escape'"
                )
                flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="CALIBRATION_SELECT")
                # RETURN -> calibrate; ESCAPE -> skip; timeout -> calibrate anyway.
                if event and event.key == "RETURN":
                    runEyeTrackerSetupAndCalibration(tracker, self.window)
                elif event and event.key == "ESCAPE":
                    print "** Calibration stage skipped for block ", blockSet.getCurrentConditionSetIteration()
                else:
                    print "** Time out occurred. Entering calibration mode to play it safe. ;)"
                    runEyeTrackerSetupAndCalibration(tracker, self.window)

            dres = display.getPixelResolution()
            # right now, target positions are automatically generated based on point grid size, screen size, and a scaling factor (a gain).
            TARGET_POSITIONS = generatedPointGrid(
                dres[0], dres[1], self.HORZ_SCALING, self.VERT_SCALING, self.HORZ_POS_COUNT, self.VERT_POS_COUNT
            )

            # indexes to display the condition variable order in start out 'non' randomized.
            RAND_INDEXES = np.arange(TARGET_POSITIONS.shape[0])

            # if conditionVariablesProvider was told to randomize trials, then randomize trial index access list.
            if self.conditionVariablesProvider.randomizeTrials is True:
                self.hub.sendMessageEvent(
                    "RAND SEED = {0}".format(ExperimentVariableProvider._randomGeneratorSeed),
                    sec_time=ExperimentVariableProvider._randomGeneratorSeed / 1000.0,
                )
                np.random.shuffle(RAND_INDEXES)

            dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_PRESS, {"key": "SPACE"})
            self.instructionScreen.setEventTriggers(dtrigger)
            self.instructionScreen.setText(
                "Press 'Space' key when Ready to Start Block %d" % (blockSet.getCurrentConditionSetIteration())
            )
            flip_time, time_since_flip, event = self.instructionScreen.switchTo(msg="BLOCK_START")

            # enable high priority for the experiment process only. Not sure this is necessary, or a good idea,
            # based on tests so far frankly. Running at std priority seems to usually be just fine.
            Computer.enableRealTimePriority(True)

            # if we have a tracker, start recording.......
            if tracker:
                tracker.setRecordingState(True)

            # delay a short time to let " the data start flow'in "
            self.hub.wait(0.050)

            # In this paradigm, each 'trial' is the movement from one target location to another.
            # Recording of eye data is on for the whole block of XxY target positions within the block.
            # A rough outline of the runtime / data collection portion of a block is as follows:
            #      a) Start each block with the target at screen center.
            #      b) Wait sec.msec duration after showing the target [ column PRE_POS_CHANGE_INTERVAL ] in excel file
            #      c) Then schedule move of target to next target position at the time of the next retrace.
            #      d) Once the Target has moved to the 2nd position for the trial, wait PRE_COLOR_CHANGE_INTERVAL
            #         sec.msec before 'possibly changing the color of the center of the target. The new color is
            #         determined by the FP_INNER_COLOR2 column. If no color change is wanted, simply make this color
            #         equal to the color of the target center in column FP_INNER_COLOR for that row of the spreadsheet.
            #      e) Once the target has been redrawn (either with or without a color change, it stays in position for
            #         another POST_COLOR_CHANGE_INTERVAL sec.msec. Since ioHub is being used, all keyboard activity
            #         is being recorded to the ioDataStore file, so there is no need really to 'monitor' for
            #         the participants key presses, since we do not use it for feedback. It can be retrieved from the
            #         data file for analysis post hoc.
            #      f) After the POST_COLOR_CHANGE_INTERVAL, the current 'trial' officially ends, and the next trial
            #         starts, with the target remaining in the position it was at in the end of the last trial, but
            #         with the target center color switching to FP_INNER_COLOR.
            #      g) Then the sequence from b) starts again for the number of target positions in the block
            #        (49 currently).
            #

            self.hub.clearEvents("all")

            self._TRIAL_STATE = None
            self.targetScreen.nextAreaOfInterest = None

            for trial in trialSet.getNextConditionSet():
                currentTrialIndex = trialSet.getCurrentConditionSetIndex()

                nextTargetPosition = TARGET_POSITIONS[currentTrialIndex]
                trial["FP_X"] = nextTargetPosition[0]
                trial["FP_Y"] = nextTargetPosition[1]

                ppd_x, ppd_y = self.devices.display.getPixelsPerDegree()

                # Convert target radii from degrees to pixels (x and y separately).
                fp_outer_radius = int(trial["FP_OUTER_RADIUS"] * ppd_x), int(trial["FP_OUTER_RADIUS"] * ppd_y)
                fp_inner_radius = int(trial["FP_INNER_RADIUS"] * ppd_x), int(trial["FP_INNER_RADIUS"] * ppd_y)

                self.targetScreen.setScreenColor(tuple(trial["SCREEN_COLOR"]))
                self.targetScreen.setTargetOuterColor(tuple(trial["FP_OUTER_COLOR"]))
                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR"]))
                self.targetScreen.setTargetOuterSize(fp_outer_radius)
                self.targetScreen.setTargetInnerSize(fp_inner_radius)

                self.hub.clearEvents("kb")

                self.targetScreen.setTimeout(trial["PRE_POS_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_PRE_POS_CHANGE_KEY"
                target_pos1_color1_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_INITIAL_COLOR"
                )

                self.targetScreen.setTargetPosition(nextTargetPosition)
                self.targetScreen.setTimeout(trial["PRE_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_POS_CHANGE_KEY"

                # create a 3 degree circular region (1.5 degree radius) around the next target position
                # for use as out invisible boundary
                self.targetScreen.nextAreaOfInterest = Point(*nextTargetPosition).buffer(((ppd_x + ppd_y) / 2.0) * 1.5)

                target_pos2_color1_time, time_since_flip, event = self.targetScreen.switchTo(msg="TRIAL_TARGET_MOVE")

                self.targetScreen.setTargetInnerColor(tuple(trial["FP_INNER_COLOR2"]))
                self.targetScreen.setTimeout(trial["POST_COLOR_CHANGE_INTERVAL"])
                self._TRIAL_STATE = trial, "FIRST_POST_COLOR_CHANGE_KEY"
                target_pos2_color2_time, time_since_flip, event = self.targetScreen.switchTo(
                    msg="TRIAL_TARGET_COLOR_TWO"
                )

                # end of 'trial sequence'
                # send condition variables used / populated to ioDataStore
                toSend = [self.hub.experimentSessionID, trialSet.getCurrentConditionSetIteration()]
                trial["TSTART_TIME"] = target_pos1_color1_time
                trial["APPROX_TEND_TIME"] = target_pos2_color2_time + time_since_flip
                trial["target_pos1_color1_time"] = target_pos1_color1_time
                trial["target_pos2_color1_time"] = target_pos2_color1_time
                trial["target_pos2_color2_time"] = target_pos2_color2_time

                # Record gaze-in-AOI results, if the target screen detected any.
                if self.targetScreen.aoiTriggeredID:
                    trial["VOG_SAMPLE_ID_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredID
                    trial["VOG_SAMPLE_TIME_AOI_TRIGGER"] = self.targetScreen.aoiTriggeredTime
                if self.targetScreen.aoiBestGaze:
                    trial["BEST_GAZE_X"] = self.targetScreen.aoiBestGaze[0]
                    trial["BEST_GAZE_Y"] = self.targetScreen.aoiBestGaze[1]

                self._TRIAL_STATE = None
                if self.targetScreen.nextAreaOfInterest:
                    del self.targetScreen.nextAreaOfInterest
                    self.targetScreen.nextAreaOfInterest = None

                toSend.extend(trial.tolist())
                self.hub.addRowToConditionVariableTable(toSend)

            # end of block of trials, clear screen
            self.clearScreen.flip(text="BLOCK_END")

            self._TRIAL_STATE = None

            # if tracking eye position, turn off eye tracking.
            if tracker:
                tracker.setRecordingState(False)
            if daq:
                daq.enableEventReporting(False)

            # turn off high priority so python GC can clean up if it needs to.
            Computer.disableHighPriority()

            # give a 100 msec delay before starting next block
            self.hub.wait(0.100)

        # end of block set, return from method.
        self.clearScreen.flip(text="BLOCK_SET_END")
        return True
コード例 #20
0
 def currentSec(self):
     """Return the current time, in sec.msec format, from the shared
     ioHub Computer clock."""
     now = Computer.currentSec()
     return now
コード例 #21
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self, *args, **kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is too it really.

        Flow: run eye tracker calibration, open a full screen window, then
        show an image while drawing a gaze-contingent dot until any
        keyboard event arrives; finally stop recording and show a
        goodbye screen.
        """

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        tracker = self.hub.devices.tracker
        display = self.hub.devices.display
        kb = self.hub.devices.kb
        mouse = self.hub.devices.mouse

        # Abort the whole experiment if the participant could not be calibrated.
        calibrationOK = tracker.runSetupProcedure()
        if calibrationOK is False:
            print "NOTE: Exiting application due to failed calibration."
            return

        # Create a psychopy window, full screen resolution, full screen mode...
        self.window = FullScreenWindow(display)

        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, you you can use it to reference stim by a name or by 'zorder'
        image_name = "./images/party.png"
        imageStim = visual.ImageStim(self.window, image=image_name, name="image_stim")
        # Gaze dot starts far off-screen so it is invisible until a gaze position arrives.
        gaze_dot = visual.GratingStim(
            self.window, tex=None, mask="gauss", pos=(-2000, -2000), size=(100, 100), color="green"
        )

        # create screen states

        # screen state that can be used to just clear the screen to blank.
        self.clearScreen = ClearScreen(self)
        self.clearScreen.setScreenColor((128, 128, 128))

        self.clearScreen.flip(text="EXPERIMENT_INIT")

        # Log session / display metadata as ioHub message events for post hoc analysis.
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO START")
        self.clearScreen.sendMessage("ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.clearScreen.sendMessage(
            "Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID, self.hub.experimentSessionID)
        )
        self.clearScreen.sendMessage(
            "Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(
                display.getIndex(), display.getPixelResolution(), display.getCoordinateType()
            )
        )
        self.clearScreen.sendMessage("Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.clearScreen.sendMessage("IO_HUB EXPERIMENT_INFO END")

        # Screen for showing text and waiting for a keyboard response or something
        instuction_text = "Press Space Key".center(32) + "\n" + "to Start Experiment.".center(32)
        dtrigger = DeviceEventTrigger(kb, EventConstants.KEYBOARD_CHAR, {"key": "SPACE"})
        timeout = 5 * 60.0
        self.instructionScreen = InstructionScreen(self, instuction_text, dtrigger, timeout)
        self.instructionScreen.setScreenColor((128, 128, 128))
        # flip_time,time_since_flip,event=self.instructionScreen.switchTo("CALIBRATION_WAIT")

        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("START_EXPERIMENT_WAIT")

        tracker.setRecordingState(True)
        self.clearScreen.flip()
        # short delay so eye data is flowing before the trial visuals start
        self.hub.wait(0.050)

        # Clear all events from the global event buffer,
        # and from the all device level event buffers.
        self.hub.clearEvents("all")

        # Loop until we get a keyboard event
        while not kb.getEvents():
            # gpos is falsy when no valid gaze position is available
            # (e.g. during blinks) -- then draw the image without the dot.
            gpos = tracker.getLastGazePosition()
            if gpos:
                gaze_dot.setPos(gpos)
                imageStim.draw()
                gaze_dot.draw()
            else:
                imageStim.draw()

            self.window.flip()
            flip_time = Computer.currentSec()
            self.hub.sendMessageEvent("SYNCTIME %s" % (image_name,), sec_time=flip_time)

        self.hub.clearEvents("all")

        # A key was pressed so exit experiment.
        # Wait 250 msec before ending the experiment
        # (makes it feel less abrupt after you press the key to quit IMO)
        self.hub.wait(0.250)

        tracker.setRecordingState(False)
        tracker.setConnectionState(False)

        self.clearScreen.flip(text="EXPERIMENT_COMPLETE")
        instuction_text = (
            "Experiment Finished".center(32)
            + "\n"
            + "Press 'SPACE' to Quit.".center(32)
            + "\n"
            + "Thank You.".center(32)
        )
        self.instructionScreen.setText(instuction_text)
        self.instructionScreen.switchTo("EXPERIMENT_COMPLETE_WAIT")
コード例 #22
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self,*args,**kwargs):
        """
        The run method contains your experiment logic. It is equal to what would be in your main psychopy experiment
        script.py file in a standard psychopy experiment setup. That is all there is too it really.

        Flow: for trial_count trials, stream AnalogInput events while
        animating a grating and displaying the latest 8 analog input
        channel values on screen; each trial ends when ESCAPE is pressed.
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        # Let's make some short-cuts to the devices we will be using in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        ain=self.devices.ain
        
        # get the number of trials entered in the session dialog
        user_params=self.getSavedUserDefinedParameters()
        print 'user_params: ', user_params
        # default to 5 trials if the dialog did not provide a value
        trial_count=int(user_params.get('trial_count',5))
           
        #Computer.enableHighPriority()

        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Create a psychopy window, full screen resolution, full screen mode
        psychoWindow = FullScreenWindow(display)
        
        # Hide the 'system mouse cursor' so we can display a cool gaussian mask for a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary is one that returns keys in the order
        # they are added, you you can use it to reference stim by a name or by 'zorder'
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=150,pos=[0,0], sf=.075)

        psychoStim['title'] = visual.TextStim(win=psychoWindow, 
                              text="Analog Input Test. Trial 1 of %d"%(trial_count),
                              pos = [0,200], height=36, color=[1,.5,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        # template string for displaying the 8 analog input channel values
        ai_values_string_proto="AI_0: %.3f\tAI_1: %.3f\tAI_2: %.3f\tAI_3: %.3f\t\nAI_4: %.3f\tAI_5: %.3f\tAI_6: %.3f\tAI_7: %.3f"
        ai_values=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
        psychoStim['analog_input_values'] = visual.TextStim(win=psychoWindow, 
                              text=ai_values_string_proto%ai_values,
                              pos = [0,-200], height=24, color=[1,1,0], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        psychoStim['instruction'] = visual.TextStim(win=psychoWindow, 
                              text="Press ESCAPE Key for Next Trial",
                              pos = [0,-300], height=36, color=[1,1,0.5], 
                              colorSpace='rgb',
                              alignHoriz='center',alignVert='center',
                              wrapWidth=800.0)

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        
        # Loop until we get a keyboard event with the space, Enter (Return), or Escape key is pressed.
        for i in range(trial_count):        
            # Clear all events from the global and device level event buffers.
            psychoStim['title'].setText("Analog Input Test. Trial %d of %d"%(i+1,trial_count))
            self.hub.clearEvents('all')
            
            #start streamin AnalogInput data        
            ain.enableEventReporting(True)
            
            QUIT_TRIAL=False
            
            while QUIT_TRIAL is False:
    
                # for each loop, update the grating phase
                psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
    
                # update analog input values to display; only the most recent
                # event is shown, earlier ones in the batch are discarded.
                analog_input_events=ain.getEvents()
                if analog_input_events:
                    event_count=len(analog_input_events)
                    event=analog_input_events[-1]
                    ai_values=(event.AI_0,event.AI_1,event.AI_2,event.AI_3,
                               event.AI_4,event.AI_5,event.AI_6,event.AI_7)
                    psychoStim['analog_input_values'].setText(ai_values_string_proto%ai_values)
    
                # redraw the stim
                [psychoStim[stimName].draw() for stimName in psychoStim]
    
                # flip the psychopy window buffers, so the stim changes you just made get displayed.
                psychoWindow.flip()
                # it is on this side of the call that you know the changes have been displayed, so you can
                # make a call to the ioHub time method and get the time of the flip, as the built in
                # time methods represent both experiment process and ioHub server process time.
                # Most times in ioHub are represented sec.msec format to match that of Psychopy.
                flip_time=Computer.currentSec()
    
                # send a message to the iohub with the message text that a flip occurred and what the mouse position was.
                # since we know the ioHub server time the flip occurred on, we can set that directly in the event.
                self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)
        
                # for each new keyboard char event, check if it matches one of the end example keys.
                for k in kb.getEvents(EventConstants.KEYBOARD_CHAR):
                    if k.key in ['ESCAPE', ]:
                        print 'Trial Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                        QUIT_TRIAL=True

            
            # clear the screen
            psychoWindow.flip()
 
            # stop analog input recording
            ain.enableEventReporting(False)
                    
            # delay 1/4 second before next trial
            actualDelay=self.hub.delay(0.250)
    
        # wait 250 msec before ending the experiment
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # for fun, test getting a bunch of events at once, likely causing a mutlipacket getEvents()
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # _close neccessary files / objects, 'disable high priority.
        psychoWindow.close()
コード例 #23
0
ファイル: u6Noise.py プロジェクト: peircej/ioHub
"""
Name: noise.py
Intended Device: U6
Desc: An example program that will calculate the values that can be found in
      Appendix B of the U6 User's Guide.
"""
import ioHub
from ioHub.devices import Computer
import pylabjack
from pylabjack import u6 # Import the u6 class
import math # Need math for square root and log.
default_timer = ioHub.highPrecisionTimer

# Run at normal process priority for this utility script.
Computer.enableHighPriority(False)

# The size of the various ranges (full span in volts: e.g. +/-10 V -> 20)
ranges = [20, 2, 0.2, 0.02]

# A nice string representation of each range
strRanges = ["+/- 10", "+/- 1", "+/- 0.1", "+/- 0.01"]

# Numerical versions of range that the LabJack expects
vRanges = range(4)

def calcNoiseAndResolution(d, resolutionIndex, voltageRange):
    """
    Takes 128 readings and calculates noise and resolution
    """
    # Make a list to hold our readings
    readings = []
    
コード例 #24
0
ファイル: __init__.py プロジェクト: peircej/ioHub
 def flip(self, clearBuffer=True):
     """Swap the window's video buffers and return the time of the flip.

     Delegates the actual buffer swap to the base ``Window.flip``
     implementation, then samples the ioHub time so callers can use the
     returned value as the retrace timestamp.
     """
     Window.flip(self, clearBuffer)
     flip_time = Computer.getTime()
     return flip_time
コード例 #25
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self,*args,**kwargs):
        """Run the XInput gamepad demo until the ESCAPE key is pressed.

        Each frame: reads the newest gamepad event, moves a fixation
        square with the two triggers and a purple gaussian dot with the
        right thumb stick, redraws all stimuli, flips the window, and
        sends a "Flip ..." message event to the ioHub server. Keyboard
        keys 'b'/'u' print battery/capability info; any other key press
        rumbles the pad; ESCAPE ends the loop. Blocks until finished.
        """
        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        print "THIS DEMO REQUIRES A CONNECTED (WIRED OR WIRELESS) XBOX 360"
        print "GAMEPAD OR OTHER XINPUT COMPATIBLE DEVICE. DEVICE ALSO NEEDS TO "
        print " BE TURNED ON. ;) "

        print ""
        print "\tPRESS 'ESCAPE' KEY TO EXIT."
        print "\tPRESS 'b' KEY TO PRINT BATTERY INFO TO STDOUT."
        print "\tPRESS 'u' KEY TO PRINT CAPABILITIES INFO TO STDOUT."
        print "\tPRESS ANY OTHER KEY TO MAKE GAMEPAD *RUMBLE* FOR 1 SEC."


        # Short-cuts to the ioHub devices used in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb
        gamepad=self.devices.gamepad


        # Read the current resolution of the monitor's screen in pixels.
        # The window size is set to match it, giving a full screen borderless window.
        screen_resolution= display.getPixelResolution()


        # Create psychopy full screen window using the display device config.
        psychoWindow = FullScreenWindow(display)

        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))

        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Hide the 'system mouse cursor' so we can display a gaussian mask as a mouse cursor.
        mouse.setSystemCursorVisibility(False)

        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary
        # returns keys in the order they were added, so stimuli can be referenced
        # by name while still drawing in a fixed 'zorder'.
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
        psychoStim['text'] = visual.TextStim(psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)


        # Clear all events from the global event buffer, and from the keyboard event buffer.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until a keyboard event with the ESCAPE key is received.
        while QUIT_EXP is False:

            # read gamepad events and take the last (newest) one if any exist
            gpevents=gamepad.getEvents()
            if len(gpevents)>0:
                gpevents=gpevents[-1]

                ## Display the names of the currently pressed buttons.
                #
                psychoStim['text'].setText(str([k for k,v in gpevents.buttons.iteritems() if v is True]))
                #
                ###

                # Use the 2 finger triggers for the fixation square position (so it
                # will be at the bottom left hand corner of the screen when the
                # triggers are not pressed).
                #
                fixationX=self.normalizedValue2Pixel(gpevents.leftTrigger,screen_resolution[0], 0)
                fixationY=self.normalizedValue2Pixel(gpevents.rightTrigger,screen_resolution[1], 0)
                psychoStim['fixation'].setPos((fixationX,fixationY))
                #
                #####

                # Use the Right Thumb Stick for the purple gaussian spot position.
                #

                x,y,mag=gpevents.rightThumbStick # sticks are 3 item lists (x,y,magnitude)
                currentPosition[0]=self.normalizedValue2Pixel(x*mag,screen_resolution[0], -1)
                currentPosition[1]=self.normalizedValue2Pixel(y*mag,screen_resolution[1], -1)
                psychoStim['mouseDot'].setPos(currentPosition)
                #
                ###

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # redraw stim (list comprehension used purely for its draw() side effects)
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes just made get displayed.
            psychoWindow.flip()
            # It is on this side of the call that the changes are known to have been
            # displayed, so a built-in time method can be called here to get the event
            # time of the flip; the built-in time methods represent both experiment
            # process and ioHub server process time.
            flip_time=Computer.currentSec()

            # Send a message to the ioHub with text stating a flip occurred and what the
            # mouse position was. Since the ioHub server time of the flip is known, it
            # can be set directly in the event via sec_time.
            self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)


            # for each new keyboard event, check if it matches one of the end-example keys.
            for k in kb.getEvents():
                # key: the string representation of the key pressed, A-Z if a-zA-Z
                #      pressed, 0-9 if 0-9 pressed, etc. To get the mapping from a
                #      key_id to a key string, use
                #
                #      key_string=EventConstants.IDToName(key_event['key_id'])
                #
                # char: the ascii char for the key pressed. This field factors in whether
                #       shift was also pressed, so typing 's' == char field of 's', while
                #       typing SHIFT+s == char field of 'S'. This is in contrast to the
                #       key field, which always returns upper case values regardless of
                #       shift state. If the character pressed is not an ascii printable
                #       character, this field may contain non-printable data.
                if k.key in ['ESCAPE',]:
                    print 'Quit key pressed: ',k.key
                    QUIT_EXP=True
                else:
                    if k.type == EventConstants.KEYBOARD_PRESS:
                        if k.key in['B','b']:
                            bat=gamepad.updateBatteryInformation()
                            print "Bat Update: ",bat
                            bat=gamepad.getLastReadBatteryInfo()
                            print "Bat Last Read: ",bat
                        elif k.key in['U','u']:
                            bat=gamepad.updateCapabilitiesInformation()
                            print "Cap Update: ",bat
                            bat=gamepad.getLastReadCapabilitiesInfo()
                            print "Cap Last Read: ",bat
                        else:
                            # rumble the pad: 50% low frequency motor,
                            # 25% high frequency motor, for 1 second.
                            r=gamepad.setRumble(50.0,25.0,1.0)

        # wait 250 msec before ending the experiment (makes it feel less
        # abrupt after you press the key)
        self.hub.wait(0.250)

        # For fun, test getting a bunch of events at once,
        # likely causing a multipacket getEvents().
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime= Computer.currentSec()
        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # Close necessary files / objects.
        psychoWindow.close()
コード例 #26
0
ファイル: tobiiclasses.py プロジェクト: peircej/ioHub
    def __init__(self, eyetracker_info=None, product_id=None, model=None, mainloop=None, create_sync_manager=True):
        """Connect to a Tobii eye tracker, optionally creating a time-sync manager.

        Params:
            eyetracker_info: pre-discovered tracker info; when None the
                TobiiTrackerBrowser is used to find a device matching
                `model` / `product_id`.
            product_id / model: optional filters used during discovery.
            mainloop: an existing Tobii SDK mainloop thread to reuse; when
                None one is obtained from the browser or created fresh.
            create_sync_manager: when True, also creates the Tobii Clock and
                sync.SyncManager used to map tracker time to local time.

        Raises:
            exceptions.BaseException: if no matching tracker is found, or if
                the async connection does not complete within ~10 seconds.
        """
        # Requested / discovered device and loop state.
        self._eyetracker_info=eyetracker_info
        self._requested_product_id=product_id
        self._requested_model=model
        self._mainloop=mainloop
        # Populated once the async create callback delivers the tracker object.
        self._eyetracker=None
        self._queue=None
        self._tobiiClock = None
        self._getTobiiClockResolution=None
        self._getTobiiClockTime=None
        self._sync_manager = None
        self._syncTimeEventDeque=None
        self._isRecording=False

        # Discover a device if the caller did not supply one. If the browser
        # was not already running, start it just for the lookup and stop it
        # again (keeping its mainloop for our own use when a device is found).
        if eyetracker_info is None:
            if not TobiiTrackerBrowser.isActive():
                TobiiTrackerBrowser.start()
                self._eyetracker_info=TobiiTrackerBrowser.findDevice(model,product_id)
                if self._eyetracker_info:
                    self._mainloop=TobiiTrackerBrowser.getMainLoop()
                TobiiTrackerBrowser.stop()
            else:
                self._eyetracker_info=TobiiTrackerBrowser.findDevice(model,product_id)

        if self._eyetracker_info is None:
            raise exceptions.BaseException("Could not find a Tobii Eye Tracker matching requirements.")

        # Ensure a Tobii SDK mainloop thread exists, reusing the browser's
        # when available, otherwise initializing the SDK and starting one.
        if self._mainloop is None:
            if TobiiTrackerBrowser.isActive():
                self._mainloop=TobiiTrackerBrowser.getMainLoop()
            else:
                tobii.sdk.init()
                self._mainloop = tobii.sdk.mainloop.MainloopThread()
                self._mainloop.start()

        # Queue through which the async creation callback delivers events.
        self._queue=Queue.Queue()

        # Kick off the asynchronous tracker connection; on_eyetracker_created
        # is expected to put a TobiiTrackerCreatedEvent on self._queue.
        tobii.sdk.eyetracker.Eyetracker.create_async(self._mainloop,self._eyetracker_info,self.on_eyetracker_created)

        # Poll the queue for up to 10 seconds waiting for the created event.
        stime=Computer.getTime()
        while Computer.getTime()-stime<10.0:
            try:
                event=self._queue.get(block=True,timeout=.1)
                if isinstance(event,TobiiTrackerCreatedEvent):
                    self._eyetracker=event.tracker_object
                    # Subscribe to tracker state-change notifications.
                    self._eyetracker.events.OnFramerateChanged += self.on_external_framerate_change
                    self._eyetracker.events.OnHeadMovementBoxChanged += self.on_head_box_change
                    self._eyetracker.events.OnXConfigurationChanged += self.on_x_series_physical_config_change

                    break
                # NOTE(review): task_done() is only reached for non-created
                # events; the created event is never marked done — confirm
                # whether queue.join() is ever used with this queue.
                self._queue.task_done()
            except Queue.Empty:
                pass

        if self._eyetracker is None:
            raise exceptions.BaseException("Could not connect to Tobii. Timeout.")

        if create_sync_manager:
            self._eyetracker.events.OnError += self.on_eyetracker_error
            # Local clock plus bound-method shortcuts used on hot paths.
            self._tobiiClock = Clock()
            self._getTobiiClockResolution=self._tobiiClock.get_resolution
            self._getTobiiClockTime=self._tobiiClock.get_time
            # Keep only the 32 most recent sync-status events.
            self._syncTimeEventDeque=collections.deque(maxlen=32)
            self._sync_manager = sync.SyncManager(self._tobiiClock,
                                         self._eyetracker_info,
                                         self._mainloop,
                                         self.on_sync_error,
                                         self.on_sync_status)
コード例 #27
0
ファイル: server.py プロジェクト: peircej/ioHub
 def enableHighPriority(self,disable_gc=True):
     """Switch the ioHub server process to high priority scheduling.

     disable_gc: forwarded to Computer.enableHighPriority; when True
     (the default) Python garbage collection is presumably also turned
     off while high priority is active — TODO confirm against the
     Computer implementation.
     """
     gc_flag = disable_gc
     Computer.enableHighPriority(gc_flag)
コード例 #28
0
ファイル: server.py プロジェクト: peircej/ioHub
 def disableHighPriority(self):
     """Return the ioHub server process to normal priority scheduling.

     Delegates to the Computer utility class, which owns process
     priority state.
     """
     Computer.disableHighPriority()
コード例 #29
0
ファイル: server.py プロジェクト: peteristhegreat/ioHub
 def currentSec(self):
     """Return the current time in seconds, as reported by Computer.currentSec()."""
     return Computer.currentSec()
コード例 #30
0
ファイル: server.py プロジェクト: peircej/ioHub
 def getProcessAffinity(self):
     """Return the processor affinity of the current (ioHub server) process.

     Delegates to Computer.getCurrentProcessAffinity().
     """
     return Computer.getCurrentProcessAffinity()
コード例 #31
0
ファイル: run.py プロジェクト: peircej/ioHub
    def run(self,*args,**kwargs):
        """Run the keyboard / noisy-mouse demo until ESCAPE is pressed.

        Each frame: advances a grating's phase, moves a gaussian dot to
        the current (optionally jittered) mouse position, redraws and
        flips the window, sends a "Flip ..." message event to ioHub, and
        prints/displays each new keyboard event. An ESCAPE KEYBOARD_CHAR
        event ends the loop. Blocks until finished.
        """

        # PLEASE REMEMBER , THE SCREEN ORIGIN IS ALWAYS IN THE CENTER OF THE SCREEN,
        # REGARDLESS OF THE COORDINATE SPACE YOU ARE RUNNING IN. THIS MEANS 0,0 IS SCREEN CENTER,
        # -x_min, -y_min is the screen bottom left
        # +x_max, +y_max is the screen top right
        #
        # *** RIGHT NOW, ONLY PIXEL COORD SPACE IS SUPPORTED. THIS WILL BE FIXED SOON. ***

        # When True, +/-10 pixel uniform jitter is added to the mouse dot each frame.
        ENABLE_NOISY_MOUSE=True


        # Short-cuts to the ioHub devices used in this 'experiment'.
        mouse=self.devices.mouse
        display=self.devices.display
        kb=self.devices.kb

        #Computer.enableHighPriority()

        # Create a psychopy window, using settings from the Display device config.
        psychoWindow =  FullScreenWindow(display)#,res=(500,500),fullscr=False,allowGUI=True)

        # Hide the 'system mouse cursor' so a gaussian mask can act as a mouse cursor.
        #mouse.setSystemCursorVisibility(False)
        # Set the mouse position to 0,0, which means the 'center' of the screen.
        mouse.setPosition((0.0,0.0))
        # Read the current mouse position (should be 0,0)  ;)
        currentPosition=mouse.getPosition()

        # Constrain the mouse to the display the window is on.
        mouse.lockMouseToDisplayID(display.getIndex())
        # Create an ordered dictionary of psychopy stimuli. An ordered dictionary
        # returns keys in the order they were added, so stimuli can be referenced
        # by name while still drawing in a fixed 'zorder'.
        psychoStim=OrderedDict()
        psychoStim['grating'] = visual.PatchStim(psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
        psychoStim['fixation'] =visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,  color=[-1,-1,-1], colorSpace='rgb')
        psychoStim['keytext'] = visual.TextStim(psychoWindow, text=u'?', pos = [100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['ucodetext'] = visual.TextStim(psychoWindow, text=u'?', pos = [-100,200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mods'] = visual.TextStim(psychoWindow, text=u'?', pos = [0,-200], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',alignVert='center',wrapWidth=400.0)
        psychoStim['mouseDot'] =visual.GratingStim(psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')

        # Clear all events from the global and device level event buffers.
        self.hub.clearEvents('all')

        QUIT_EXP=False
        # Loop until a keyboard char event for the Escape key is received.
        while QUIT_EXP is False:

            # for each loop, update the grating phase
            psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle

            # update the mouse contingent gaussian based on the current mouse location
            mx,my=mouse.getPosition()
            if ENABLE_NOISY_MOUSE:
                mx=np.random.random_integers(mx-10,mx+10)
                my=np.random.random_integers(my-10,my+10)
            psychoStim['mouseDot'].setPos((mx,my))


            # redraw the stim (list comprehension used purely for draw() side effects)
            [psychoStim[stimName].draw() for stimName in psychoStim]

            # flip the psychopy window buffers, so the stim changes just made get displayed.
            psychoWindow.flip()
            # It is on this side of the call that the changes are known to have been
            # displayed, so the ioHub time method can be called here to get the time
            # of the flip; the built-in time methods represent both experiment
            # process and ioHub server process time, in sec.msec format to match
            # Psychopy.
            flip_time=Computer.currentSec()

            # Send a message to the ioHub with text stating a flip occurred and what the
            # mouse position was. Since the ioHub server time of the flip is known, it
            # can be set directly in the event via sec_time.
            self.hub.sendMessageEvent("Flip %s"%(str(currentPosition),), sec_time=flip_time)

            # get any new keyboard char events from the keyboard device


            # for each new keyboard character event, check if it matches one of the end-example keys.
            for k in kb.getEvents():
                if k.key.upper() in ['ESCAPE', ] and k.type==EventConstants.KEYBOARD_CHAR:
                    print 'Quit key pressed: ',k.key,' for ',k.duration,' sec.'
                    QUIT_EXP=True
                # NOTE(review): the format labels suggest time should be arg {1},
                # but k.time is passed as arg {0} — confirm the intended field order.
                print u'{0}: time: {1}\t\tord: {2}.\t\tKey: [{3}]\t\tMods: {4}'.format(k.time,EventConstants.getName(k.type),k.ucode,k.key,k.modifiers)
                psychoStim['keytext'].setText(k.key)
                psychoStim['ucodetext'].setText(unichr(k.ucode))
                psychoStim['mods'].setText(str(k.modifiers))


            #for e in mouse.getEvents():
            #    print 'Event: ',e

            self.hub.clearEvents('all')
        # wait 250 msec before ending the experiment (makes it feel less abrupt after you press the key)
        actualDelay=self.hub.wait(0.250)
        print "Delay requested %.6f, actual delay %.6f, Diff: %.6f"%(0.250,actualDelay,actualDelay-0.250)

        # For fun, test getting a bunch of events at once, likely causing a multipacket getEvents().
        stime = Computer.currentSec()
        events=self.hub.getEvents()
        etime=Computer.currentSec()

        if events is None:
            events=[]

        print 'event count: ', len(events),' delay (msec): ',(etime-stime)*1000.0

        # Close necessary files / objects.
        psychoWindow.close()
コード例 #32
0
ファイル: run.py プロジェクト: peircej/ioHub
# the default ioHub devices: Keyboard, Mouse, and Display.
# The first arg is the experiment code to use for the ioDataStore Event storage,
# the second arg is the session code to give to the current session of the 
# experiment. Session codes must be unique for a given experiment code within an
# ioDataStore hdf5 event file.
import random
# Start the ioHub server for experiment code "exp_code" with a randomized
# session code (session codes must be unique per experiment code within an
# ioDataStore hdf5 event file).
io=quickStartHubServer("exp_code","sess_%d"%(random.randint(1,10000)))

# By default, keyboard, mouse, and display devices are created if no
# config info is passed to the ioHubConnection class above.
mouse=io.devices.mouse
display=io.devices.display
keyboard=io.devices.keyboard

# Switch the psychopy (experiment) process to high priority scheduling.
Computer.enableHighPriority()

# Create a psychopy window: full screen resolution, full screen mode, pix units,
# with no border, using the monitor default profile name used by ioHub,
# which is created on the fly right now by the script. (ioHubDefault)
psychoWindow = visual.Window(display.getPixelResolution(),
                             monitor=display.getPsychopyMonitorName(),
                             units=display.getCoordinateType(),
                             fullscr=True,
                             allowGUI=False,
                             screen=display.getIndex())

# Hide the 'system mouse cursor' so a gaussian mask can act as a mouse cursor.
mouse.setSystemCursorVisibility(False)

# Set the mouse position to 0,0, which means the 'center' of the screen.