コード例 #1
0
    def get_valid_data_sample(self):
        """
        Get one on-screen gaze sample and convert it to degrees.

        The coordinate system is centered on the middle of the screen
        (0, 0); the y axis is inverted relative to screen pixels.

        Returns:
            tuple: (res, x, y) where res is the tracker status code
            (1 means a sample was available) and x, y are gaze
            coordinates in degrees. When no valid on-screen sample was
            obtained, x and y stay at 0.
        """

        # Defaults returned when no valid on-screen sample is available.
        x = 0
        y = 0
        res, sampleData = self.et.get_sample()
        if res == 1:

            # Right-eye gaze position in screen pixels.
            xt = sampleData.rightEye.gazeX
            yt = sampleData.rightEye.gazeY

            # Only convert samples that fall on the screen. The original
            # wrapped a Python `or`-chain in np.any(), which short-circuits
            # before numpy is involved; a plain chained comparison on the
            # scalar sample is equivalent and clearer.
            if 0 < xt <= self.screenSize[0] and 0 < yt <= self.screenSize[1]:
                # Recenter so (0, 0) is the middle of the screen and flip y.
                x = xt - self.screenSize[0] / 2
                y = -1 * (yt - self.screenSize[1] / 2)
                x = misc.pix2deg(x, self.mon)
                y = misc.pix2deg(y, self.mon)

        return res, x, y
コード例 #2
0
ファイル: __init__.py プロジェクト: jonathanoroberts/psychopy
 def pix2degcoord(self, x, y, display_index=None):
     """Convert display pixel coordinates to degrees for this display.

     Coordinates addressed to a different display index are returned
     unchanged.
     """
     if display_index != self.getIndex():
         return x, y
     ppx, ppy = display2psychopyPix(x, y)
     x_deg = misc.pix2deg(ppx, self._psychopy_monitor)
     y_deg = misc.pix2deg(ppy, self._psychopy_monitor)
     return x_deg, y_deg
コード例 #3
0
 def pix2degcoord(self, x, y, display_index=None):
     """Translate pixel coordinates on this display into degrees.

     If *display_index* does not refer to this display, the input
     coordinates are passed through untouched.
     """
     if display_index != self.getIndex():
         return x, y
     px, py = display2psychopyPix(x, y)
     return (misc.pix2deg(px, self._psychopy_monitor),
             misc.pix2deg(py, self._psychopy_monitor))
コード例 #4
0
    def get_valid_data_sample(self):
        """
        Get one on-screen gaze sample and convert it to degrees.

        The coordinate system is centered on the middle of the screen
        (0, 0); the y axis is inverted relative to screen pixels.

        Returns:
            tuple: (res, x, y) where res is the tracker status code
            (1 means a sample was available) and x, y are gaze
            coordinates in degrees. When no valid on-screen sample was
            obtained, x and y stay at 0.
        """

        # Defaults returned when no valid on-screen sample is available.
        x = 0
        y = 0
        res, sampleData = self.et.get_sample()
        if res == 1:

            # Right-eye gaze position in screen pixels.
            xt = sampleData.rightEye.gazeX
            yt = sampleData.rightEye.gazeY

            # Only convert samples that fall on the screen. The original
            # wrapped a Python `or`-chain in np.any(), which short-circuits
            # before numpy is involved; a plain chained comparison on the
            # scalar sample is equivalent and clearer.
            if 0 < xt <= self.screenSize[0] and 0 < yt <= self.screenSize[1]:
                # Recenter so (0, 0) is the middle of the screen and flip y.
                x = xt - self.screenSize[0]/2
                y = -1 * (yt - self.screenSize[1]/2)
                x = misc.pix2deg(x, self.mon)
                y = misc.pix2deg(y, self.mon)

        return res, x, y
コード例 #5
0
 def calibratePoint(self, clock, last_pos, p):
     """Animate the calibration marker from *last_pos* to *p* and
     collect eyetracker calibration data for that point.

     The rocket image flies from the previous calibration point to the
     new one over 1.5 s while shrinking, then the point is registered
     with the eyetracker SDK and the screen keeps refreshing until the
     asynchronous add completes.

     Args:
         clock: psychopy clock used to time the animation; reset here.
         last_pos: previous point (Point2D, normalized 0..1 coords).
         p: target calibration point (Point2D, normalized 0..1 coords).
     """
     clock.reset()
     currentTime = clock.getTime()
     # Orient the rocket along the direction of travel (+90 offset;
     # presumably the image points "up" at 0 deg — confirm with asset).
     x_diff = p.x - last_pos.x
     y_diff = p.y - last_pos.y
     angle = degrees(atan2(y_diff, x_diff)) + 90
     self.rocket_img.setOri(angle)
     # Animation loop: linear interpolation from last_pos toward p over 1.5 s.
     while currentTime <= 1.5:
         rel_pos = Point2D()
         rel_pos.x = last_pos.x + ((currentTime / 1.5) * (p.x - last_pos.x))
         rel_pos.y = last_pos.y + ((currentTime / 1.5) * (p.y - last_pos.y))
         # Normalized position -> centered pixels (y inverted) -> degrees.
         self.rocket_img.setPos((pix2deg((rel_pos.x - 0.5) * self.win.size[0], self.win.monitor),
                                 pix2deg((0.5 - rel_pos.y) * self.win.size[1], self.win.monitor)))
         # Shrink the rocket as the remaining time (1.5 - t) decreases.
         self.rocket_img.setSize((pix2deg(110.67 * (1.5 - currentTime) + 4, self.win.monitor),
                                  pix2deg(196 * (1.5 - currentTime) + 4, self.win.monitor)))
         psychopy.event.getKeys()  # drain keyboard buffer each frame
         self.rocket_img.draw()
         self.win.flip()
         currentTime = clock.getTime()
     # Register the point; on_add_completed presumably sets
     # add_point_completed from the SDK callback (handler not visible here).
     self.add_point_completed = False
     self.eyetracker.AddCalibrationPoint(p, lambda error, r: self.on_add_completed(error, r))
     # Keep drawing while waiting for the asynchronous add to finish.
     while not self.add_point_completed:
         psychopy.event.getKeys()
         self.rocket_img.draw()
         self.win.flip()
コード例 #6
0
ファイル: revcontmod.py プロジェクト: jzeitoun/pacu-src
 def __enter__(self):
     """Create the grating and text stimuli for this session.

     Imports are deferred because importing psychopy.visual is slow.
     Returns self so the object can be used as a context manager.
     """
     from psychopy.visual import GratingStim  # eats some time
     from psychopy.visual import TextStim
     win = self.window.instance
     self.textstim = TextStim(win, text='')
     # for some reason x, y were swapped..
     # this may happen if monitor setup was portrait mode instead of landscape.
     width, height = misc.pix2deg(win.size, win.monitor)
     # Component-configured width (degrees) overrides the window-derived one.
     if self.component.width:
         width = self.component.width
     self.instance = GratingStim(
         win=win,
         tex='sin',
         units='deg',
         # NOTE: (height, width) order is intentional — see swap note above.
         size=(height, width)
         # size = misc.pix2deg(win.size, win.monitor)
     )
     tf = self.component.tfrequency
     # Angular-rate factors 2*pi*tf/period for contrast and opacity
     # modulation (used elsewhere in the class; not visible here).
     self.contrast_factor = tf * np.pi * (2 / self.component.ct_period)
     self.opacity_factor = tf * np.pi * (2 / self.component.op_period)
     try:
         self.interval = self.window.get_isi()
     except Exception as e:
         # NOTE(review): original exception is swallowed; consider chaining.
         raise ServiceRuntimeException(
             'Could not acquire window object. Please try again')
     return self
コード例 #7
0
 def get_all_samples(self, mon = None):
     """Return all buffered samples as (t, x, y).

     Args:
         mon: optional monitor object. When given, x/y are converted
             from pixels to degrees in a screen-centered coordinate
             system (y inverted); when any sample lies off-screen the
             raw pixel values are returned unchanged.

     Returns:
         tuple: (t, x, y) — timestamps and coordinates from the ring
         buffers, converted to degrees only when *mon* is supplied and
         every sample is on-screen.
     """
     x = self.rb_x.get_all()
     y = self.rb_y.get_all()
     t = self.rb_t.get_all()

     if mon:
         screenSize = mon.getSizePix()
         # Convert only when every sample is on-screen and valid;
         # (inverted from the original's empty `pass` branch).
         on_screen = not (np.any(x <= 0) or np.any(x > screenSize[0])
                          or np.any(y <= 0) or np.any(y > screenSize[1]))
         if on_screen:
             # Recenter on the middle of the screen and flip y.
             x = x - screenSize[0]/2
             y = -1 * (y - screenSize[1]/2)
             x = misc.pix2deg(x, mon)
             y = misc.pix2deg(y, mon)

     return t, x, y
コード例 #8
0
ファイル: controller.py プロジェクト: jzeitoun/pacu-src
 def get_instance(self):
     """Create the ImageStim used for presentation.

     The stimulus starts from a blank grayscale texture, is specified
     in degrees, and is sized to twice the window extent in degrees so
     it fully covers the screen.
     """
     from psychopy.visual import ImageStim  # deferred; import is slow
     blank = Image.new('L', (self.tex_size, self.tex_size))
     extent = misc.pix2deg(self.window.size, self.window.monitor) * 2
     return ImageStim(win=self.window, image=blank, units='deg', size=extent)
コード例 #9
0
    def get_all_samples(self, mon=None):
        """Return all buffered samples as (t, x, y).

        Args:
            mon: optional monitor object. When given, x/y are converted
                from pixels to degrees in a screen-centered coordinate
                system (y inverted); when any sample lies off-screen the
                raw pixel values are returned unchanged.

        Returns:
            tuple: (t, x, y) — timestamps and coordinates from the ring
            buffers, converted to degrees only when *mon* is supplied
            and every sample is on-screen.
        """
        x = self.rb_x.get_all()
        y = self.rb_y.get_all()
        t = self.rb_t.get_all()

        if mon:
            screenSize = mon.getSizePix()
            # Convert only when every sample is on-screen and valid;
            # (inverted from the original's empty `pass` branch).
            on_screen = not (np.any(x <= 0) or np.any(x > screenSize[0])
                             or np.any(y <= 0) or np.any(y > screenSize[1]))
            if on_screen:
                # Recenter on the middle of the screen and flip y.
                x = x - screenSize[0] / 2
                y = -1 * (y - screenSize[1] / 2)
                x = misc.pix2deg(x, mon)
                y = misc.pix2deg(y, mon)

        return t, x, y
コード例 #10
0
 def __enter__(self):
     """Create the grating and text stimuli for this session.

     Imports are deferred because importing psychopy.visual is slow.
     Returns self so the object can be used as a context manager.
     """
     from psychopy.visual import GratingStim  # eats some time
     from psychopy.visual import TextStim
     event.clearEvents()
     win = self.window.instance
     self.textstim = TextStim(win, text='')
     # Grating sized to twice the window extent (in degrees) so it
     # covers the whole screen.
     self.instance = GratingStim(
         win=win,
         units='deg',
         tex='sin',
         contrast=self.component.
         contrast,  # maybe this still could be used for initial contrast setup.
         size=misc.pix2deg(win.size, win.monitor) * 2)
     try:
         self.interval = self.window.get_isi()
     except Exception as e:
         # NOTE(review): original exception is swallowed; consider chaining.
         raise ServiceRuntimeException(
             'Could not acquire window object. Please try again')
     return self
コード例 #11
0
ファイル: Settings.py プロジェクト: simkovic/animacyStimuli
 def pix2deg(self,pix):
     """Convert *pix* pixels to degrees using this settings' monitor."""
     monitor = self.monitor
     return pix2deg(pix, monitor)
コード例 #12
0
ファイル: Settings.py プロジェクト: simkovic/animacyStimuli
 def norm2deg(self,xy):
     """Convert normalized coordinates *xy* to degrees via pixels."""
     return pix2deg(self.norm2pix(xy), self.monitor)
コード例 #13
0
from psychopy import core
from psychopy import misc
from psychopy.visual.windowwarp import Warper
from psychopy.monitors import Monitor
from psychopy.visual.grating import GratingStim
from psychopy.visual.window import Window

# Demo script: fullscreen spherically-warped sinusoidal grating.
# NOTE(review): Python 2 syntax (print statements below).
# Monitor width/distance follow PsychoPy convention (cm) — confirm they
# match the physical setup.
mon = Monitor('GenericMonitor', width=33.169, distance=10)
mon.setSizePix((1440, 900))
# FBO and stencil buffer are required by Warper.
win = Window((1440, 900),
             monitor=mon,
             fullscr=True,
             useFBO=True,
             allowStencil=True)
# Spherical warp correction with the eyepoint at screen center.
warper = Warper(win, warp='spherical', warpGridsize=300, eyepoint=[0.5, 0.5])
# Grating sized to the full window, converted from pixels to degrees.
stim = GratingStim(win=win,
                   units='deg',
                   tex='sin',
                   sf=0.1,
                   size=misc.pix2deg(win.size, win.monitor))
# Debug output: window size in pixels and in degrees.
print win.size
print 'win size', win.size
print 'mon size', win.monitor.getSizePix()
print 'as deg', misc.pix2deg(win.size, win.monitor)
stim.draw()
win.flip()
core.wait(0.5)
win.close()
コード例 #14
0
 def getGazePosition(self, gaze):
     """Return (left_x, left_y, right_x, right_y) gaze in degrees.

     Normalized gaze coordinates (0..1) are recentered on the middle of
     the screen, scaled to pixels, then converted to degrees; the y axis
     is inverted relative to the normalized coordinates.
     """
     monitor = self.win.monitor
     width = self.win.size[0]
     height = self.win.size[1]
     left_x = pix2deg((gaze.LeftGazePoint2D.x - 0.5) * width, monitor)
     left_y = pix2deg((0.5 - gaze.LeftGazePoint2D.y) * height, monitor)
     right_x = pix2deg((gaze.RightGazePoint2D.x - 0.5) * width, monitor)
     right_y = pix2deg((0.5 - gaze.RightGazePoint2D.y) * height, monitor)
     return (left_x, left_y, right_x, right_y)
コード例 #15
0
def showTrial(trajectories,maze=None,wind=None,highlightChase=False,
        origRefresh=100.0,gazeData=None,gazeDataRefresh=250.0):
    """
        shows the trial given by trajectories
        helpful for viewing trials
        trajectories - FxNx2 ndarray with coordinates of N agents
            for each of the F frames
        maze & wind - provide window and maze for drawing
        highlightChase - if True chaser and chasee are highlighted in color
        origRefresh - frame rate of the trajectory data
        gazeData & gazeDataRefresh - display gaze data (eyetracking)

        NOTE(review): Python 2 module (print statement below); also uses
        np.floor/np.ceil results directly as indices, which relies on
        older numpy accepting float indices — verify on current numpy.
    """
    
    # Create a display if none was supplied (idiomatic check would be
    # `wind is None`; kept as-is).
    if type(wind)==type(None):
        wind=Q.initDisplay(1000)
    core.wait(2)
    try:
        # Number of display frames after resampling trajectory data from
        # origRefresh to the monitor refresh rate.
        nrframes=int(trajectories.shape[0]/origRefresh*Q.refreshRate)
        cond=trajectories.shape[1]
        if gazeData!=None:
            # Gaze is drawn as one extra element, converted to degrees.
            cond+=1
            gazeData=pix2deg(gazeData,wind.monitor)
        # Element colors: all white; gaze element blue; chaser/chasee
        # highlighted in green/red when requested.
        clrs=np.ones((cond,3))
        if gazeData!=None: clrs[-1,[0,1]]=0
        if highlightChase:
            clrs[0,[0,2]]=0
            clrs[1,[1,2]]=0
        elem=visual.ElementArrayStim(wind,fieldShape='sqr',
            nElements=cond,sizes=Q.agentSize,rgbs=clrs,
            elementMask=RING,elementTex=None)
        if type(maze)!=type(None):
            maze.draw(wind)
        wind.flip()
        t0=core.getTime()
        for f in range(nrframes):
            # Resample trajectory frame index; interpolate between
            # source frames when the display frame falls between them.
            if origRefresh!=Q.refreshRate:
                fnew=f*origRefresh/Q.refreshRate
                if round(fnew)==fnew:
                    pos=trajectories[int(fnew),:,[X,Y]].transpose()
                else: # interpolate
                    pos0=trajectories[np.floor(fnew),:,[X,Y]].transpose()
                    pos1=trajectories[np.ceil(fnew),:,[X,Y]].transpose()
                    pos=pos0+(pos1-pos0)*(fnew-np.floor(fnew))
                    #print pos0[0],pos1[0],pos[0]
            else: pos=trajectories[f,:,[X,Y]].transpose()
            if gazeData!=None:
                # Same resampling/interpolation for the gaze stream.
                fnew=f*gazeDataRefresh/Q.refreshRate
                if np.ceil(fnew)>=gazeData.shape[0]:
                    break
                if round(fnew)==fnew:
                    #print int(fnew)
                    gaze=gazeData[int(fnew),:]
                else: # interpolate
                    pos0=gazeData[np.floor(fnew),:]
                    pos1=gazeData[np.ceil(fnew),:]
                    gaze=pos0+(pos1-pos0)*(fnew-np.floor(fnew))
                
                # Append gaze as the last row of the positions array.
                pos=np.array(np.concatenate((pos,np.matrix(gaze)),axis=0))
            showFrame(pos, wind=wind,elem=elem, highlightChase=highlightChase)
            #core.wait(0.02)
            # Allow the user to abort playback with Escape.
            for key in event.getKeys():
                if key in ['escape']:
                    wind.close()
                    return
                    #core.quit()
        wind.flip()
        print core.getTime() - t0
        core.wait(2)
        wind.close()
    except: 
        # Close the window before re-raising so the display is released.
        wind.close()
        raise
コード例 #16
0
    def doCalibration(self, calibrationPoints, calib=None):
        """Run (or update) the eyetracker calibration routine.

        Shows a rocket stimulus that flies between calibration points,
        collects data via the eyetracker SDK's asynchronous callbacks,
        computes the calibration, and draws a result plot for the
        operator to accept, redo, retry, or abort.

        Args:
            calibrationPoints: sequence of (x, y) points in normalized
                screen coordinates (0..1).
            calib: an existing calibration to update, or None to start
                a fresh one.

        Returns:
            bool: `can_accept` — True when calibration produced data the
            operator may accept. Returns None early when no eyetracker
            is connected.

        NOTE(review): Python 2 syntax (print statement below). The
        `while not <flag>: pass` loops busy-wait on flags presumably set
        by the on_* SDK callbacks (handlers not visible here).
        """
        # Can only calibrate with eyetracker
        if self.eyetracker is None:
            return

        # Points to calibrate
        self.points = calibrationPoints
        self.point_index = -1

        # Rocket image
        self.rocket_img = psychopy.visual.ImageStim(self.win, os.path.join(DATA_DIR, 'images', 'rocket.png'))
        # Results image
        img = Image.new('RGB', self.win.size)
        draw = ImageDraw.Draw(img)
        self.calresult = psychopy.visual.SimpleImageStim(self.win, img)
        # Results message, placed a quarter screen height below center
        self.calresultmsg = psychopy.visual.TextStim(self.win, pos=(pix2deg(0, self.win.monitor),
                                                                    pix2deg(-self.win.size[1] / 4, self.win.monitor)))
        # Calibration point labels (only created on a fresh calibration)
        if calib is None:
            self.point_labels=[]

        # Start calibration instruction
        self.calresultmsg.setText('Start calibration:SPACE')
        # Left eye status
        self.left_eye_status = psychopy.visual.Circle(self.win, radius=pix2deg(40, self.win.monitor),
                                                      pos=(pix2deg(-50, self.win.monitor),
                                                           pix2deg(-self.win.size[1] / 3, self.win.monitor)))
        # Right eye status
        self.right_eye_status = psychopy.visual.Circle(self.win, radius=pix2deg(40, self.win.monitor),
                                                       pos=(pix2deg(50, self.win.monitor),
                                                            pix2deg(-self.win.size[1] / 3, self.win.monitor)))

        # Reset gaze and event data and start tracking
        self.gazeData = []
        self.eventData = []
        self.eyetracker.events.OnGazeDataReceived += self.on_gazedata
        self.eyetracker.StartTracking()

        # Wait until space key is hit; meanwhile show eye-detection
        # status: green when the last sample's validity != 4, red otherwise.
        waitkey = True
        while waitkey:
            for key in psychopy.event.getKeys():
                if key == 'space':
                    waitkey = False
            self.rocket_img.draw()
            self.calresultmsg.draw()
            self.left_eye_status.fillColor = 'red'
            self.right_eye_status.fillColor = 'red'
            if len(self.gazeData):
                if self.gazeData[-1].LeftValidity != 4:
                    self.left_eye_status.fillColor = 'green'
                if self.gazeData[-1].RightValidity != 4:
                    self.right_eye_status.fillColor = 'green'
            self.left_eye_status.draw()
            self.right_eye_status.draw()
            self.win.flip()

        # Stop tracking and reset gaze data
        self.eyetracker.StopTracking()
        self.eyetracker.events.OnGazeDataReceived -= self.on_gazedata
        self.gazeData = []
        self.eventData = []

        # Initialize calibration (busy-wait for the async callback)
        self.initcalibration_completed = False
        print "Init calibration"
        self.eyetracker.StartCalibration(lambda error, r: self.on_calib_start(error, r))
        while not self.initcalibration_completed:
            pass

        # If we're updating a calibration
        if calib is not None:
            # Set calibration (busy-wait for the async callback)
            self.setcalibration_completed=False
            self.eyetracker.SetCalibration(self.calib,lambda error, r: self.on_calib_set(error, r))
            while not self.setcalibration_completed:
                pass

        # Calibrate each point, animating from the previous one
        # (starting at screen center, 0.5/0.5 in normalized coords)
        clock = psychopy.core.Clock()
        last_pos = Point2D(x=0.5, y=0.5)
        for self.point_index in range(len(self.points)):
            p = Point2D()
            p.x, p.y = self.points[self.point_index]
            self.calibratePoint(clock, last_pos, p)
            last_pos = Point2D(x=p.x, y=p.y)

        # Compute calibration (busy-wait for the async callback)
        self.computeCalibration_completed = False
        self.computeCalibration_succeeded = False
        self.eyetracker.ComputeCalibration(lambda error, r: self.on_calib_compute(error, r))
        while not self.computeCalibration_completed:
            pass
        # Stop calibration
        self.eyetracker.StopCalibration(None)

        self.win.flip()

        # Get calibration (busy-wait for the async callback)
        self.getcalibration_completed = False
        self.calib = self.eyetracker.GetCalibration(lambda error, calib: self.on_calib_response(error, calib))
        while not self.getcalibration_completed:
            pass

        # Gray background for the result plot
        draw.rectangle(((0, 0), tuple(self.win.size)), fill=(128, 128, 128))

        can_accept=False
        if not self.computeCalibration_succeeded:
            #computeCalibration failed.
            self.calresultmsg.setText('Not enough data was collected (Retry:r/Abort:ESC)')

        elif self.calib == None:
            #no calibration data
            self.calresultmsg.setText('No calibration data (Retry:r/Abort:ESC)')
        else:
            can_accept=True
            # Group collected samples by their true calibration point
            point_list = []
            points = {}
            for data in self.calib.plot_data:
                points[data.true_point] = {'left': data.left, 'right': data.right}
                point_list.append(data.true_point)

            if len(point_list) == 0:
                self.calresultmsg.setText('No true calibration data (Retry:r/Abort:ESC)')

            else:
                # Draw target circles and (on fresh calibration) index labels
                for idx,(x,y) in enumerate(self.points):
                    draw.ellipse(((x * self.win.size[0] - 10,
                                   y * self.win.size[1] - 10),
                                  (x * self.win.size[0] + 10,
                                   y * self.win.size[1] + 10)),
                                 outline=(0, 0, 0))
                    if calib is None:
                        num_txt=psychopy.visual.TextStim(self.win, pos=(pix2deg((x-0.5) * self.win.size[0] - 10, self.win.monitor),
                                                                        pix2deg((0.5-y) * self.win.size[1] - 20, self.win.monitor)))
                        num_txt.setText(str(idx+1))
                        self.point_labels.append(num_txt)
                # Draw error lines from each true point to the mapped gaze
                # point: red for the left eye, green for the right eye.
                for idx,p in enumerate(point_list):
                    d = points[p]
                    draw.ellipse(((p.x * self.win.size[0] - 10,
                                   p.y * self.win.size[1] - 10),
                                  (p.x * self.win.size[0] + 10,
                                   p.y * self.win.size[1] + 10)),
                                 outline=(0, 0, 0))
                    if d['left'].validity == 1:
                        draw.line(((p.x * self.win.size[0],
                                    p.y * self.win.size[1]),
                                   (d['left'].map_point.x * self.win.size[0],
                                    d['left'].map_point.y * self.win.size[1])), fill=(255, 0, 0))
                    if d['right'].validity == 1:
                        draw.line(((p.x * self.win.size[0],
                                    p.y * self.win.size[1]),
                                   (d['right'].map_point.x * self.win.size[0],
                                    d['right'].map_point.y * self.win.size[1])), fill=(0, 255, 0))

                self.calresultmsg.setText('Accept calibration results (Accept:a/Redo:#/Retry:r/Abort:ESC)')

            self.calresult.setImage(img)

        return can_accept
コード例 #17
0
ファイル: prfStim_Bars.py プロジェクト: gitter-badger/pyprf
# save a log file and set level for msg to be received
logFile = logging.LogFile(logFileName + '.log', level=logging.INFO)
logging.console.setLevel(logging.WARNING)  # set console to receive warnings

#  %%
"""MONITOR AND WINDOW"""
# set monitor information:
distanceMon = 99  # [99] in scanner
widthMon = 30  # [30] in scanner
PixW = 1920  # [1920.0] in scanner
PixH = 1200  # [1200.0] in scanner

moni = monitors.Monitor('testMonitor', width=widthMon, distance=distanceMon)
moni.setSizePix([PixW, PixH])  # [1920.0, 1080.0] in psychoph lab
# NOTE(review): pixCover (stimulated area in pixels) is defined earlier
# in the original file — not visible in this chunk.
degCover = misc.pix2deg(pixCover, moni)

# log monitor info
# NOTE(review): Python 2 script (uses the `unicode` builtin).
logFile.write('MonitorDistance=' + unicode(distanceMon) + 'cm' + '\n')
logFile.write('MonitorWidth=' + unicode(widthMon) + 'cm' + '\n')
logFile.write('PixelWidth=' + unicode(PixW) + '\n')
logFile.write('PixelHeight=' + unicode(PixH) + '\n')
myWin = visual.Window(
    size=(PixW, PixH),
    screen=0,
    winType='pyglet',  # winType : None, ‘pyglet’, ‘pygame’
    allowGUI=False,
    allowStencil=True,
コード例 #18
0
ファイル: Tools.py プロジェクト: simkovic/animacyStimuli
def showTrial(trajectories,qsettings,wind=None,highlightChase=False,
        origRefresh=None,gazeData=None,gazeDataRefresh=250.0):
    """
        shows the trial as given by TRAJECTORIES

        trajectories - FxNx2 array with agent coordinates per frame
        qsettings - settings object providing display, refresh rate
            and agent size
        wind - optional existing window (created here when None)
        highlightChase - if True the first two agents are colored
        origRefresh - frame rate of the trajectory data (defaults to
            the display refresh rate)
        gazeData & gazeDataRefresh - optional eyetracking overlay

        NOTE(review): Python 2 module (print statement below); also uses
        np.floor/np.ceil results directly as indices, which relies on
        older numpy accepting float indices — verify on current numpy.
    """
    Q=qsettings
    if wind is None:  wind=Q.initDisplay(1000)
    core.wait(2)
    try:
        if origRefresh is None: origRefresh=Q.refreshRate
        # Number of display frames after resampling trajectory data from
        # origRefresh to the monitor refresh rate.
        nrframes=int(trajectories.shape[0]/origRefresh*Q.refreshRate)
        cond=trajectories.shape[1]
        if gazeData!=None:
            # Gaze is drawn as one extra element, converted to degrees.
            cond+=1
            gazeData=pix2deg(gazeData,wind.monitor)
        # Element colors: all white; gaze element blue; chaser/chasee
        # highlighted in green/red when requested.
        clrs=np.ones((cond,3))
        if gazeData!=None: clrs[-1,[0,1]]=0
        if highlightChase:
            clrs[0,[0,2]]=0
            clrs[1,[1,2]]=0
        elem=visual.ElementArrayStim(wind,fieldShape='sqr',
            nElements=cond,sizes=Q.agentSize,colors=clrs,
            elementMask='circle',elementTex=None,interpolate=False)
        #Q.maze.draw(wind)
        wind.flip()
        t0=core.getTime()
        for f in range(nrframes):
            # Resample trajectory frame index; interpolate between
            # source frames when the display frame falls between them.
            if origRefresh!=Q.refreshRate:
                fnew=f*origRefresh/Q.refreshRate
                if round(fnew)==fnew:
                    pos=trajectories[int(fnew),:,[X,Y]].transpose()
                else: # interpolate
                    pos0=trajectories[np.floor(fnew),:,[X,Y]].transpose()
                    pos1=trajectories[np.ceil(fnew),:,[X,Y]].transpose()
                    pos=pos0+(pos1-pos0)*(fnew-np.floor(fnew))
                    #print pos0[0],pos1[0],pos[0]
            else: pos=trajectories[f,:,[X,Y]].transpose()
            if gazeData!=None:
                # Same resampling/interpolation for the gaze stream.
                fnew=f*gazeDataRefresh/Q.refreshRate
                if np.ceil(fnew)>=gazeData.shape[0]:
                    break
                if round(fnew)==fnew:
                    #print int(fnew)
                    gaze=gazeData[int(fnew),:]
                else: # interpolate
                    pos0=gazeData[np.floor(fnew),:]
                    pos1=gazeData[np.ceil(fnew),:]
                    gaze=pos0+(pos1-pos0)*(fnew-np.floor(fnew))
                
                # Append gaze as the last row of the positions array.
                pos=np.array(np.concatenate((pos,np.matrix(gaze)),axis=0))
            showFrame(pos, wind=wind,elem=elem)
            #core.wait(0.02)
            # Allow the user to abort playback with Escape.
            for key in event.getKeys():
                if key in ['escape']:
                    wind.close()
                    return
                    #core.quit()
        wind.flip()
        print core.getTime() - t0
        core.wait(2)
        wind.close()
    except: 
        # Close the window before re-raising so the display is released.
        wind.close()
        raise