def generateExperiment(vpn):
    ''' Generate trajectory and order files for the quadrant experiment.

        vpn - tuple of ints, each value gives the subject id

        For each subject a folder vp<id> is created inside the input
        directory and one trajectory file per trial is written, plus
        three block-order permutation files and the trajectory settings.
        NOTE(review): body is identical to generateGao10e3() below -
        looks like an older duplicate; confirm before editing either.
    '''
    # four single-cell mazes, one centred in each display quadrant
    offs=5.875; sz=(2*offs+Q.agentSize,2*offs+Q.agentSize)
    quadrants=[EmptyMaze((1,1),dispSize=sz,pos=(offs,offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(-offs,offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(offs,-offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(-offs,-offs),lw2cwRatio=0)]
    nrtrials=42; os.chdir('..');os.chdir('input/')
    for vp in vpn:
        vpname='vp%03d' % vp;os.mkdir(vpname);os.chdir(vpname)
        for trial in range(nrtrials):
            # subjects 301-399 (except 350) get no new trajectory files;
            # only the order files below are (re)generated for them
            if vp>300 and vp<400 and vp!=350: continue
            trajectories=[]
            for k in range(len(quadrants)):
                # condition 5 agents per quadrant; the first two agents
                # (presumably chaser and chasee - TODO confirm) are dropped
                traj=generateTrial(5,maze=quadrants[k],
                    rejectionDistance=0.0)
                trajectories.append(traj[:,2:,:])
            fn='%strial%03d'% (vpname,trial)
            np.save(fn,np.concatenate(trajectories,axis=1))
        # one random presentation order per block
        np.save('order%sb%d'% (vpname,0),np.random.permutation(nrtrials))
        np.save('order%sb%d'% (vpname,1),np.random.permutation(nrtrials))
        np.save('order%sb%d'% (vpname,2),np.random.permutation(nrtrials))
        Q.save('SettingsTraj.pkl')
        os.chdir('..')
def generateGao10e3(vpn):
    ''' Experiment 3 from Gao et al. (2010)
        Gao, T., McCarthy, G., & Scholl, B. J. (2010). The Wolfpack Effect
        Perception of Animacy Irresistibly Influences Interactive Behavior.
        Psychological science, 21(12), 1845-1853.
        vpn - tuple of ints, each value gives the subject id

        Creates a folder vp<id> inside the input directory and writes one
        trajectory file per trial, three block-order permutation files and
        the trajectory settings.
    '''
    # four single-cell mazes, one centred in each display quadrant
    offs=5.875; sz=(2*offs+Q.agentSize,2*offs+Q.agentSize)
    quadrants=[EmptyMaze((1,1),dispSize=sz,pos=(offs,offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(-offs,offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(offs,-offs),lw2cwRatio=0),
        EmptyMaze((1,1),dispSize=sz,pos=(-offs,-offs),lw2cwRatio=0)]
    nrtrials=42; os.chdir('..');os.chdir('input/')
    for vp in vpn:
        vpname='vp%03d' % vp;os.mkdir(vpname);os.chdir(vpname)
        for trial in range(nrtrials):
            # subjects 301-399 (except 350) get no new trajectory files;
            # only the order files below are (re)generated for them
            if vp>300 and vp<400 and vp!=350: continue
            trajectories=[]
            for k in range(len(quadrants)):
                # condition 5 agents per quadrant; the first two agents
                # (presumably chaser and chasee - TODO confirm) are dropped
                traj=generateTrial(5,maze=quadrants[k],
                    rejectionDistance=0.0)
                trajectories.append(traj[:,2:,:])
            fn='%strial%03d'% (vpname,trial)
            np.save(fn,np.concatenate(trajectories,axis=1))
        # one random presentation order per block
        np.save('order%sb%d'% (vpname,0),np.random.permutation(nrtrials))
        np.save('order%sb%d'% (vpname,1),np.random.permutation(nrtrials))
        np.save('order%sb%d'% (vpname,2),np.random.permutation(nrtrials))
        Q.save('SettingsTraj.pkl')
        os.chdir('..')
def PFinit(vp,event,suf=''):
    ''' Initialize the perceptive-field (PF) extraction for one subject/event.

        vp    - subject id, forwarded to initPath()
        event - event index; selects the number of parallel jobs
                (negative means a single job)
        suf   - suffix appended to the output file names

        Writes three files to the input path: the job stack
        (stackPF.npy), the current settings (PF<suf>.q) and the
        extraction parameters (PF<suf>.pars, a pickled dict).
    '''
    path,inpath,fp=initPath(vp,event)
    # number of parallel jobs for this event type
    if event>=0: N=[50,15,8,2][event]
    else: N=1
    # extraction parameters consumed by PFextract(): output size, nr of
    # rotations, window width (deg), sampling rate and smoothing sigmas
    dat={'N':N,'os':64,'rot':1,
        'width':10,'hz':85.0,'SX':0.3,'SY':0.3,'ST':40}
    np.save(inpath+'stackPF.npy',range(dat['N']+1))
    Q.save(inpath+'PF%s.q'%suf)
    # context manager guarantees the file is closed even if pickling
    # fails (the original open/close pair leaked the handle on error)
    with open(inpath+'PF%s.pars'%suf,'w') as f:
        pickle.dump(dat,f)
def __init__(self,of=None):
    ''' Present the subject-info dialog, log the responses and set up
        the display and text stimuli.

        of - optional output file name; when None the responses are
             appended to vp<id>.res in the output path
    '''
    # ask subject the information
    myDlg = gui.Dlg(title="Experiment zur Bewegungswahrnehmung",pos=Q.guiPos)
    myDlg.addText('VP Infos')
    myDlg.addField('Subject ID:',0)
    myDlg.addField('Block:',0)
    myDlg.addField('Alter:', 21) #age
    myDlg.addField('Geschlecht (m/w):',choices=(u'weiblich',u'maennlich'))#gender
    myDlg.addField(u'Händigkeit:',choices=('rechts','links'))# handedness
    myDlg.addField(u'Dominantes Auge:',choices=('rechts','links')) #dominant eye
    myDlg.addField(u'Sehschärfe: ',choices=('korrigiert','normal')) # acuity
    # hours per week spent at screen
    myDlg.addField(u'Wochenstunden vor dem Komputerbildschirm:',
        choices=('0','0-2','2-5','5-10','10-20','20-40','40+'))
    # hours per week playing computer games
    myDlg.addField(u'Wochenstunden Komputerspielen:',
        choices=('0','0-2','2-5','5-9','10-20','20+'))
    myDlg.addField('Starte bei Trial:', 0) # starting trial, for debug only
    myDlg.addField(u'Stimulus:',choices=('dart','eyes')) # shape of the stimulus
    myDlg.show()#show dialog and wait for OK or Cancel
    vpInfo = myDlg.data
    if myDlg.OK:#then the user pressed OK
        # append the responses to the shared subject-info log
        subinf = open(Q.outputPath+'vpinfo.res','a')
        subinf.write('%d\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n'% tuple(vpInfo))
        subinf.close()
    else:
        print 'Experiment cancelled'
        return
    self.id=vpInfo[0]
    self.block=vpInfo[1]
    self.initTrial=vpInfo[-2]
    self.isDart= vpInfo[-1] == 'dart'
    # save settings, which we will use
    Q.save(Q.inputPath+'vp%03d'%self.id+Q.delim+'SettingsExp.pkl')
    # per-subject result file (kept open for the whole session)
    if of==None: self.output = open(Q.outputPath+'vp%03d.res'%self.id,'a')
    else: self.output = open(Q.outputPath+of,'a')
    #init stuff
    self.wind=Q.initDisplay()
    # init text
    fs=1 # font size
    self.text1=visual.TextStim(self.wind,text='Error',wrapWidth=30,pos=[0,2])
    self.text2=visual.TextStim(self.wind,text='Error',wrapWidth=30,pos=[0,0])
    self.text3=visual.TextStim(self.wind, text='Error',wrapWidth=30,pos=[0,-10])
    self.text1.setHeight(fs)
    self.text2.setHeight(fs)
    self.text3.setHeight(fs)
    self.f=0 # frame counter
    # load the pre-generated trial presentation order for this block
    self.permut=np.load(Q.inputPath+'vp%03d'%self.id+Q.delim
        +'ordervp%03db%d.npy'%(self.id,self.block))
    # the order file may carry extra per-trial data in additional columns;
    # split those off so permut remains a flat index vector
    if len(self.permut.shape)>1 and self.permut.shape[1]>1:
        self.data=self.permut[:,1:]
        self.permut=self.permut[:,0]
    self.nrtrials=self.permut.size
def __init__(self, gazeData,phase=1,wind=None,eyes=1):
    ''' Set up a gaze-replay display.

        gazeData - object providing getGaze() and an hz attribute
        phase    - experiment phase to replay
        wind     - psychopy window; created lazily when None
        eyes     - nr of eyes to display
    '''
    from ETData import interpRange
    self.gazeData=gazeData;self.eyes=eyes
    self.wind=wind;self.phase=phase;self.behsel=None
    g=self.gazeData.getGaze()
    # trial duration in ms derived from nr of gaze samples
    self.trialDur=g.shape[0]/self.gazeData.hz*1000
    # resample the gaze data from the eyetracker rate to the display
    # refresh rate (tcur gives the target time points in ms)
    step=1000/float(Q.refreshRate)
    tcur=np.arange(0,self.trialDur-3*step,step)
    self.t=tcur
    step=1000/float(gazeData.hz)
    t=np.arange(0,g.shape[0]*step,step)
    self.gaze=np.array((interpRange(t,g[:,1],tcur),
        interpRange(t,g[:,2],tcur)))
    self.pos=np.ones((self.gaze.shape[0],1,2))*np.nan
    try:
        if type(self.wind)==type(None): self.wind=Q.initDisplay()
        # single element: the gaze cursor
        self.cond=1
        self.gazeDataRefresh=gazeData.hz
        clrs=np.ones((self.cond,3))
        self.elem=visual.ElementArrayStim(self.wind,fieldShape='sqr',
            nElements=self.cond,sizes=Q.agentSize,rgbs=clrs,
            elementMask='circle',elementTex=None)
    except:
        # never leave a dangling fullscreen window on failure
        self.wind.close()
        raise
def PFparallel(vp,event,suf=''):
    ''' please run PFinit() first
        suf - output name suffix

        Worker loop for the parallel perceptive-field extraction.
        The file stackPF.npy serves as a crude shared job queue:
        each worker pops a job id, immediately writes the shrunken
        stack back, processes the job and then re-reads the stack
        (which other workers may have modified in the meantime).
    '''
    path,inpath,fp=initPath(vp,event)
    # direction-gaze data; keep only the x,y coordinates
    E=np.load(inpath+'DG%s.npy'%suf)[:,:,:,:2]
    print E.shape
    stack=np.load(inpath+'stackPF.npy').tolist()
    f=open(inpath+'PF%s.pars'%suf,'r');dat=pickle.load(f);f.close()
    N=dat['N']
    wind=Q.initDisplay()
    elem=visual.ElementArrayStim(wind,fieldShape='sqr',
        nElements=E.shape[1], sizes=Q.agentSize,
        elementMask=RING,elementTex=None,colors='white')
    while len(stack):
        jobid=stack.pop(0)
        # publish the remaining jobs before starting work on this one
        np.save(inpath+'stackPF.npy',stack)
        PFextract(E,[jobid,N],wind=wind,
            elem=elem,inpath=inpath,suf=suf)
        # re-read the queue; retry on IOError since another worker may
        # be writing the file at the same moment
        loaded=False
        while not loaded:
            try:
                stack=np.load(inpath+'stackPF.npy').tolist()
                loaded=True
            except IOError:
                print 'IOError'
                core.wait(1)
    wind.close()
def generateBabyExperiment(vpn,nrtrials=10,blocks=1,conditions=[6,8],rd=0,pdch=None, dispSize=29,maze=None): '''my work in progress, baby experiment''' #os.chdir('..') if not pdch is None: Q.setpDirChange(pdch) os.chdir(Q.inputPath) mazes=[] Q.nrframes+= Q.refreshRate *5 print 'Generating Trajectories' for vp in vpn: vpname='vp%03d' % vp os.mkdir(vpname) os.chdir(vpname) r=[] phase=[0,1,1,2] for i in range((len(conditions)*nrtrials-len(phase))/2): if np.random.rand()>0.5: phase.extend([1,2]) else: phase.extend([2,1]) print 'phase', phase for block in range(blocks): i=0 for condition in conditions: for trial in range(nrtrials): if condition==conditions[0]: if np.random.rand()>0.5: r.extend([trial, trial+nrtrials]) else: r.extend([trial+nrtrials,trial]) trajectories=None while trajectories ==None: trajectories=generateTrial(condition, maze=EmptyMaze((1,1),dispSize=(dispSize,dispSize)),rejectionDistance=rd) #fn='%str%03dcond%02d'% (vpname,trial,conditions[order[trial]]) #fn = 'trial%03d' % trial trajectories=trajectories[(Q.refreshRate*5):] #print trajectories.shape fn='%sb%dtrial%03d'% (vpname,block,i) i+=1 print fn np.save(fn,trajectories) #r=np.random.permutation(nrtrials*len(conditions)) r=np.array(r) print r np.save('order%sb%d'% (vpname,block),r) np.save('phase%sb%d'% (vpname,block),phase) Q.save('SettingsTraj.pkl') os.chdir('..') os.chdir('..')
def generateGao10e4(vpn): ''' Experiment 4 from Gao et al. (2010) Gao, T., McCarthy, G., & Scholl, B. J. (2010). The Wolfpack Effect Perception of Animacy Irresistibly Influences Interactive Behavior. Psychological science, 21(12), 1845-1853. vpn - tuple of ints, each value gives the subject id ''' # gao10e4 settings maze=EmptyMaze((1,1),dispSize=(18,18),lw2cwRatio=0) Q.setTrialDur(8); nrtrials=90; Q.setAspeed(5.1) os.chdir('..');os.chdir('input/') for vp in vpn: vpname='vp%03d' % vp;os.mkdir(vpname);os.chdir(vpname) for trial in range(nrtrials): if vp>400 and vp<500: continue trajectories=generateTrial(12,maze=maze, rejectionDistance=0.0) fn='%strial%03d'% (vpname,trial); np.save(fn,trajectories[:,2:,:]) np.save('order%sb0'% (vpname),np.random.permutation(nrtrials)) np.save('order%sb1'% (vpname),np.random.permutation(nrtrials)) np.save('order%sb2'% (vpname),np.random.permutation(nrtrials)) Q.save('SettingsTraj.pkl') os.chdir('..')
def generateMixedExperiment(vpn,trialstotal,blocks=4,condition=14, dispSize=26,maze=None,probeTrials=False): '''my work in progress, experiment with chatch trials''' #os.chdir('..') os.chdir(Q.inputPath) mazes=[] if probeTrials: bs=range(0,blocks+1) else: bs=range(22,blocks+1) print 'Generating Trajectories' for vp in vpn: vpname='vp%03d' % vp #os.mkdir(vpname) os.chdir(vpname) Q.save('SettingsTraj.pkl') for block in bs: if block ==0: nrtrials=10 else: nrtrials=trialstotal for trial in range(nrtrials): if vp>1 and vp<10: continue if trial >= nrtrials*0.9: rd=0.0 else: rd=3.0 trajectories=None while trajectories ==None: trajectories=generateTrial(condition, maze=EmptyMaze((1,1),dispSize=(dispSize,dispSize)),rejectionDistance=rd) #fn='%str%03dcond%02d'% (vpname,trial,conditions[order[trial]]) #fn = 'trial%03d' % trial fn='%sb%dtrial%03d'% (vpname,block,trial) print fn np.save(fn,trajectories) while True:# check that more than 1 consecutive control trials do not occur r=np.random.permutation(nrtrials) r2=np.roll(np.random.permutation(nrtrials)>=nrtrials-0.1*nrtrials,1) #r3=np.roll(np.random.permutation(50)>=45,2) if not np.any(np.bitwise_and(r,r2)): break np.save('order%sb%d'% (vpname,block),r) os.chdir('..') os.chdir('..')
def traj2movie(traj,width=5,outsize=64,elem=None,wind=None,rot=2,
        hz=85.0,SX=0.3,SY=0.3,ST=20):
    '''
        extracts window at position 0,0 of width WIDTH deg
        from trajectories and subsamples to OUTSIZExOUTSIZE pixels
        HZ - trajectory sampling frequency
        ROT - int number of rotations to output or float angle in radians
        SX,SY,ST - standard deviation of gaussian filter in deg,deg,ms

        Returns uint8 array of shape (nrframes,outsize,outsize,rot).
    '''
    # create window/elements lazily; remember whether we own the window
    if type(wind)==type(None):
        close=True; wind=Q.initDisplay()
    else: close=False
    if type(elem)==type(None):
        elem=visual.ElementArrayStim(wind,fieldShape='sqr',
            nElements=traj.shape[1], sizes=Q.agentSize,
            elementMask=RING,elementTex=None,colors='white')
    try:
        # smoothing sigmas in (frames, pix, pix) for gaussian_filter
        sig=[ST/1000.0*hz]
        sig.append(deg2pix(SX,wind.monitor))
        sig.append(deg2pix(SY,wind.monitor))
        w=int(np.round(deg2pix(width,wind.monitor)/2.0))
        D=np.zeros((traj.shape[0],outsize,outsize,rot),dtype=np.uint8)
        # first pass: render each frame and crop a 3w x 3w region around
        # the display centre (extra margin so rotation keeps full cover)
        Ims=[]
        for f in range(0,traj.shape[0]):
            Im=position2image(traj[f,:,:],wind=wind)
            cx=int(Im.size[0]/2.0);cy=int(Im.size[1]/2.0)
            Im=Im.crop(np.int32((cx-1.5*w,cy-1.5*w,cx+1.5*w,cy+1.5*w)))
            Im=np.asarray(Im,dtype=np.float32)
            Ims.append(Im)
        Ims=np.array(Ims)
        # spatio-temporal gaussian smoothing over (frame,y,x)
        if np.any(np.array(sig)!=0):Ims=gaussian_filter(Ims,sig)
        if np.any(Ims>255): print 'warning, too large'
        if np.any(Ims<0): print 'warning, too small'
        Ims=np.uint8(np.round(Ims))
        # second pass: rotate, crop to 2w x 2w and subsample
        for f in range(Ims.shape[0]):
            Im=Image.fromarray(np.array(Ims[f,:,:]))
            bb=int(Im.size[0]/2.0)
            I=Im.crop((bb-w,bb-w,bb+w,bb+w))
            I=np.asarray(I.resize((outsize,outsize),Image.ANTIALIAS))
            D[f,:,:,0]=I
            for r in range(1,rot):
                # rotated copies at multiples of 90/rot degrees
                I2=Im.rotate(90/float(rot)*r)
                I2=I2.crop((bb-w,bb-w,bb+w,bb+w))
                I2=np.asarray(I2.resize((outsize,outsize),Image.ANTIALIAS))
                D[f,:,:,r]=I2
        if close: wind.close()
        return D
    except:
        # close the window we created before propagating the error
        if close: wind.close()
        raise
def PFextract(E,part=[0,1],wind=None,elem=None,inpath='',suf=''):
    """ part[0] - current part
        part[1] - total number of parts in the paralel computation
        wind - psychopy window,
        elem - psychopy ElementArrayStim
        inpath - input path
        suf - output name suffix

        Renders the slice of trajectories assigned to this part into
        movie windows via traj2movie() and saves the result as
        PF<suf>/PF<part>.npy.
    """
    f=open(inpath+'PF%s.pars'%suf,'r');dat=pickle.load(f);f.close()
    # slice of E handled by this part
    inc=E.shape[0]/part[1]
    start=part[0]*inc
    ende=min((part[0]+1)*inc,E.shape[0])
    print start,ende,E.shape
    os=dat['os'];rot=dat['rot']
    # per-trial rotation angles computed beforehand
    phis=np.load(inpath+'phi%s.npy'%suf)
    D=np.zeros((ende-start,E.shape[1],os,os,rot),dtype=np.uint8)
    try:
        # create window/elements lazily; remember whether we own the window
        if type(wind)==type(None):
            close=True; wind=Q.initDisplay()
        else: close=False
        if elem==None:
            elem=visual.ElementArrayStim(wind,fieldShape='sqr',
                nElements=E.shape[1], sizes=Q.agentSize,
                elementMask=RING,elementTex=None,colors='white')
        for i in range(ende-start):
            phi=phis[i+start]# rotate clockwise by phi
            R=np.array([[np.cos(phi),np.sin(phi)],
                [-np.sin(phi),np.cos(phi)]])
            temp=np.copy(E[i+start,:,:,:])
            # rotate each agent's trajectory; NOTE(review): the agent
            # count 14 is hard-coded here - confirm it matches the data
            for a in range(14):temp[:,a,:]=R.dot(temp[:,a,:].T).T
            D[i,:,:,:,:]=traj2movie(temp,outsize=os,
                elem=elem,wind=wind,rot=rot,width=dat['width'],
                hz=dat['hz'],SX=dat['SX'],SY=dat['SY'],ST=dat['ST'])
            #from matustools.matusplotlib import ndarray2gif
            #outt=np.float32(D[i,:,:,:,0].T)
            #outt-= np.min(outt)
            #outt/= np.max(outt)
            #ndarray2gif('test%d'%i,outt)
            #if i==3: bla
        if close: wind.close()
        # move the trial axis to the end: (part,os,os,rot,trial)
        PF=np.rollaxis(D,1,5)
        if not oss.path.exists(inpath+'PF%s/'%suf): oss.makedirs(inpath+'PF%s/'%suf)
        if len(part)==2: np.save(inpath+'PF%s/PF%03d.npy'%(suf,part[0]),PF)
        else: np.save('PF.npy',PF)
    except:
        # close the window we created before propagating the error
        if close: wind.close()
        raise
def __init__(self,gazeData,maze=None,wind=None,
        highlightChase=False,phase=1,eyes=1,coderid=0):
    ''' Combine agent trajectories and gaze samples on one time base
        and set up the display elements for replay.

        gazeData       - eyetracking data object
        maze           - optional maze to draw as background
        wind           - psychopy window; created lazily when None
        highlightChase - color the first two agents differently
        phase          - experiment phase to replay
        eyes           - 1: averaged gaze point, 2: both eyes
        coderid        - unused here (kept for subclasses - TODO confirm)
    '''
    self.wind=wind
    self.phase=phase
    # nr of agents; zero when no trajectory data is attached
    try: self.cond=gazeData.oldtraj.shape[1]
    except AttributeError: self.cond=0
    self.pos=[]
    self.eyes=eyes
    self.behsel=None
    # determine common time intervals
    g=gazeData.getGaze(phase)
    ts=max(g[0,0], gazeData.fs[0,1])
    te=min(g[-1,0],gazeData.fs[-1,1])
    self.t=np.linspace(ts,te,int(round((te-ts)*Q.refreshRate/1000.0)))
    # put data together
    g=gazeData.getGaze(phase,hz=self.t)
    if self.cond: tr=gazeData.getTraj(hz=self.t)
    else: tr=np.zeros((g.shape[0],0,2))*np.nan
    # columns 7,8 hold the averaged gaze point, 1,2 and 4,5 the
    # individual eyes - TODO confirm against getGaze()
    if eyes==1:
        g=g[:,[7,8]];g=np.array(g,ndmin=3)
    else:
        g=np.array([g[:,[1,2]],g[:,[4,5]]])
    # reorder to (time, eye, xy) so gaze can be appended to the agents
    g=np.rollaxis(g,0,2)
    self.pos=np.concatenate([tr,g],axis=1)
    try:
        if type(self.wind)==type(None): self.wind=Q.initDisplay()
        #if gazeData!=None:
        # one extra element per displayed gaze point
        self.cond+=1
        if eyes==2: self.cond+=1
        clrs=np.ones((self.cond,3))
        # gaze point(s) drawn in black (rgb -1)
        clrs[-1,[0,1,2]]=-1
        if eyes==2: clrs[-2,[0,1,2]]=-1
        if highlightChase:
            clrs[0,[0,2]]=0; clrs[1,[0,2]]=-1
        self.elem=visual.ElementArrayStim(self.wind,fieldShape='sqr',
            nElements=self.cond,sizes=Q.agentSize,interpolate=False,
            colorSpace='rgb',elementMask='circle',elementTex=None)
        self.elem.setColors(clrs)
        if type(maze)!=type(None):
            self.maze=maze
            self.maze.draw(wind)
    except:
        # never leave a dangling fullscreen window on failure
        self.wind.close()
        raise
def generateGao09e1(vpn):
    ''' Experiment 1 from Gao et al. (2009)
        Gao, T., Newman, G. E., & Scholl, B. J. (2009).
        The psychophysics of chasing: A case study in the perception of animacy.
        Cognitive psychology, 59(2), 154-179.
        vpn - tuple of ints, each value gives the subject id

        For each subject and each of six chasing-subtlety conditions
        generates target-present and target-absent trial pairs plus a
        randomized order file (column 0: position, column 1: condition).
    '''
    # gao09e1 settings
    # TODO move settings to Settings.py
    nrtrials=15
    maze=EmptyMaze((1,1),dispSize=(32,24),lw2cwRatio=0)
    # chasing-subtlety angles in degrees, one per condition
    chs=[0,60,120,180,240,300]
    Q.setTrialDur(10);Q.phiRange=(120,120)
    Q.setpDirChange([5.9,5.9,5.9])
    block=0
    #os.chdir('..')
    os.chdir('..')
    os.chdir('input/')
    for vp in vpn:
        vpname='vp%03d' % vp
        os.mkdir(vpname)
        os.chdir(vpname)
        i=0
        r=np.zeros((2*6*nrtrials,2))
        r[:,0]=np.random.permutation(2*6*nrtrials)
        for cond in range(6):
            for trial in range(nrtrials):
                Q.phiRange=(Q.phiRange[0],chs[cond])
                # generateTrial() may fail and return None; retry until a
                # valid trajectory comes back. "is None" (not "==None")
                # avoids numpy's elementwise comparison once an array is
                # returned, which would raise on truth-testing.
                trajectories=None
                while trajectories is None:
                    trajectories=generateTrial(5,maze=maze,
                        rejectionDistance=5.0)
                #target present trial
                r[i,1]=cond
                fn='gao09e1%sb%dtrial%03d'% (vpname,block,i)
                np.save(fn,trajectories[:,:-1,:]);i+=1
                #target absent trial
                r[i,1]=cond+6
                fn='gao09e1%sb%dtrial%03d'% (vpname,block,i)
                np.save(fn,trajectories[:,1:,:]);i+=1
        np.save('gao09e1order%sb%d'% (vpname,block),r)
        Q.save('SettingsTraj.pkl')
        os.chdir('..')
    os.chdir('..')
def position2image(positions,elem=None,wind=None):
    '''transforms vector of agent positions to display snapshot
        output format is HxW matrix of light intensity values (uint8)

        positions - (nrAgents,2) array of screen coordinates
        elem, wind - psychopy stimulus/window; created lazily when None
    '''
    # create window/elements lazily; remember whether we own the window
    if type(wind)==type(None):
        close=True; wind=Q.initDisplay()
    else: close=False
    if type(elem)==type(None):
        elem=visual.ElementArrayStim(wind,fieldShape='sqr',
            nElements=positions.shape[0], sizes=Q.agentSize,
            elementMask=RING,elementTex=None,colors='white')
    try:
        elem.setXYs(positions)
        elem.draw()
        # grab the back buffer directly - nothing is flipped on screen
        wind.getMovieFrame(buffer='back')
        ret=wind.movieFrames[0]
        wind.movieFrames=[]
        # clear the back buffer by hand and reset psychopy's internal
        # depth counter so the next draw starts from a clean state
        pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT | pyglet.gl.GL_DEPTH_BUFFER_BIT)
        wind._defDepth=0.0
        if close: wind.close()
        return grayscale(ret)# make grey, convert to npy
    except:
        # close the window we created before propagating the error
        if close: wind.close()
        raise
def getWind(self):
    '''Return the window handle, creating the display lazily on
    first access.'''
    if not hasattr(self,'wind'):
        self.wind=Q.initDisplay()
    return self.wind
def __init__(self,gazeData,**kwargs):
    ''' Set up the coding GUI: indicator graphs, selection tools and
        previously coded events.

        gazeData - eyetracking data object
        kwargs   - forwarded to Trajectory.__init__; recognised here:
                   wind (psychopy window, created when absent) and
                   coderid (int, default 0)
    '''
    wind = kwargs.get('wind',None)
    if wind is None: wind = Q.initDisplay(sz=(1280,1100))
    Trajectory.__init__(self,gazeData,wind=wind,**kwargs)
    self.gazeData=gazeData
    self.mouse = event.Mouse(True,None,self.wind)
    self.coderid = kwargs.get('coderid',0)
    try:
        # one indicator row per eye-movement measure, drawn top to bottom
        indic=['Velocity','Acceleration','Saccade','Fixation','OL Pursuit','CL Pursuit','HEV','Tracking']
        self.lim=([0,450],[-42000,42000],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1])# limit of y axis
        self.span=(0.9,0.9,0.6,0.6,0.6,0.6,0.6,0.6)# height of the window taken by graph
        self.offset=(0.1,0.1,0.2,0.2,0.2,0.2,0.2,0.2)
        # data source for each indicator row, same order as indic
        fhandles=[self.gazeData.getVelocity,self.gazeData.getAcceleration,
            self.gazeData.getSaccades, self.gazeData.getFixations,
            self.gazeData.getOLP,self.gazeData.getCLP,
            self.gazeData.getHEV,self.gazeData.getTracking]
        self.ws=30; self.sws=150.0 # selection window size
        ga=[7.8337, 18.7095,-13.3941+5,13.3941+5] # graph area
        self.ga=ga
        mid=ga[0]+(ga[1]-ga[0])/2.0
        inc=(ga[3]-ga[2])/float(len(indic));self.inc=inc
        self.spar=(0,-9.4,2) #parameters for selection tool, posx, posy, height
        self.apar=(0,-12.7,4.5) # parameters for agent selection tool
        # static frame: graph area border and the two tool boxes
        frame=[visual.Line(self.wind,(ga[0],ga[3]),(ga[0],ga[2]),lineWidth=4.0),
            visual.Line(self.wind,(-ga[1],ga[3]),(ga[1],ga[3]),lineWidth=4.0),
            visual.Line(self.wind,(mid,ga[3]),(mid,ga[2]),lineWidth=2.0),
            visual.Line(self.wind,(ga[1],ga[3]),(ga[1],ga[2]),lineWidth=4.0),
            visual.Line(self.wind, (-ga[1],ga[2]),(ga[1],ga[2]),lineWidth=4.0),
            visual.Line(self.wind, (-ga[1],ga[2]),(-ga[1],ga[3]),lineWidth=4.0),
            visual.Rect(self.wind, width=ga[1]*2,height=self.spar[2],
                pos=(self.spar[0],self.spar[1]),lineWidth=4.0),
            visual.Rect(self.wind, width=ga[1]*2,height=self.apar[2],
                pos=(self.apar[0],self.apar[1]),lineWidth=4.0),
            visual.Line(self.wind,(0,self.spar[1]+self.spar[2]/2.0),
                (0,self.apar[1]-self.apar[2]/2.0),lineWidth=2.0)]
        self.seltoolrect=frame[6]
        self.atoolrect=frame[7]
        self.graphs=[]
        # pre-allocated pools of highlight rectangles for selections,
        # saccades and agent intervals
        self.selrects=[];self.sacrects=[]; self.arects=[]
        for i in range(15):
            self.selrects.append(visual.Rect(self.wind, height=self.spar[2],
                width=1,fillColor='red',opacity=0.5,lineColor='red'))
        for i in range(20):
            self.sacrects.append(visual.Rect(self.wind,
                height=self.spar[2]+self.apar[2],width=2,
                fillColor='blue',opacity=0.5,lineColor='blue'))
        for i in range(30):
            self.arects.append(visual.Rect(self.wind, height=self.apar[2],
                width=2,fillColor='green',opacity=0.5,lineColor='blue'))
        # row separators, row labels and one graph polyline per indicator
        for f in range(len(indic)):
            frame.append(visual.Line(self.wind,(ga[0],ga[3]-(f+1)*inc),
                (ga[1],ga[3]-(f+1)*inc),lineWidth=4.0))
            frame.append(visual.TextStim(self.wind,indic[f],
                pos=(ga[0]+0.1,ga[3]-0.1-f*inc),
                alignHoriz='left',alignVert='top',height=0.5))
            self.graphs.append(visual.ShapeStim(self.wind,
                closeShape=False,lineWidth=2.0))
            self.graphs[f].setAutoDraw(True)
        # bake all static parts into a single buffered image for speed
        self.frame=visual.BufferImageStim(self.wind,stim=frame)
        self.tmsg=visual.TextStim(self.wind,color=(0.5,0.5,0.5),pos=(-13,-7.8))
        self.msg= visual.TextStim(self.wind,color=(0.5,0.5,0.5),
            pos=(0,-7.8),text=' ',wrapWidth=20)
        self.msg.setAutoDraw(True)
        # fetch the indicator data resampled to the display time base
        self.gData=[]
        for g in range(len(indic)):
            yOld=fhandles[g](self.phase,hz=self.t)
            self.gData.append(yOld)
        # convert previously detected events from eyetracker samples to
        # display frames; each entry is [startFrame,endFrame,start,end]
        self.sev=[]
        scale=Q.refreshRate/self.gazeData.hz
        for gs in self.gazeData.sev:
            s=int(np.round(scale*gs[0]))
            e=min(len(self.t)-1,max(s+1, int(np.round(scale*gs[1]))))
            self.sev.append([s,e,gs[0],gs[1]])
        for gs in self.gazeData.bev:
            s=int(np.round(scale*gs[0]))
            e=min(len(self.t)-1,max(s+1, int(np.round(scale*gs[1]))))
            self.sev.append([s,e,gs[0],gs[1]])
        # load tracking events coded earlier, if any
        self.selected=[[]]
        try:
            for tr in self.gazeData.track:
                s=int(np.round(scale*tr[0]))
                e=min(len(self.t)-1,max(s+1, int(np.round(scale*tr[1]))))
                self.selected[0].append([self.t[s],s,tr[0],self.t[e],e,tr[1],tr[2],[],False])
                for a in tr[3]:
                    s=int(np.round(scale*a[0]))
                    e=min(len(self.t)-1,max(s+1, int(np.round(scale*a[1]))))
                    self.selected[0][-1][7].append([self.t[s],s,a[0],self.t[e],e,a[1],[-1,-1,-1]])
        except AttributeError: print 'Tracking events not available'
        #self.selected=[Coder.loadSelection(self.gazeData.vp,
        #    self.gazeData.block,self.gazeData.trial,prefix= 'track/coder1/')]
        self.pos[:,:,0]-=6 # shift agents locations on the screen
        self.pos[:,:,1]+=5
        self.wind.flip()
        self.released=False # mouse key flag
        self.save=False # flag for save selection tool data
    except:
        # never leave a dangling fullscreen window on failure
        self.wind.close()
        raise
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ## THE SOFTWARE. from psychopy import visual, core, event,gui from psychopy.misc import pix2deg import numpy as np import pylab as plt import random, os from Settings import Q from Constants import * # these routines were used to correct detection response where # the subject clicked incorrect agent because a distractor overlapped # or crossed with a target. wind=Q.initDisplay() # some constants CHASEE=0 CHASER=1 X=0;Y=1;PHI=2 def exportFrame(trajectories,f,a1,a2,fn): nrframes=trajectories.shape[0] cond=trajectories.shape[1] clrs=np.ones((cond,3)) clrs[CHASEE]=(0,1,0) clrs[CHASER]=(1,0,0) if a1>1: clrs[a1]=(0,0,1) else: clrs[a2]=(0,0,1)
def __init__(self,vp=None):
    ''' inits variables and presents the intro dialog
        vp - subject id, useful for replay functionality; when given the
             dialog is skipped and placeholder infos are used instead'''
    # ask infos
    myDlg = gui.Dlg(title="Experiment zur Bewegungswahrnehmung",pos=Q.guiPos)
    myDlg.addText('VP Infos')
    myDlg.addField('Subject ID:',201)# subject id
    myDlg.addField('Block:',0) # block id
    myDlg.addField('Alter:', 21) # age
    myDlg.addField('Geschlecht (m/w):',choices=(u'weiblich',u'maennlich')) #gender
    myDlg.addField(u'Händigkeit:',choices=('rechts','links'))# handedness
    myDlg.addField(u'Dominantes Auge:',choices=('rechts','links'))# dominant eye
    myDlg.addField(u'Sehschärfe: ',choices=('korrigiert','normal')) # visual acuity
    # weekly hours spent on computer screen
    myDlg.addField(u'Wochenstunden vor dem Komputerbildschirm:',
        choices=('0','0-2','2-5','5-10','10-20','20-40','40+'))
    # weekly hours spent playing video games
    myDlg.addField(u'Wochenstunden Komputerspielen:',
        choices=('0','0-2','2-5','5-9','10-20','20+'))
    myDlg.addField('Starte bei Trial:', 0) # start trial id, for debug only
    if vp is None:
        myDlg.show()#show dialog and wait for OK or Cancel
        vpInfo = myDlg.data
    else: vpInfo=[vp,0,21,'','','','','','',0] # replay: skip the dialog
    self.id=vpInfo[0]
    self.block=vpInfo[1]
    self.initTrial=vpInfo[-1]
    self.scale=1#vpInfo[2]
    try:#then the user pressed OK
        # append the responses to the shared subject-info log; the write
        # raises when the dialog was cancelled (vpInfo is None)
        subinf = open(Q.outputPath+'vpinfo.res','a')
        subinf.write('%d\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%d\n'% tuple(vpInfo))
        subinf.close()
    except:
        print 'Experiment cancelled'
    # save settings, which we will use
    Q.save(Q.inputPath+'vp%03d'%self.id+Q.delim+'SettingsExp.pkl')
    #init stuff
    self.wind=Q.initDisplay()
    self.mouse = event.Mouse(False,None,self.wind)
    self.mouse.setVisible(False)
    fcw=0.1; fch=0.8 #fixcross width and height
    # fixation cross: two white bars plus a small black centre dot,
    # baked into one buffered image
    fclist=[ visual.ShapeStim(win=self.wind, pos=[0,0],fillColor='white',
            vertices=((fcw,fch),(-fcw,fch),(-fcw,-fch),(fcw,-fch)),interpolate=False),
        visual.ShapeStim(win=self.wind, pos=[0,0],fillColor='white',
            vertices=((fch,fcw),(-fch,fcw),(-fch,-fcw),(fch,-fcw)),interpolate=False),
        visual.Circle(win=self.wind, pos=[0,0],fillColor='black',radius=0.1,interpolate=False)]
    self.fixcross=visual.BufferImageStim(self.wind,stim=fclist)
    self.wind.flip(); self.wind.flip()
    self.score=0
    self.rt=0
    # init text
    fs=1 # font size
    self.text1=visual.TextStim(self.wind,text='Error',wrapWidth=30,pos=[0,2])
    self.text2=visual.TextStim(self.wind,text='Error',wrapWidth=30,pos=[0,0])
    self.text3=visual.TextStim(self.wind, text='Error',wrapWidth=30,pos=[0,-10])
    self.text1.setHeight(fs)
    self.text2.setHeight(fs)
    self.text3.setHeight(fs)
    self.f=0 # frame counter
    # load the pre-generated trial presentation order for this block
    self.permut=np.load(Q.inputPath+'vp%03d'%self.id+Q.delim
        +'ordervp%03db%d.npy'%(self.id,self.block))
    # the order file may carry extra per-trial data in additional columns;
    # split those off so permut remains a flat index vector
    if len(self.permut.shape)>1 and self.permut.shape[1]>1:
        self.data=self.permut[:,1:]
        self.permut=self.permut[:,0]
    self.nrtrials=self.permut.size