Example #1
 def update(self):
     """ Automatically called when we need to move the enemy. """
     self.rect.y += self.y_speed
     #self.rect.x += self.x_speed
     #bounce off edges
     #if self.rect.x > SCREEN_WIDTH - self.rect.width or self.rect.x <= 0:
     #    self.x_speed = -self.x_speed
     #change x direction based on probability function
     #self.random = random.random
     #if self.random < self.prob:
     #    self.x_speed = -self.x_speed
     # record the time right when the enemy fully enters the screen
     if -1 <= self.rect.y <= 0:
         t_sight = core.getTime()
         #ns.send_event('Site', timestamp = egi.ms_localtime())
         if self.enemy_type == 'A':
             self.enemyA_sight_time.append(t_sight)
         elif self.enemy_type == 'B':
             self.enemyB_sight_time.append(t_sight)
         elif self.enemy_type == 'C':
             self.enemyC_sight_time.append(t_sight)
         elif self.enemy_type == 'D':
             self.enemyD_sight_time.append(t_sight)
Example #2
def run_main_experiment():
    time_start = core.getTime()
    time_play = time_start
    order = Exp.make_random_stim_order()
    Nonethird = int(np.floor(len(order)/3))
    Ntwothird = int(np.floor(2*len(order)/3))

    t = 0
    for i in order:
        t = t + 1
        print(core.getTime() - time_start)
        if t in [Nonethird, Ntwothird]:
            set_msg('Short Break!', 'MAIN')
            set_msg('Press return to continue', 'KEY')
            win.flip()
            event.waitKeys(keyList=['return', 'space'])
            core.wait(1)

        s = sound_build.make_noisy_stim(i, Exp)
        scaled = np.int16(s/np.max(np.abs(s)) * 32767)
        write('test.wav', 44100, scaled)
        core.wait(time_play - core.getTime())
        set_msg('Up or down?', 'MAIN')
        win.flip()
        playsound(s, vol)
        core.wait(0.1)
        #core.wait(0.5) #wait 500ms; but use a loop of x frames for more accurate timing in fullscreen
        thisResp = get_response()
        iscorrect = Exp.isRespCorrect(i, thisResp)  # 1=correct, 0=incorrect, -1=missed
        time_play = core.getTime() + iti
        dataFile.write('%i,%i,%i\n' % (i, thisResp, iscorrect))
    dataFile.close()
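A minimal sketch of the pacing idiom above, assuming only psychopy.core and a hypothetical 1 s inter-trial interval: the next onset is scheduled with time_play = core.getTime() + iti, and core.wait() sleeps off whatever time remains.

from psychopy import core

iti = 1.0                                # hypothetical inter-trial interval (s)
time_play = core.getTime()               # first onset: now
for trial in range(3):
    core.wait(max(0.0, time_play - core.getTime()))  # sleep until the scheduled onset
    # ... present the stimulus / collect the response here ...
    time_play = core.getTime() + iti     # schedule the next onset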
Example #3
 def shoot(self, bullet_type, color):
     self.bullet = Bullet(color)
     self.bullet.color = str(color)
     # Set the bullet so it shoots from middle of player
     self.bullet.rect.x = self.player.middle
     self.bullet.rect.y = self.player.rect.y
     #play bullet sound
     self.shot_sound.out()
     #decrease ammo supply by 1
     self.level.ammo-=1
     # Add the bullet to the lists
     self.all_sprites_list.add(self.bullet)
     if color == GREEN:
         shot = core.getTime()
         self.Ashot_time.append(shot)
         self.A_bullet_list.add(self.bullet)
     elif color == RED:
         shot = core.getTime()
         self.Bshot_time.append(shot)
         self.B_bullet_list.add(self.bullet)
     elif color == YELLOW:
         shot = core.getTime()
         self.Cshot_time.append(shot)
         self.C_bullet_list.add(self.bullet)
     elif color == BROWN:
         shot = core.getTime()
         self.Dshot_time.append(shot)
         self.D_bullet_list.add(self.bullet)
Example #4
 def callback(self, in_data, frame_count, time_info, status):
     data = self._wf.readframes(frame_count)
     if self._starttime is None:
         self._starttime = core.getTime()
     chunk_dur = len(data)/self.bytes_per_sample/self.sampling_rate
     self._endtime = core.getTime()+chunk_dur
     return (data, pyaudio.paContinue)
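The chunk-duration arithmetic above converts a byte count into seconds. A worked check, assuming 16-bit mono audio (2 bytes per sample) at 44100 Hz; the figures are illustrative:

bytes_per_sample = 2            # assumption: 16-bit mono
sampling_rate = 44100
data_len = 4096                 # bytes returned by one readframes() call
chunk_dur = float(data_len) / bytes_per_sample / sampling_rate
print('%.4f s' % chunk_dur)     # 4096 / 2 / 44100 = 0.0464 s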
Example #5
    def run(self):
        """Starts the validation process. This function will not return
        until the validation is complete. The validation results are
        returned in dict format.

        :return: dict containing validation results.

        """

        continue_val = self._enterIntroScreen()

        if continue_val is False:
            return None

        # delay about 0.5 sec before starting validation
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
            self.io.clearEvents()

        val_results = self._enterValidationSequence()

        # delay about 0.5 sec before showing validation end screen
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
            self.io.clearEvents()

        self._enterFinishedScreen(val_results)
        self.io.clearEvents()
        self.win.flip()

        return val_results
Example #6
    def _record(self, sec, filename='', block=True):
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime()  # for duration estimation, high precision
        self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
        logging.data('%s: Record: onset %d, capture %.3fs' %
                     (self.loggingId, self.fileOnset, self.duration) )
        if not filename:
            onsettime = '-%d' % self.fileOnset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            self.savedFile = os.path.splitext(os.path.abspath(filename))[0] + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, **self.options)

        self.rate = sound.pyoSndServer.getSamplingRate()
        if block:
            core.wait(self.duration, 0)
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
            while self.recorder.running:
                core.wait(.001, 0)
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
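The '-<onset>'.join(os.path.splitext(...)) idiom used above splices a timestamp in front of the file extension. A quick standalone check with a hypothetical epoch time:

import os

onsettime = '-%d' % 1609459200                            # hypothetical core.getAbsTime() value
print(onsettime.join(os.path.splitext('recording.wav')))  # recording-1609459200.wav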
Example #7
 def runTrial(self, *args):
     self.babyStatus=0 # -1 no signal, 0 saccade, 1 fixation,
     self.sacPerPursuit=0
     self.pursuedAgents=False
     self.rewardIter=0
     self.nrRewards=0
     self.blinkCount=0
     self.tFix=0
     self.isFixLast=False
     self.babySawReward=False
     ende=False
     if core.getTime()> BabyExperiment.expDur*60+self.tStart: ende=True
     if ende:
         print(core.getTime() - self.tStart)
         self.etController.sendMessage('Finished')
         self.etController.closeConnection()
         self.wind.close(); core.quit()
     self.timeNotLooking=0
     self.etController.preTrial(driftCorrection=self.showAttentionCatcher>0)
     self.etController.sendMessage('Trial\t%d'%self.t)        
     self.etController.sendMessage('Phase\t%d'%self.phases[self.pi])
     if self.eeg is not None:
         self.eeg.setData(int(self.t+1))
     Experiment.runTrial(self,*args,fixCross=False)
     self.etController.postTrial()
Example #8
    def onGazeData(self, data):
        '''
        Called whenever the Tobii has computed and sent new data.
        The data are stored in the buffer.
        '''
        self.__dataCount += 1

        if len(self.__buffer) >= self.__buffersize:
            self.__buffer.pop(0)
        self.__buffer.append((core.getTime(), data))

        if self.__storing:
            print "\n\nomg storing\n\n", core.getTime()
        if core.getTime() - self.__lastStoreTime > self.__dataSaveIntervall:
            self.__storing = True
            self.__storeData()
            self.__storing = False

        lx = data.x_gazepos_lefteye
        ly = data.y_gazepos_lefteye
        rx = data.x_gazepos_righteye
        ry = data.y_gazepos_righteye

        lx, ly = pyTetClient.tobiiToPsyCoord(lx, ly)
        rx, ry = pyTetClient.tobiiToPsyCoord(rx, ry)

        avgX = (lx + rx) / 2
        avgY = (ly + ry) / 2

        if self.__showGazePoint:
            # adjust the positions of the gaze discs
            #print "lx:%f\tly:%f\trx:%f\try:%f" % (lx, ly, rx, ry)
            self.__discAvg.setPosition(avgX, avgY)
            self.__discLeft.setPosition(lx, ly)
            self.__discRight.setPosition(rx, ry)
Example #9
    def record(self, sec, file='', block=True):
        """Capture sound input for duration <sec>, save to a file.

        Return the path/name to the new file. Uses onset time (epoch) as
        a meaningful identifier for filename and log.
        """
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime() # note: report onset time in log, and use in filename
        logging.data('%s: Record: onset %.3f, capture %.3fs' %
                     (self.loggingId, self.onset, self.duration) )
        if not file:
            onsettime = '-%.3f' % self.onset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            self.savedFile = os.path.splitext(os.path.abspath(file))[0] + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, self.sampletype)
        self.rate = sound.pyoSndServer.getSamplingRate()

        if block:
            core.wait(self.duration - .0008) # .0008 fudge factor for better reporting
                # actual timing is done by Clean_objects
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
Example #10
def testWait(duration=1.55):
    try:
        t1=getTime()
        wait(duration)
        t2=getTime()

        # Check that the actual duration of the wait was close to the
        # requested delay.
        #
        # Note that I have had to set this to a relatively high value of
        # 50 msec because on my Win7, i7, 16GB machine I would get deltas
        # of up to 35 msec when I was testing this.
        #
        # This is 'way high', and I think it is because the current wait()
        # implementation polls pyglet for events during the CPU-hog period.
        # IMO, during the hog period, which should only need to be 1-2 msec,
        # not the 200 msec default now, nothing should be done but tight
        # looping waiting for the wait() to expire. This is what I do in
        # ioHub, and on this same PC I get actual vs. requested duration
        # deltas of < 100 usec consistently.
        #
        # I have not changed the wait in psychopy until feedback is given,
        # as I may be missing a reason why the current wait() implementation
        # is required.
        #
        assert np.fabs((t2-t1)-duration) < 0.05

        printf(">> core.wait(%.2f) Test: PASSED"%(duration))

    except Exception:
        printf(">> core.wait(%.2f) Test: FAILED. Actual Duration was %.3f"%(duration,(t2-t1)))
        printExceptionDetails()

    printf("-------------------------------------\n")
Example #11
 def generate(self):
     """ generate the enemy off screen """
     #distance for offset = desired time * velocity
     #ns.sync()
     self.offset_time = 60*random.randrange(self.offscreen_min, self.offscreen_max)  # multiply by 60 to convert seconds to frames at 60 fps
     self.offset_distance = -(self.offset_time * self.y_speed)
     self.rect.y = self.offset_distance
     if self.enemy_type == 'A':
         self.sound.out()
         #ns.send_event('SndA', timestamp = egi.ms_localtime())
         self.rect.x = self.a_pos
         time = core.getTime()
         self.enemyA_generate_time.append(time)
     elif self.enemy_type == 'B':
         self.sound.out()
         #ns.send_event('SndB', timestamp = egi.ms_localtime())
         self.rect.x = self.b_pos
         time = core.getTime()
         self.enemyB_generate_time.append(time)
     elif self.enemy_type == 'C':
         self.sound.out()
         #ns.send_event('SndC', timestamp = egi.ms_localtime())
         self.rect.x = self.c_pos
         time = core.getTime()
         self.enemyC_generate_time.append(time)
     elif self.enemy_type == 'D':
         self.sound.out()
         #ns.send_event('SndD', timestamp = egi.ms_localtime())
         self.rect.x = self.d_pos
         time = core.getTime()
         self.enemyD_generate_time.append(time)
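A worked check of the offset arithmetic above, with hypothetical numbers: 2 s offscreen at 60 fps and a fall speed of 3 px per frame.

offset_time = 60 * 2                      # 120 frames (2 s at 60 fps)
y_speed = 3                               # px per frame
offset_distance = -(offset_time * y_speed)
print(offset_distance)                    # -360: the enemy starts 360 px above y = 0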
Example #12
    def getGLFont(font_family_name,size=32,bold=False,italic=False,dpi=72):
        """
        Return a FontAtlas object that matches the family name, style info,
        and size provided. FontAtlas objects are cached, so if multiple
        TextBox instances use the same font (with matching font properties)
        then the existing FontAtlas is returned. Otherwise, a new FontAtlas is
        created, added to the cache, and returned.
        """
        from psychopy.visual.textbox import getFontManager
        fm=getFontManager()
        font_atlas = None  # defined even if no FontManager or matching font is found

        if fm:
            if fm.font_store:
                # should be loading from font store if requested font settings
                # have been saved to the hdf5 file (assuming it is faster)
                pass
                #print "TODO: Check if requested font is in FontStore"
            font_infos=fm.getFontsMatching(font_family_name,bold,italic)
            if len(font_infos) == 0:
                return False
            font_info=font_infos[0]
            fid=MonospaceFontAtlas.getIdFromArgs(font_info,size,dpi)
            font_atlas=fm.font_atlas_dict.get(fid)
            if font_atlas is None:
                font_atlas=fm.font_atlas_dict.setdefault(fid,MonospaceFontAtlas(font_info,size,dpi))
                font_atlas.createFontAtlas()
            if fm.font_store:
                t1=getTime()
                fm.font_store.addFontAtlas(font_atlas)
                t2=getTime()
                print('font store add atlas:', t2 - t1)
        return font_atlas
Example #13
def updateStimText(stim,text=None):
    stime=core.getTime()*1000.0
    if text:    
        stim.setText(text)
    stim.draw()
    gl.glFinish()
    etime=core.getTime()*1000.0 
    return etime-stime
Example #14
    def resample(self, newRate=16000, keep=True, log=True):
        """Re-sample the saved file to a new rate, return the full path.

        Can take several visual frames to resample a 2s recording.

        The default values for resample() are for google-speech, keeping the
        original (presumably recorded at 48kHz) to archive.
        A warning is generated if the new rate is not an integer factor / multiple of the old rate.

        To control anti-aliasing, use pyo.downsamp() or upsamp() directly.
        """
        if not self.savedFile or not os.path.isfile(self.savedFile):
            msg = "%s: Re-sample requested but no saved file" % self.loggingId
            logging.error(msg)
            raise ValueError(msg)
        if newRate <= 0 or type(newRate) != int:
            msg = "%s: Re-sample bad new rate = %s" % (self.loggingId, repr(newRate))
            logging.error(msg)
            raise ValueError(msg)

        # set-up:
        if self.rate >= newRate:
            ratio = float(self.rate) / newRate
            info = "-ds%i" % ratio
        else:
            ratio = float(newRate) / self.rate
            info = "-us%i" % ratio
        if ratio != int(ratio):
            logging.warn("%s: old rate is not an integer factor of new rate" % self.loggingId)
        ratio = int(ratio)
        newFile = info.join(os.path.splitext(self.savedFile))

        # use pyo's downsamp or upsamp based on relative rates:
        if not ratio:
            logging.warn("%s: Re-sample by %sx is undefined, skipping" % (self.loggingId, str(ratio)))
        elif self.rate >= newRate:
            t0 = core.getTime()
            downsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
            if log and self.autoLog:
                logging.exp(
                    "%s: Down-sampled %sx in %.3fs to %s" % (self.loggingId, str(ratio), core.getTime() - t0, newFile)
                )
        else:
            t0 = core.getTime()
            upsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
            if log and self.autoLog:
                logging.exp(
                    "%s: Up-sampled %sx in %.3fs to %s" % (self.loggingId, str(ratio), core.getTime() - t0, newFile)
                )

        # clean-up:
        if not keep:
            os.unlink(self.savedFile)
            self.savedFile = newFile
            self.rate = newRate

        return os.path.abspath(newFile)
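The ratio and filename logic reduces to a few lines; a worked example, assuming a 48 kHz recording resampled to the 16 kHz default:

import os

rate, newRate = 48000, 16000
ratio = float(rate) / newRate             # 3.0 -> integral, so no warning
info = '-ds%i' % ratio                    # '-ds3' marks a 3x down-sample
print(info.join(os.path.splitext('speech.wav')))  # speech-ds3.wav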
Example #15
    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary


        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        filename = pathToString(filename)
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Create Video Stream stuff
        self._video_stream.open(filename)
        opened = self._video_stream.isOpened()
        if not opened:
            raise RuntimeError("Error when reading image file")

        self._total_frame_count = self._video_stream.get(
            cv2.CAP_PROP_FRAME_COUNT)
        self._video_width = int(self._video_stream.get(
            cv2.CAP_PROP_FRAME_WIDTH))
        self._video_height = int(self._video_stream.get(
            cv2.CAP_PROP_FRAME_HEIGHT))
        self._format = self._video_stream.get(
            cv2.CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3

        cv_fps = self._video_stream.get(cv2.CAP_PROP_FPS)

        self._video_frame_rate = cv_fps

        self._inter_frame_interval = 1.0/self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by
        # cv2.
        self._numpy_frame = numpy.zeros((self._video_height,
                                         self._video_width,
                                         self._video_frame_depth),
                                        dtype=numpy.uint8)
        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)
Example #16
    def runBlock(self, blockNo):
        """
        runBlock prepares the screen for the given block:
        plots the images required for the trial, sets the images that are
        true, and resets the image at the end of the trial.
        It runs the trial, computes the reaction time, and logs the data to
        be written into the text file.
        self.resume is set to False in case it is True, so that the normal
        routine is followed after loadExistingData() is called and the
        initial values are reset to the resume values.
        blockNo is received by this method only for writing in the text file.
        """
        for y in self.orderBlock:

            if self.useTobii:
                tobii.setTrigger(self.trial)
            self.showQuestionText(self.config.questions[self.config.questionIdx[y,0]])
            #print self.trial
            if self.config.questionIdx[y,1] == 0 :
                self.setImageFalse(self.config.questionIdx[y,0])
            if self.useTobii:
                tobii.startTracking()
            self.drawFixation()
            self.showAndSetImage(self.config.constellation[blockNo])
            startTime = core.getTime() 
            key = event.waitKeys(keyList=['y','n','escape'])
            endTime = core.getTime()
            if self.useTobii:
                tobii.stopTracking()
            if key[0] == 'escape':
                # Escape quits the program by calling self.quit()
                self.quit()

            # Score the response: 1 if the yes/no answer matches the
            # ground truth in questionIdx, else 0.
            if key[0] == 'n' and self.config.questionIdx[y,1] == 0:
                self.response = 1
            elif key[0] == 'y' and self.config.questionIdx[y,1] == 1:
                self.response = 1
            else:
                self.response = 0

            # compute reaction time
            reactionTime = endTime - startTime
            
            # details about the trial
            objectType = self.config.questionIdx[y,0]  # object type, 5 objects of interest: 0:GreenCircle 1:OpenSquare 2:Rhombus 3:RedTriangle 4:Star(five-sided)
            objectExists = self.config.questionIdx[y,1]  # whether the object exists in this trial; can be evaluated against the response

            self.resetImageFalse(self.config.questionIdx[y,0])
            
            if self.save:
                self.resultData[self.trialIdx,:] = [self.config.subject, blockNo, self.trial, objectType, objectExists, self.response, reactionTime]
                self.trialIdx += 1
                self.trial += 1
                self.saveData()
                if self.resume:
                    self.resume = False
Example #17
    def _syncClocks(self, num_samples, enableInd=None):
        # disable all inputs
        self._enable_byte(0)
        # clear serial buffers
        self._purgeBuffers()

        t_pre = np.zeros(num_samples)
        t_post = np.zeros(num_samples)
        t_receive = np.zeros(num_samples)
        for i in range(num_samples):
            # try to get correct response in 4 tries
            for tries in range(4):
                # 0~1 ms random interval
                time.sleep(np.random.rand()/1000)
                t_pre[i] = core.getTime()
                # send signal to obtain t_receive
                self.ser.write('Y')
                t_post[i] = core.getTime()
                b7 = unpack_bytes(self.ser.read(7))
                if len(b7) == 7 and b7[0] == 89:
                    break
                if tries == 3:
                    raise Exception('RTBox not responding')
            t_receive[i] = self._bytes2secs(b7[1:])
        # t_pre += self.info['tpreOffset'][0] ???
        # the latest tpre is the closest to real write
        t_diff_method1 = (t_pre - t_receive).max()
        i_method1 = (t_pre - t_receive).argmax()
        # tpost-tpre for the selected sample: upper bound
        twin = t_post[i_method1] - t_pre[i_method1]
        # used for linear fit and time check
        t_receive_method1 = t_receive[i_method1]
        t_diff_method2 = (t_post - t_receive).min()
        # earliest tpost - latest tpre
        tdiff_ub = t_diff_method2 - t_diff_method1

        """
        #ok find minwin index
        i_method2 = (t_post - t_receive).argmin()
        # diff between methods 3 and 1
        pre_post_mean = np.mean(np.concatenate((t_pre, t_post), axis=0), axis=0)
        # t_diff_method3 = pre_post_mean - t_receive[i_method2] ????
        # should actually be...
        t_diff_method3 = (pre_post_mean - t_receive).min()
        method3_1 = t_diff_method3 - t_diff_method1
        """

        if twin > 0.003 and tdiff_ub > 0.001:
            warnings.warn("USB Overload")

        # reenable inputs
        self._update_enabled_event_types()

        return [t_diff_method1, t_receive_method1]
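Once the samples are collected, the offset estimate itself is a one-liner. A sketch with hypothetical timing arrays, following the "method 1" logic above (the latest pre-write time is closest to the true write):

import numpy as np

t_pre     = np.array([10.0001, 10.0103, 10.0202])  # hypothetical host times before write
t_post    = np.array([10.0009, 10.0110, 10.0211])  # host times after write
t_receive = np.array([ 5.0004,  5.0106,  5.0204])  # device clock at receipt

t_diff = (t_pre - t_receive).max()        # clock offset estimate
i = (t_pre - t_receive).argmax()
twin = t_post[i] - t_pre[i]               # uncertainty window for that sample
print(t_diff, twin)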
Example #18
 def getJudgment(self,giveFeedback=False):
     '''asks subject to select chaser chasee'''
     position=np.transpose(self.pos)
     cond=position.shape[1]
     self.mouse.clickReset()
     self.mouse.setVisible(1)
     elem=self.elem
     t0 = core.getTime()
     selected = []
     mkey=self.mouse.getPressed()
     lastPress=t0
     while sum(mkey)>0:
         elem.draw()
         self.wind.flip()
         mkey=self.mouse.getPressed()
     released=True
     clrs=np.ones((cond,1))*Q.agentCLR
     while len(selected) <2:
         elem.draw()
         self.wind.flip()
         mpos=self.mouse.getPos()
         mkey=self.mouse.getPressed()
         mtime=core.getTime()
         for a in range(cond):
             if (event.xydist(mpos,np.squeeze(position[:,a]))
                 < Q.agentRadius*self.scale):
                 if 0<sum(mkey) and released: # button pressed
                     if selected.count(a)==0: # avoid selecting twice
                         clrs[a]=Q.selectedCLR
                         elem.setColors(clrs,'rgb')
                         selected.append(a)
                         self.output.write('\t%d\t%2.4f' % (a,mtime-t0))
                     released=False
                 elif a in selected: # no button pressed but selected already
                     clrs[a]=Q.selectedCLR
                     elem.setColors(clrs,'rgb')
                 else: # no button pressed but mouse cursor over agent
                     clrs[a]=Q.mouseoverCLR
                     elem.setColors(clrs,'rgb')
             elif a in selected: # no button pressed, no cursor over agent, but already selected
                 clrs[a]=Q.selectedCLR
                 elem.setColors(clrs,'rgb')
             else: # no button press, no cursor over agent, not selected
                 clrs[a]=Q.agentCLR
                 elem.setColors(clrs,'rgb')
         if 0==sum(mkey) and not released:
             released=True       
     t0=core.getTime()
     while core.getTime()-t0<1:
         elem.draw()
         self.wind.flip()
     self.mouse.setVisible(0)
     # agents 0 and 1 are the chaser and chasee; correct if both were selected
     if set(selected) == {0, 1}:
         return 1
     else:
         return 0
Example #19
    def run(self):
        
        if self.useTobii and not self.resume:
            
            self.showCustomText("Kalibrierung") 
            tobii.calibrate(perfect=True)
            tobii.showCalibrationResultNet()
            event.waitKeys()

        
        if not self.resume:
            self.trial = 1
            self.trialIdx = 0
            self.count = 1
            self.noOfBlocks = self.config.noOfBlocks
            random.shuffle(self.noOfBlocks)
            self.tutorial()
            self.save = False
            self.practice()
        
        if self.resume:
            self.save = True
            self.showCustomText("Versuch wird fortgesetzt")
            random.shuffle(self.noOfBlocks)
            if self.useTobii:
                self.showCustomText("Kalibrierung")
                tobii.calibrate(perfect=True)
                tobii.showCalibrationResultNet()
                event.waitKeys()
        self.showImage(self.instructions["experiment"])
        self.save = True

        for y in self.noOfBlocks:
            
            if not self.resume:
                self.orderBlock = self.config.orderBlock[y]
            
            if self.count == 4 and not self.resume:
                self.TakeABreak()
            blockName = 'block' + str(self.count)
            self.waitForKeys = True
            self.showImage(self.instructions[blockName])
            self.blockStartTime = core.getTime() #block starting time
            self.runBlock(y)
            self.blockEndTime = core.getTime() #block end time
            elapsedTime = self.blockEndTime - self.blockStartTime # time elapsed from the start till the end of the block
            self.showStat(elapsedTime)
            self.count += 1
        self.showImage(self.instructions["danke"])
        self.quit()
Example #20
def run_training(session, duration):
    time_start = core.getTime()
    time_play = time_start
    time_stop = time_start + duration
    while core.getTime() < time_stop:
        s = sound_build.make_random_training_sound(session, Exp)
        core.wait(time_play - core.getTime())
        set_msg('Up or down?', 'MAIN')
        win.flip()
        playsound(s, vol)
        get_response()
        print(core.getTime() - time_start)
        time_play = core.getTime() + iti
Example #21
 def _purge(self):
     byte = self.ser.inWaiting()
     tout = core.getTime() + 1
     # check to make sure RTBox is idle
     while True:
         time.sleep(0.0001)
         byte1 = self.ser.inWaiting()
         if byte1 == byte:
             break
         if core.getTime() > tout:
             raise Exception('RTBox not responding')
         byte = byte1
     # purge buffers
     self._purgeBuffers()
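The same idle-wait pattern, restated generically: poll a condition with a short sleep and give up when a core.getTime() deadline passes. A minimal sketch with a hypothetical predicate argument:

from psychopy import core
import time

def wait_until(predicate, timeout=1.0, poll=0.0001):
    """Poll until predicate() is true; raise if the deadline passes."""
    deadline = core.getTime() + timeout
    while not predicate():
        if core.getTime() > deadline:
            raise Exception('device not responding')
        time.sleep(poll)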
Example #22
 def __shrinkQuad(self, duration = 0.5):
     '''
     Animates the shrinking of the square over the given duration.
     (Blocking.)
     '''
     self.__quad.scale(1)
     minScale = 0.1
     startTime = core.getTime()
     currentTime = startTime
     while currentTime - startTime < duration:
         curScale = minScale + (1-(currentTime-startTime)/duration) * (1-minScale)
         self.__quad.scale(curScale)
         self.__window.flip()
         currentTime = core.getTime()
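The interpolation formula above is worth checking at its endpoints: the scale is 1 at onset and minScale once the duration has elapsed. A quick check with the values from the snippet:

minScale, duration = 0.1, 0.5
for elapsed in (0.0, 0.25, 0.5):
    curScale = minScale + (1 - elapsed / duration) * (1 - minScale)
    print(elapsed, curScale)              # 1.0, then 0.55, then 0.1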
Example #23
 def point_expand_contract(self, duration):
     start_time = getTime()
     ratio = 0
     while ratio < 1:
         ratio = (getTime()-start_time)/(duration*0.5)
         self.outer_point.setRadius(25+25*ratio)
         self.outer_point.draw()
         self.inner_point.draw()
         self.flip()
     while ratio < 2:
         ratio = (getTime()-start_time)/(duration*0.5)
         self.outer_point.setRadius(75-25*ratio)
         self.outer_point.draw()
         self.inner_point.draw()
         self.flip()
Example #24
 def reset(self, log=True):
     """Restores to fresh state, ready to record again
     """
     if log and self.autoLog:
         msg = '%s: resetting at %.3f'
         logging.exp(msg % (self.loggingId, core.getTime()))
     self.__init__(name=self.name, saveDir=self.saveDir)
Example #25
 def kill_enemy(self, bullet_color, enemy_type):
     time = core.getTime()
     if enemy_type=='A':
         self.enemyA_kill_time.append(('Level-'+str(self.level.currentLevel) + ', time-' +str(time)))
     if enemy_type=='B':
         self.enemyB_kill_time.append(('Level-'+str(self.level.currentLevel) + ', time-' +str(time)))
     if enemy_type=='C':
         self.enemyC_kill_time.append(('Level-'+str(self.level.currentLevel) + ', time-' +str(time)))
     if enemy_type=='D':
         self.enemyD_kill_time.append(('Level-'+str(self.level.currentLevel) + ', time-' +str(time)))
     self.enemy.sound.stop()
     self.enemy.pop.out()
     self.score += 10
     self.elapsedTime = 0
     self.level.kill_list.append('Kill')
     self.dead_enemies.add(self.enemy)
     self.enemy_live = False
     write_trajectory(self.trial, self.level.currentLevel)
     self.all_sprites_list.remove(self.bullet)
     if bullet_color == 'green':
         self.A_bullet_list.remove(self.bullet)
     elif bullet_color == 'red':
         self.B_bullet_list.remove(self.bullet)
     elif bullet_color == 'yellow':
         self.C_bullet_list.remove(self.bullet)
     elif bullet_color == 'brown':
         self.D_bullet_list.remove(self.bullet)
Example #26
    def test_getTime(self):
        ta = Computer.currentSec()
        tb = Computer.currentTime()
        tc = Computer.getTime()
        tp = getTime()

        assert ta <= tb <= tc <= tp
        assert tp - ta < 0.002

        ta = getTime()
        tb = self.io.getTime()
        tc = self.io.getTime()
        tp = getTime()

        assert ta <= tb <= tc <= tp
        assert tp - ta < 0.01
Example #27
def tapping_exp(win, randid, hand='r'):
        if hand == 'l':
                keylist = LH_TAPPING_KEYLIST
        else:
                keylist = RH_TAPPING_KEYLIST

        #create some stimuli
        circle = visual.ImageStim(win=win, image=circle_image_path, pos=SA_circle_pos)

        #draw the stimuli and update the window
        stim_times = []
        for i in range(int(ST_repetition_times)):
                #draw the circle
                circle.draw()
                #send the screen with the circle to the display
                win.flip()
                #record the time at which it was sent
                stim_times.append(core.getTime())
                #show it for "ST_duration_time" seconds
                core.wait(ST_duration_time)
                #send a blank screen
                win.flip()
                #show the blank screen for "ST_interval_time" seconds
                core.wait(ST_interval_time)

        #check when the keys were pressed
        user_times = event.getKeys(keyList=keylist, timeStamped=True)
        return stim_times, user_times
Example #28
def updateStimText(stim,text=None):
    """
    Function used by all text stim types for redrawing the stim.
    
    Update the text for the stim type assigned to 'stim', call stim.draw(),
    and ensure that all graphics card operations are complete before returning
    the time (in msec) taken to run the update logic. If 'text' is None, just 
    time the call to stim.draw(). 
    """
    stime=core.getTime()*1000.0
    if text:    
        stim.setText(text)
    stim.draw()
    gl.glFinish()
    etime=core.getTime()*1000.0 
    return etime-stime
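A minimal usage sketch for updateStimText() as defined above, assuming a PsychoPy window, a TextStim, and that gl comes from pyglet.gl:

from psychopy import visual, core
import pyglet.gl as gl   # assumed source of the gl module used above

win = visual.Window([800, 600])
stim = visual.TextStim(win, text='')
print('%.3f ms' % updateStimText(stim, 'Hello'))  # time one setText() + draw()
win.close()
core.quit()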
Example #29
def arrow_exp(win, randid, hand='r'):
        if hand == 'l':
                keylist = LH_ARROWS_KEYLIST
        else:
                keylist = RH_ARROWS_KEYLIST

        #Create our stimuli
        arrow = visual.ImageStim(win=win, image=arrow_image_path, pos=ST_arrow_pos)

        stim_times = []
        for i in range(int(SA_repetition_times)):
                #flip the arrow by negating its size (hack ;) )
                arrow.size *= -1
                arrow.draw()
                #send the screen with the arrow to the display
                win.flip()
                #record the time at which it was sent
                stim_times.append(core.getTime())
                #show it for "SA_duration_time" seconds
                core.wait(SA_duration_time)
                #send a blank screen
                win.flip()
                #show the blank screen for "SA_interval_time" seconds
                core.wait(SA_interval_time)

        #check when the keys were pressed
        user_times = event.getKeys(keyList=keylist, timeStamped=True)
        return stim_times, user_times
Example #30
 def setTrigger(self, triggerNum):
     '''Sets a trigger for the output files.'''
     if triggerNum == self.__lastTrigger: 
         return
     self.__lastTrigger = triggerNum
     trigger = core.getTime(), triggerNum
     self.__buffer.append(trigger)
Example #31
    def _setCurrentProcessInfo(self, verbose=False, userProcsDetailed=False):
        """what other processes are currently active for this user?"""
        appFlagList = [# flag these apps if active, case-insensitive match:
            'Firefox', 'Safari', 'Explorer', 'Netscape', 'Opera', 'Google Chrome', # web browsers can burn CPU cycles
            'Dropbox', 'BitTorrent', 'iTunes', # but also matches iTunesHelper (add to ignore-list)
            'mdimport', 'mdworker', 'mds', # can have high CPU
            'Office', 'KeyNote', 'Pages', 'LaunchCFMApp', # productivity; on mac, MS Office (Word etc) can be launched by 'LaunchCFMApp'
            'Skype',
            'VirtualBox', 'VBoxClient', # virtual machine as host or client
            'Parallels', 'Coherence', 'prl_client_app', 'prl_tools_service',
            'VMware'] # just a guess
        appIgnoreList = [# always ignore these, exact match:
            'ps', 'login', '-tcsh', 'bash', 'iTunesHelper']

        # assess concurrently active processes owned by the current user:
        try:
            # ps = process status, -c to avoid full path (potentially having spaces) & args, -U for user
            if sys.platform not in ['win32']:
                proc = shellCall("ps -c -U "+os.environ['USER'])
            else:
                proc, err = shellCall("tasklist", stderr=True) # "tasklist /m" gives modules as well
                if err:
                    logging.error('tasklist error:', err)
                    #raise
            systemProcPsu = []
            systemProcPsuFlagged = []
            systemUserProcFlaggedPID = []
            procLines = proc.splitlines()
            headerLine = procLines.pop(0) # column labels
            if sys.platform not in ['win32']:
                try:
                    cmd = headerLine.upper().split().index('CMD') # columns and column labels can vary across platforms
                except ValueError:
                    cmd = headerLine.upper().split().index('COMMAND')
                pid = headerLine.upper().split().index('PID')  # process id's extracted in case you want to os.kill() them from psychopy
            else: # this works for win XP, for output from 'tasklist'
                procLines.pop(0) # blank
                procLines.pop(0) # =====
                pid = -5 # pid next after command, which can have
                cmd = 0  # command is first, but can have white space, so end up taking line[0:pid]
            for p in procLines:
                pr = p.split() # info fields for this process
                if pr[cmd] in appIgnoreList:
                    continue
                if sys.platform in ['win32']:  #allow for spaces in app names
                    systemProcPsu.append([' '.join(pr[cmd:pid]), pr[pid]]) # later just count these unless want details
                else:
                    systemProcPsu.append([' '.join(pr[cmd:]), pr[pid]]) #
                matchingApp = [a for a in appFlagList if a.lower() in p.lower()]
                for app in matchingApp:
                    systemProcPsuFlagged.append([app, pr[pid]])
                    systemUserProcFlaggedPID.append(pr[pid])
            self['systemUserProcCount'] = len(systemProcPsu)
            self['systemUserProcFlagged'] = systemProcPsuFlagged

            if verbose and userProcsDetailed:
                self['systemUserProcCmdPid'] = systemProcPsu
                self['systemUserProcFlaggedPID'] = systemUserProcFlaggedPID
        except:
            if verbose:
                self['systemUserProcCmdPid'] = None
                self['systemUserProcFlagged'] = None

        # CPU speed (will depend on system busy-ness)
        d = numpy.array(numpy.linspace(0., 1., 1000000))
        t0 = core.getTime()
        numpy.std(d)
        t = core.getTime() - t0
        del d
        self['systemTimeNumpySD1000000_sec'] = t
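The CPU-speed probe at the end reduces to timing numpy.std() over a million points; a standalone sketch (larger values mean a slower or busier system):

from psychopy import core
import numpy

d = numpy.linspace(0., 1., 1000000)
t0 = core.getTime()
numpy.std(d)
print('systemTimeNumpySD1000000_sec: %.4f' % (core.getTime() - t0))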
Example #32
#####################################################################

#
# Start the experiment.
#

pstbox.clearEvents()
start_time = computer.getTime()

# Display instruction and check if we collected any button events.
# If there is no button press within a 30 s period, quit.
instruction.draw()
win.flip()
while not pstbox.getEvents():
    if core.getTime() - start_time > 30:
        print('Timeout waiting for button event. Exiting...')
        io.quit()
        core.quit()

# Clear the screen.
win.flip()

nreps = 10
RT = np.array([])
button = np.array([])
io.wait(2)

for i in range(nreps):
    print('Trial #', i)
Example #33
    #xyCasting.stop()
    text.setText('Press any key to start the experiment')
    text.draw()
    win.flip()
    event.waitKeys()
    xyCasting.send('start')

    # Wait for the clients to finish and store reaction times
    ip_list_temp = constants_wally.CLIENTS[:]
    search_time = []
    text.setText('Waiting for clients to finish\n\n Remaining clients: ' +
                 str(ip_list_temp))
    text.draw()
    win.flip()
    time.sleep(1)
    t0 = core.getTime()
    while ip_list_temp:
        allData = xyCasting.consumeAll()
        #print(allData)
        for data, addr, time_arrive in allData:
            if 'exp_done' in data:
                ip = int(addr[0].split('.')[-1])
                rt = float(data.split(' ')[1])
                ip_list_temp.remove(ip)
                search_time.append([ip, rt])
                text.setText(
                    'Waiting for clients to finish\n\n Remaining clients: ' +
                    str(ip_list_temp))
                text.draw()
                win.flip()
Example #34
    pos=(0, -1))
end2 = visual.TextStim(win=mywin,
                       text='Please contact the experimenter.',
                       pos=(0, -2))

###show our instructions, and wait for a response###
fixation.draw()
instr1.draw()
instr2.draw()
instr3.draw()
instr4.draw()
instr5.draw()
mywin.flip()

###wait for button press to start experiment###
start_time = core.getTime()
keys = event.waitKeys()
while keys not in [['space']]:
    keys = event.waitKeys()

###first we will run the practice trials###
for i_trial in range(len(trial_order_practice)):
    ITI = (randint(0, 500) * 0.001) + 0.5
    ###wait a bit###
    mywin.flip()
    core.wait(ITI)
    ###present fixation cross###
    fixation.draw()
    mywin.flip()
    core.wait(1)
    ###now remove the fixation and wait for a bit###
Example #35
win = visual.Window(
    [1200,1000],
    monitor="testMonitor",
    units="deg",
    fullscr=fullscr
    )
win.setMouseVisible(False)

# Sinusoidal control frequency.
freq = 1.5
# Color of the rectangle.
color = '#606a79'
# Position of the rectangle, default (0, 0) (middle of the screen).
pos = (0, 0)

start = core.getTime()
cnt = 0
while cnt<300:
    second = core.getTime() - start
    sin_val = 0.5+0.5*np.sin(
        2 * np.pi * second * float(freq)
        )
    # If you remove or comment out this print, it should run faster
    print('sec: %.4f; sin: %.4f' % (second, sin_val))
    
    rect = visual.Rect(
        win=win,
        lineColor=color, 
        fillColor=color,
        size=20,
        opacity=sin_val,
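The opacity modulation maps the sine's [-1, 1] range onto [0, 1]. A quick check of the formula at a few time points, assuming the 1.5 Hz control frequency above:

import numpy as np

freq = 1.5
for second in (0.0, 1.0 / 6, 1.0 / 3):
    sin_val = 0.5 + 0.5 * np.sin(2 * np.pi * second * freq)
    print('%.3f s -> opacity %.2f' % (second, sin_val))
# 0.000 s -> 0.50; 0.167 s -> 1.00; 0.333 s -> 0.50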
Example #36
    def resample(self, newRate=16000, keep=True, log=True):
        """Re-sample the saved file to a new rate, return the full path.

        Can take several visual frames to resample a 2s recording.

        The default values for resample() are for Google-speech, keeping the
        original (presumably recorded at 48kHz) to archive.
        A warning is generated if the new rate is not an integer factor /
        multiple of the old rate.

        To control anti-aliasing, use pyo.downsamp() or upsamp() directly.
        """
        if not self.savedFile or not os.path.isfile(self.savedFile):
            msg = '%s: Re-sample requested but no saved file' % self.loggingId
            logging.error(msg)
            raise ValueError(msg)
        if newRate <= 0 or type(newRate) != int:
            msg = '%s: Re-sample bad new rate = %s' % (self.loggingId,
                                                       repr(newRate))
            logging.error(msg)
            raise ValueError(msg)

        # set-up:
        if self.rate >= newRate:
            ratio = float(self.rate) / newRate
            info = '-ds%i' % ratio
        else:
            ratio = float(newRate) / self.rate
            info = '-us%i' % ratio
        if ratio != int(ratio):
            msg = '%s: old rate is not an integer factor of new rate'
            logging.warn(msg % self.loggingId)
        ratio = int(ratio)
        newFile = info.join(os.path.splitext(self.savedFile))

        # use pyo's downsamp or upsamp based on relative rates:
        if not ratio:
            msg = '%s: Re-sample by %sx is undefined, skipping'
            logging.warn(msg % (self.loggingId, str(ratio)))
        elif self.rate >= newRate:
            t0 = core.getTime()
            # default 128-sample anti-aliasing
            pyo.downsamp(self.savedFile, newFile, ratio)
            if log and self.autoLog:
                msg = '%s: Down-sampled %sx in %.3fs to %s'
                vals = (self.loggingId, str(ratio), core.getTime() - t0,
                        newFile)
                logging.exp(msg % vals)
        else:
            t0 = core.getTime()
            # default 128-sample anti-aliasing
            pyo.upsamp(self.savedFile, newFile, ratio)
            if log and self.autoLog:
                msg = '%s: Up-sampled %sx in %.3fs to %s'
                vals = (self.loggingId, str(ratio), core.getTime() - t0,
                        newFile)
                logging.exp(msg % vals)

        # clean-up:
        if not keep:
            os.unlink(self.savedFile)
            self.savedFile = newFile
            self.rate = newRate

        return os.path.abspath(newFile)
Example #37
def testTimebaseQuality(sample_size=1000):
    gc.disable()

    callTimes = np.zeros((5, sample_size))

    timer_clock_jumpbacks = 0
    core_getTime_jumpbacks = 0

    for t in range(sample_size):
        s = py_time()
        e = py_time()
        callTimes[0][t] = e - s
        if e < s:
            timer_clock_jumpbacks += 1

        s = getTime()
        e = getTime()
        callTimes[1][t] = e - s
        if e < s:
            core_getTime_jumpbacks += 1

        s = py_time()
        x = testEmptyFunction()
        e = py_time()
        callTimes[2][t] = e - s

        s = py_time()
        x = py_time()
        e = py_time()
        callTimes[3][t] = e - s

        s = py_time()
        x = getTime()
        e = py_time()
        callTimes[4][t] = e - s

    gc.enable()

    printf("## Timebase 'Quality' Tests :\n")
    test_headers = (">> %s Resolution (msec.usec):" % (py_timer_name),
                    ">> core.getTime() Resolution (msec.usec):",
                    ">> Empty function (msec.usec):", ">> %s (msec.usec):" %
                    (py_timer_name), ">> core.getTime() (msec.usec):")
    for i, header in enumerate(test_headers):
        printf(header)
        printf("\tmin:\t\t%.9f" % (callTimes[i].min() * 1000.0))
        printf("\tmax:\t\t%.6f" % (callTimes[i].max() * 1000.0))
        printf("\tmean:\t\t%.6f" % (callTimes[i].mean() * 1000.0))
        printf("\tstd:\t\t%.6f" % (callTimes[i].std() * 1000.0))

    printf(">> %s jumpbacks: " % (py_timer_name), timer_clock_jumpbacks)
    printf(">> core.getTime() jumpbacks: ", core_getTime_jumpbacks)

    # Test that these conditions are true:
    #   - Effective Resolution (mean inter timer call duration) of timer is < 10 usec
    #   - Maximum inter timer call duration is < 100 usec
    #   - no negative durations in timer call durations
    try:
        assert (callTimes[0].mean() * 1000.0) < 0.01
        assert (callTimes[0].max() * 1000.0) < 0.1
        assert timer_clock_jumpbacks == 0
        printf("\n%s Call Time / Resolution Test: PASSED" % (py_timer_name))
    except Exception:
        printf("\n%s Call Time / Resolution Test: FAILED" % (py_timer_name))

    try:
        assert (callTimes[1].mean() * 1000.0) < 0.01
        assert (callTimes[1].max() * 1000.0) < 0.1
        assert core_getTime_jumpbacks == 0
        printf("\ncore.getTime() Call Time / Resolution Test: PASSED")
    except Exception:
        printf("\ncore.getTime() Call Time / Resolution Test: FAILED")

    printf("-------------------------------------\n")
Example #38
def lissajous_func(trial_dur, movement_pars, trial_index):
    """
    a function to run Lissajous movement trial.
    :param trial_dur: the duration of the pursuit movement
    :param movement_pars: [amp_x, amp_y, phase_x, phase_y, freq_x, freq_y]
    :param trial_index: record the order of trial presentation in the task
    :return:
    """

    # parse the movement pattern parameters
    movement, amp_x, amp_y, phase_x, phase_y, freq_x, freq_y = movement_pars

    # get a reference to the currently active EyeLink connection
    el_tracker = pylink.getEYELINK()

    # put the tracker in the offline mode first
    el_tracker.setOfflineMode()

    # send a "TRIALID" message to mark the start of a trial, see Data
    # Viewer User Manual, "Protocol for EyeLink Data to Viewer Integration"
    el_tracker.sendMessage('TRIALID %d' % trial_index)

    # record_status_message : show some info on the Host PC
    # here we show how many trials have been tested
    status_msg = 'TRIAL number %d, %s' % (trial_index, movement)
    el_tracker.sendCommand("record_status_message '%s'" % status_msg)

    # draw a reference grid on the Host PC screen
    # For details, See section 25.7 'Drawing Commands' in the
    # EyeLink Programmers Guide manual
    line_hor = (scnWidth / 2.0 - amp_x, scnHeight / 2.0,
                scnWidth / 2.0 + amp_x, scnHeight / 2.0)
    line_ver = (scnWidth / 2.0, scnHeight / 2.0 - amp_y, scnWidth / 2.0,
                scnHeight / 2.0 + amp_y)
    el_tracker.sendCommand('clear_screen 0')  # clear the host Display
    el_tracker.sendCommand('draw_line %d %d %d %d 15' % line_hor)
    el_tracker.sendCommand('draw_line %d %d %d %d 15' % line_ver)

    # put tracker in idle/offline mode before recording
    el_tracker.setOfflineMode()

    # Start recording
    # arguments: sample_to_file, events_to_file, sample_over_link,
    # event_over_link (1-yes, 0-no)
    try:
        el_tracker.startRecording(1, 1, 1, 1)
    except RuntimeError as error:
        print("ERROR:", error)
        abort_trial()
        return pylink.TRIAL_ERROR

    # Allocate some time for the tracker to cache some samples
    pylink.pumpDelay(100)

    # Send a message to clear the Data Viewer screen, get it ready for
    # drawing the pictures during visualization
    bgcolor_RGB = (116, 116, 116)
    el_tracker.sendMessage('!V CLEAR %d %d %d' % bgcolor_RGB)

    # open an INTEREST AREA SET file to make a dynamic IA for the target
    ias = 'IA_%d.ias' % trial_index
    ias_file = open(os.path.join(aoi_folder, ias), 'w')

    # initial target position
    time_elapsed = 0
    tar_x = amp_x * sin(2 * pi * freq_x * time_elapsed + phase_x)
    tar_y = amp_y * sin(2 * pi * freq_y * time_elapsed + phase_y)

    ia_radius = 60  # radius of the elliptical IA
    frame_num = 0  # keep track of the frames displayed

    # use a fixation trigger when not in dummy mode
    if not dummy_mode:
        fixation = visual.TextStim(win=win, text='+', height=50)
        fixation.draw()
        win.flip()
        el_tracker.sendMessage("FIXATION_TRIGGER")

        eye_used = el_tracker.eyeAvailable()
        if eye_used == 2:
            eye_used = 0

        fixation_time_list = []
        current_eye_pos = [100, 100]

        while True:
            ltype = el_tracker.getNextData()
            if ltype is None:
                pass
            if ltype == FIXUPDATE:
                # send a message to mark the arrival time of a fixation update event
                el_tracker.sendMessage('fixUpdate')
                ldata = el_tracker.getFloatData()
                if ldata.getEye() == eye_used:
                    gaze_pos = ldata.getAverageGaze()
                    current_eye_pos = [
                        gaze_pos[0] - scnWidth / 2, scnHeight / 2 - gaze_pos[1]
                    ]
            if (-25 <= current_eye_pos[0] <= 25) and (-25 <= current_eye_pos[1]
                                                      <= 25):
                fixation_time_list.append(core.getTime())
            else:
                fixation_time_list = []
            if len(fixation_time_list) > 1:
                # if fixation duration > 300 ms, break
                if fixation_time_list[-1] - fixation_time_list[0] > 0.3:
                    break

    target.pos = (tar_x, tar_y)
    target.draw()
    win.flip()
    el_tracker.sendMessage('TARGET_WAIT')
    core.wait(0.5)  # wait 500 ms for moving

    while True:
        # abort the current trial if the tracker is no longer recording
        error = el_tracker.isRecording()
        if error is not pylink.TRIAL_OK:
            el_tracker.sendMessage('tracker_disconnected')
            abort_trial()
            return error

        # check keyboard events
        for keycode, modifier in event.getKeys(modifiers=True):
            # Abort a trial if "ESCAPE" is pressed
            if keycode == 'escape':
                el_tracker.sendMessage('trial_skipped_by_user')
                # clear the screen
                clear_screen(win)
                # abort trial
                abort_trial()
                return pylink.SKIP_TRIAL

            # Terminate the task if Ctrl-c
            if keycode == 'c' and (modifier['ctrl'] is True):
                el_tracker.sendMessage('terminated_by_user')
                terminate_task()
                return pylink.ABORT_EXPT

        # draw the target
        target.pos = (tar_x, tar_y)
        target.draw()
        win.flip()
        frame_num += 1
        flip_time = core.getTime()

        if frame_num == 1:
            # send a message to mark movement onset
            el_tracker.sendMessage('TARGET_ONSET')

            # record a message to let Data Viewer know where to find
            # the dynamic IA file for the current trial.
            ias_path = os.path.join('aoi', ias)
            el_tracker.sendMessage('!V IAREA FILE %s' % ias_path)

            # pursuit start time
            movement_start = flip_time
        else:
            # save the Interest Area info following movement onset
            ia_pars = (-1 * round(
                (pre_frame_time - movement_start) * 1000), -1 * round(
                    (flip_time - movement_start) * 1000) + 1,
                       int(scnWidth / 2.0 + pre_x - ia_radius),
                       int(scnHeight / 2.0 - pre_y - ia_radius),
                       int(scnWidth / 2.0 + pre_x + ia_radius),
                       int(scnHeight / 2.0 - pre_y + ia_radius))

            ia_msg = '%d %d ELLIPSE 1 %d %d %d %d TARGET\n' % ia_pars
            ias_file.write(ia_msg)

            # log the target position after each screen refresh
            tar_pos = (tar_x + int(scnWidth / 2), int(scnHeight / 2) - tar_y)
            tar_pos_msg = '!V TARGET_POS target %d, %d 1 0' % tar_pos
            el_tracker.sendMessage(tar_pos_msg)

            # OPTIONAL - send over another message to request Data Viewer
            # to draw the pursuit target when visualizing the data
            el_tracker.sendMessage('!V CLEAR 128 128 128')
            tar_msg = '!V FIXPOINT 255 0 0 255 0 0 %d %d 50 50' % tar_pos
            el_tracker.sendMessage(tar_msg)

        # keep track of target position and frame timing
        pre_frame_time = flip_time
        pre_x = tar_x
        pre_y = tar_y

        # update target position and draw the target
        time_elapsed = flip_time - movement_start
        tar_x = amp_x * sin(2 * pi * freq_x * time_elapsed + phase_x)
        tar_y = amp_y * sin(2 * pi * freq_y * time_elapsed + phase_y)

        # check for time out
        if time_elapsed >= trial_dur:
            # send over a message to log movement offset
            el_tracker.sendMessage('TARGET_OFFSET')
            print(time_elapsed)
            break

    # clear the screen
    # clear_screen(win)
    win.color = (0, 0, 0)
    win.flip()
    el_tracker.sendMessage('black_screen')
    # send a message to clear the Data Viewer screen as well
    el_tracker.sendMessage('!V CLEAR 128 128 128')
    core.wait(0.5)

    # close the IAS file that contain the dynamic IA definition
    ias_file.close()

    # stop recording; add 100 msec to catch final events before stopping
    pylink.pumpDelay(100)
    el_tracker.stopRecording()

    # record trial variables to the EDF data file, for details, see Data
    # Viewer User Manual, "Protocol for EyeLink Data to Viewer Integration"
    # movement, dur, amp_x, amp_y, phase_x, phase_y, freq_x, freq_y
    el_tracker.sendMessage('!V TRIAL_VAR movement %s' % movement)
    el_tracker.sendMessage('!V TRIAL_VAR max_duration %d' %
                           int(trial_dur * 1000))
    el_tracker.sendMessage('!V TRIAL_VAR amp_x %.02f' % amp_x)
    pylink.msecDelay(4)  # take a break of 4 milliseconds
    el_tracker.sendMessage('!V TRIAL_VAR amp_y %.02f' % amp_y)
    el_tracker.sendMessage('!V TRIAL_VAR phase_x %.02f' % (phase_x / pi * 180))
    el_tracker.sendMessage('!V TRIAL_VAR phase_y %.02f' % (phase_y / pi * 180))
    pylink.msecDelay(4)  # take a break of 4 milliseconds
    el_tracker.sendMessage('!V TRIAL_VAR freq_x %.02f' % freq_x)
    el_tracker.sendMessage('!V TRIAL_VAR freq_y %.02f' % freq_y)

    # send a 'TRIAL_RESULT' message to mark the end of trial, see Data
    # Viewer User Manual, "Protocol for EyeLink Data to Viewer Integration"
    el_tracker.sendMessage('TRIAL_RESULT %d' % pylink.TRIAL_OK)
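The target path above is a standard Lissajous figure: each frame, the position is just the parametric sine pair evaluated at the elapsed time. A minimal sketch with hypothetical parameters:

from math import sin, pi

amp_x, amp_y = 300, 200                   # hypothetical amplitudes in pixels
freq_x, freq_y = 0.25, 0.5                # Hz
phase_x, phase_y = pi / 2, 0.0
for time_elapsed in (0.0, 0.5, 1.0):
    tar_x = amp_x * sin(2 * pi * freq_x * time_elapsed + phase_x)
    tar_y = amp_y * sin(2 * pi * freq_y * time_elapsed + phase_y)
    print('%.1f s -> (%.0f, %.0f)' % (time_elapsed, tar_x, tar_y))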
Example #39
    def run(self,
            confidence=False,
            end_on_response=True,
            catch=False,
            save_data=True):
        """
        Runs the trial

        Parameters
        ----------
        confidence: Indicates whether this is a confidence trial
        end_on_response: Ends the trial once they've made a response
        catch: Indicates whether this is a catch trial
        save_data: Whether to save the data

        Returns
        -------
        Subject's response - correct or incorrect

        """

        # Reset the clock
        self.task.clock.reset()

        continue_trial = True
        key_pressed = None
        detected = None

        current_temp = self.baseline_mean
        self.task.trigger_shock(
            self.baseline_mean)  # Start off with some stimulation

        # RUN THE TRIAL

        self.task.analogOutputTask.stop()

        while continue_trial:

            t = self.task.clock.getTime()  # get the time

            # Show fixation cross
            if t < self.fixation:

                # Baseline stimulation
                if self.give_baseline_stimulation:
                    current_temp = self.baseline_stimulation(
                        self.baseline_interval, self.baseline_mean,
                        self.baseline_sd, current_temp)

            # Ramp up / down visual and heat stimuli
            self.ramp_stumuli(t,
                              catch,
                              start=self.fixation,
                              start_temp=current_temp,
                              end_temp=current_temp,
                              max_voltage=self.max_voltage)

            # Get response
            if self.fixation + self.ramp_up + self.hold + self.ramp_down <= t < \
                    self.fixation + self.ramp_up + self.hold + self.ramp_down + \
                    self.task.config['durations']['response_time']:  # if we're after the stimulus presentation

                # Baseline stimulation
                if self.give_baseline_stimulation:
                    current_temp = self.baseline_stimulation(
                        self.baseline_interval, self.baseline_mean,
                        self.baseline_sd, current_temp)

                # Get keyboard events
                keys = self.task.keyboard.getEvents()

                # If no key is pressed yet, show fixation cross
                if not len(keys) and key_pressed is None:
                    self.task.fixation.draw()

                # Otherwise, show the confidence rating scale or just get responses and move on for binary trials
                else:

                    # Draw the confidence scale if we're on a confidence trial
                    if confidence:
                        self.task.confidence_scale.draw()

                    # Otherwise just show a fixation cross
                    else:
                        self.task.fixation.draw()

                    # Deal with keyboard presses
                    for key_event in keys:

                        # If the subject pressed a valid key
                        if key_event.type == EventConstants.KEYBOARD_PRESS and \
                                key_event.key in [self.task.pain_key, self.task.no_pain_key]:
                            key_pressed = (key_event.key, key_event.time)

                            # Get responses
                            if key_pressed[0] == self.task.pain_key:
                                self.pain_response = True
                            elif key_pressed[0] == self.task.no_pain_key:
                                self.pain_response = False

                            # End the trial if we're not getting confidence ratings
                            if not confidence and end_on_response:
                                self.task.win.flip()
                                core.wait(0.2)
                                continue_trial = False

                        # Once they release the key, end the trial after a short delay
                        elif key_event.type == EventConstants.KEYBOARD_RELEASE:

                            if end_on_response:
                                self.task.win.flip()
                                core.wait(0.2)
                                continue_trial = False

                    # Reset the confidence scale
                    if key_pressed is not None and confidence:
                        self.task.confidence_scale.fill_bars(
                            int(np.floor(core.getTime() - key_pressed[1])))

            # flip to draw everything
            self.task.win.flip()

            # End trial
            if t > self.fixation + self.ramp_up + self.hold + self.ramp_down + self.task.config[
                    'durations']['response_time']:
                continue_trial = False

            # If the trial has ended
            if not continue_trial:
                if confidence:
                    self.confidence_rating = self.task.confidence_scale.confidence_rating

                if self.pain_response and self.stimulation:
                    self.detected = True
                else:
                    self.detected = False
                if confidence:
                    self.task.confidence_scale.fill_bars(0)
                print("Trial done")

            # quit if the subject pressed escape
            if event.getKeys(["escape", "esc"]):
                core.quit()

        return self.detected
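
# A standalone sketch (durations are made up, not read from the task config)
# of the timing logic run() uses: the trial clock t is compared against the
# cumulative boundaries fixation -> ramp_up -> hold -> ramp_down -> response.
def phase_at(t, fixation=1.0, ramp_up=2.0, hold=3.0, ramp_down=2.0,
             response_time=3.0):
    """Return the name of the trial phase that time t falls in."""
    edge = 0.0
    for name, dur in [('fixation', fixation), ('ramp_up', ramp_up),
                      ('hold', hold), ('ramp_down', ramp_down),
                      ('response', response_time)]:
        edge += dur
        if t < edge:
            return name
    return 'done'

assert phase_at(0.5) == 'fixation'   # during the fixation cross
assert phase_at(8.5) == 'response'   # inside the response window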
Example #40
0
def fixation_trigger():
    """
    """
    fixation = visual.TextStim(win=win, text='+', height=50)
    # fx = visual.GratingStim(win=win, tex=None, mask='circle', size=25)
    fixation.draw()
    # fx.draw()
    win.flip()
    el_tracker = pylink.getEYELINK()

    el_tracker.sendCommand("record_status_message 'EVENT RETRIEVAL'")
    el_tracker.sendMessage('TRIALID')

    error = el_tracker.startRecording(1, 1, 1, 1)
    pylink.msecDelay(100)

    eye_used = el_tracker.eyeAvailable()

    if eye_used == 2:
        eye_used = 0

    # start_time_list = []
    # end_time_list = []
    fixation_time_list = []
    current_eye_pos = [100, 100]

    while True:
        ltype = el_tracker.getNextData()
        if ltype is None:
            pass
        if ltype == FIXUPDATE:
            # send a message to mark the arrival time of a fixation update event
            el_tracker.sendMessage('fixUpdate')
            ldata = el_tracker.getFloatData()
            if ldata.getEye() == eye_used:
                gaze_pos = ldata.getAverageGaze()
                # print(fixation.wrapWidth)
                current_eye_pos = [
                    gaze_pos[0] - scnWidth / 2, scnHeight / 2 - gaze_pos[1]
                ]
                print(current_eye_pos)
        if (-25 <= current_eye_pos[0] <= 25) and (-25 <= current_eye_pos[1] <=
                                                  25):
            fixation_time_list.append(core.getTime())
        else:
            # gaze left the central region; reset the dwell timer
            fixation_time_list = []
        if len(fixation_time_list) > 1:
            if fixation_time_list[-1] - fixation_time_list[0] > 1:
                print('1: ' + str(fixation_time_list[0]))
                print('2: ' + str(fixation_time_list[-1]))
                print('duration: ' +
                      str(fixation_time_list[-1] - fixation_time_list[0]))
                break

    # Step 6.7 stop recording
    el_tracker.stopRecording()

    # Step 6.8: send messages to register trial variables
    # Send over messages to record variables useful for analysis
    el_tracker().sendMessage("!V TRIAL_VAR trial")

    # Step 6.9: send TRIAL_RESULT to mark the end of a trial
    # send over a "TRIAL_RESULT" message for Data Viewer to segment the 'trials'
    el_tracker().sendMessage("TRIAL_RESULT 0")
Example #41
0
    background_color=[-1, -1, -1, 1],
    border_color=[-1, -1, 1, 1],
    border_stroke_width=4,
    textgrid_shape=[20, 4],  # 20 cols (20 chars wide)
    # by 4 rows (4 lines of text)
    pos=(0.0, -0.25),
    grid_color=(-1, 1, -1, 1))

textbox1.draw()
textbox2.draw()
demo_start = window.flip()

event.clearEvents()
last_attrib_change_time = demo_start
while True:
    if core.getTime() - last_attrib_change_time > 2.5:
        last_attrib_change_time = core.getTime()

    textbox1.draw()
    textbox2.draw()

    # Update the display to show any stim changes
    flip_time = window.flip()

    # End the test when a keyboard event is detected
    #
    kb_events = event.getKeys()
    if kb_events:
        break

core.quit()
Example #42
0
task = psychTask('test.yaml', 'test1')

task.writeToLog(task.toJSON())

# prepare to start
task.setupWindow()
task.presentTextToWindow(
    'Waiting for key to begin (or press 5)\nPress q to quit')
resp, task.startTime = task.waitForKeypress(task.trigger_key)
task.checkRespForQuitKey(resp)
event.clearEvents()

for trial in task.stimulusInfo:
    # wait for onset time
    while core.getTime() < trial['onset'] + task.startTime:
        key_response = event.getKeys(None, True)
        if len(key_response) == 0:
            continue
        for key, response_time in key_response:
            if task.quit_key == key:
                task.shutDownEarly()
            elif task.trigger_key == key:
                task.trigger_times.append(response_time - task.startTime)
                continue

    trial = task.presentTextTrial(trial)
    task.writeToLog(json.dumps(trial))
    task.alldata.append(trial)

task.writeToLog(json.dumps({'trigger_times': task.trigger_times}))
Example #43
0
    def _enterValidationSequence(self):
        val_results = dict(target_data=dict(), avg_err=0, min_err=1000,
                           max_err=-1000, status='PASSED', point_count=0,
                           ok_point_count=0)

        self._lastPenSample = None

        kb = self.io.devices.keyboard
        pen = self.io.devices.tablet

        self._positionGrid.randomize()

        pen.reporting = True
        for tp in self._positionGrid:
            self._targetStim.setPos(tp)
            self._targetStim.draw()
            targ_onset_time = self.win.flip()

            pen.clearEvents()

            val_sample_list = []

            while len(val_sample_list) < self.NUM_VALID_SAMPLES_PER_TARG:
                if core.getTime() - targ_onset_time > self.TARGET_TIMEOUT:
                    break
                self._targetStim.draw()

                samples = pen.getSamples()
                for s in samples:
                    spos = s.getPixPos(self.win)
                    if s.pressure > 0 and self._targetStim.contains(spos):
                        dx = math.fabs(tp[0] - spos[0])
                        dy = math.fabs(tp[1] - spos[1])
                        perr = math.sqrt(dx * dx + dy * dy)
                        val_sample_list.append((spos[0], spos[1], perr))
                    else:
                        val_sample_list = []

                if samples:
                    self._drawPenStim(samples[-1])
                    self._lastPenSample = samples[-1]
                elif self._lastPenSample:
                    self._drawPenStim(self._lastPenSample)
                self.win.flip()

            tp = int(tp[0]), int(tp[1])
            val_results['target_data'][tp] = None
            val_results['point_count'] = val_results['point_count'] + 1

            if val_sample_list:
                pos_acc_array = np.asarray(val_sample_list)
                serr_array = pos_acc_array[:, 2]

                targ_err_stats = val_results['target_data'][tp] = dict()
                targ_err_stats['samples'] = pos_acc_array
                targ_err_stats['count'] = len(val_sample_list)
                targ_err_stats['min'] = serr_array.min()
                targ_err_stats['max'] = serr_array.max()
                targ_err_stats['mean'] = serr_array.mean()
                targ_err_stats['median'] = np.median(serr_array)
                targ_err_stats['stdev'] = serr_array.std()

                val_results['min_err'] = min(
                    val_results['min_err'], targ_err_stats['min'])
                val_results['max_err'] = max(
                    val_results['max_err'], targ_err_stats['max'])

                val_results['avg_err'] = val_results[
                    'avg_err'] + targ_err_stats['mean']
                val_results['ok_point_count'] = val_results[
                    'ok_point_count'] + 1
            else:
                val_results['status'] = 'FAILED'

            self._lastPenSample = None

        if val_results['ok_point_count'] > 0:
            val_results['avg_err'] = val_results[
                'avg_err'] / val_results['ok_point_count']

        pen.reporting = False

        return val_results
Example #44
0
def run_trial(params):
    """ Run a single trial

    params: a list containing trial parameters, e.g.,
            ['red',   'red',   'left',  'cong']"""

    # Unpacking the parameters
    text, text_color, correct_answer, congruency = params

    # Prepare the stimuli
    word = visual.TextStim(win=win,
                           text=text,
                           font='Arial',
                           height=100.0,
                           color=text_color)

    # Take the tracker offline
    tk.setOfflineMode()
    pylink.msecDelay(50)

    # Send a "TRIALID" message to mark the start of a trial
    tk.sendMessage("TRIALID %s %s %s" % (text, text_color, congruency))

    # Record_status_message : show some info on the Host PC
    msg = "record_status_message 'word: %s, color: %s'" % (text, text_color)
    tk.sendCommand(msg)

    # Drift check/correction, params, x, y, draw_target, allow_setup
    try:
        tk.doDriftCorrect(int(SCN_WIDTH / 2), int(SCN_HEIGHT / 2), 1, 1)
    except:
        tk.doTrackerSetup()

    # Start recording; params: sample_in_file, event_in_file,
    # sample_over_link, event_over_link (1-yes, 0-no)
    tk.startRecording(1, 1, 1, 1)
    # wait for 100 ms to cache some samples
    pylink.msecDelay(100)

    # Draw the target word on the screen
    word.draw()
    win.flip()
    # Record the onset time of the stimuli
    tar_onset = core.getTime()
    # Send a message to mark the onset of visual stimuli
    tk.sendMessage("stim_onset")

    # Save a screenshot to use as background graphics in Data Viewer
    if not os.path.exists('screenshots'):
        os.mkdir('screenshots')
    screenshot = 'screenshots/cond_%s_%s.jpg' % (text, text_color)
    win.getMovieFrame()
    win.saveMovieFrames(screenshot)

    # The command we used to take screenshots takes time to return;
    # we need to provide a "time offset" in the IMGLOAD message, so
    # Data Viewer knows the correct onset time of the screen
    msg_offset = int((core.getTime() - tar_onset) * 1000)
    # Send an IMGLOAD message to let DV know which screenshot to load
    path_to_scnshot = '..' + os.sep + screenshot
    tk.sendMessage('%d !V IMGLOAD FILL %s' % (msg_offset, path_to_scnshot))

    # Clear buffered events (in PsychoPy), then wait for key presses
    event.clearEvents(eventType='keyboard')
    gotKey = False
    key_pressed, RT, ACC = ['None', 'None', 'None']
    while not gotKey:
        keyp = event.getKeys(['left', 'right', 'escape'])
        if len(keyp) > 0:
            key_pressed = keyp[0]  # which key was pressed
            RT = core.getTime() - tar_onset  # response time
            # correct=1, incorrect=0
            ACC = int(key_pressed == correct_answer)

            # Terminate the task if ESCAPE is pressed
            if key_pressed == 'escape':
                tk.stopRecording()
                tk.close()
                core.quit()

            # Send a message to mark the key response
            tk.sendMessage("Key_resp %s" % key_pressed)
            gotKey = True

    # Clear the window at the end of the trial
    win.color = (0, 0, 0)
    win.flip()

    # Stop recording
    tk.stopRecording()

    # Send trial variables to record in the EDF data file
    tk.sendMessage("!V TRIAL_VAR word %s" % text)
    tk.sendMessage("!V TRIAL_VAR color %s" % text_color)
    tk.sendMessage("!V TRIAL_VAR congruency %s" % congruency)
    tk.sendMessage("!V TRIAL_VAR key_pressed %s" % key_pressed)
    tk.sendMessage("!V TRIAL_VAR RT %d" % RT)
    tk.sendMessage("!V TRIAL_VAR ACC %d" % ACC)

    # Send a 'TRIAL_RESULT' message to mark the end of trial
    tk.sendMessage("TRIAL_RESULT %d" % ACC)
Example #45
0
    autoLog=False,
    wrapWidth=display_resolution[0] * .9)
message2 = visual.TextStim(win,
                           pos=(0.0, -(display_resolution[1] / 4)),
                           alignHoriz='center',
                           alignVert='center',
                           height=40,
                           text='Press Any Key to Quit.',
                           autoLog=False,
                           wrapWidth=display_resolution[0] * .9)

last_wheelPosY = 0

io.clearEvents('all')

demo_timeout_start = core.getTime()
# Run the example until a keyboard event is received.

kb_events = None
while not kb_events:
    # Get the current mouse position
    # posDelta is the change in position *since the last call*
    position, posDelta = mouse.getPositionAndDelta()
    mouse_dX, mouse_dY = posDelta

    # Get the current state of each of the Mouse Buttons
    left_button, middle_button, right_button = mouse.getCurrentButtonStates()

    # If the left button is pressed, change the grating's spatial frequency
    if left_button:
        grating.setSF(mouse_dX / 5000.0, '+')
Example #46
0
        if pressed_buttons:
            grating.setColor('red')
        else:
            grating.setColor('white')

        if 'A' in pressed_buttons:
            # Rumble the pad, 50% low frequency motor, 25% high frequency
            # motor, for 1 second. Method is asynchronous, in that it returns
            # as soon as the ioHub Server has responded that the rumble request
            # was received and started.
            rt, rd = gamepad.setRumble(50.0, 25.0, 1.0)
            rumble_command_time, rumble_command_duration = rt, rd

        # Drift the grating
        t = core.getTime()
        grating.setPhase(t * 2)

        grating.draw()
        fixSpot.draw()
        message.draw()
        flip_time = win.flip()

        _kbe = keyboard.getEvents(EventConstants.KEYBOARD_PRESS)
        key_presses = [event.key for event in _kbe]

        # Do this each frame to avoid keyboard event buffer
        #   filling with non-press event types.
        io.clearEvents('all')

io.quit()
Example #47
0
    missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
    missing_gpos_str += 'Press space key to start next trial.'
    text_stim = visual.TextStim(win,
                                text=text_stim_str,
                                pos=[0, 0],
                                height=24,
                                color='black',
                                wrapWidth=win.size[0] * .9)

    # Run Trials.....
    t = 0
    while t < TRIAL_COUNT:
        io.clearEvents()
        tracker.setRecordingState(True)
        run_trial = True
        tstart_time = core.getTime()
        while run_trial is True:
            # Get the latest gaze position in display coord space.
            gpos = tracker.getLastGazePosition()
            #print("gpos:",gpos)
            # Update stim based on gaze position
            valid_gaze_pos = isinstance(gpos, (tuple, list))
            gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos)
            if valid_gaze_pos:
                # If we have a gaze position from the tracker, update gc stim
                # and text stim.
                if gaze_in_region:
                    gaze_in_region = 'Yes'
                else:
                    gaze_in_region = 'No'
                text_stim.text = text_stim_str % (gpos[0], gpos[1],
Example #48
0
    output_file.write('\n')


if __name__ == '__main__':
    # Select the hdf5 file to process.
    data_file_path = displayDataFileSelectionDialog(starting_dir=os.path.join(
        module_directory(writeOutputFileHeader), 'results'))
    if data_file_path is None:
        print("File Selection Cancelled, exiting...")
        sys.exit(0)

    dpath, dfile = os.path.split(data_file_path)

    # Let's time how long it takes to read and save to .txt format
    #
    start_time = getTime()

    # Create an instance of the ExperimentDataAccessUtility class
    # for the selected DataStore file. This allows us to access data
    # in the file based on Device Event names and attributes, as well
    # as access the experiment session metadata saved with each session run.
    dataAccessUtil = ExperimentDataAccessUtility(dpath,
                                                 dfile,
                                                 experimentCode=None,
                                                 sessionCodes=[])

    duration = getTime() - start_time

    dvs_selected = displayTimeRangeVariableSelectionDialog(dataAccessUtil)

    # restart processing time calculation...
Example #49
0
    def __init__(self,
                 file,
                 lang='en-US',
                 timeout=10,
                 samplingrate=16000,
                 flac_exe='C:\\Program Files\\FLAC\\flac.exe',
                 pro_filter=2,
                 quiet=True):
        """
            :Parameters:

                `file` : <required>
                    name of the speech file (.flac, .wav, or .spx) to process. wav files will be
                    converted to flac, and for this to work you need to have flac (as an
                    executable). spx format is speex-with-headerbyte (for google).
                `lang` :
                    the presumed language of the speaker, as a locale code; default 'en-US'
                `timeout` :
                    seconds to wait before giving up, default 10
                `samplingrate` :
                    the sampling rate of the speech clip in Hz, either 16000 or 8000. You can
                    record at a higher rate, and then down-sample to 16000 for speech
                    recognition. `file` is the down-sampled file, not the original.
                `flac_exe` :
                    **Windows only**: path to binary for converting wav to flac;
                    must be a string with **two back-slashes where you want one** to appear
                    (this does not display correctly above, in the web documentation auto-build);
                    default is 'C:\\\\\\\\Program Files\\\\\\\\FLAC\\\\\\\\flac.exe'
                `pro_filter` :
                    profanity filter level; default 2 (e.g., f***)
                `quiet` :
                    no reporting intermediate details; default `True` (non-verbose)

        """
        # set up some key parameters:
        results = 5  # how many words wanted
        self.timeout = timeout
        useragent = PSYCHOPY_USERAGENT
        host = "www.google.com/speech-api/v1/recognize"
        if sys.platform == 'win32':
            FLAC_PATH = flac_exe
        else:
            # best not to do every time
            FLAC_PATH, _ = core.shellCall(['/usr/bin/which', 'flac'],
                                          stderr=True)

        # determine file type, convert wav to flac if needed:
        ext = os.path.splitext(file)[1]
        if not os.path.isfile(file):
            raise IOError("Cannot find file: %s" % file)
        if ext not in ['.flac', '.spx', '.wav']:
            raise SoundFormatNotSupported("Unsupported filetype: %s\n" % ext)
        self.file = file
        if ext == ".flac":
            filetype = "x-flac"
        elif ext == ".spx":
            filetype = "x-speex-with-header-byte"
        elif ext == ".wav":  # convert to .flac
            if not os.path.isfile(FLAC_PATH):
                sys.exit("failed to find flac")
            filetype = "x-flac"
            tmp = 'tmp_guess%.6f' % core.getTime() + '.flac'
            flac_cmd = [
                FLAC_PATH, "-8", "-f", "--totally-silent", "-o", tmp, file
            ]
            _, se = core.shellCall(flac_cmd, stderr=True)
            if se: logging.warn(se)
            while not os.path.isfile(tmp):  # just try again
                # ~2% incidence when recording for 1s, 650+ trials
                # never got two in a row; core.wait() does not help
                logging.warn('Failed to convert to tmp.flac; trying again')
                _, se = core.shellCall(flac_cmd, stderr=True)
                if se: logging.warn(se)
            file = tmp  # note to self: ugly & confusing to switch up like this
        logging.info("Loading: %s as %s, audio/%s" %
                     (self.file, lang, filetype))
        try:
            c = 0  # occasional error; core.wait(.1) is not always enough; better slow than fail
            while not os.path.isfile(file) and c < 10:
                core.wait(.1, hogCPUperiod=0)
                c += 1
            audio = open(file, 'r+b').read()
        except:
            msg = "Can't read file %s from %s.\n" % (file, self.file)
            logging.error(msg)
            raise SoundFileError(msg)
        finally:
            try:
                os.remove(tmp)
            except:
                pass

        # urllib2 makes no attempt to validate the server certificate. here's an idea:
        # http://thejosephturner.com/blog/2011/03/19/https-certificate-verification-in-python-with-urllib2/
        # set up the https request:
        url = 'https://' + host + '?xjerr=1&' +\
              'client=psychopy2&' +\
              'lang=' + lang +'&'\
              'pfilter=%d' % pro_filter + '&'\
              'maxresults=%d' % results
        header = {
            'Content-Type': 'audio/%s; rate=%d' % (filetype, samplingrate),
            'User-Agent': useragent
        }
        try:
            self.request = urllib2.Request(url, audio, header)
        except:  # try again before accepting defeat
            logging.info("https request failed. %s, %s. trying again..." %
                         (file, self.file))
            core.wait(0.2, hogCPUperiod=0)
            self.request = urllib2.Request(url, audio, header)
Example #50
0
def runTrial(params, expInfo):
    """ 
    pars should be a list, like ['red',   'red',   'left',  'cong']
    dataFile is an opened csv file used to store our data.
    """
    # unpacking the parameters
    text, textColor, correctAnswer, congruency = params

    # prepare the stimuli
    word = visual.TextStim(win=win,
                           text=text,
                           font='Arial',
                           height=100.0,
                           color=textColor)
    w, h = word.boundingBox

    # flush cached button presses (eyelink)
    tk.flushKeybuttons(0)
    tk.setOfflineMode()
    pylink.msecDelay(50)

    # OPTIONAL-- draw the text on the Host screen and show the bounding box
    tk.sendCommand('clear_screen 0')  # clear the host Display first
    tk.sendCommand('draw_text %d %d 6 %s' %
                   (scnWidth / 2, scnHeight / 2, text))
    tk.sendCommand('draw_box %d %d %d %d 6' %
                   (scnWidth / 2 - w / 2, scnHeight / 2 - h / 2,
                    scnWidth / 2 + w / 2, scnHeight / 2 + h / 2))

    # log trial onset message
    tk.sendMessage("TRIALID %s %s %s" % (text, textColor, congruency))

    # record_status_message : show some info on the host PC
    tk.sendCommand("record_status_message 'congruency: %s'" % congruency)

    #Optional - start realtime mode
    pylink.beginRealTimeMode(100)

    # do driftcheck
    try:
        error = tk.doDriftCorrect(scnWidth / 2, scnHeight / 2, 1, 1)
    except:
        tk.doTrackerSetup()

    # start recording, parameters specify whether events and samples are
    # stored in file, and available over the link
    tk.startRecording(1, 1, 1, 1)
    pylink.msecDelay(50)

    # Clear buffered events (in PsychoPy)
    event.clearEvents(eventType='keyboard')

    # draw the target word on display
    word.draw()
    win.flip()
    tk.sendMessage("SYNCTIME")  # message to mark the onset of visual stimuli

    # save a screenshot so we can use it in Data Viewer to superimpose the gaze
    if not os.path.exists('screenshotFolder'): os.mkdir('screenshotFolder')
    screenshot = 'screenshotFolder' + os.sep + 'cond_%s_%s.jpg' % (text,
                                                                   textColor)
    win.getMovieFrame()
    win.saveMovieFrames(screenshot)

    # send a Data Viewer integration message here, so DV knows which screenshot to load
    tk.sendMessage('!V IMGLOAD FILL %s' % ('..' + os.sep + screenshot))

    # check for response & time out
    gotKey = False
    timeOut = False
    tStart = core.getTime()
    subjResp = ['None', 'None']
    while not (gotKey or timeOut):
        # check for time out
        tNow = core.getTime()
        if tNow - tStart >= 10.0: timeOut = True

        # check for key presses
        keyPressed = event.getKeys(['left', 'right', 'down', 'escape'])
        if len(keyPressed) > 0:
            if 'escape' in keyPressed:
                tk.sendMessage("Quit")
                win.close()
                core.quit()  # terminate the task if ESCAPE is pressed
            else:
                subjResp = [keyPressed[0], tNow]
                tk.sendMessage("RESPONSE %s" % (keyPressed[0]))
                gotKey = True

    # clear the subject display
    win.color = 'black'
    win.flip()
    # clear the host Display
    tk.sendCommand('clear_screen 0')

    # was the subject's response 'correct'?
    if subjResp[0] == correctAnswer:
        respAcc = 1
    else:
        respAcc = 0

    # OPTIONAL-- set an Interest Area for Data Viewer integration
    # a full list of Data Viewer integration messages and their syntax can be found in the Data Viewer Manual
    # (Help menu -> Contents -> Protocol for EyeLink Data To Viewer Integration).
    tk.sendMessage("!V IAREA RECTANGLE 1 %d %d %d %d target" %
                   (scnWidth / 2 - w / 2, scnHeight / 2 - h / 2,
                    scnWidth / 2 + w / 2, scnHeight / 2 + h / 2))

    # EyeLink - Send Trialvar messages for Data Viewer integration
    # a full list of Data Viewer integration messages and their syntax can be found in the Data Viewer Manual
    # (Help menu -> Contents -> Protocol for EyeLink Data To Viewer Integration).
    tk.sendMessage("!V TRIAL_VAR word %s" % (text))
    tk.sendMessage("!V TRIAL_VAR color %s" % (textColor))
    tk.sendMessage("!V TRIAL_VAR congruency %s" % (congruency))
    tk.sendMessage("!V TRIAL_VAR respAcc %d" % (respAcc))

    # Optional-- end realtime mode
    pylink.endRealTimeMode()
    pylink.msecDelay(100)

    # send a message to mark the end of trial
    tk.sendMessage("TRIAL_RESULT %d" % (respAcc))
    pylink.msecDelay(100)

    # EyeLink - stop recording eye data
    tk.stopRecording()
    pylink.msecDelay(50)
    tk.setOfflineMode()
Example #51
0
 def reset(self):
     """Restores to fresh state, ready to record again"""
     logging.exp('%s: resetting at %.3f' % (self.loggingId, core.getTime()))
     self.__init__(name=self.name, saveDir=self.saveDir)
Example #52
0
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "interval"-------
for thisComponent in intervalComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)

# ------Prepare to start Routine "CE"-------
continueRoutine = True
# update component parameters for each repeat
# setup some python lists for storing info about the mouse_CE
mouse_CE.clicked_name = []
gotValidClick = False  # until a click is received
slider.reset()
last = core.getTime()
# keep track of which components have finished
CEComponents = [
    interface_CE, mouse_CE, buttonOK_CE, slider, down, up, text_Aup_CE,
    text_Adown_CE, text_Bmiddle_CE, text_Bup_CE, text_Bdown_CE
]
for thisComponent in CEComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Example #53
0
    def detect_saccade(self,
                       algorithm_type='velocity',
                       threshold=0.25,
                       direction=None,
                       fixation_position=None,
                       max_time=1.0):
        """
        detect_saccade tries to detect a saccade based on position (needs the
        fixation_position argument) or velocity (optionally primed with the
        direction argument, a vector giving the predicted direction of the
        impending saccade) information. detect_saccade looks for a saccade
        between call_time (= now) and call_time + max_time.
        """
        no_saccade = True
        start_time = core.getTime()
        if algorithm_type == 'velocity':
            sample_array = np.zeros((int(max_time * self.sample_rate), 2),
                                    dtype=np.float32)
            velocity_array = np.zeros((int(max_time * self.sample_rate), 2),
                                      dtype=np.float32)
            f = np.array([1, 1, 2, 3], dtype=np.float32) / 7.0
            nr_samples = 1
            sample_array[0, :] = self.eye_pos()
            velocity_array[0, :] = 0.001, 0.001
            if direction is not None:  # make direction a unit vector if it is an argument to this function
                direction = direction / np.linalg.norm(direction)

            while no_saccade:
                saccade_polling_time = core.getTime()
                sample_array[nr_samples][:] = self.eye_pos()
                if (sample_array[nr_samples - 1][0] !=
                        sample_array[nr_samples][0]) or (
                            sample_array[nr_samples - 1][1] !=
                            sample_array[nr_samples][1]):
                    velocity_array[nr_samples][:] = sample_array[
                        nr_samples][:] - sample_array[nr_samples - 1][:]
                    if nr_samples > 3:
                        # scale velocities according to x and y median-based standard deviations, as in engbert & mergenthaler, 2006
                        med_scaled_velocity = velocity_array[:nr_samples] / np.mean(
                            np.sqrt(((velocity_array[:nr_samples] - np.median(
                                velocity_array[:nr_samples], axis=0))**2)),
                            axis=0)
                        if direction is not None:
                            # scale the velocity array according to the direction in the direction argument before thresholding
                            # assuming direction is a x,y unit vector specifying the expected direction of the impending saccade
                            if np.inner(med_scaled_velocity[-1],
                                        direction) > threshold:
                                no_saccade = False
                        if np.linalg.norm(med_scaled_velocity[-1]) > threshold:
                            no_saccade = False
                    nr_samples += 1
                if (saccade_polling_time - start_time) > max_time:
                    no_saccade = False

        if algorithm_type == 'position' or not self.tracker:
            if fixation_position is None:
                fixation_position = np.array(self.eye_pos())
            while no_saccade:
                saccade_polling_time = core.getTime()
                ep = np.array(self.eye_pos())
                #		print ep, fixation_position, threshold, np.linalg.norm(ep - fixation_position) / self.pixels_per_degree
                if (np.linalg.norm(ep - fixation_position) /
                        self.pixels_per_degree) > threshold:
                    # eye position is outside the safe zone surrounding fixation - swap the buffers to change saccade target position
                    no_saccade = False
        #			print '\n'
                if (saccade_polling_time - start_time) > max_time:
                    no_saccade = False

        if algorithm_type == 'eyelink':
            while no_saccade:
                self.tracker.wait_for_saccade_start()
                saccade_polling_time = core.getTime()
                # ev =
                # if ev == 5: # start of a saccade
                # 	no_saccade = False
                # if ( saccade_polling_time - start_time ) > max_time:
                # 	no_saccade = False

        return saccade_polling_time
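
# A standalone toy check (made-up samples) of the median-scaled velocity
# test used in the 'velocity' branch above: velocities are divided by a
# median-based spread estimate and a saccade is flagged once the scaled
# speed of the newest sample exceeds the threshold.
import numpy as np
vel = np.array([[0.10, 0.00], [0.12, 0.01], [0.11, -0.01], [3.0, 2.0]])
spread = np.mean(np.sqrt((vel - np.median(vel, axis=0)) ** 2), axis=0)
scaled = vel / spread
print(np.linalg.norm(scaled[-1]) > 0.25)   # True -> saccade detected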
Example #54
0
def run_trial(params):
    """ Run a single trial

    params: a list containing trial parameters, e.g.,
            ['red',   'red',   'left',  'cong']"""

    # Unpacking the parameters
    text, text_color, correct_answer, congruency = params

    # Prepare the stimuli
    word = visual.TextStim(win=win,
                           text=text,
                           font='Arial',
                           height=100.0,
                           color=text_color)

    # Take the tracker offline
    tk.setOfflineMode()

    # Send a "TRIALID" message to mark the start of a trial
    tk.sendMessage(f"TRIALID {text} {text_color} {congruency}")

    # Record_status_message : show some info on the Host PC
    msg = f"record_status_message 'Congruency-{congruency}'"
    tk.sendCommand(msg)

    # Drift check/correction, params, x, y, draw_target, allow_setup
    tk.doDriftCorrect(int(SCN_W / 2), int(SCN_H / 2), 1, 1)

    # Put the tracker in idle mode before we start recording
    tk.setOfflineMode()

    # Start recording
    # params: file_sample, file_event, link_sample, link_event (1-yes, 0-no)
    tk.startRecording(1, 1, 1, 1)

    # Wait for 100 ms to cache some samples
    pylink.msecDelay(100)

    # Draw the target word on the screen
    word.draw()
    win.flip()
    # Record the onset time of the stimuli
    tar_onset = core.getTime()
    # Send a message to mark the onset of visual stimuli
    tk.sendMessage("stim_onset")

    # Save a screenshot to use as background graphics in Data Viewer
    if not os.path.exists('screenshots'):
        os.mkdir('screenshots')
    screenshot = f'screenshots/cond_{text}_{text_color}.jpg'
    win.getMovieFrame()
    win.saveMovieFrames(screenshot)

    # The command we used to take screenshots takes time to return
    # we need to provide a "time offset" in the IMGLOAD message, so
    # Data Viewer knows the correct onset time of the screen
    msg_offset = int((core.getTime() - tar_onset) * 1000)
    # Send an IMGLOAD message to let DV know which screenshot to load
    scn_shot = '../' + screenshot
    tk.sendMessage(f'{msg_offset} !V IMGLOAD FILL {scn_shot}')

    # Clear buffered events (in PsychoPy), then wait for key presses
    event.clearEvents(eventType='keyboard')
    gotKey = False
    key_pressed, RT, ACC = ['None', 'None', 'None']
    while not gotKey:
        keyp = event.getKeys(['left', 'right', 'escape'])
        if len(keyp) > 0:
            key_pressed = keyp[0]  # which key was pressed
            RT = core.getTime() - tar_onset  # response time
            # correct=1, incorrect=0
            ACC = int(key_pressed == correct_answer)

            # Send a message to mark the key response
            tk.sendMessage(f"Key_resp {key_pressed}")
            gotKey = True

    # Clear the window at the end of the trial
    win.color = (0, 0, 0)
    win.flip()

    # Stop recording
    tk.stopRecording()

    # Send trial variables to record in the EDF data file
    tk.sendMessage(f"!V TRIAL_VAR word {text}")
    tk.sendMessage(f"!V TRIAL_VAR color {text_color}")
    tk.sendMessage(f"!V TRIAL_VAR congruency {congruency}")
    pylink.pumpDelay(2)  # give the link a break
    tk.sendMessage(f"!V TRIAL_VAR key_pressed {key_pressed}")
    tk.sendMessage(f"!V TRIAL_VAR RT {round(RT * 1000)}")
    tk.sendMessage(f"!V TRIAL_VAR ACC {ACC}")

    # Send a 'TRIAL_RESULT' message to mark the end of trial
    tk.sendMessage(f"TRIAL_RESULT {ACC}")
Example #55
0
 def play_sound(self, sound_index='1'):
     """docstring for play_sound"""
     super(EyelinkSession, self).play_sound(sound_index=sound_index)
     if self.tracker != None:
         self.tracker.log('sound ' + str(sound_index) + ' at ' +
                          str(core.getTime()))
Example #56
0
    #Announce the sentence
# sentenceText = fullSentence
# text = visual.TextStim(win, sentenceText, pos=(0, 0), units = 'pix')
# text.draw()
# win.flip()
# core.wait(1)
# k = event.waitKeys()

    keystext = "PRESS 'escape' to Quit.\n"
    text = visual.TextStim(win, keystext, pos=(0, -250), units='pix')

    #Only draw more than 1 frame if this is a video "OFF" trial
    firstFrame = 1

    movStart = core.getTime()
    while core.getTime() - movStart < soundDur + .1:  # mov.status != visual.FINISHED
        if firstFrame == 1:
            mov.draw()
            text.draw()
            win.flip()
            audioSound.play()
            movStart = core.getTime()
            firstFrame = 0
        else:
            if vidSwitch[trial] == 'AV' or test:
                mov.draw()
                text.draw()
                win.flip()
        # Check for action keys.....
Example #57
0
def wait_after_resp(start_time, wait_time):
    if core.getTime() - start_time < wait_time:
        core.wait(wait_time - (core.getTime() - start_time))
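
# A usage sketch (made-up 1.5 s window; assumes a PsychoPy window is open so
# key events arrive): pad out a fixed response period no matter how quickly
# the key press came in.
from psychopy import core, event
t0 = core.getTime()
keys = event.waitKeys(keyList=['left', 'right'])
wait_after_resp(t0, 1.5)   # sleeps for whatever remains of the 1.5 s window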
Example #58
0
# Setting initial numbers
item = 0
pre_key = []
expStatus = 1
response = []


# Start experiment ----
# Greeting page
img = visual.ImageStim(win = my_win, image = img_start, 
                       units = 'pix')
img.draw()
my_win.flip()
core.wait(2)

stimuli_time = core.getTime()

# Trials
while expStatus == 1:

    img = visual.ImageStim(win = my_win, 
                           image = imageLUT[stimulus_seq[item]]['path'],
                           units = 'pix')
    img.draw()
    my_win.flip()

    # Get response
    response_hw, response_key, response_status = getAnything(mouse, joy)
    
    # if clicks != pre_mouse and response_status == 1:
    if response_status == 1 and response_key != pre_key:
Example #59
0
def half_long_new(move_pars, trial_index):
    """
    Run a half-range or long (full-range) target movement.
    :param move_pars: a list of trial parameters, e.g.,
                [movement, start_x, start_y, end_x, end_y]
    :param trial_index: record the order of trial presentation in the task
    :return:
    """
    movement, start_x, start_y, end_x, end_y = move_pars
    x_length = end_x - start_x
    y_length = end_y - start_y

    # get a reference to the currently active EyeLink connection
    el_tracker = pylink.getEYELINK()

    # put the tracker in the offline mode first
    el_tracker.setOfflineMode()

    # send a 'TRIALID' message to mark the start of a trial
    el_tracker.sendMessage('TRIALID %d' % trial_index)

    # record_status_message : show some info on the Host PC
    # here we show how many trials have been tested
    status_msg = 'TRIAL number %d, %s' % (trial_index, movement)
    el_tracker.sendCommand("record_status_message '%s'" % status_msg)

    # draw a reference grid on the Host PC screen
    # For details, See section 25.7 'Drawing Commands' in the
    # EyeLink Programmers Guide manual
    line_hor = (scnWidth / 2.0 - start_x, scnHeight / 2.0,
                scnWidth / 2.0 + start_x, scnHeight / 2.0)
    line_ver = (scnWidth / 2.0, scnHeight / 2.0 - start_y, scnWidth / 2.0,
                scnHeight / 2.0 + start_y)
    el_tracker.sendCommand('clear_screen 0')  # clear the host Display
    el_tracker.sendCommand('draw_line %d %d %d %d 15' % line_hor)
    el_tracker.sendCommand('draw_line %d %d %d %d 15' % line_ver)

    # put tracker in idle/offline mode before recording
    el_tracker.setOfflineMode()

    # Start recording
    # arguments: sample_to_file, events_to_file, sample_over_link,
    # event_over_link (1-yes, 0-no)
    try:
        el_tracker.startRecording(1, 1, 1, 1)
    except RuntimeError as error:
        print("ERROR:", error)
        abort_trial()
        return pylink.TRIAL_ERROR

    # Allocate some time for the tracker to cache some samples
    pylink.pumpDelay(100)

    # Send a message to clear the Data Viewer screen, get it ready for
    # drawing the pictures during visualization
    bgcolor_RGB = (116, 116, 116)
    el_tracker.sendMessage('!V CLEAR %d %d %d' % bgcolor_RGB)

    # open an INTEREST AREA SET file to make a dynamic IA for the target
    ias = 'IA_%d.ias' % trial_index
    ias_file = open(os.path.join(aoi_folder, ias), 'w')

    # ia_radius = 60  # radius of the elliptical IA
    frame_num = 0  # keep track of the frames displayed

    # use a fixation trigger when not in dummy mode
    if not dummy_mode:
        fixation = visual.TextStim(win=win, text='+', height=50)
        fixation.draw()
        win.flip()
        el_tracker.sendMessage("FIXATION_TRIGGER")

        eye_used = el_tracker.eyeAvailable()
        if eye_used == 2:
            eye_used = 0

        fixation_time_list = []
        current_eye_pos = [100, 100]

        while True:
            ltype = el_tracker.getNextData()
            if ltype is None:
                pass
            if ltype == FIXUPDATE:
                # send a message to mark the arrival time of a fixation update event
                el_tracker.sendMessage('fixUpdate')
                ldata = el_tracker.getFloatData()
                if ldata.getEye() == eye_used:
                    gaze_pos = ldata.getAverageGaze()
                    current_eye_pos = [
                        gaze_pos[0] - scnWidth / 2, scnHeight / 2 - gaze_pos[1]
                    ]
            if (-25 <= current_eye_pos[0] <= 25) and (-25 <= current_eye_pos[1]
                                                      <= 25):
                fixation_time_list.append(core.getTime())
            else:
                fixation_time_list = []
            if len(fixation_time_list) > 1:
                # if fixation duration > 300 ms, break
                if fixation_time_list[-1] - fixation_time_list[0] > 0.3:
                    break

    tar_x, tar_y = start_x, start_y
    target.pos = (tar_x, tar_y)
    target.draw()
    win.flip()
    el_tracker.sendMessage('TARGET_WAIT')
    core.wait(0.5)  # wait 500 ms

    pursuitClock.reset()
    time_elapsed = 0

    while True:
        # abort the current trial if the tracker is no longer recording
        error = el_tracker.isRecording()
        if error is not pylink.TRIAL_OK:
            el_tracker.sendMessage('tracker_disconnected')
            abort_trial()
            return error

        frame_num += 1
        flip_time = pursuitClock.getTime()
        # flip_time = core.getTime()
        print('flip_time_a: ' + str(flip_time))

        if frame_num == 1:
            # send a message to mark movement onset
            el_tracker.sendMessage('TARGET_ONSET')

            # record a message to let Data Viewer know where to find
            # the dynamic IA file for the current trial.
            ias_path = os.path.join('aoi', ias)
            el_tracker.sendMessage('!V IAREA FILE %s' % ias_path)

            # pursuit start time
            movement_start = flip_time
            # print('start time ' + str(movement_start))
        else:
            # save the Interest Area info following movement onset
            ia_pars = (-1 * round(
                (pre_frame_time - movement_start) * 1000), -1 * round(
                    (flip_time - movement_start) * 1000) + 1,
                       int(scnWidth / 2.0 + pre_x - ia_radius),
                       int(scnHeight / 2.0 - pre_y - ia_radius),
                       int(scnWidth / 2.0 + pre_x + ia_radius),
                       int(scnHeight / 2.0 - pre_y + ia_radius))

            ia_msg = '%d %d ELLIPSE 1 %d %d %d %d TARGET\n' % ia_pars
            ias_file.write(ia_msg)

            # log the target position after each screen refresh
            tar_pos = (tar_x + int(scnWidth / 2), int(scnHeight / 2) - tar_y)
            tar_pos_msg = '!V TARGET_POS target %d, %d 1 0' % tar_pos
            el_tracker.sendMessage(tar_pos_msg)

            # OPTIONAL - send over another message to request Data Viewer
            # to draw the pursuit target when visualizing the data
            el_tracker.sendMessage('!V CLEAR 128 128 128')
            tar_msg = '!V FIXPOINT 255 0 0 255 0 0 %d %d 50 50' % tar_pos
            el_tracker.sendMessage(tar_msg)

        # keep track of target position and frame timing
        pre_frame_time = flip_time
        pre_x = tar_x
        pre_y = tar_y

        time_elapsed = flip_time - movement_start

        if movement.startswith('Vertical'):
            # Half: ball moves from top to center OR from center to bottom
            # Full: ball moves from top to bottom
            if y_length < 0:
                tar_y -= hl_speed
                if tar_y <= end_y:  # break out of the loop once the end point is reached
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
            # Half: ball moves from bottom to center OR from center to top
            # Full: ball moves from bottom to top
            elif y_length > 0:
                tar_y += hl_speed
                if tar_y >= end_y:  # break out of the loop once the end point is reached
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
        elif movement.startswith('Horizontal'):
            # Half: ball moves from right to center OR from center to left
            # Full: ball moves from right to left
            if x_length < 0:
                tar_x -= hl_speed
                if tar_x <= end_x:  # break out of the loop once the end point is reached
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
            # Half: ball moves from left to center OR from center to right
            # Full: ball moves from left to right
            elif x_length > 0:
                tar_x += hl_speed
                if tar_x >= end_x:  # break out of the loop once the end point is reached
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
        elif movement.startswith('Tilt'):
            # x_length < 0 and y_length < 0
            # Half covers two cases:
            # 1. ball from top-right to center
            # 2. ball from center to bottom-left
            # Full: ball from top-right to bottom-left
            if x_length < 0 and y_length < 0:
                tar_x -= hl_speed / 1.4
                tar_y -= hl_speed / 1.4
                if tar_x <= end_x or tar_y <= end_y:  # break once x or y reaches the end point
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
            # x_length > 0 and y_length < 0
            # Half covers two cases:
            # 1. ball from top-left to center
            # 2. ball from center to bottom-right
            # Full: ball from top-left to bottom-right
            elif x_length > 0 > y_length:
                tar_x += hl_speed / 1.4
                tar_y -= hl_speed / 1.4
                if tar_x >= end_x or tar_y <= end_y:  # break once x or y reaches the end point
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
            # x_length > 0 and y_length > 0
            # Half covers two cases:
            # 1. ball from bottom-left to center
            # 2. ball from center to top-right
            # Full: ball from bottom-left to top-right
            elif x_length > 0 and y_length > 0:
                tar_x += hl_speed / 1.4
                tar_y += hl_speed / 1.4
                if tar_x >= end_x or tar_y >= end_y:  # break once x or y reaches the end point
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break
            # x_length < 0 and y_length > 0
            # Half covers two cases:
            # 1. ball from bottom-right to center
            # 2. ball from center to top-left
            # Full: ball from bottom-right to top-left
            elif x_length < 0 < y_length:
                tar_x -= hl_speed / 1.4
                tar_y += hl_speed / 1.4
                if tar_x <= end_x or tar_y >= end_y:  # break once x or y reaches the end point
                    el_tracker.sendMessage('TARGET_OFFSET')
                    break

        target.pos = (tar_x, tar_y)
        target.draw()
        win.flip()

    # clear the screen
    # clear_screen(win)
    win.color = (0, 0, 0)
    win.flip()
    el_tracker.sendMessage('black_screen')
    el_tracker.sendMessage('!V CLEAR 128 128 128')
    core.wait(0.5)

    # close the IAS file that contain the dynamic IA definition
    ias_file.close()

    # stop recording; add 100 msec to catch final events before stopping
    pylink.pumpDelay(100)
    el_tracker.stopRecording()

    el_tracker.sendMessage('!V TRIAL_VAR movement %s' % movement)
    el_tracker.sendMessage('!V TRIAL_VAR max_duration %d' %
                           int(time_elapsed * 1000))
    el_tracker.sendMessage('!V TRIAL_VAR start_x %d' % start_x)
    pylink.msecDelay(4)  # take a break of 4 milliseconds
    el_tracker.sendMessage('!V TRIAL_VAR start_y %d' % start_y)
    el_tracker.sendMessage('!V TRIAL_VAR end_x %d' % end_x)
    el_tracker.sendMessage('!V TRIAL_VAR end_y %d' % end_y)

    # send a 'TRIAL_RESULT' message to mark the end of trial, see Data
    # Viewer User Manual, "Protocol for EyeLink Data to Viewer Integration"
    el_tracker.sendMessage('TRIAL_RESULT %d' % pylink.TRIAL_OK)
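
# A standalone note (toy speed) on the hl_speed / 1.4 used in the 'Tilt'
# branches above: 1.4 approximates sqrt(2), so the per-frame step along a
# 45-degree diagonal has about the same length as the axial step.
from math import hypot, sqrt
hl_speed = 7.0                       # assumed pixels per frame
dx = dy = hl_speed / sqrt(2)
print(round(hypot(dx, dy), 3))       # ~7.0, matching the axial movement speed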
Example #60
0
def run_trial(trial_duration, movement_pars):
    """ Run a smooth pursuit trial

    trial_duration: the duration of the pursuit movement
    movement_pars: [amp_x, amp_y, phase_x, phase_y, freq_x, freq_y]
    The sinusoidal movement pattern is determined by the equation
    y(t) = amplitude * sin(frequency * t + phase)
    For circular or elliptical movements, the phase difference between the
    x and y directions should be pi/2 (direction matters). Angular frequency
    (radians/second) is used in the equation."""

    # Parse the movement pattern parameters
    amp_x, amp_y, phase_x, phase_y, freq_x, freq_y = movement_pars

    # Take the tracker offline
    tk.setOfflineMode()

    # Send the standard "TRIALID" message to mark the start of a trial
    tk.sendMessage("TRIALID")

    # Record_status_message : show some info on the Host PC
    tk.sendCommand("record_status_message 'Pursuit demo'")

    # Drift check/correction, params, x, y, draw_target, allow_setup
    tar_x = amp_x * sin(phase_x)
    tar_y = amp_y * sin(phase_y)
    target.pos = (tar_x, tar_y)
    target.draw()
    win.flip()
    tk.doDriftCorrect(int(tar_x + SCN_W / 2), int(SCN_H / 2 - tar_y), 0, 1)

    # Put the tracker in idle mode before we start recording
    tk.setOfflineMode()

    # Start recording
    # params: file_sample, file_event, link_sample, link_event (1-yes, 0-no)
    tk.startRecording(1, 1, 1, 1)

    # Wait for 100 ms to cache some samples
    pylink.msecDelay(100)

    # Send a message to mark movement onset
    frame = 0
    while True:
        target.pos = (tar_x, tar_y)
        target.draw()
        win.flip()
        flip_time = core.getTime()
        frame += 1
        if frame == 1:
            tk.sendMessage('Movement_onset')
            move_start = core.getTime()
        else:
            _x = int(tar_x + SCN_W / 2)
            _y = int(SCN_H / 2 - tar_y)
            tar_msg = f'!V TARGET_POS target {_x}, {_y} 1 0'
            tk.sendMessage(tar_msg)

        time_elapsed = flip_time - move_start

        # update the target position
        tar_x = amp_x * sin(freq_x * time_elapsed + phase_x)
        tar_y = amp_y * sin(freq_y * time_elapsed + phase_y)

        # break if the time elapsed exceeds the trial duration
        if time_elapsed > trial_duration:
            break

    # clear the window
    win.color = (0, 0, 0)
    win.flip()

    # Stop recording
    tk.stopRecording()

    # Send trial variables to record in the EDF data file
    tk.sendMessage(f"!V TRIAL_VAR amp_x {amp_x:.2f}")
    tk.sendMessage(f"!V TRIAL_VAR amp_y {amp_y:.2f}")
    tk.sendMessage(f"!V TRIAL_VAR phase_x {phase_x:.2f}")
    pylink.pumpDelay(2)  # give the tracker a break
    tk.sendMessage(f"!V TRIAL_VAR phase_y {phase_y:.2f}")
    tk.sendMessage(f"!V TRIAL_VAR freq_x {freq_x:.2f}")
    tk.sendMessage(f"!V TRIAL_VAR freq_y {freq_y:.2f}")
    tk.sendMessage(f"!V TRIAL_VAR duration {trial_duration:.2f}")

    # Send a 'TRIAL_RESULT' message to mark the end of trial
    tk.sendMessage('TRIAL_RESULT')
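
# A standalone check (made-up parameters) of the movement equation in the
# docstring above: with equal amplitudes/frequencies and a pi/2 phase lead
# in x, the target traces a circle.
from math import sin, pi
amp, omega = 200, 2 * pi * 0.4       # 0.4 Hz as an angular frequency (rad/s)
for t in (0.0, 0.625, 1.25):         # quarter-period steps of the 2.5 s cycle
    x = amp * sin(omega * t + pi / 2)
    y = amp * sin(omega * t)
    print(round(x), round(y))        # (200, 0) -> (0, 200) -> (-200, 0)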