Example #1
 def run(self):
     self.running = True
     # epi = sound.Sound(self.soundfile)
     if self.skip:
         # if self.soundfile:
         # epi.play()
         core.wait(self.TR * self.skip)  # emulate T1 stabilization without data collection
     self.clock.reset()
     for vol in range(1, self.volumes + 1):
         # if self.sound:
         # epi.stop()
         # epi.play()
         # epi.fadeOut(int(1500*self.TR))
         if self.stopflag:
             break
         # "emit" a sync pulse by placing a key in the buffer:
         event._keyBuffer.append(self.sync)
         # wait for start of next volume, doing our own hogCPU for tighter sync:
         core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
         while self.clock.getTime() < vol * self.TR:
             # busy-wait for tighter sync; still pump pyglet events so sound/video keep playing
             if core.havePyglet:
                 pyglet.media.dispatch_events()  # events for sounds/video should run independently of wait()
                 wins = pyglet.window.get_platform().get_default_display().get_windows()
                 for win in wins:
                     win.dispatch_events()  # pump events on pyglet windows
     self.running = False
Example #2
def testStaticPeriod():
    static = StaticPeriod()
    static.start(0.1)
    wait(0.05)
    assert static.complete()==1
    static.start(0.1)
    wait(0.11)
    assert static.complete()==0

    win = Window(autoLog=False)
    static = StaticPeriod(screenHz=60, win=win)
    static.start(.002)
    assert win.recordFrameIntervals == False
    static.complete()
    assert static._winWasRecordingIntervals == win.recordFrameIntervals
    win.close()

    # Test if screenHz parameter is respected, i.e., if after completion of the
    # StaticPeriod, 1/screenHz seconds are still remaining, so the period will
    # complete after the next flip.
    refresh_rate = 100.0
    period_duration = 0.1
    timer = CountdownTimer()
    win = Window(autoLog=False)

    static = StaticPeriod(screenHz=refresh_rate, win=win)
    static.start(period_duration)
    timer.reset(period_duration)
    static.complete()

    assert np.allclose(timer.getTime(),
                       1.0/refresh_rate,
                       atol=0.001)
    win.close()
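
The test above exercises StaticPeriod's basic contract: start a fixed period, do some work, then complete() absorbs whatever time remains. A minimal usage sketch under the same API; the image filename and the 0.5 s duration are illustrative assumptions, not taken from the test:

from psychopy import core, visual

win = visual.Window(autoLog=False)
ISI = core.StaticPeriod(screenHz=60, win=win)
win.flip()
ISI.start(0.5)                            # request exactly 0.5 s between this flip and the next
stim = visual.ImageStim(win, 'face.png')  # hide the slow image load inside the period (placeholder file)
ISI.complete()                            # sleeps for whatever remains of the 0.5 s
stim.draw()
win.flip()
win.close()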
Example #3
    def __init__(self, files, threads=3, verbose=False):
        """Like `Speech2Text()`, but takes a list of sound files or a directory name to search
        for matching sound files, and returns a list of `(filename, response)` tuples.
        Each `response` is described in `Speech2Text.getResponse()`.

        Can use up to 5 concurrent threads. Intended for
        post-experiment processing of multiple files, in which waiting for a slow response
        is not a problem (better to get the data).

        If `files` is a string, it will be used as a directory name for glob
        (matching all `*.wav`, `*.flac`, and `*.spx` files).
        There's currently no re-try on http error."""
        list.__init__(self)  # [ (file1, resp1), (file2, resp2), ...]
        maxThreads = min(threads, 5)  # I get http errors with 6
        self.timeout = 30
        if type(files) == str and os.path.isdir(files):
            f = glob.glob(os.path.join(files, '*.wav'))
            f += glob.glob(os.path.join(files, '*.flac'))
            f += glob.glob(os.path.join(files, '*.spx'))
            fileList = f
        else:
            fileList = list(files)
        web.requireInternetAccess()  # needed to access google's speech API
        for i, filename in enumerate(fileList):
            gs = Speech2Text(filename, level=5)
            self.append((filename, gs.getThread()))  # tuple
            if verbose:
                logging.info("%i %s" % (i, filename))
            while self._activeCount() >= maxThreads:
                core.wait(.1, 0)  # idle at max count
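
A hedged usage sketch for the batch class above, assuming a directory of .wav recordings; the directory name is a placeholder and `.word` follows the response attributes described in `Speech2Text.getResponse()`:

from psychopy import core

batch = BatchSpeech2Text('recordings/', threads=3, verbose=True)
while batch._activeCount():          # poll until all worker threads have finished
    core.wait(0.1, 0)                # coarse wait is fine for polling
for filename, resp in batch:         # each entry is a (filename, response-thread) tuple
    print(filename, resp.word)       # .word is populated once the thread completes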
Example #4
def display_instructions():
    set_msg('INTRODUCTION','TITLE')
    set_msg('AXB control','MAIN')
    set_msg('Press any key to continue','KEY')
    win.flip()
    core.wait(0.5)
    event.waitKeys()
Example #5
def testWait(duration=1.55):
    try:
        t1=getTime()
        wait(duration)
        t2=getTime()

        # Check that the actual duration of the wait was close to the requested delay.
        #
        # Note that I have had to set this to a relatively high value of
        # 50 msec because on my Win7, i7, 16GB machine I was getting deltas of up to
        # 35 msec when testing this.
        #
        # That is way too high, and I think it is because the current wait()
        # implementation polls pyglet for events during the CPU-hog period.
        # IMO the hog period should only need to be 1-2 msec, not the current
        # 200 msec default, and during it nothing should be done but tight looping
        # until the wait() expires. That is what I do in ioHub, and on this same
        # PC I get actual vs. requested duration deltas of < 100 usec consistently.
        #
        # I have not changed the wait in psychopy until feedback is given, as I
        # may be missing a reason why the current wait() implementation is required.
        #
        assert np.fabs((t2-t1)-duration) < 0.05

        printf(">> core.wait(%.2f) Test: PASSED"%(duration))

    except Exception:
        printf(">> core.wait(%.2f) Test: FAILED. Actual Duration was %.3f"%(duration,(t2-t1)))
        printExceptionDetails()

    printf("-------------------------------------\n")
Example #6
def showpix(stimpath, stim, duration):
    fixation()
    pix = visual.ImageStim(win =win, 
            image = stimpath + stim, 
            pos = [0,0], 
            size = [800, 450], 
            opacity = 1,
            units = 'pix'
            )

    pix.draw()    
  #  win.logOnFlip('parallel port trigger picture: %d' %trigger_stim , level=logging.EXP)
    win.flip()
    stimOnset= trialClock.getTime()
    pparallel.setData(0) # sets all pin lo
    pparallel.setData(trigger_stim) # send the stimulus trigger code
    core.wait(0.005)
    pparallel.setData(0)
    core.wait(duration)
    #mouse.getPressed()
    fixation()
    # get key press at the end of clip
    event.waitKeys(keyList=keyStop)
    respTime= trialClock.getTime()
   

    trials.addData('stimOnset', stimOnset)
    trials.addData('respTime',respTime)  
Example #7
    def getResponse(self):
        """Calls `getThread()`, and then polls the thread until there's a response.

        Will time-out if no response comes within `timeout` seconds. Returns an
        object having the speech data in its namespace. If there's no match,
        generally the values will be equivalent to `None` (e.g., an empty string).

        If you do `resp = getResponse()`, you'll be able to access the data
        in several ways:

            `resp.word` :
                the best match, i.e., the most probable word, or `None`
            `resp.confidence` :
                Google's confidence about `.word`, ranging 0 to 1
            `resp.words` :
                tuple of up to 5 guesses; so `.word` == `.words[0]`
            `resp.raw` :
                the raw response from Google (just a string)
            `resp.json` :
                a parsed version of raw, from `json.load(raw)`
        """
        gsqthread = self.getThread()
        while gsqthread.elapsed() < self.timeout:
            # don't need precise timing to poll an http connection
            core.wait(0.05, 0)
            if not gsqthread.running:
                break
        if gsqthread.running:  # timed out
            gsqthread.status = 408  # same as http code
        return gsqthread  # word and time data are already in the namespace
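
A brief sketch of inspecting the object returned by getResponse(); the sound filename is hypothetical and the attributes follow the docstring above:

gs = Speech2Text('trial_01.flac')
resp = gs.getResponse()                 # blocks until a reply arrives or the timeout hits
if resp.word:                           # best guess, or None / '' when nothing matched
    print(resp.word, resp.confidence)   # confidence is Google's 0-1 estimate for .word
else:
    print('no match; raw reply:', resp.raw)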
Example #8
def pheno():
    #event.clear(Events)
    pparallel.setData(0) # sets all pin lo
                
    while vividRatingScale.noResponse: 
        vivid_quest.draw()
        vividRatingScale.draw()
        win.flip()
    trig_resp = whatresp(vividRatingScale.getRating())
    print "resp=", trig_resp,  vividRatingScale.getRating()
    pparallel.setData(trig_resp)
    core.wait(0.005)
    pparallel.setData(0)
        
    while effRatingScale.noResponse: 
        eff_quest.draw()
        effRatingScale.draw()
        win.flip()
    trig_resp = whatresp(effRatingScale.getRating())
    print "resp=", trig_resp ,  effRatingScale.getRating()
    pparallel.setData(trig_resp)
    core.wait(0.005)
    pparallel.setData(0)
        


    trials.addData('scale1', vividRatingScale.getRating())
    trials.addData('RTscale1', vividRatingScale.getRT())
    trials.addData('scale2', effRatingScale.getRating())
    trials.addData('RTscale2',  effRatingScale.getRT())

    vividRatingScale.reset()
    effRatingScale.reset()
Example #9
def playclip(stimpath, stim):
    fixation()
    core.wait(0.3)
    #pparallel.setData(0) # sets all pin lo
    
    clip = visual.MovieStim(win=win,
            name= 'clip', 
            filename= stimpath + stim,
            size = [800, 450],
            ori =0, 
            pos=[0,0], 
            opacity =1, 
            depth = -1.0
            )
    pparallel.setData(trigger_stim) # send the stimulus trigger code
    core.wait(0.005)
    pparallel.setData(0)
    stimOnset= trialClock.getTime()
    while clip.status != visual.FINISHED:
        clip.draw()
        win.flip()
        
    fixation()
    # get key press at the end of clip
    event.waitKeys(keyList=keyStop)
    respTime= trialClock.getTime()
    #mouse.clickReset()
    #button, time = mouse.getPressed(getTime=True)
    #print('mouse: ', button)
    
    #event.waitKeys(keyList= button)
    trials.addData('stimOnset', stimOnset)
    trials.addData('respTime',respTime)  
Example #10
def feedback(levelscore):
    rst.text = str(levelscore)
    questions.draw()
    feed.draw()
    rst.draw()
    mywin.flip()
    core.wait(6)
Example #11
    def waitEvents(self, downOnly=True,
                   timeout=0, escape='escape', wait=0.002):
        '''Wait for and return the first button press event.

        Always calls `clearEvents()` first (like PsychoPy keyboard waitKeys).

        Use `downOnly=False` to include button-release events.

        `escape` is a list/tuple of keyboard keys that, if pressed, will
        interrupt the bbox wait; the method then returns `None`.

        `timeout` is the max time to wait in seconds before returning `None`.
        `timeout` of 0 means no time-out (= default).
        '''
        self.clearEvents()  # e.g., removes UP from previous DOWN
        if timeout > 0:
            c = core.Clock()
        if escape and not type(escape) in [list, tuple]:
            escape = [escape]
        while True:
            if wait:
                core.wait(wait, 0)  # throttle CPU; event RTs come from bbox
            evt = self.getEvents(downOnly=downOnly)
            if evt:
                evt = evt[0]
                break
            if (escape and event.getKeys(escape)) or 0 < timeout < c.getTime():
                return None
        return evt
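
A hedged sketch of calling waitEvents(); `bbox` stands in for an instance of the button-box class this method belongs to (the variable name is an assumption):

evt = bbox.waitEvents(downOnly=True, timeout=5, escape=['escape', 'q'])
if evt is None:
    print('timed out, or escape key pressed, before any button event')
else:
    print('first button event:', evt)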
Example #12
def pst(controller, outfile):
    duration = pstSettings(controller)

    if duration == -1:
        print 'PST Cancelled'
        return

    display.text(controller.experWin, 'Running PST')
    testWin = controller.testWin

    display.countdown(controller)

    display.fill_screen(testWin, [-1, -1, -1])

    if not controller.testing:
        controller.tobii_cont.setDataFile(outfile)
        controller.tobii_cont.startTracking()
        controller.tobii_cont.setEventsAndParams(['task','duration'])
        controller.tobii_cont.setParam('task', 'pst')
        controller.tobii_cont.setParam('duration', duration)

    core.wait(duration)

    if not controller.testing:
        controller.tobii_cont.stopTracking()
        controller.tobii_cont.closeDataFile()
Example #13
def fScore():
    global score
    roundscore = 0
    wordsToScore = wordsTheyRecalled
    wordList = './words/%s/words%s.txt' %(version, round-1)
    throwAwayWords = list(cor for cor in open(wordList).read().split("\n") if cor)
    for w in wordsToScore:
        instructions2[2].draw()
        core.wait(0.1)
        if w in throwAwayWords:
            risk = int(findBet(w))
            throwAwayWords.remove(w)
            score += risk
            roundscore += risk
            acc = 1
            writeToDataFile(scorefileName,w,risk,score,acc)
            print score
    for w in throwAwayWords:
        risk = int(findBet(w))
        score -= risk
        roundscore -= risk
        acc = 0
        writeToDataFile(scorefileName,w,risk,score, acc)
        print score
    return score,roundscore
Example #14
    def _record(self, sec, filename="", block=True, log=True):
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime()  # for duration estimation, high precision
        self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
        ms = "%.3f" % (core.getTime() - int(core.getTime()))
        if log and self.autoLog:
            logging.data("%s: Record: onset %d, capture %.3fs" % (self.loggingId, self.fileOnset, self.duration))
        if not filename:
            onsettime = "-%d" % self.fileOnset + ms[1:]
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            # str.strip(".wav") removes characters, not a suffix; use splitext to force a .wav extension
            self.savedFile = os.path.splitext(os.path.abspath(filename))[0] + ".wav"

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, **self.options)

        self.rate = sound.pyoSndServer.getSamplingRate()
        if block:
            core.wait(self.duration, 0)
            if log and self.autoLog:
                logging.exp(
                    "%s: Record: stop. %.3f, capture %.3fs (est)"
                    % (self.loggingId, core.getTime(), core.getTime() - t0)
                )
            while self.recorder.running:
                core.wait(0.001, 0)
        else:
            if log and self.autoLog:
                logging.exp("%s: Record: return immediately, no blocking" % (self.loggingId))

        return self.savedFile
Example #15
    def playback(self, block=True, loops=0, stop=False, log=True):
        """Plays the saved .wav file, as just recorded or resampled. Execution
        blocks by default, but can return immediately with `block=False`.

        `loops` : number of extra repetitions; 0 = play once

        `stop` : True = immediately stop ongoing playback (if there is one), and return
        """
        if not self.savedFile or not os.path.isfile(self.savedFile):
            msg = "%s: Playback requested but no saved file" % self.loggingId
            logging.error(msg)
            raise ValueError(msg)

        if stop:
            if hasattr(self, "current_recording") and self.current_recording.status == PLAYING:
                self.current_recording.stop()
            return

        # play this file:
        name = self.name + ".current_recording"
        self.current_recording = sound.Sound(self.savedFile, name=name, loops=loops)
        self.current_recording.play()
        if block:
            core.wait(self.duration * (loops + 1))  # set during record()

        if log and self.autoLog:
            if loops:
                logging.exp(
                    "%s: Playback: play %.3fs x %d (est) %s"
                    % (self.loggingId, self.duration, loops + 1, self.savedFile)
                )
            else:
                logging.exp("%s: Playback: play %.3fs (est) %s" % (self.loggingId, self.duration, self.savedFile))
Example #16
def PFparallel(vp,event,suf=''):
    ''' please run PFinit() first
        suf - output name suffix
    '''
    path,inpath,fp=initPath(vp,event)
    E=np.load(inpath+'DG%s.npy'%suf)[:,:,:,:2]
    print E.shape
    stack=np.load(inpath+'stackPF.npy').tolist()
    f=open(inpath+'PF%s.pars'%suf,'r');dat=pickle.load(f);f.close()
    N=dat['N']
    wind=Q.initDisplay()
    elem=visual.ElementArrayStim(wind,fieldShape='sqr',
            nElements=E.shape[1], sizes=Q.agentSize,
            elementMask=RING,elementTex=None,colors='white')
    while len(stack):
        jobid=stack.pop(0)
        np.save(inpath+'stackPF.npy',stack)
        PFextract(E,[jobid,N],wind=wind, elem=elem,inpath=inpath,suf=suf)
        loaded=False
        while not loaded:
            try:
                stack=np.load(inpath+'stackPF.npy').tolist()
                loaded=True
            except IOError:
                print 'IOError'
                core.wait(1)
    wind.close()
Example #17
def triggerAndLog( trigCode, id_str, major_inc, minor_inc, payload, trigDuration=10 ):
    ''' Parallel port code, LSL and test logging. '''

    global paraport
    global lsl_outlet_marker
    global USE_LSL
    global startTime

    id_str    = str(id_str)
    major_inc = "{:02d}".format(major_inc)
    minor_inc = "{:02d}".format(minor_inc)
    payload   = payload.replace('\t', '_')
    outstr    = '\t'.join([str((datetime.utcnow() - startTime).total_seconds()), str(trigCode), id_str, major_inc, minor_inc, payload])
    # outstr  = str( (datetime.utcnow() - startTime).total_seconds() ) + '\t' + str(trigCode) + '\t' + id_str + '\t' + major_inc + '\t' + minor_inc + '\t' + payload

    # Write string to log and also send to LSL outlet
    logThis(outstr)

    if triggers:
        windll.inpout32.Out32(paraport, trigCode)
        core.wait( trigDuration/1000.0, hogCPUperiod = trigDuration/1000.0 ) # <-- add this for parallel triggering
        windll.inpout32.Out32(paraport, portCodes['clear'] ) # <-- add this for parallel triggering

        if USE_LSL:
            lsl_outlet_marker.push_sample([trigCode])
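
triggerAndLog() above assumes a global `lsl_outlet_marker` already exists; a minimal pylsl sketch of creating such a one-channel marker outlet (the stream name, type and source_id are placeholders):

from pylsl import StreamInfo, StreamOutlet

info = StreamInfo(name='TriggerMarkers', type='Markers', channel_count=1,
                  nominal_srate=0, channel_format='int32',
                  source_id='exp_markers_01')
lsl_outlet_marker = StreamOutlet(info)
lsl_outlet_marker.push_sample([42])   # same call pattern used inside triggerAndLog()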
Example #18
def metronome_alone():
  stim_win.flip()
  core.wait(stim_interval)
  metronome(cvc_slow_rate)
  metronome(cvc_faster_rate)
  metronome(cvc_faster_rate)
  metronome(cvc_faster_rate)
Example #19
def wait_get_response(p, clock, oddball, wait_time):
    """Get response info specific to this experiment."""
    check_clock = core.Clock()
    good_resp = False
    corr, response, resp_rt = 0, 0, -1
    while not good_resp:
        keys = event.getKeys(timeStamped=clock)
        for key, stamp in keys:
            if key in p.quit_keys:
                print "Subject quit execution"
                core.quit()
            elif key in p.match_keys:
                corr = 0 if oddball else 1
                response = 1
                resp_rt = stamp
                good_resp = True
                break
            elif key in p.nonmatch_keys:
                corr = 1 if oddball else 0
                response = 2
                resp_rt = stamp
                good_resp = True
                break
            event.clearEvents()
        # Possibly exit with nothing
        if check_clock.getTime() >= wait_time:
            return corr, response, resp_rt
    # Wait the rest of the time
    core.wait(wait_time - resp_rt)
    return corr, response, resp_rt
Example #20
def tapping_exp(win, randid ,hand='r'):
        if hand == 'l': 
                keylist = LH_TAPPING_KEYLIST
        else:
                keylist = RH_TAPPING_KEYLIST
         
        #create some stimuli
        circle = visual.ImageStim(win=win, image=circle_image_path, pos=SA_circle_pos)

        #draw the stimuli and update the window
        stim_times = []
        for i in range(int(ST_repetition_times)):
                # Draw the circle
                circle.draw()
                # Show the screen with the circle
                win.flip()
                # Record the time at which it was shown
                stim_times.append(core.getTime())
                # Display it for "ST_duration_time" seconds
                core.wait(ST_duration_time)
                # Flip to a blank screen
                win.flip()
                # Show the blank screen for "ST_interval_time" seconds
                core.wait(ST_interval_time)

        # Check when the keys were pressed
        user_times = event.getKeys(keyList=keylist, timeStamped = True)
        return stim_times, user_times
Example #21
def arrow_exp(win, randid ,hand='r'):
        if hand == 'l':
                keylist = LH_ARROWS_KEYLIST
        else:
                keylist = RH_ARROWS_KEYLIST

        #Create our stimuli
        arrow = visual.ImageStim(win=win, image=arrow_image_path, pos=ST_arrow_pos)

        stim_times = []
        for i in range(int(SA_repetition_times)):
                # Draw the arrow
                arrow.size *= -1  # flip the arrow by negating its size (hack ;) )
                arrow.draw()
                # Show the screen with the arrow
                win.flip()
                # Record the time at which it was shown
                stim_times.append(core.getTime())
                # Display it for "SA_duration_time" seconds
                core.wait(SA_duration_time)
                # Flip to a blank screen
                win.flip()
                # Show the blank screen for "SA_interval_time" seconds
                core.wait(SA_interval_time)
                # Check when the keys were pressed
                
        user_times = event.getKeys(keyList=keylist, timeStamped = True)
        return stim_times, user_times
Example #22
    def record(self, sec, file='', block=True):
        """Capture sound input for duration <sec>, save to a file.

        Return the path/name to the new file. Uses onset time (epoch) as
        a meaningful identifier for filename and log.
        """
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime() # note: report onset time in log, and use in filename
        logging.data('%s: Record: onset %.3f, capture %.3fs' %
                     (self.loggingId, self.onset, self.duration) )
        if not file:
            onsettime = '-%.3f' % self.onset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            # str.strip('.wav') removes characters, not a suffix; use splitext to force a .wav extension
            self.savedFile = os.path.splitext(os.path.abspath(file))[0] + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, self.sampletype)
        self.rate = sound.pyoSndServer.getSamplingRate()

        if block:
            core.wait(self.duration - .0008) # .0008 fudge factor for better reporting
                # actual timing is done by Clean_objects
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
Example #23
def general_trigger(port, code):
    if exp['use trigger']:
        core.wait(0.05)
        onflip_work(port, code=code)
        stim['window'].flip()
        core.wait(0.05)
        clear_port(port)
Example #24
 def run(self):
     self.running = True
     if self.skip:
         for i in range(int(self.skip)):
             if self.playSound:  # pragma: no cover
                 self.sound1.play()
                 self.sound2.play()
             # emulate T1 stabilization without data collection
             core.wait(self.TR, hogCPUperiod=0)
     self.clock.reset()
     for vol in range(1, self.volumes + 1):
         if self.playSound:  # pragma: no cover
             self.sound1.play()
             self.sound2.play()
         if self.stopflag:
             break
         # "emit" a sync pulse by placing a key in the buffer:
         event._onPygletKey(symbol=self.sync, modifiers=0,
                            emulated=True)
         # wait for start of next volume, doing our own hogCPU for
         # tighter sync:
         core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
         while self.clock.getTime() < vol * self.TR:
             pass  # hogs the CPU for tighter sync
     self.running = False
     return self
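
The run() method above emulates scanner pulses by injecting the sync key into PsychoPy's event buffer; a sketch of the consuming side in the main script, assuming '5' was passed as the sync key and ten volumes are expected (both assumptions):

from psychopy import core, event

timer = core.Clock()
vols_seen = 0
while vols_seen < 10:                    # collect ten emulated volumes
    new_pulses = event.getKeys(['5'])    # any sync keys received since the last poll
    vols_seen += len(new_pulses)
    if new_pulses:
        print('volume %d at %.3f s' % (vols_seen, timer.getTime()))
    core.wait(0.001, hogCPUperiod=0)     # light polling between pulses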
Example #25
    def _record(self, sec, filename='', block=True):
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime()  # for duration estimation, high precision
        self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
        logging.data('%s: Record: onset %d, capture %.3fs' %
                     (self.loggingId, self.fileOnset, self.duration) )
        if not filename:  # the parameter is `filename`; testing `file` here was a bug
            onsettime = '-%d' % self.fileOnset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            # str.strip('.wav') removes characters, not a suffix; use splitext to force a .wav extension
            self.savedFile = os.path.splitext(os.path.abspath(filename))[0] + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, **self.options)

        self.rate = sound.pyoSndServer.getSamplingRate()
        if block:
            core.wait(self.duration, 0)
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
            while self.recorder.running:
                core.wait(.001, 0)
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
Example #26
 def run(self):
     self.repmom= 0
     self.chradius=0.65/2.0
     if self.block==0:
         NT= 3; temp=[]
         for n in range(NT): temp.append(np.ones(self.nrtrials/NT)*n)
         self.trialType=np.concatenate(temp)
         self.trialType=self.trialType[np.random.permutation(self.nrtrials)]
     elif self.block==1:
         self.trialType=np.zeros(self.nrtrials)
     else:
          self.trialType=np.ones(self.nrtrials)
     self.chaser=HeatSeekingChaser(Q.nrframes,[18,18],(0,0),Q.pDirChange[1],Q.aSpeed,Q.phiRange[1],True)
     self.chaser.vis=visual.Circle(self.wind,radius=self.chradius,fillColor='red',lineColor='red',interpolate=False)
     self.pnt1=visual.Circle(self.wind,radius=self.chradius,fillColor='green',lineColor='green',interpolate=False)
     #stuff=[visual.Line(self.wind,start=[-0.2,0],end=[0.2,0],units='deg'),visual.Line(self.wind,start=[0,-0.2],end=[0,0.2],units='deg')] 
     self.pnt2=visual.ShapeStim(self.wind, 
         vertices=[(-0.5,0),(-0,0),(0,0.5),(0,0),(0.5,0),(0,-0),(0,-0.5),(-0,0), (-0.5,0)],
         closeShape=False,lineColor='white',interpolate=False)
     self.mouse=visual.CustomMouse(self.wind, leftLimit=-9,rightLimit=9,showLimitBox=True,
         topLimit=9,bottomLimit=-9,pointer=self.pnt2)
     Experiment.run(self,prefix='')
     
     self.text1.setText(u'Der Block ist nun zu Ende.')
     self.text1.draw()
     self.wind.flip()
     core.wait(10)
     self.output.close()
     self.wind.close()
Example #27
    def showForXSec(self, text, sec):
        """
        Show text for a given amount of seconds.
        """

        self._display(text)
        core.wait(sec)
Example #28
    def test_write(self):
        win = visual.Window([600,400], units='height')
        svg = exp.SVG(win, filename='stims')

        circle = visual.Circle(win, pos=(-.5,0), fillColor='yellow', lineColor=None)
        circle.draw()
        svg.write(circle)

        line = visual.Line(win, pos=(0,0), lineColor='black', lineWidth=5)
        line.draw()
        svg.write(line)

        rect = visual.Rect(win, height=.8, pos=(.5,0))
        rect.draw()
        svg.write(rect)

        shape = visual.ShapeStim(win, fillColor='blue', opacity=.5)
        shape.draw()
        svg.write(shape)

        text = visual.TextStim(win, pos=(.5,0.25))
        text.draw()
        svg.write(text)
        
        thick = exp.ThickShapeStim(win, vertices=[(-.5,.5),(.5,-.5)], lineWidth=.01)
        thick.draw()
        svg.write(thick)

        win.flip()
        #win.getMovieFrame()
        #win.saveMovieFrames('stims.png')
        #svg.save()

        core.wait(5)
Example #29
def fixation_screen(myClock, waittime=1):
    fixation.draw()
    win.logOnFlip(level=logging.EXP, msg='fixation cross on screen') #new log haoting
    win.flip()
    fixStart = myClock.getTime() #fixation cross onset
    core.wait(waittime)
    return fixStart
Example #30
    def test_Speech2Text(self):
        pytest.skip()  # google speech API gives Error 400: Bad request

        try:
            web.requireInternetAccess()
        except web.NoInternetAccessError:
            pytest.skip()

        # load a known sound file
        testFile = join(self.tmp, 'red_16000.wav')

        gs = Speech2Text(filename=testFile)
        resp = gs.getResponse()
        assert resp.word == 'red'

        # test batch-discover files in a directory
        tmp = join(self.tmp, 'tmp')
        os.mkdir(tmp)
        shutil.copy(testFile, tmp)
        bs = BatchSpeech2Text(files=tmp)

        bs = BatchSpeech2Text(files=glob.glob(join(self.tmp, 'red_*.wav')))
        while bs._activeCount():
            core.wait(.1, 0)
        resp = bs[0][1]
        assert 0.6 < resp.confidence < 0.75  # 0.68801856
        assert resp.word == 'red'
Example #31
def onFlip(stimName, logName):
    "send trigger on flip, set keyboard clock, and save timepoint"
    trigger.send(stimName)
    kb.clock.reset()  # start the keyboard clock as soon as the stimulus appears
    datalog[logName] = mainClock.getTime()


##############
# Introduction
##############

# Display overview of session
screen.show_overview()
core.wait(CONF["timing"]["overview"])

# Optionally, display instructions
if CONF["showInstructions"]:
    screen.show_instructions()
    key = event.waitKeys()
    quitExperimentIf(key[0] == 'q')

eyetracker.start_recording(
    os.path.join(CONF["participant"], CONF["task"]["name"], CONF["session"]))

#################
# Main experiment
#################

mwtTimer = core.CountdownTimer(CONF["task"]["duration"])
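
The onFlip() helper above is presumably registered with Window.callOnFlip, so that the trigger, the keyboard-clock reset and the log entry all fire on the refresh that shows the stimulus; a hedged sketch in which `win`, `stimulus` and the two string arguments are placeholders:

stimulus.draw()
win.callOnFlip(onFlip, 'Stimulus', 'stimulusOnset')  # runs immediately after the next flip completes
win.flip()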
Example #32
    trials.addData("Switch_pass", trials_after_switch)

    if positions[trial] == 0:
        # position 1: vertical grating right, horizontal left
        gratings[0].pos = (dist_to_center, 0)
        gratings[1].pos = (-dist_to_center, 0)
    else:
        # position 2: vertical grating left, horizontal right
        gratings[0].pos = (-dist_to_center, 0)
        gratings[1].pos = (dist_to_center, 0)

    fixation.draw()
    window.flip()
    core.wait(2)

    event.clearEvents(eventType="keyboard")
    my_clock.reset()
    while my_clock.getTime() < .2:
        fixation.draw()
        gratings[0].draw()
        gratings[1].draw()
        window.flip()

    fixation.draw()
    window.flip()

    response = event.waitKeys(keyList=["f", "j", "escape"])

    if response[0] == "escape":
Example #33
win.flip()

n = 1
for char in char_intro_order:
    temp_instr = visual.TextStim(win,
                                 instr[n] + char,
                                 color='black',
                                 pos=(0, 0.3))
    char_stim = Image.open(
        char_dir + [i for i in os.listdir(char_dir) if i.startswith(char)][0])
    char_stim.thumbnail(item_size, Image.ANTIALIAS)
    char_stim = visual.ImageStim(win, char_stim, pos=[0, -0.2])
    temp_instr.draw()
    char_stim.draw()
    win.update()
    core.wait(time_intro)
    win.flip()
    n = n + 1

# Example of encoding presentation
"""temp_instr = visual.TextStim(win, instr[12], color='black', pos=[0,0])
temp_instr.draw()
win.update()
event.waitKeys(keyList=['space'])
win.flip()"""
"""
temp_instr = visual.TextStim(win, instr[13], color='black', pos=[0,0.7])
temp_instr.draw()
scene_stim = scene_dir+'example.png'
item_stim = Image.open(item_dir+'books.png')
item_stim.thumbnail(item_size, Image.ANTIALIAS)
Example #34
    def runTestTrial(self,
                     trialType,
                     trialNumber,
                     curTrial,
                     updateScore,
                     updateEveryXTrials,
                     totalScore,
                     runningScore,
                     feedback,
                     feedback2="none"):

        setAndPresentStimulus(self.win, [
            self.background, self.score, self.treasurePic, self.clockFrame,
            self.clockCenter, self.startPrompt
        ])

        event.waitKeys(keyList=['space'])

        curTrial['header'] = self.header

        #reset clock hand
        self.clockHand.ori = int(curTrial['angle'])

        #base stimulus array
        baseStims = [
            self.background, self.score, self.treasurePic, self.clockFrame,
            self.clockCenter, self.clockHand
        ]

        promptStims = baseStims + [self.textPrompt, self.textPrompt2]

        #draw clock
        setAndPresentStimulus(self.win, promptStims)

        if trialType == "test":
            #responded=False
            timer = core.Clock()
            label = self.enterText(promptStims, 1, self.timeoutTime)
            RT = timer.getTime() * 1000
        elif trialType == "name":
            timer = core.Clock()
            label = self.enterText(promptStims, 1, self.learnTimeoutTime)
            RT = timer.getTime() * 1000

    #while not responded:
    #    if self.myMouse.getPressed()[0]==1 and (numWheelTurnsUp>0 or numWheelTurnsDown>0):
    #			RT = timer.getTime()*1000
    #			responded=True
        if trialType == "recordName":
            RT = self.recordIt(self.runTimeVars['subjCode'] + "_" +
                               str(trialNumber) + "_" + curTrial['label'],
                               baseStims,
                               chunk=1024)
            label = "NA"
            core.wait(self.audioEndTime)

        isRight = 'NA'
        if trialType == "name":
            isRight = 0
            if label == curTrial['target']:
                isRight = 1

        curScore = 0
        if trialType == "name":
            if label == curTrial['target']:
                curScore = self.namingScore
        if trialType == "test":
            if label in self.labelList:
                curScore = int(curTrial[label])
        curScore = int(
            round(
                curScore * (1 + (self.timeoutTime * 1000 - RT) /
                            (self.timeoutTime * 1000)), 0))
        totalScore += curScore
        runningScore += curScore

        #write runtime and indep variables
        responses = [curTrial[_] for _ in curTrial['header']]

        resp = 'NA'

        #write dep variables
        responses.extend(
            [trialType, trialNumber, resp, label, isRight, RT, curScore])
        writeToFile(self.outputFile,
                    responses,
                    separator='\t',
                    sync=True,
                    writeNewLine=True)

        #feedback
        if feedback == "yes":
            #update feedback text
            self.feedbackText.text = "+" + str(curScore)
            self.feedbackLabel.text = "You entered: " + label
            if not (label in self.labelList):
                setAndPresentStimulus(
                    self.win, baseStims + [
                        self.feedbackText, self.feedbackLabel,
                        self.incorrLabelFeedback, self.treasurePicFeedback,
                        self.elfFeedback
                    ])
            else:
                setAndPresentStimulus(
                    self.win, baseStims + [
                        self.feedbackText, self.feedbackLabel,
                        self.treasurePicFeedback, self.elfFeedback
                    ])
            core.wait(self.feedbackTime)

        elif feedback == "partial":
            self.feedbackLabel.text = "You entered: " + label
            if not (label in self.labelList):
                setAndPresentStimulus(
                    self.win,
                    baseStims + [self.feedbackLabel, self.incorrLabelFeedback])
            else:
                setAndPresentStimulus(self.win,
                                      baseStims + [self.feedbackLabel])
            core.wait(self.feedbackTime)

        if feedback2 == "cumulative":
            #update feedback text
            self.feedbackText.text = "+" + str(runningScore)
            self.feedback2Text.text = "You helped the elves collect " + str(
                runningScore) + " treasure coins on your last " + str(
                    self.numFeedbackBlocks) + " hunts combined."
            setAndPresentStimulus(
                self.win, baseStims + [
                    self.feedbackText, self.feedback2Text,
                    self.treasurePicFeedback, self.elfFeedback
                ])
            core.wait(self.feedback2Time)

        endTrialStims = [
            self.background, self.clockFrame, self.clockCenter, self.score,
            self.treasurePic
        ]
        self.textBoxEntry.text = ""

        if int(updateScore) == 1 and trialNumber % updateEveryXTrials == 0:
            self.score.text = str(totalScore)

        setAndPresentStimulus(self.win, endTrialStims)
        core.wait(self.ITI)

        return [curScore, totalScore, runningScore]
Example #35
    def runLearningTrial(self,
                         trialType,
                         trialNumber,
                         curTrial,
                         totalScore,
                         labelFeedback=0,
                         feedback=True):

        curTrial['header'] = self.header

        #reset clock hand
        self.clockHand.ori = int(curTrial['angle'])

        #set label choices
        if trialType == "name" and labelFeedback == 0:
            self.learnPrompt.text = 'This direction is called ' + curTrial[
                'target']
        elif trialType == "name" and labelFeedback == 1:
            self.learnPrompt.text = 'What is this direction called?'
        elif trialType == "pairLearn":
            self.learnPrompt.text = curTrial['left'] + " or " + curTrial[
                'right'] + '?'
        elif trialType == "finalName":
            self.learnPrompt.text = 'What is this direction called?'

        #base stimulus array
        baseStims = [
            self.background, self.clockFrame, self.clockCenter, self.clockHand,
            self.learnPrompt, self.learnResponseInfo
        ]

        #enter text response
        timer = core.Clock()
        label = self.enterText(baseStims, 0)
        RT = timer.getTime() * 1000

        #check if correct
        isRight = int(curTrial['target'] == label)

        #write runtime and indep variables to
        responses = [curTrial[_] for _ in curTrial['header']]

        resp = "NA"

        #write dep variables
        responses.extend(
            [trialType, trialNumber, resp, label, isRight, RT, totalScore])
        writeToFile(self.outputFile,
                    responses,
                    separator='\t',
                    sync=True,
                    writeNewLine=True)

        if feedback:
            #feedback
            if isRight == 1:
                feedbackColor = "green"
                totalScore += 1
                feedbackSound = "bleep"
            else:
                feedbackColor = "red"
                feedbackSound = "buzz"

            self.textBoxInner.color = feedbackColor
            self.textBoxEntry.text = label

            #draw clock with feedback on response
            stimArray = [
                self.background, self.clockFrame, self.clockCenter,
                self.clockHand, self.learnPrompt, self.learnResponseInfo,
                self.textBoxOuter, self.textBoxInner, self.textBoxEntry
            ]
            if labelFeedback == 1 and isRight == 0:
                self.labelFeedbackText.text = "The correct label is " + curTrial[
                    'target']
                stimArray = stimArray + [self.labelFeedbackText]
            setAndPresentStimulus(self.win, stimArray)

            #play feedback sound
            playAndWait(self.sounds[feedbackSound])
            core.wait(self.learnFeedbackTime)

        #flip screen
        setAndPresentStimulus(
            self.win, [self.background, self.clockFrame, self.clockCenter])
        core.wait(self.ITI)
        self.textBoxInner.color = "white"
        self.textBoxEntry.text = ""

        return totalScore
Example #36
    def runLearningTrial2AFC(self, trialType, trialNumber, curTrial,
                             totalScore):

        curTrial['header'] = self.header

        #reset clock hand
        self.clockHand.ori = int(curTrial['angle'])

        #set label choices
        self.learningTextLeft.text = curTrial['left']
        self.learningTextRight.text = curTrial['right']

        #draw clock & options
        setAndPresentStimulus(self.win, [
            self.background, self.clockFrame, self.clockCenter, self.clockHand,
            self.learningBoxOuterLeft, self.learningBoxInnerLeft,
            self.learningBoxOuterRight, self.learningBoxInnerRight,
            self.learningTextLeft, self.learningTextRight, self.learningPrompt,
            self.learningResponseInfo
        ])

        #get keyboard response
        [resp, RT] = getKeyboardResponse(self.learningResponses, duration=0)
        RT = RT * 1000
        label = curTrial[resp]

        #check if correct
        isRight = int(curTrial['target'] == label)

        #write runtime and indep variables to
        responses = [curTrial[_] for _ in curTrial['header']]

        #write dep variables
        responses.extend(
            [trialType, trialNumber, resp, label, isRight, RT, totalScore])
        writeToFile(self.outputFile,
                    responses,
                    separator='\t',
                    sync=True,
                    writeNewLine=True)

        #feedback
        if isRight == 1:
            feedbackColor = "green"
            totalScore += 1
        else:
            feedbackColor = "red"

        if resp == "left":
            self.learningBoxInnerLeft.color = feedbackColor
        else:
            self.learningBoxInnerRight.color = feedbackColor

        #draw clock with feedback on response
        setAndPresentStimulus(self.win, [
            self.background, self.clockFrame, self.clockCenter, self.clockHand,
            self.learningBoxOuterLeft, self.learningBoxInnerLeft,
            self.learningBoxOuterRight, self.learningBoxInnerRight,
            self.learningTextLeft, self.learningTextRight, self.learningPrompt,
            self.learningResponseInfo
        ])
        core.wait(self.learnFeedbackTime)

        #flip screen
        setAndPresentStimulus(
            self.win, [self.background, self.clockFrame, self.clockCenter])
        core.wait(self.ITI)
        self.learningBoxInnerLeft.color = "white"
        self.learningBoxInnerRight.color = "white"

        return totalScore
Example #37
def init(rate=44100, stereo=True, buffer=128):
    """setup the pyo (sound) server
    """
    global pyoSndServer, Sound, audioDriver, duplex, maxChnls
    Sound = SoundPyo
    global pyo
    try:
        assert pyo
    except NameError:  # pragma: no cover
        import pyo
        # can be needed for microphone.switchOn(), which calls init even
        # if audioLib is something else

    # subclass the pyo.Server so that we can insert a __del__ function that
    # shuts it down skip coverage since the class is never used if we have
    # a recent version of pyo

    class _Server(pyo.Server):  # pragma: no cover
        # make libs class variables so they don't get deleted first
        core = core
        logging = logging

        def __del__(self):
            self.stop()
            # make sure enough time passes for the server to shutdown
            self.core.wait(0.5)
            self.shutdown()
            # make sure enough time passes for the server to shutdown
            self.core.wait(0.5)
            # this may never get printed
            self.logging.debug('pyo sound server shutdown')

    if '.'.join(map(str, pyo.getVersion())) < '0.6.4':
        Server = _Server
    else:
        Server = pyo.Server

    # if we already have a server, just re-initialize it
    if 'pyoSndServer' in globals() and hasattr(pyoSndServer, 'shutdown'):
        pyoSndServer.stop()
        # make sure enough time passes for the server to shutdown
        core.wait(0.5)
        pyoSndServer.shutdown()
        core.wait(0.5)
        pyoSndServer.reinit(sr=rate,
                            nchnls=maxChnls,
                            buffersize=buffer,
                            audio=audioDriver)
        pyoSndServer.boot()
    else:
        if sys.platform == 'win32':
            # check for output device/driver
            # todo: throwing errors on one user's config https://discourse.psychopy.org/t/error-with-microphone-component-on-psychopy-2020/13168
            devNames, devIDs = get_output_devices()
            audioDriver, outputID = _bestDriver(devNames, devIDs)
            if outputID is None:
                # using the default output because we didn't find the one(s)
                # requested
                audioDriver = 'Windows Default Output'
                outputID = pyo.pa_get_default_output()
            if outputID is not None:
                logging.info(u'Using sound driver: %s (ID=%i)' %
                             (audioDriver, outputID))
                maxOutputChnls = pyo.pa_get_output_max_channels(outputID)
            else:
                logging.warning(
                    'No audio outputs found (no speakers connected?)')
                return -1
            # check for valid input (mic)
            # If no input device is available, devNames and devIDs are empty
            # lists.
            devNames, devIDs = get_input_devices()
            audioInputName, inputID = _bestDriver(devNames, devIDs)
            # Input devices were found, but requested devices were not found
            if len(devIDs) > 0 and inputID is None:
                defaultID = pyo.pa_get_default_input()
                if defaultID is not None and defaultID != -1:
                    # default input is found
                    # use the default input because we didn't find the one(s)
                    # requested
                    audioInputName = 'Windows Default Input'
                    inputID = defaultID
                else:
                    # default input is not available
                    inputID = None
            if inputID is not None:
                msg = u'Using sound-input driver: %s (ID=%i)'
                logging.info(msg % (audioInputName, inputID))
                maxInputChnls = pyo.pa_get_input_max_channels(inputID)
                duplex = bool(maxInputChnls > 0)
            else:
                maxInputChnls = 0
                duplex = False
        # for other platforms set duplex to True (if microphone is available)
        else:
            audioDriver = prefs.hardware['audioDriver'][0]
            maxInputChnls = pyo.pa_get_input_max_channels(
                pyo.pa_get_default_input())
            maxOutputChnls = pyo.pa_get_output_max_channels(
                pyo.pa_get_default_output())
            duplex = bool(maxInputChnls > 0)

        maxChnls = min(maxInputChnls, maxOutputChnls)
        if maxInputChnls < 1:  # pragma: no cover
            msg = (u'%s.init could not find microphone hardware; '
                   u'recording not available')
            logging.warning(msg % __name__)
            maxChnls = maxOutputChnls
        if maxOutputChnls < 1:  # pragma: no cover
            msg = (u'%s.init could not find speaker hardware; '
                   u'sound not available')
            logging.error(msg % __name__)
            return -1

        # create the instance of the server:
        if sys.platform == 'darwin' or sys.platform.startswith('linux'):
            # for mac/linux we set the backend using the server audio param
            pyoSndServer = Server(sr=rate,
                                  nchnls=maxChnls,
                                  buffersize=buffer,
                                  audio=audioDriver)
        else:
            # with others we just use portaudio and then set the OutputDevice
            # below
            pyoSndServer = Server(sr=rate, nchnls=maxChnls, buffersize=buffer)

        pyoSndServer.setVerbosity(1)
        if sys.platform == 'win32':
            pyoSndServer.setOutputDevice(outputID)
            if inputID is not None:
                pyoSndServer.setInputDevice(inputID)
        # do other config here as needed (setDuplex? setOutputDevice?)
        pyoSndServer.setDuplex(duplex)
        pyoSndServer.boot()
    core.wait(0.5)  # wait for server to boot before starting the sound stream
    pyoSndServer.start()

    # atexit is filo, will call stop then shutdown upon closing
    atexit.register(pyoSndServer.shutdown)
    atexit.register(pyoSndServer.stop)
    try:
        Sound()  # test creation, no play
    except pyo.PyoServerStateException:
        msg = "Failed to start pyo sound Server"
        if sys.platform == 'darwin' and audioDriver != 'portaudio':
            msg += "; maybe try prefs.general.audioDriver 'portaudio'?"
        logging.error(msg)
        core.quit()
    logging.debug('pyo sound server started')
    logging.flush()
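
A hedged sketch of using the initializer above: boot the pyo backend, then create and play a short test tone with the module-level Sound class it assigns (the tone parameters are illustrative, and the surrounding module's imports of core are assumed):

if init(rate=44100, stereo=True, buffer=128) != -1:  # init() returns -1 when no usable audio device is found
    beep = Sound(440, secs=0.25)   # module-level Sound was bound to SoundPyo by init()
    beep.play()
    core.wait(0.3)                 # give the short tone time to finish before moving on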
Example #38
    # that is actually stimulated (this is typically a square at the centre of
    # the screen, flanked by unstimulated areas on the left and right side).
    aryFrames = np.zeros((pixCover, pixCover, NrOfVols), dtype=np.int16)

    # Make sure that pixCover is of integer type:
    pixCover = int(np.around(pixCover))

    # Counter for screenshots:
    idxFrame = 0

# %%
"""RENDER_LOOP"""
# Create Counters
i = 0
# give the system time to settle
core.wait(1)

if not (lgcLogMde):
    # wait for scanner trigger
    triggerText.draw()
    myWin.flip()
    event.waitKeys(keyList=['5'], timeStamped=False)

# reset clocks
clock.reset()
logging.data('StartOfRun' + unicode(expInfo['run']))

while clock.getTime() < totalTime:  # noqa

    # get key for motion direction
    keyPos = Conditions[i, 0]
Example #39
timeWithLabjack = True
maxReps = 100

# setup labjack U3
ports = u3.U3()
ports.__del__ = ports.close  # try to autoclose the ports if script crashes

# get zero value of FIO6
startVal = ports.getFIOState(6)  # is FIO6 high or low?
print('FIO6 is at', startVal, end='')
print('AIN0 is at', ports.getAIN(0))
if timeWithLabjack:
    print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')

snd = sound.Sound(1000, secs=0.1)
core.wait(2)  # give the system time to settle?
delays = []
nReps = 0
while True:  # run the repeats for this sound server
    if event.getKeys('q'):
        core.quit()
    nReps += 1
    # do this repeatedly for timing tests
    ports.setFIOState(4, 0)  # start FIO4 low

    # draw black square
    stim.draw()
    win.flip()

    if not timeWithLabjack:
        # wait for a key press
Example #40
        stimuli.setImage(exp_info['training stimuli directory'] + '\stim15_' +
                         dino + '.jpg')  # sets the stimuli
        stimuli.draw()
        win.flip()
        press = event.waitKeys(keyList=['a', 'l'])
        RT = trial_clock.getTime()
        RTs.append(str(RT))  # logs RT
        feedback_text = visual.TextStim(win, height=20, color='white')
        # check whether the answer was correct
        if (press[0] == 'a' and dino in ffpA) or (press[0] == 'l' and dino in ffpB):
            feedback_text.setText(
                feedback[0])  # sets the feedback message to correct
            feedback_text.draw()
            win.flip()
            core.wait(0.5)
            answers.append('1')  # logs that it was correct
        else:
            feedback_text.setText(
                feedback[1])  # sets the feedback message to incorrect
            feedback_text.draw()
            win.flip()
            core.wait(0.5)
            answers.append('0')  # logs that it was incorrect
order = [i for i in range(1, (3 * len(ffp)) + 1)
         ]  # makes a list of the numbers from 1 to the length of the training
log = zip(
    order, training_presented, answers, RTs
)  # puts together the order number, the code, the hit and the RT for each training stimuli

#test
Example #41
def show_instructions():
    title.setAutoDraw(True)
    instrText.setText(instructions1)
    instrText.setAutoDraw(True)
    win.flip()
    #core.wait(20)
    instrKey.setText("PRESS 2 [pointer finger] to continue")
    instrKey.draw()
    win.flip()
    event.waitKeys(keyList=['2'])
    instrText.setText(instructions2)
    win.flip()
    #core.wait(20)
    instrKey.setText("PRESS 3 [middle finger] to begin...")
    instrKey.draw()
    win.flip()
    event.waitKeys(keyList=['3'])
    instrText.setAutoDraw(False)

    p1_ticker = "."
    p3_ticker = "."
    p1_ticker_end = 120
    p3_ticker_end = 425

    title.setText('Joining VBT Game Room')
    instr_p1.setText("PLAYER 1: Waiting for player to join")
    instr_p2.setText("PLAYER 2: Welcome %s" % player_name)
    instr_p3.setText("PLAYER 3: Waiting for player to join")
    instr_p1.setAutoDraw(True)
    instr_p2.setAutoDraw(True)
    instr_p3.setAutoDraw(True)
    p1_tick.setAutoDraw(True)
    p3_tick.setAutoDraw(True)
    win.flip()
    for tick in range(500):
        if tick == p1_ticker_end:
            instr_p1.setText("PLAYER 1: Welcome %s" % player1_name)
            p1_tick.setAutoDraw(False)
        elif tick == p3_ticker_end:
            instr_p3.setText("PLAYER 3: Welcome %s" % player3_name)
            p3_tick.setAutoDraw(False)
        else:
            if tick % 10 == 0:
                p1_ticker = p1_ticker + "."
                if len(p1_ticker) > 6:
                    p1_ticker = ""
            if tick % 12 == 0:
                p3_ticker = p3_ticker + "."
                if len(p3_ticker) > 6:
                    p3_ticker = ""
            if tick < p1_ticker_end:
                p1_tick.setText(p1_ticker)
            if tick < p3_ticker_end:
                p3_tick.setText(p3_ticker)
        win.flip()
    core.wait(2)

    title.setAutoDraw(False)
    instr_p1.setAutoDraw(False)
    instr_p2.setAutoDraw(False)
    instr_p3.setAutoDraw(False)
Example #42
def run_block(N, features, phase, task):
    """ Run the experiment for a trial list """
    global cumulative_trials
    trials = make_trials(N, features, phase, task)

    # Show instruction
    N_text.text = instructions['N'] % (N, 'sekvenser' if N > 1 else 'sekvens')
    draw_trial(trials[0], targets=False)

    # Show task instructions.
    if phase != 'training':
        show_instruction(instructions[task])
    else:
        # During training, the instructions have already been shown.
        show_instruction(instructions['training_start'])

    # Show trials
    for trial in trials:
        # Prepare stimuli
        draw_trial(trial)

        figure = figure_stims[trial['figure']]
        figure.fillColor = trial['color']
        figure.pos = grid_coords[trial['position']]
        figure.draw()

        # Show it
        win.flip()
        if trial['sound_tested']:
            sounds[trial['sound']].play()
        clock.reset()
        event.clearEvents()

        # Control stimulus offset and responses
        flipped = False
        while clock.getTime() < RESPONSE_WINDOW[trial['factor']]:
            # Stimulus offset
            if clock.getTime() > VISUAL_DURATION and not flipped:
                draw_trial(trial)
                win.flip()
                flipped = True

            # Collect response and score it
            if DIALOGUE['simulate'] == 'no':
                responses = event.getKeys(keyList=RESPONSE_KEYS.keys())
            else:
                responses = [random.choice(list(RESPONSE_KEYS.keys()))]
            if responses:
                for response in responses:
                    response_feature = RESPONSE_KEYS[response]
                    if response_feature in features:
                        trial[response_feature + '_response'] = 1
                        trial[response_feature + '_score'] = int(
                            trial[response_feature + '_target'] == 1)

                draw_trial(trial)
                win.flip()

            # Save on computer resources; RT is not important here
            core.wait(0.1, hogCPUperiod=0)

        # End of trial. Check for CR and misses
        for feature in features:
            if trial[feature + '_response'] == '':
                trial[feature + '_response'] = 0
                trial[feature + '_score'] = int(
                    trial[feature + '_target'] == trial[feature + '_response'])

        # Save this trial immediately
        writer.write(trial)
        writer.flush()

        # ... and give feedback
        draw_trial(trial)
        win.flip()
        core.wait(FEEDBACK_DURATION)

        # Quit and count
        cumulative_trials += 1
        if event.getKeys(keyList=QUIT_KEYS):
            nice_quit()

    # Performance summary after each training block
    if phase == 'training':
        show_training_feedback(trials, features)
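The run_block function above relies on several module-level names (make_trials, draw_trial, show_instruction, figure_stims, grid_coords, sounds, writer, nice_quit, and a handful of constants) that are defined elsewhere in the full script. Purely as a readability aid, a minimal sketch of what the constants might look like, all of them assumptions rather than the script's real values, is:

# Hypothetical values; the actual script defines these elsewhere.
RESPONSE_KEYS = {'s': 'sound', 'c': 'color', 'p': 'position'}  # key -> feature name (assumed)
RESPONSE_WINDOW = {1: 2.0, 2: 3.0}    # seconds allowed per trial 'factor' (assumed)
VISUAL_DURATION = 0.5                 # seconds the figure stays on screen (assumed)
FEEDBACK_DURATION = 1.0               # seconds of feedback after each trial (assumed)
QUIT_KEYS = ['escape']                # keys that abort the experiment (assumed)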
Example #43
0
                         lineWidth=900,
                         closeShape=False,
                         lineColor='red')

# Wait for trigger
out = event.waitKeys(maxWait=inf, keyList=['5'], timeStamped=True)
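# (the '5' key is assumed here to be the pulse emitted by the MR scanner's trigger)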

# Reset Clock
globalClock = core.Clock()
logging.setDefaultClock(globalClock)

# First RED cross
currWindow.logOnFlip('', level=logging.EXP + 1)
cross.draw()
currWindow.flip()
core.wait(infos['durRest'])

# Read keys pressed during the RED cross; quit on 'escape'
# (with timeStamped, getKeys returns (key, time) pairs, so check the key names)
pressed = event.getKeys(keyList=['1', '2', '3', '4', 'escape'],
                        timeStamped=globalClock)
if any(key == 'escape' for key, _ in pressed):
    logging.flush()
    core.quit()

mean = []
correctSeq = []

for nblock in range(infos['nbBlocks']):

    # StartPerformance: log the block number on the next flip
    currWindow.logOnFlip(str(nblock), level=logging.EXP + 2)
    cross.setLineColor('green')
Example #44
0
        "psychopy_monitor_name":
        psychopy_mon_name,
        "mcu.iosync.MCU":
        dict(serial_port='auto', monitor_event_types=[
            'DigitalInputEvent',
        ]),
        "experiment_code":
        exp_code,
        "session_code":
        sess_code
    }
    io = launchHubServer(**iohub_config)
    mcu = io.devices.mcu
    kb = io.devices.keyboard

    core.wait(0.5)
    mcu.enableEventReporting(True)
    io.clearEvents("all")
    while not kb.getEvents():
        mcu_events = mcu.getEvents()
        for mcu_evt in mcu_events:
            print('{0}\t{1}'.format(mcu_evt.time, mcu_evt.state))
        core.wait(0.002, 0)
    io.clearEvents('all')
except Exception:
    import traceback
    traceback.print_exc()
finally:
    if mcu:
        mcu.enableEventReporting(False)
    if io:
        io.quit()  # shut down the ioHub server (assumed; the original snippet is truncated here)
Example #45
0
event.waitKeys(keyList=['space'])
event.clearEvents()

clk = core.Clock()
for i in range(len(trial_set)):
    for j in range(33):
        # Start recording eye-tracking data
        getEYELINK().startRecording(1, 1, 1, 1)
        # If binocular data are being recorded, switch to recording the left eye
        eye_used = getEYELINK().eyeAvailable()
        if eye_used == RIGHT_EYE:
            getEYELINK().sendMessage("EYE_USED 1 RIGHT")
        elif eye_used == LEFT_EYE or eye_used == BINOCULAR:
            getEYELINK().sendMessage("EYE_USED 0 LEFT")
            eye_used = LEFT_EYE
        core.wait(0.1)
        startTime = currentTime()
        getEYELINK().sendMessage("SYNCTIME %d" % (currentTime() - startTime))

        k = 33*i + j
        # Trial data
        p_v = trial_set[i][j]['p']
        x1, x2 = trial_set[i][j]['v']
        message = "record_status_message 'Trial n%s, p:%s, x:%s, y:%s'" % (k, p_v, x1, x2)
        getEYELINK().sendCommand(message)
        msg = "TRIALID p:%s. x:%s. y:%s, n:%s" % (p_v, x1, x2, k)
        getEYELINK().sendMessage(msg)
        sure_reward = trial_set[i][j]['sure_reward']
        np.random.shuffle(sure_reward)
        # Fixation point
        fix.draw()
Example #46
0
try:
    # Send a trigger pulse to the scanner over the serial port
    ser = serial.Serial('/dev/tty.KeySerial1', 9600, timeout=1)
    ser.write(b'0')
    time.sleep(0.1)
    ser.write(b'255')
    ser.close()
except Exception:
    print("SCANNER NOT TRIGGERED")
# end of trigger code

logging.log(level=logging.DATA, msg="START")

# 8 sec disdaq
fixation.setText("+")
fixation.draw()
win.flip()
core.wait(8)

round = 1
play_round()
holder = 1
round = 2
play_round()

goodbye.setText("Game Over!\nThanks for playing %s." % player_name)
goodbye.draw()
win.flip()
core.wait(7.5)
logging.log(level=logging.DATA, msg="END")
Example #47
0
    def cycleThroughExperimentTrials(self,
                                     whichPart):  #CHECK OUT PRACTICE STUFF
        curTrialIndex = 0

        if whichPart == 'practice':

            for curTrial in self.practTrialList:
                if curTrialIndex == 0:
                    waitingAnimation(currentExp.win, color="PowderBlue")
                if curTrialIndex > 0 and curTrialIndex % self.experiment.takeBreakEveryXTrials == 0:
                    showText(
                        self.experiment.win,
                        self.experiment.takeBreak,
                        color=(-1, -1, -1),
                        inputDevice=self.experiment.inputDevice)  #take a break
                    waitingAnimation(currentExp.win, color="PowderBlue")
                setAndPresentStimulus(self.experiment.win, [self.fixSpot])
                core.wait(1)
                setAndPresentStimulus(self.experiment.win, [self.fixSpotReady])
                core.wait(.5)
                setAndPresentStimulus(self.experiment.win, [self.fixSpotPlay])
                self.showTestTrial(curTrial, curTrialIndex, whichPart)
                curTrialIndex += 1
                self.experiment.win.flip()
                core.wait(.2)
            #self.experiment.eventTracker.close()
            #self.experiment.testFile.close()

        elif whichPart == 'experiment':
            for curTrial in self.trialList:
                if curTrialIndex == 0:
                    waitingAnimation(currentExp.win, color="PowderBlue")
                if curTrialIndex > 0 and curTrialIndex % self.experiment.takeBreakEveryXTrials == 0:
                    showText(
                        self.experiment.win,
                        self.experiment.takeBreak,
                        color=(-1, -1, -1),
                        inputDevice=self.experiment.inputDevice)  #take a break
                    waitingAnimation(currentExp.win, color="PowderBlue")
                setAndPresentStimulus(self.experiment.win, [self.fixSpot])
                core.wait(1)
                setAndPresentStimulus(self.experiment.win, [self.fixSpotReady])
                core.wait(.5)
                setAndPresentStimulus(self.experiment.win, [self.fixSpotPlay])
                self.showTestTrial(curTrial, curTrialIndex, whichPart)
                curTrialIndex += 1
                self.experiment.win.flip()
                core.wait(.2)
            self.experiment.eventTracker.close()
            self.experiment.testFile.close()
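The 'practice' and 'experiment' branches above are nearly identical; they differ only in which trial list is iterated and whether the output files are closed at the end. A possible consolidation (a sketch, not the original code) would be:

    def cycleThroughExperimentTrials(self, whichPart):
        # One loop for both parts; only the trial list and the final cleanup differ
        trialList = self.practTrialList if whichPart == 'practice' else self.trialList
        for curTrialIndex, curTrial in enumerate(trialList):
            if curTrialIndex == 0:
                waitingAnimation(currentExp.win, color="PowderBlue")
            if curTrialIndex > 0 and curTrialIndex % self.experiment.takeBreakEveryXTrials == 0:
                showText(self.experiment.win,
                         self.experiment.takeBreak,
                         color=(-1, -1, -1),
                         inputDevice=self.experiment.inputDevice)  # take a break
                waitingAnimation(currentExp.win, color="PowderBlue")
            setAndPresentStimulus(self.experiment.win, [self.fixSpot])
            core.wait(1)
            setAndPresentStimulus(self.experiment.win, [self.fixSpotReady])
            core.wait(.5)
            setAndPresentStimulus(self.experiment.win, [self.fixSpotPlay])
            self.showTestTrial(curTrial, curTrialIndex, whichPart)
            self.experiment.win.flip()
            core.wait(.2)
        if whichPart == 'experiment':
            self.experiment.eventTracker.close()
            self.experiment.testFile.close()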
Example #48
0
        remaining_bubbles = list(all_bubbles)

        # start trial
        el.trial(trial_num)

        #load and show stimulus
        img_num = subject_number
        stim = visual.SimpleImageStim(
            surf,
            image=path_to_fixdur_files + 'stimuli/multi_bubble_images/' +
            bubble_image + '/' + bubble_image + '_' + str(img_num) + '.png')
        stim.draw(surf)
        p.setData(int(5))
        surf.flip()
        p.setData(int(7))
        core.wait(0.07)  # hold the image on screen for ~70 ms
        p.setData(int(10))
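        # (the integer codes sent with p.setData above are assumed to be parallel-port
        # event markers bracketing the image flip and the ~70 ms hold)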
        #metainfos for tracker
        el.trialmetadata('BUBBLE_IMAGE', bubble_image)
        el.trialmetadata("forced_fix_onset", -1)
        el.trialmetadata("stimulus_onset", -1)
        el.trialmetadata("saccade_offset", -1)
        el.trialmetadata("DISPLAYED_BUBBLES", -1)
        el.trialmetadata("CHOSEN_BUBBLE", -1)
        el.trialmetadata('BUBBLE_DISPLAY_TIME', 6000)

        #fill meta_data
        subtrial_list = [{
            'trial': trial_num,
            'img': bubble_image,
            'img_num': img_num
Example #49
0
    def run_trial(self, rot, cond, count):
        # for negative start values in staircases, because *.par files only give abs values
        if cond['label'].endswith('m'):
            rot = -rot

        # set two reference
        left = cond['leftRef']
        right = cond['rightRef']

        leftRef = self.patch_ref(left)
        leftRef.pos = self.cfg['leftRef.pos']
        rightRef = self.patch_ref(right)
        rightRef.pos = self.cfg['rightRef.pos']

        # set colors of two stimuli
        standard = cond['standard']  # standard should be fixed
        test = standard + rot

        sPatch = self.patch_stim()
        tPatch = self.patch_stim()
        sPatch.colors, tPatch.colors = self.choose_con(standard, test)

        # randomly assign patch positions: upper (+) or lower (-)
        patchpos = [self.cfg['standard.ylim'], self.cfg['test.ylim']]
        rndpos = patchpos.copy()
        np.random.shuffle(rndpos)

        sPatch.xys = self.patch_pos(self.cfg['standard.xlim'], rndpos[0])
        tPatch.xys = self.patch_pos(self.cfg['test.xlim'], rndpos[1])

        # fixation cross
        fix = visual.TextStim(self.win,
                              text="+",
                              units='deg',
                              pos=[0, 0],
                              height=0.4,
                              color='black',
                              colorSpace=self.ColorSpace)
        # trial number display
        num = visual.TextStim(self.win,
                              text="trial " + str(count),
                              units='deg',
                              pos=[12, -10],
                              height=0.4,
                              color='black',
                              colorSpace=self.ColorSpace)

        trial_time_start = time.time()
        # first present references for 0.5 sec
        fix.draw()
        num.draw()
        leftRef.draw()
        rightRef.draw()
        self.win.flip()
        core.wait(0.5)

        # then present the standard and the test stimuli as well for 1 sec
        fix.draw()
        num.draw()
        leftRef.draw()
        rightRef.draw()
        sPatch.draw()
        tPatch.draw()
        self.win.flip()

        if self.trial_dur:
            # show stimuli for some time
            core.wait(self.trial_dur)

            # refresh the window, clear references and stimuli
            num.draw()
            self.win.flip()

        # get response
        judge = None

        while judge is None:
            allkeys = event.waitKeys()
            for key in allkeys:
                if (key == 'left' and rot * rndpos[0][0] > 0) or (
                        key == 'right' and rot * rndpos[0][0] < 0):
                    judge = 1  # correct
                    thiskey = key
                elif (key == 'left' and rot * rndpos[0][0] < 0) or (
                        key == 'right' and rot * rndpos[0][0] > 0):
                    judge = 0  # incorrect
                    thiskey = key
                elif key == 'escape':
                    breakinfo = 'userbreak'
                    # xrl.add_break(breakinfo)
                    config_tools.write_xrl(self.subject,
                                           break_info='userbreak')
                    core.quit()

        trial_time = time.time() - trial_time_start

        return judge, thiskey, trial_time
Example #50
0
 }
 io=launchHubServer(**iohub_config)
 mcu=io.devices.mcu
 kb=io.devices.keyboard
 ain=io.devices.ain
 experiment=io.devices.experiment
 mcu.setDigitalOutputByte(0)
 mcu.enableEventReporting(True)
 ain.enableEventReporting(True)
 delay_offset=ain.getDelayOffset()
 
 print()
 print('>> Running test using a delay_offset of',delay_offset)
 print('>> Please wait.')    
 print()
 core.wait(1.0)
 mcu.getRequestResponse()
 io.clearEvents("all")   
 response_times=[]
 for i,c in enumerate(ttl_bytes):      
     ain_channel_name='AI_%d'%c
     v=np.power(2,c)
     r=mcu.setDigitalOutputByte(v)
     dout_id=r['id']
     found_analog_trigger=False
     found_dout_response=False
     stime=getTime()
     ai_event_results[i,:]=0.0
     while getTime()-stime < 2.0:
         if found_analog_trigger is False:        
             ai_events=ain.getEvents(asType='dict')        
Example #51
0
display_message(myWin, display_text, fix_MSG)

display_text.setText(
    "Please press s for short, and l for long to judge the oddball duration against the standard"
)
for ix, dur in enumerate(possibleOddballDurations):
    print(oddBallType[ix])
    if oddBallType[ix] == 0:
        oddBallStim = loomingStim
    elif oddBallType[ix] == 1:
        oddBallStim = starStim
    elif oddBallType[ix] == 2:
        oddBallStim = redStim
    fixation.setAutoDraw(True)
    myWin.flip()
    core.wait(1)
    for num in range(1, randint(7, 12)):
        theta = random.uniform(0, 359) * np.pi / 180
        standardStim.setPos([
            eccentricity[ix] * np.cos(theta), eccentricity[ix] * np.sin(theta)
        ])
        standardStim.setAutoDraw(True)
        myWin.flip()
        omega = .01
        direction = np.random.choice([-1, 1])
        changeDirection = random.randint(10, 64)
        standardBallClock.reset()
        counter = 0
        #core.wait(Durations) #Standard stimuli for a duration of 1050msec
        while standardBallClock.getTime() < Durations:
Example #52
0
    def showTestTrial(self, curTrial, trialIndex, whichPart):
        #s=sound.Sound(self.soundMatrix[curTrial['label']])
        print('trial: ' + str(trialIndex + 1) + ' ' + curTrial['filename'])
        responseInfoReminder = visual.TextStim(
            self.experiment.win,
            text=self.experiment.responseInfoReminder,
            pos=(0, -200),
            height=30,
            color="blue")
        questionText = visual.TextStim(self.experiment.win,
                                       text=curTrial['Question'],
                                       pos=(0, 0),
                                       height=30,
                                       color="black")

        if self.experiment.subjVariables['useParallel'] == 'yes':
            playSentenceAndTriggerNonVisual(
                self.experiment.win, self.soundMatrix[curTrial['filename']],
                curTrial['onsetDet'], curTrial['waitForDetOffset'],
                curTrial['waitForNounOffset'], curTrial['waitForEnd'],
                curTrial['trigDet'], curTrial['trigOffsetNoun'],
                self.experiment.eventTracker, curTrial, self.expTimer)
        else:
            playSentenceNoTriggerNonVisual(
                self.experiment.win, self.soundMatrix[curTrial['filename']],
                curTrial['onsetDet'], curTrial['waitForDetOffset'],
                curTrial['waitForNounOffset'], curTrial['waitForEnd'],
                curTrial['trigDet'], curTrial['trigOffsetNoun'],
                self.experiment.eventTracker, curTrial, self.expTimer)

        core.wait(self.experiment.afterSentenceDelay)
        response = -99
        isRight = -99
        rt = -99
        if curTrial['hasQuestion'] == 1:
            setAndPresentStimulus(self.experiment.win,
                                  [responseInfoReminder, questionText])
            (response, rt) = getKeyboardResponse(
                list(self.experiment.validResponses.keys()))

            ### Try getting 'response' locally? since it is assigned in that current loop.
            ### Try assigning a 'dummy'  value to response before the if Question as this may not have been created on the first trial.

            if self.experiment.validResponses[response] == curTrial['yesOrNo']:
                isRight = 1
                print('correctlyAnswered')
                playAndWait(self.soundMatrix['bleep'])

            else:
                isRight = 0
                print('incorrectlyAnswered')
                playAndWait(self.soundMatrix['buzz'])

        fieldVars = []
        if whichPart != 'practice':
            for curField in self.fieldNames:
                fieldVars.append(curTrial[curField])
            [header,
             curLine] = createRespNew(self.experiment.optionList,
                                      self.experiment.subjVariables,
                                      self.fieldNames,
                                      fieldVars,
                                      a_expTimer=self.expTimer.getTime(),
                                      b_whichPart=curTrial['part'],
                                      c_trialIndex=trialIndex,
                                      f_response=response,
                                      g_isRight=isRight,
                                      h_rt=rt * 1000)
            writeToFile(self.experiment.testFile, curLine)

        #write the header with col names to the file
        if trialIndex == 0 and curTrial['part'] != 'practice':
            print "Writing header to file..."
            dirtyHack = {}
            dirtyHack['trialNum'] = 1
            writeHeader(dirtyHack, header,
                        'header_test' + self.experiment.expName)
Example #53
0
    Y_NT1_Dist = round(cm2pix(radius * sin(radians(angle_NT1_Dist))), decimals)
    X_NT2_Dist = round(cm2pix(radius * cos(radians(angle_NT2_Dist))), decimals)
    Y_NT2_Dist = round(cm2pix(radius * sin(radians(angle_NT2_Dist))), decimals)

    #order (1 or 2) and quadrant
    order = trial['order']
    quadrant = trial['quadrant_target']
    axis_response = trial['axis_response']
    horiz_vertical = trial['axis_response']

    ############# Start the display of the task
    #############################
    ############################# ITI Inter-trial-interval
    #############################
    win.flip()
    core.wait(inter_trial_period)
    display_time = core.Clock()
    display_time.reset()

    #Draw the components of the fixation square and the circle
    FIX()
    win.flip()

    #############################
    ############################# Pre cue period (between fixation and presentation of the cue)
    #############################
    core.wait(float(pre_cue_period))

    ############################# #CUE PERIOD (commend this for the WM gratting scroll)
    #############################
    if order != 2:  #==1 or ==0 (controls)
Example #54
0
    fixation = visual.GratingStim(win=win,
                                  size=0.01,
                                  pos=[0, 0],
                                  sf=0,
                                  rgb='black')
    fixation.draw()
    event.Mouse(visible=False)

    keys = event.waitKeys(
        keyList=['space'])  # Wait for space bar press to begin trial

    win.flip()

    clock = core.Clock()
    while clock.getTime() < iti_dur:
        core.wait(0.001)

    #%% Present Target color

    # Grab target color
    current_targ_degree = target_color[x - 1]
    current_targ_color = ((degree_rgb['r'][current_targ_degree]),
                          (degree_rgb['g'][current_targ_degree]),
                          (degree_rgb['b'][current_targ_degree]))

    # Draw circle in target color
    circle = visual.Circle(
        win=win,
        units="pix",
        radius=200,  #Adjust circle radius to fit suitable size
        fillColorSpace='rgb255',
Example #55
0
        if event.getKeys(['escape']):
            core.quit()
    ''' Additional pause after both ratings:
    myItem.draw()
    ratingScale.draw()
    familiarityScale.draw()
    ratingTitle.draw()
    familiarityTitle.draw()
    win.flip(); core.wait(0.35) # Brief pause
    '''
    # assigns response to corresponding image
    ratings[imageList.index(image)] = ratingScale.getRating()
    familiarity[imageList.index(image)] = familiarityScale.getRating()

    win.flip()
    core.wait(0.35)  # brief pause, slightly smoother for the subject

# Write to .csv file with participants name, subj_id, in file name
f = open(subj_id + ' task a results.csv', 'w')
for i in range(len(imageList)):
    # Remove the file path from imageList[i]; use a raw string so backslashes
    # (e.g. the \t in "task a") are not treated as escape sequences
    picName = os.path.relpath(imageList[i], r'..\..\JOCN\task a\images')
    f.write(picName + ',' + str(ratings[i]) + ',' + str(familiarity[i]) + "\n")
f.close()
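For comparison only, the same file could be written with the csv module (a sketch under the same assumptions, not the original script), which handles quoting and non-string values automatically:

import csv

with open(subj_id + ' task a results.csv', 'w', newline='') as fp:
    resultWriter = csv.writer(fp)
    for i, image in enumerate(imageList):
        picName = os.path.relpath(image, r'..\..\JOCN\task a\images')
        resultWriter.writerow([picName, ratings[i], familiarity[i]])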

# Thank participant
thank_you_screen.draw()
win.flip()
core.wait(1.5)
Example #56
0
#mov._audioStream = testSound
for trl in range(0, 20):
    # mov = visual.MovieStim3(win, r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.mp4',flipVert=False, size=720, flipHoriz=False, loop=False,noAudio=True)
    testSound = sound.Sound(
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.wav',
        sampleRate=48000)
    mov = visual.MovieStim3(
        win,
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.mp4',
        flipVert=False,
        size=720,
        flipHoriz=False,
        loop=False,
        noAudio=True)
    #mov = visual.MovieStim(win, r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.mp4')
    core.wait(.5)
    print('orig movie size=%s' % (mov.size))
    print('duration=%.2fs' % (mov.duration))
    soundStart = 0
    trig2 = 1
    cnt = 1
    #roundStart = globalClock.getTime()
    #win.flip()
    #mov.seek(.008)
    mov.draw()
    mov.seek(.008)

    portTime = 0
    #    while mov.getCurrentFrameTime() <= 0:
    #        time.sleep(0.001)
    while mov.status != visual.FINISHED:
Example #57
0
def run_trials(win,
               line,
               txt,
               mouse,
               distances,
               durations,
               screen_refresh_rate,
               filename,
               username,
               between_stim_and_resp_time,
               time_after_click_before_stim,
               time_after_user_input_is_finalized,
               practice=False):

    # Total number of trials = distances*durations *2, since there are two
    #   types of trials
    number_of_trials = len(distances) * len(durations) * 2

    # Create a list of indices between 0 and number_of_trials-1
    # This will be looped over and will determine the conditions for
    #       each trial
    trial_indicies = list(range(number_of_trials))

    # Shuffle the order of the trial indices to randomize
    random.shuffle(trial_indicies)
    print(trial_indicies)

    # Loop through the trial_indicies list, each time through the
    # loop representing one trial
    #   NOTE:
    #       trial_num = 0, 1, 2, 3, ..., ie how many times through
    #                   the loop it's been
    #       trial_id = 17, 93, 23, ..., ie the number from the
    #                   shuffled list trial_indicies, which
    #                   indicates the properties of the trial
    for trial_num, trial_id in enumerate(trial_indicies):
        print(f"Trial number: {trial_num}\nCondition ID: {trial_id}")

        # First, figure out what type of trial it is,
        # ie time or distance, which will determine which type of
        # response will be collected
        this_trial_type = get_trial_type(trial_id, number_of_trials)

        # Get the distance value that will be used
        this_distance = distances[get_distance_index(trial_id, distances,
                                                     number_of_trials)]

        # Get the duration value that will be used
        this_duration = durations[get_duration_index(trial_id, durations)]

        # Get the current duration in frames, rather than seconds
        this_duration_frames = screen_refresh_rate * this_duration

        # Just to make sure everything is working, print out the trial conditions to the console
        # print([this_trial_type, this_distance, this_duration])

        # Present a message indicating which type of trial it will be, ie duration or distance
        trial_type_message_presented = present_trial_type_message(
            win, txt, this_trial_type, practice)

        # Then present a line of length this_distance for a duration this_duration
        stimulus_finished_presenting = present_stimulus(
            win, line, this_distance, this_duration_frames,
            time_after_click_before_stim)

        # Then wait another period of time
        core.wait(between_stim_and_resp_time)

        # Then either collect a time response or duration response, depending on this_trial_type
        if this_trial_type == "distance":
            user_stimulus_estimate = collect_distance_response(
                win, line, mouse, txt, time_after_user_input_is_finalized,
                practice)

            # Write the data from this trial to file
            with open(filename, 'a') as fp:
                writer = csv.writer(fp)
                writer.writerow([
                    trial_num, username, practice,
                    datetime.datetime.now(), stimulus_finished_presenting,
                    trial_id, this_trial_type, this_distance, this_duration,
                    user_stimulus_estimate, 'NaN'
                ])

        else:  # then it's duration
            user_stimulus_estimate = collect_duration_response(
                win, line, mouse, txt, time_after_user_input_is_finalized,
                practice)

            # Write the data from this trial to file
            with open(filename, 'a') as fp:
                writer = csv.writer(fp)
                writer.writerow([
                    trial_num, username, practice,
                    datetime.datetime.now(), stimulus_finished_presenting,
                    trial_id, this_trial_type, this_distance, this_duration,
                    'NaN', user_stimulus_estimate / screen_refresh_rate
                ])
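run_trials relies on helper functions (get_trial_type, get_distance_index, get_duration_index, present_trial_type_message, present_stimulus, and the two collect_*_response functions) that are defined elsewhere. Purely as an illustration, one self-consistent way the three index helpers could decode a trial_id, given that number_of_trials = len(distances) * len(durations) * 2, is:

def get_trial_type(trial_id, number_of_trials):
    # Assumed convention: the first half of the ids are distance trials,
    # the second half are duration trials.
    return "distance" if trial_id < number_of_trials // 2 else "duration"

def get_distance_index(trial_id, distances, number_of_trials):
    # Within each half, consecutive blocks of len(durations) ids share a distance.
    ids_per_half = number_of_trials // 2
    ids_per_distance = ids_per_half // len(distances)  # == len(durations)
    return (trial_id % ids_per_half) // ids_per_distance

def get_duration_index(trial_id, durations):
    # The duration condition cycles fastest.
    return trial_id % len(durations)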
Example #58
0
event.waitKeys()

##### TTL Pulse trigger
if EEGflag:
    #port.open()
    win.callOnFlip(port.setData, startSaveflag)  # send the start-of-recording TTL code on the next flip
    #port.write(startSaveflag)
    #port.close()

#### Setting up a global clock to track initiation of experiment to end
#Time_Since_Run = core.MonotonicClock()  # to track the time since experiment started, this way it is very flexible compared to psychopy.clock
RT_clock = core.Clock()
##### 2 seconds Intial fixation
Fix_Cue.draw()
win.flip()
core.wait(Initial_wait_time)

##### Start trials

for trial_num in range(len(Trial_dict)):  #range(len(Trial_dict))

    if Trial_dict[trial_num]['cue'] == 'dcr' or Trial_dict[trial_num][
            'cue'] == 'dpr':
        if Trial_dict[trial_num]['pic'] == 'Face':
            corr_resp = yes_key
        else:
            corr_resp = no_key
    if Trial_dict[trial_num]['cue'] == 'fpr' or Trial_dict[trial_num][
            'cue'] == 'fpb':
        if Trial_dict[trial_num]['pic'] == 'Face':
            corr_resp = yes_key
Example #59
0
def collect_distance_response(win, line, mouse, txt,
                              time_after_user_input_is_finalized, practice):

    # Create a line at center of screen, less than minimum distance
    line.start = [0, 0]
    line.end = [0, 0]
    line.lineColor = 'blue'
    line.draw()

    # If it's practice, give some direction messages
    if practice:
        txt.pos = [-300, 250]
        txt.text = 'Move the mouse to change the length of the line.\nOnce you have the line as long as you want, click once to finalize.\nThe line will turn red, and you will move on to the next trial.'
        txt.draw()

    win.flip()

    # Reset mouse
    mouse.clickReset()

    # When getTime is set to true, mouse.getPressed will return a
    #   tuple of size two. The first element will be a list of 3 booleans,
    #   each of which corresponds to whether the mouse button 0,1,2
    #   has been pressed since the last time mouse.clickReset was called.
    #   The second element will be a list of timestamps of when the
    #   mouse was clicked.
    while True not in mouse.getPressed(getTime=True)[0]:

        # Get mouse position
        # mouse.getPos() returns a tuple of size 2, indicating the current
        #   x and y positions -- (0,0) is at the center of the window
        mouse_position = mouse.getPos()

        # Update line start,end to -/+ mouse x-pos
        line.start = [-mouse_position[0], 0]
        line.end = [mouse_position[0], 0]
        line.draw()

        if practice:
            txt.draw()

        win.flip()

    # Store x pos so that it can be returned
    #   Double it, since 0 is at the center of window so the mouse position's
    #   x value only represents half of the length of the line
    user_inputted_length = 2 * abs(line.start[0])

    # Change the color of the line, or something, to indicate its clicked
    line.lineColor = 'red'
    line.draw()
    win.flip()

    # Wait a second or two
    core.wait(time_after_user_input_is_finalized)

    # Flip the screen to make it blank again
    win.flip()

    # Reset the color of the line for future use
    line.lineColor = 'black'

    return user_inputted_length
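The counterpart collect_duration_response is not shown in this excerpt. Since run_trials divides its return value by screen_refresh_rate, it presumably returns a frame count. A hypothetical sketch under that assumption, not the original implementation:

def collect_duration_response(win, line, mouse, txt,
                              time_after_user_input_is_finalized, practice):
    # Hypothetical sketch: the participant clicks once to start and once more to
    # stop; the number of frames in between is returned, which run_trials then
    # divides by screen_refresh_rate to convert to seconds.
    # (the line stimulus is not needed for a duration judgment, so it is unused)
    if practice:
        txt.pos = [-300, 250]
        txt.text = 'Click once to start timing and again to stop.'

    mouse.clickReset()
    # Wait for the first click
    while True not in mouse.getPressed():
        if practice:
            txt.draw()
        win.flip()
    # Wait for the button to be released so one press is not counted twice
    while True in mouse.getPressed():
        win.flip()

    # Count frames until the second click
    n_frames = 0
    while True not in mouse.getPressed():
        if practice:
            txt.draw()
        win.flip()
        n_frames += 1

    core.wait(time_after_user_input_is_finalized)
    win.flip()
    return n_frames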
Example #60
0
        image=trialInfo.loc[
            thisTrial,
            'scenes'],  # read in the trialInfo for the column scenes in the xlsx file
        size=1,
        pos=(0, 0),
        interpolate=True)
    thisScene.draw()

    # record trial prarameters
    out.loc[thisTrial, 'scene'] = trialInfo.loc[
        thisTrial, 'scenes']  # record which scene is presented in this trial
    out.loc[thisTrial, 'trial'] = thisTrial + 1  # record trial index

    # present fixation for isi duration
    while trialClock.getTime() < isiDur:
        core.wait(.001)

    # then present stimuli after fixation is done
    win.flip()  # this will flip "thisScene"
    stimClock.reset()
    # record when the stimulus was presented
    out.loc[thisTrial, 'stimOn'] = expClock.getTime()

    # set how long is each image presented
    while stimClock.getTime() < sceneDur:
        thisScene.draw()
        win.flip()

    event.clearEvents()  # clear any early response

    # then present question for response