Example #1
def prepfortrial(t):
    cue_ind = cueseq[t]
    prb_ind = prbseq[t]
    con_ind = conseq[t]
    timing = frames[timseq[t]]
    # select cue
    cue_buffer = cue_list[con_ind][cue_ind]

    # set target letters
    shuffle(letters)
    stiml = letters[0:4]
    if t in B3TRIALS:
        for i in range(len(xcoo)):
            stim_display[i].setPos((xcoo[i], ycoo[xyconfigs[conseq[t]][i]]))
            mask_display[i].setPos((xcoo[i], ycoo[xyconfigs[conseq[t]][i]]))  # random masks
    for i in range(len(stiml)):
        stim_display[i].setText(stiml[i])
    stim_buffer = visual.BufferImageStim(win, stim=stim_display + [fixation])
    # set masks
    #    mask_buffer = mask_display[con_ind][t%len(mask_display[0])]
    shuffle(mask_imgs)
    for i in range(len(xcoo)):
        mask_display[i].setImage(mask_imgs[i])
    mask_buffer = visual.BufferImageStim(win, stim=mask_display + [fixation])
    # find probe letter
    probe_letter = letters[prb_ind]
    probe_display.setText(probe_letter)

    sts = "\t".join([str(t + 1), str(cue_ind), str(prb_ind), str(timseq[t]),
                     str(timing), str(con_ind), "T:" + ''.join(stiml),
                     "P:" + probe_letter])

    return cue_buffer, stim_buffer, mask_buffer, probe_display, timing, sts
Example #2
def setCup(pp):
    global button
    global cupBuffer
    sN = pp
    rtClock = core.Clock()
    while True:
        key = event.getKeys(keyList = ['q','escape'])
        if len(key) > 0:
            core.quit()
        mX, mY = mouse.getPos()
        if buttonX[0] < mX < buttonX[1] and buttonY[0] < mY < buttonY[1]:  # mouse is within the "Next" button bounds
            set_mouse_position(win,0,0)
            break
        if mY < (slotY-(slotHeight/2)) and mY > -(screenY*.2):
            sN1 = getSlot(mX,mY,slotSpread)
            if sN1 is not None:
                sN = sN1
            if sN == 0:
                sN = 1
            elif sN == 40:
                sN = 39
        plinkoTable.draw()
        button.draw()
        buttonText.draw()
        ball.draw()
        drawCup(slotPos[sN][0])
        drawPbar(totalPoints,maxScore)
        win.flip()
    rt = rtClock.getTime()
    plinkoTable.draw()
    drawCup(slotPos[sN][0])
    drawPbar(totalPoints,maxScore)
    cupBuffer = visual.BufferImageStim(win)
    return sN,rt
Example #3
    def _targetStim(self, tloc, tdir, flank):
        """Return a complete buffer of specified target at position tloc (top or down) 
        with flankers (congruent, neutral, or incongruent) in direction tdir (left or right)
        ready to flip
        """
        sz = self.arrowSize
        pw = self.allWidthDeg
        p = self.arrowSize + self.arrowSep
        y = self.targetDist if tloc=='top' else -self.targetDist
        if self.runDummy:
            lines = [ self._drawLine((x, y), sz, pw, False, None) for x in (-2*p, -p, 0, p, 2*p) ]
            heads = []
        elif flank=='neutral':
            lines = [ self._drawLine((x, y), sz, pw, False, None) for x in (-2*p, -p, p, 2*p) ]

            lines = lines + [self._drawLine((0, y), sz, pw, True, tdir)]
            heads = [ self._drawHead((0, y), sz, pw, tdir) ]
        elif flank=='congruent':
            lines = [ self._drawLine((x, y), sz, pw, True, tdir) for x in (-2*p, -p, 0, p, 2*p) ]
            heads = [ self._drawHead((x, y), sz, pw, tdir) for x in (-2*p, -p, 0, p, 2*p) ]
        elif flank=='incongruent':
            rdir = 'left' if tdir=='right' else 'right'
            lines = [ self._drawLine((x, y), sz, pw, True, rdir) for x in (-2*p, -p, p, 2*p) ]
            heads = [ self._drawHead((x, y), sz, pw, rdir) for x in (-2*p, -p, p, 2*p) ]

            lines = lines + [ self._drawLine((0, y), sz, pw, True, tdir) ]
            heads = heads + [ self._drawHead((0, y), sz, pw, tdir) ]

        return visual.BufferImageStim(self.win, stim=(lines + heads))
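# Hedged usage sketch (added, not from the original source): assuming `task` is an
# instance of the class above with its PsychoPy window in `task.win`, the buffered
# flanker display is drawn with a single call, which is the point of pre-buffering.
target_buffer = task._targetStim('top', 'right', 'incongruent')
target_buffer.draw()
task.win.flip()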
Example #4
def doTrial():
    im = []
    for frame in range(240):
        slot, local = divmod(frame, framesPerSlot)
        if local == 0:
            piece = []
            for place in range(numPlaces):
                piece.append(
                    visual.ImageStim(win=window,
                                     pos=(posX[place], 0),
                                     image=digImg[digits[slot][place]],
                                     opacity=(frame % freq[place]) /
                                     (freq[place] - 1)))
        else:
            for place in range(numPlaces):
                piece[place].opacity = (frame % freq[place]) / (freq[place] -
                                                                1)
        im.append(visual.BufferImageStim(win=window, stim=piece))

    myTimesB = []
    myTimesA = []
    for frame in range(240):
        myTimesA.append(timer.getTime())
        im[frame].draw()
        myTimesB.append(timer.getTime())
        window.flip()
    del im
    return myTimesA, myTimesB
def drawConfLines(clines,
                  ctext_stim,
                  ctext,
                  items,
                  cross,
                  positions,
                  prevBeads,
                  trialNum,
                  beadPoses,
                  predText=False,
                  subSlider=False):
    # Draw options to select
    if predText is not False:
        predText.draw()
    for i in range(len(items)):
        items[i].setPos(positions[i])
        items[i].draw()

    #Draw and buffer confidence lines
    for cl in clines:
        cl.draw()
    for i in range(len(ctext)):
        ctext_stim[i].setText(ctext[i])
        ctext_stim[i].draw()
    if subSlider is not False:
        subSlider.draw()
    drawSeenBeads(trialNum, beadRem, prevBeads, beadPoses)
    cline_image = visual.BufferImageStim(win)
    return cline_image
Example #6
def prepare_figure(trial, pos=(0, 0)):
    """Prepare a figure given settings in a trial."""
    # A hack to not present figures on auditory-only trials
    if trial['any_visual']:
        # If the figure's current flip-state is not desired, flip it horizontally.
        figure_template.pos = pos
        figure_template.flipped = int(
            not figure_template.flipped)  # Now opposite

        # Draw fill
        figure_template.image = path.join(
            'stimuli', '%(shape)s_%(complexity)s.png' % trial)
        figure_template.color = colors[trial['hue']][
            trial['lightness']]  # Select correct color
        figure_template.draw()

        # Draw outline
        figure_template.color = (255, 255, 255)  # White, so the outline image keeps its own colors
        figure_template.image = path.join(
            'stimuli', '%(shape)s_%(complexity)s_outline.png' % trial)
        figure_template.draw()

        # Screenshot
        figure = visual.BufferImageStim(win)  # Screenshot

        # Clean up and return
        figure.draw()  # First draw is slower
        win.clearBuffer()  # Wipe screen
        figure_template.pos = (0, 0)  # back to center
        return figure
    else:
        return figure_blank
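# Hedged usage sketch (added, not from the original source): `some_trial` stands in
# for a trial dict with the keys used above; the returned screenshot can then be
# drawn repeatedly at low cost in the trial loop.
figure = prepare_figure(some_trial, pos=(0.3, 0))
figure.draw()
win.flip()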
Example #7
def doTrial(cond):
    myCol=[[1,-1,-1],[-1,1,-1]]
    blank=visual.TextStim(win=window,text="")
    cross=visual.TextStim(win=window,text="+")
    
    [targ,flank]=decode(cond)
    start=np.random.randint(-20,20,size=2)
    im=[]
    for x in range(3):
        for y in range(3):
            im.append(visual.ShapeStim(win=window,
                            vertices=makeVertices(pX=posX[x]+start[0],pY=posY[y]+start[1]),
                            fillColor=myCol[flank],
                            lineColor=(0,0,0)))
            
    im[4]=visual.ImageStim(win=window,
                           image=makeArray(targVal[targ],resX,resY),
                           pos=start)        
    full=visual.BufferImageStim(win=window,stim=im)

    correct=int(targVal[targ]>.5)
    
    cross.pos=start
    cross.draw()
    window.flip()
    core.wait(.2)
    blank.draw()
    window.flip()
    core.wait(.5)        
    timer.reset()
    full.draw()
    window.flip()
#    window.getMovieFrame(buffer='front')
    keys=event.waitKeys(keyList=['z','slash','9'],timeStamped=timer)
    blank.draw()
    window.flip()
    resp=keys[0][0]
    rt=keys[0][1]
    if resp=='9':
        window.close()
        core.quit()
        exit()
    if resp=='z':
        respInt=0
    else:
        respInt=1
    if respInt==correct:
        correct1.play()
        core.wait(0.1)
        correct2.play()
    else:
        error.play()
    core.wait(.5)
    return respInt, rt
    def make_ring(self, text_size=45):
        """ create the ring
        20210518 - use images instead to match eprime experiment
        makes self.ringimg['rew'] and self.ringimg['neu']
        see
        https://discourse.psychopy.org/t/the-best-way-to-draw-many-text-objects-rsvp/2758
        """

        # color and symbol for ring reward
        cues = {
            'neu': {
                'color': 'gray',
                'sym': '#'
            },
            'rew': {
                'color': 'green',
                'sym': '$'
            }
        }
        n_in_ring = 12
        el_rs = 250  # TODO: make relative to screen size?
        el_thetas = np.linspace(0, 360, n_in_ring, endpoint=False)
        el_xys = np.array(misc.pol2cart(el_thetas, el_rs)).T
        ringtext = visual.TextStim(win=self.win,
                                   units='pix',
                                   bold=True,
                                   height=text_size,
                                   text='$')  # '$' will be changed
        cap_rect_norm = [
            -(text_size / 2.0) / (self.win.size[0] / 2.0),  # left
            +(text_size / 2.0) / (self.win.size[1] / 2.0),  # top
            +(text_size / 2.0) / (self.win.size[0] / 2.0),  # right
            -(text_size / 2.0) / (self.win.size[1] / 2.0)  # bottom
        ]
        for k in ['rew', 'neu']:
            ringtext.text = cues[k]['sym']
            ringtext.color = cues[k]['color']
            buff = visual.BufferImageStim(win=self.win,
                                          stim=[ringtext],
                                          rect=cap_rect_norm)
            # img = (np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0)
            self.ringimg[k] = visual.ElementArrayStim(
                win=self.win,
                units="pix",
                nElements=n_in_ring,
                sizes=buff.image.size,
                xys=el_xys,
                # colors=cues[k]['color'],
                elementMask=None,
                elementTex=buff.image)
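# Hedged usage sketch (added, not from the original source): once buffered into an
# ElementArrayStim, the whole 12-symbol ring is rendered with one draw call per
# frame, which is the speed-up the linked discourse thread is about. `task` stands
# in for the object that owns make_ring.
task.make_ring()
task.ringimg['rew'].draw()
task.win.flip()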
Example #9
    def test_bufferImage(self):
        """BufferImage inherits from ImageStim, so test .ori. .pos etc there not here
        """
        win = self.win
        gabor = visual.PatchStim(win, mask='gauss', ori=-45,
            pos=[0.6*self.scaleFactor, -0.6*self.scaleFactor],
            sf=2.0/self.scaleFactor, size=2*self.scaleFactor,
            interpolate=True)

        bufferImgStim = visual.BufferImageStim(self.win, stim=[gabor],
            interpolate=True)
        bufferImgStim.draw()
        utils.compareScreenshot('bufferimg_gabor_%s.png' %(self.contextName), win, crit=8)
        win.flip()
Example #10
def show_instruction():
    global win
    instr = visual.SimpleImageStim(win,
                                   image='instruction.png',
                                   units='',
                                   pos=(0.0, 0.0),
                                   flipHoriz=False,
                                   flipVert=False,
                                   name=None,
                                   autoLog=None)
    instrStim = visual.BufferImageStim(win, stim=[instr])
    instrStim.draw()
    win.flip()
    while True:
        if len(event.getKeys()) > 0: break
    event.clearEvents()
Example #11
def createPsychoGraphicsWindow(io):
        #create a window
        psychoStim = OrderedDict()
        psychoWindow = visual.Window(io.devices.display.getPixelResolution(),
                                    monitor=io.devices.display.getPsychopyMonitorName(),
                                    units=io.devices.display.getCoordinateType(),
                                    color=[128,128,128], colorSpace='rgb255',
                                    fullscr=True, allowGUI=False,
                                    screen=io.devices.display.getIndex()
                        )

        currentPosition= io.devices.mouse.setPosition((0,0))
        psychoWindow.setMouseVisible(False)

        fixation = visual.PatchStim(psychoWindow, size=25, pos=[0,0], sf=0,
                                    color=[-1,-1,-1], colorSpace='rgb')
        title = visual.TextStim(win=psychoWindow,
                                text="ioHub getEvents Delay Test", pos = [0,125],
                                height=36, color=[1,.5,0], colorSpace='rgb',
                                wrapWidth=800.0)

        instr = visual.TextStim(win=psychoWindow,
                                text='Move the mouse around, press keyboard keys and mouse buttons',
                                pos = [0,-125], height=32, color=[-1,-1,-1],
                                colorSpace='rgb', wrapWidth=800.0)

        psychoStim['static'] = visual.BufferImageStim(win=psychoWindow,
                                         stim=(fixation, title, instr))
        psychoStim['grating'] = visual.PatchStim(psychoWindow,
                                        mask="circle", size=75,pos=[-100,0],
                                        sf=.075)
        psychoStim['keytext'] = visual.TextStim(win=psychoWindow,
                                        text='key', pos = [0,300], height=48,
                                        color=[-1,-1,-1], colorSpace='rgb',
                                        wrapWidth=800.0)
        psychoStim['mouseDot'] = visual.GratingStim(win=psychoWindow,
                                        tex=None, mask="gauss",
                                        pos=currentPosition,size=(50,50),
                                        color='purple')
        psychoStim['progress'] = visual.ShapeStim(win=psychoWindow,
                                        vertices=[(0,0),(0,0),(0,0),(0,0)],
                                        pos=(400, -300))

        return psychoWindow, psychoStim
Example #12
def doTrial(cond):
    blank = visual.TextStim(win=window, text="")

    [targ, flank] = decode(cond)
    im = []
    for x in range(3):
        for y in range(3):
            t = makeArray(flankVal[flank], resX, resY)
            im.append(
                visual.ImageStim(win=window, pos=(posX[x], posY[y]), image=t))
    im[4].image = makeArray(targVal[targ], resX, resY)
    full = visual.BufferImageStim(win=window, stim=im)

    correct = int(targVal[targ] > .5)

    blank.draw()
    window.flip()
    core.wait(.5)
    timer.reset()
    full.draw()
    window.flip()
    window.getMovieFrame(buffer='front')
    keys = event.waitKeys(keyList=['z', 'slash', '9'], timeStamped=timer)
    blank.draw()
    window.flip()
    resp = keys[0][0]
    rt = keys[0][1]
    if resp == '9':
        window.close()
        core.quit()
        exit()
    if resp == 'z':
        respInt = 0
    else:
        respInt = 1
    if respInt == correct:
        correct1.play()
        core.wait(0.1)
        correct2.play()
    else:
        error.play()
    core.wait(.5)
    return (respInt, rt)
Example #13
def play_movie(win, movie, timing, trigger=None):
    mov = visual.MovieStim3(win,
                            'movies/' + movie,
                            size=[1080, 637.5],
                            flipVert=False,
                            flipHoriz=False,
                            loop=False,
                            noAudio=True)

    timer = core.CountdownTimer(timing)
    mov_start = core.getTime()
    if trigger:
        trigger.flicker(1)
    event.clearEvents(eventType='keyboard')

    while mov.status != visual.FINISHED and timer.getTime() > 0:
        mov.draw()
        win.flip(clearBuffer=False)

    last_frame = visual.BufferImageStim(win,
                                        buffer='front',
                                        rect=(-.8, 0.8, 0.8, -0.8))
    last_frame.autoDraw = True
    return mov_start, last_frame
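# Hedged usage sketch (added, not from the original source): because the returned
# BufferImageStim has autoDraw enabled, the captured last movie frame stays on
# screen across later flips until autoDraw is switched off. 'clip.mp4' and the
# 5-second timing are placeholders.
start_time, frozen_frame = play_movie(win, 'clip.mp4', timing=5.0)
win.flip()                      # last frame remains visible (autoDraw=True)
core.wait(1.0)
frozen_frame.autoDraw = False   # release it before the next trial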
Example #14
def doTrial(targ, flank, crit):

    digits = []
    a = list(range(10))
    for i in range(numPlaces):
        random.shuffle(a)
        digits.append(a[-numSlots:])

    for place in range(numPlaces):
        digits[place][crit] = 10 + flank
    digits[center][crit] = 10 + targ

    im = []
    for frame in range(numFrames):
        slot, local = divmod(frame, framesPerSlot)
        if local == 0:
            piece = []
            for place in range(numPlaces):
                piece.append(
                    visual.ImageStim(win=window,
                                     pos=(posX[place], 0),
                                     image=digImg[digits[place][slot]],
                                     opacity=(frame % freq[place]) /
                                     (freq[place] - 1)))
        else:
            for place in range(numPlaces):
                piece[place].opacity = (frame % freq[place]) / (freq[place] -
                                                                1)
        im.append(
            visual.BufferImageStim(win=window,
                                   stim=piece,
                                   rect=[-.3, .1, .3, -.1]))

    for a in range(preRSVP):
        if a < fBlank1:
            Blank.draw()
            window.flip()
        elif a < fBlank1 + fFix:
            fix.draw()
            window.flip()
        else:
            Blank.draw()
            window.flip()

    myTimesB = []
    myTimesA = []
    timer.reset()
    for frame in range(numFrames):
        myTimesA.append(timer.getTime())
        im[frame].draw()
        myTimesB.append(timer.getTime())
        window.flip()
    for frame in [0, 1]:
        Blank.draw()
        window.flip()
    keys = event.getKeys(keyList=['q', 'p', abortKey], timeStamped=timer)
    if len(keys) == 0:
        Reminder.draw()
        window.flip()
        keys = event.waitKeys(keyList=['q', 'p', abortKey], timeStamped=timer)
    resp = keys[0][0]
    rt = keys[0][1]
    if resp == abortKey:
        for i in range(framesPerSlot):
            Quit.draw()
            window.flip()
        exit()
    if resp == 'q':
        respInt = 0
    else:
        respInt = 1
    if (respInt == targ):
        correct1.play()
        core.wait(0.1)
        correct2.play()
    else:
        error.play()
    frameSkipped = 0
    for a in range(numFrames):
        if myTimesB[a] - myTimesA[a] > (1.0 / refreshRate - .001):  # float division in case refreshRate is an int
            frameSkipped += 1
    del im

    core.wait(.5)
    return (respInt, rt, frameSkipped)
Example #15
for i in range(10):
    fname.append(setDir + '/' + setPre + str(i) + setPost)
    digImg.append(Image.open(fname[i]))

digImg.append(Image.open(setDir + '/' + setPre + 'AH-0.00' + setPost))
digImg.append(Image.open(setDir + '/' + setPre + 'AH-1.00' + setPost))
digImg.append(Image.open(setDir + '/' + setPre + '+' + setPost))

Xfix = []
for place in range(numPlaces):
    Xfix.append(
        visual.ImageStim(win=window,
                         pos=(posX[place], 0),
                         image=digImg[12],
                         opacity=1.0))
fix = visual.BufferImageStim(win=window, stim=Xfix, rect=[-.3, .1, .3, -.1])

Blank = visual.TextStim(window, text="", pos=(0, 0))
Welcome = visual.TextStim(
    window,
    text=
    "Identify the letter that appears at the center of the screen\n\n\n Press the 'q' key for A\n\n Press the 'p' key for H\n\n\n\n Press any key to begin",
    pos=(0, 0))
Pause = visual.TextStim(
    window,
    text="Press any key to continue to the next block of trials",
    pos=(0, 0))
Reminder = visual.TextStim(window,
                           text="Please enter your response",
                           pos=(0, 0))
Quit = visual.TextStim(window, text="Quitting experiment...", pos=(0, 0))
    def setup_visuals(self):
        self.targets = list()
        # all possible targets
        tmp = list(self.static_settings['symbol_options'])

        # compute per-person mapping
        self.subject_rng = np.random.RandomState(
            seed=int(self.settings['subject']))
        self.subject_rng.shuffle(tmp)
        tmp = ''.join(tmp)  # convert from list to string
        self.settings.update({'reordered_symbols': tmp})

        tmp = tmp[:int(self.static_settings['n_choices'])]
        if self.settings['stim_type'] == 'symbol':
            for i in tmp:
                self.targets.append(
                    visual.TextStim(self.win,
                                    i,
                                    height=0.25,
                                    autoLog=True,
                                    font='FreeMono',
                                    name='stim ' + i))
        elif self.settings['stim_type'] == 'letter':
            for i in list(self.static_settings['key_options']):
                self.targets.append(
                    visual.TextStim(self.win,
                                    i,
                                    height=0.25,
                                    autoLog=True,
                                    font='FreeMono',
                                    name='stim ' + i))
        elif self.settings['stim_type'] == 'hand':
            right_hand = visual.ImageStim(self.win,
                                          image='media/hand.png',
                                          size=(0.3, 0.3),
                                          pos=(0.14, 0))
            left_hand = visual.ImageStim(self.win,
                                         image='media/hand.png',
                                         size=(0.3, 0.3),
                                         pos=(-0.14, 0),
                                         flipHoriz=True)
            self.background = visual.BufferImageStim(
                self.win, stim=[left_hand, right_hand])
            # pinky, ring, middle, index, thumb
            pos_l = [[-0.255, 0.0375], [-0.2075, 0.08875], [-0.1575, 0.1125],
                     [-0.095, 0.09], [-0.03, -0.0075]]
            pos_r = [[-x, y] for x, y in pos_l]
            pos_r.reverse()
            pos_l.extend(pos_r)
            pos_l = pos_l[:int(self.static_settings['n_choices'])]

            self.targets = [
                visual.Circle(self.win,
                              fillColor=(1, 1, 1),
                              pos=x,
                              size=0.03,
                              opacity=1.0,
                              name='stim %d' % c) for c, x in enumerate(pos_l)
            ]
        else:
            raise ValueError('Unknown stimulus option...')

        if self.settings['remap']:
            # remap heterologous, homologous, and same-finger pairs?
            # swap the stimuli
            for i, j in self.all_swaps:
                self.targets[j], self.targets[i] = self.targets[
                    i], self.targets[j]

        # push feedback
        self.push_feedback = visual.Rect(self.win,
                                         width=0.6,
                                         height=0.6,
                                         lineWidth=3,
                                         name='push_feedback',
                                         autoLog=False)

        # text
        self.instruction_text = visual.TextStim(self.win,
                                                text='Press a key to start.',
                                                pos=(0, 0),
                                                units='norm',
                                                color=(1, 1, 1),
                                                height=0.1,
                                                alignHoriz='center',
                                                alignVert='center',
                                                name='wait_text',
                                                autoLog=False,
                                                wrapWidth=2)
        self.instruction_text.autoDraw = True

        self.pause_text = visual.TextStim(self.win,
                                          text=u'Take a break!',
                                          pos=(0, 0.8),
                                          units='norm',
                                          color=(1, 1, 1),
                                          height=0.1,
                                          alignHoriz='center',
                                          alignVert='center',
                                          autoLog=True,
                                          name='pause_text')
        self.pause_text2 = visual.TextStim(
            self.win,
            text=u'Press ten times to continue.',
            pos=(0, 0.7),
            units='norm',
            color=(1, 1, 1),
            height=0.1,
            alignHoriz='center',
            alignVert='center',
            autoLog=True,
            name='pause_text')
choice_marker = visual.Rect(win, fillColor='gray', lineColor=None, width=SELECT_SIZE, height=SELECT_SIZE)
instruct = visual.TextStim(win, height=TEXT_HEIGHT, pos=(0, 2))

# Other stuff
writer = csvWriter(str(VARS['subject']), saveFolder=SAVE_FOLDER)  # writer.write(trial) will write individual trials with low latency

# Make grid into a stimulus and make a list of coordinates to present the shapes
grid_coords = []
square_grid = visual.Rect(win, width=GRID_SIZE, height=GRID_SIZE, lineWidth=2, lineColor='white')
for y in range(GRID_LENGTH -1, -1, -1):  # from top to bottom
    for x in range(GRID_LENGTH):  # left to right
        grid_coords += [(GRID_SIZE*(x + 0.5 - 0.5*GRID_LENGTH), 
                         GRID_SIZE*(y + 0.5 - 0.5*GRID_LENGTH))]
        square_grid.pos = grid_coords[-1]  # set to this x, y coordinate
        square_grid.draw()
grid = visual.BufferImageStim(win)  # "screenshot"
win.clearBuffer()  # blank screen - we don't want to show it later
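# Hedged usage sketch (added, not from the original source): with the grid captured
# as one BufferImageStim, each later frame can redraw it in a single call instead
# of GRID_LENGTH**2 separate Rect draws.
grid.draw()
win.flip()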


"""
PRESENT STIMULI
"""

def show_choice(volition):
    """ Select color - either yourself (volition==True) or not (volition==False) """
    # Instruction
    instruct.text = TEXT_CHOICE_VOLITION if volition else TEXT_CHOICE_NONVOLITION
    
    # Choose a pair of attributes
    selected = random.randint(0, N_CHOICES - 1)  # pre-select a random color
    options = random.sample(attribute_choices, N_CHOICES)
Example #18
rect = [-1, 1, 1, -1]
# rect is what rectangle to grab, here whole-screen (same as default).
# rect is a list of the edges: Left Top Right Bottom, norm units; try [-.5,.5,.5,-.5]
# No need to be symmetrical, but it works better for the demo.
# NB. If you change the rect and there was flickering on screen during the demo,
# it was likely due to alternating between drawing a partial screenshot
# interleaved with drawing all stimuli that went into it (only done for timing purposes).
# Such flicker is NOT intrinsic to BufferImageStim.
# In general, you do need to ensure that you
# take a snapshot of everything you want, which might not be whole-screen.

myClock = core.Clock()
t0 = myClock.getTime()

# and take a screen shot, from the back buffer by default:
screenshot = visual.BufferImageStim(myWin, rect=rect)
t1 = myClock.getTime() - t0  # record set-up time

# set up to display the screen-shot, img, plus moving text
wordsAnim = visual.TextStim(
    myWin,
    text=
    '''everything static is a single .draw()  %s\n\npress any key to quit.''' %
    (str(screenshot.size)))
wordsAnim.pos = (-.25, 0)  # assign via the property so the position change registers

drawTimeSingle = []  # accumulate draw times of the BufferImageStim
drawTimeMulti = []  # draw times of the pieces, as drawn separately (slowly)
frameCounter = 0
step = .002
event.clearEvents()
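# Hedged sketch (added, not from the original source) of the timing loop that the
# two lists above are set up for: on even frames draw the one-call screenshot, on
# odd frames draw the individual pieces it was built from (a list `stimList`, not
# shown in this fragment), and time both for comparison.
while not event.getKeys():
    t2 = myClock.getTime()
    if frameCounter % 2 == 0:
        screenshot.draw()               # everything static in one draw call
        drawTimeSingle.append(myClock.getTime() - t2)
    else:
        for stim in stimList:           # hypothetical list of the separate pieces
            stim.draw()
        drawTimeMulti.append(myClock.getTime() - t2)
    wordsAnim.pos += (step, 0)          # keep the text moving
    wordsAnim.draw()
    myWin.flip()
    frameCounter += 1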
def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - draw and buffer preCues
        - Capture each stream's pixels on each frame using bufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - pass the matrix of pixels to elementarraystim
    '''
    
    global cue
    
    nStreams = trial['nStreams']
    numTargets = trial['numToCue']  
    
    cuedFrame = trial['cue0temporalPos']
    cuedStream = np.random.choice(np.arange(nStreams), 1)

    cue.pos = calcStreamPos(
                trial = trial, 
                cueOffsets = cueOffsets, 
                streami = cuedStream, 
                streamOrNoise = False
                )
    cue = corticalMagnification.corticalMagnification(cue, 0.9810000000000002, cue = True) #this is the cuesize from the original experiment

    preCues = list()
    preCues.append(cue)

    if trial['cueSpatialPossibilities'] == 2:
        preCue = visual.Circle(myWin, 
                     radius=cueRadius,#Martini used circles with diameter of 12 deg
                     lineColorSpace = 'rgb',
                     lineColor=letterColor,
                     lineWidth=2.0, #in pixels
                     units = 'deg',
                     fillColorSpace = 'rgb',
                     fillColor=None, #beware, with convex shapes fill colors don't work
                     pos= [-5,-5], #the anchor (rotation and vertices are position with respect to this)
                     interpolate=True,
                     autoLog=False)#this stim changes too much for autologging to be useful
       
        preCue.pos = calcStreamPos(
            trial = trial,
            cueOffsets = cueOffsets,
            streami = cuedStream-4,
            streamOrNoise = False
        )
        
        preCue = corticalMagnification.corticalMagnification(preCue, 0.9810000000000002, cue = True)
        
        preCues.append(preCue)

    elif trial['cueSpatialPossibilities'] == 8:
        for i in range(1,8):
            preCue = visual.Circle(myWin, 
                     radius=cueRadius,#Martini used circles with diameter of 12 deg
                     lineColorSpace = 'rgb',
                     lineColor=letterColor,
                     lineWidth=2.0, #in pixels
                     units = 'deg',
                     fillColorSpace = 'rgb',
                     fillColor=None, #beware, with convex shapes fill colors don't work
                     pos= [-5,-5], #the anchor (rotation and vertices are position with respect to this)
                     interpolate=True,
                     autoLog=False)#this stim changes too much for autologging to be useful
        
            
            preCue.pos = (calcStreamPos(
                trial = trial,
                cueOffsets = cueOffsets,
                streami = cuedStream-i,
                streamOrNoise = False
            ))
            
            preCue = corticalMagnification.corticalMagnification(preCue, 0.9810000000000002, cue = True)
            preCues.append(preCue)            

    print('cueFrame = ' + str(cuedFrame))

    preCueStim = preTrial[int(baseAngleCWfromEast/anglesMustBeMultipleOf)] + preCues + [fixatnPoint]

    preCueFrame = visual.BufferImageStim(
                    win = myWin,
                    stim = preCueStim)
    
    preCueFrame = np.flipud(np.array(preCueFrame.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
    preCueFrame = np.pad(
            array=preCueFrame,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )

    preCueFrame = visual.ElementArrayStim(
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=preCueFrame.shape,
            elementTex=preCueFrame,
            elementMask = 'none'
        )


    streamPositions = list() #Might need to pass this to elementArrayStim as xys. Might not though


    streamLetterIdxs = np.empty( #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
                                 #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
                                 #all frames for a particular stream
        shape = (nStreams,numLettersToPresent),
        dtype = int
        )

    streamLetterIdentities = np.empty( #Letter identities for these streams
        shape = (nStreams,numLettersToPresent),
        dtype = str
        )

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream,:] = thisSequence
        streamLetterIdentities[thisStream,:] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    correctIdx = streamLetterIdxs[cuedStream,cuedFrame] 
    print('correctIdx')
    print(correctIdx)
    correctLetter = alphabetHelpers.numberToLetter(correctIdx, potentialLetters) #potentialLetters is global

    frameStimuli = list() #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,thisFrame] #The alphabetical indexes of stimuli to be shown on this frame
        
        stimuliToDraw = list() #Can pass a list to bufferimageStim!
        stimuliToDraw.append(fixatn)
        stimuliToDrawCounterPhase = list()
        stimuliToDrawCounterPhase.append(fixatnCounterphase)

        for thisStream in xrange(nStreams):
            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[thisStream] #The letter index for this particular stream on this particular frame
            
            thisStreamStimulus = streamTextObjects[thisStream,thisLetterIdx] #The text object for this stream
            
            thisPos = calcStreamPos(
                trial = trial, 
                cueOffsets = cueOffsets, 
                streami = thisStream, 
                streamOrNoise = False
                )

            thisStreamStimulus.pos = thisPos

            stimuliToDraw.append(thisStreamStimulus)
            stimuliToDrawCounterPhase.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                stimuliToDraw.append(cue)
                stimuliToDrawCounterPhase.append(cue)
        
        buff = visual.BufferImageStim( #Buffer these stimuli
            win = myWin,
            stim = stimuliToDraw
            )
        
        
        buff = np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
        buff = np.pad(
            array=buff,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )
        
        thisFrameStimuli = visual.ElementArrayStim( #A stimulus representing this frame with the fixation at full luminance
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask = 'none'
            )
            
        buff = visual.BufferImageStim( #Buffer these stimuli
            win = myWin,
            stim = stimuliToDrawCounterPhase
            )
        
        
        buff = np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
        buff = np.pad(
            array=buff,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )
        

        thisFrameStimuliCounterPhase = visual.ElementArrayStim( #A stimulus representing this frame with the fixation phase reversed
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask = 'none'
            )        

        frameStimuli.append([thisFrameStimuli, thisFrameStimuliCounterPhase])

    ts = []
    
    waiting = True
    myMouse.setVisible(waiting)
    
    while waiting:
        startTrialStimuli.draw()
        startTrialBox.draw()
        myWin.flip()
        if myMouse.isPressedIn(startTrialBox):
            waiting = False

    myMouse.setVisible(waiting)

    if eyetracking: 
        tracker.startEyeTracking(nDone,True,widthPix,heightPix) #start recording with eyetracker  

    myWin.flip(); myWin.flip()#Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    preCueFrame.draw()
    myWin.flip()
    core.wait(.25)
    myWin.flip()
    core.wait(.5)
    fixatn.draw()
    myWin.flip()
    core.wait(1)
    


    t0 = trialClock.getTime()
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)
    
    if eyetracking:
        tracker.stopEyeTracking()
        print('stopped tracking')
    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts, cuedStream, cuedFrame
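# Hedged sketch (added, not from the original source) of the 'Buffered image size
# dimensions' computation that pad_amounts above refers to, mirroring the
# power-of-two padding in the later doRSVPStim variant: ElementArrayStim textures
# want power-of-two dimensions, so the captured buffer is centred in a padded array.
def compute_pad_amounts(buff_shape):
    new_size = max(int(np.power(2, np.ceil(np.log(dim_size) / np.log(2))))
                   for dim_size in buff_shape)
    pad_amounts = []
    for i_dim in range(2):
        first_offset = int((new_size - buff_shape[i_dim]) / 2.0)
        second_offset = new_size - buff_shape[i_dim] - first_offset
        pad_amounts.append([first_offset, second_offset])
    return pad_amounts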
    digImg.append(Image.open(fname[i]))  # png image handle

digImg.append(Image.open(setDir + '/' + setPre + 'AH-0.00' + setPost))
digImg.append(Image.open(setDir + '/' + setPre + 'AH-1.00' + setPost))
digImg.append(Image.open(setDir + '/' + setPre + '+' + setPost))
digImg.append(Image.open(setDir + '/' + setPre + 'rect' + setPost))

Xfix = []
for place in range(numPlaces):
    Xfix.append(
        visual.ImageStim(win=window,
                         pos=(posX[place], -200),
                         image=digImg[12],
                         opacity=0.5))
fix = visual.BufferImageStim(
    win=window, stim=Xfix,
    rect=[-.3, 1, .3, -1])  # create a fast bufferimage for fast rendering

Blank = visual.TextStim(window, text="", pos=(0, 0))
Welcome = visual.TextStim(
    window,
    text=
    "Identify the letter that appears at the center of the screen\n\n\n Press the 'q' key using your LEFT hand for A\n\n Press the 'p' key using your RIGHT hand for H\n\n\n\n Press any key to begin",
    pos=(0, 0))
Pause = visual.TextStim(
    window,
    text="Press any key to continue to the next block of trials",
    pos=(0, 0))
Reminder = visual.TextStim(window,
                           text="Please enter your response",
                           pos=(0, 0))
Example #21
    def _config_screen(self):
        width, height = self.win.size
        self._L_squS = round(min(width, height) * 0.9 / 18)
        # define the 6 x 6 grid of square positions
        mesh_col, mesh_row = np.meshgrid(np.arange(6), np.arange(6))
        p_col = (-7.5 + 3 * mesh_col) * self._L_squS
        p_row = (6.5 - 3 * mesh_row) * self._L_squS
        self.rect_pos = pos = np.stack((p_col, p_row), axis=2)

        for i in range(pos.shape[0]):
            # i means row
            for j in range(pos.shape[1]):
                # j means col
                rect = visual.Rect(self.win,
                                   width=self._L_squS,
                                   height=self._L_squS,
                                   lineColor='black',
                                   fillColor='white',
                                   pos=(pos[i, j, 0], pos[i, j, 1]),
                                   units='pix')
                self._rects.append(rect)
                text = visual.TextStim(self.win,
                                       text=chr(word[6 * i + j]),
                                       pos=(pos[i, j, 0], pos[i, j, 1]),
                                       color='black',
                                       height=self._L_squS / 3 * 2,
                                       units='pix')
                self._texts.append(text)

        # target string
        # left for horizontal alignment
        display_str = self._split_string(0, self.max_string_display)
        self._target_string = visual.TextStim(self.win,
                                              text=display_str,
                                              font='Courier New',
                                              color='black',
                                              height=50,
                                              pos=(self.rect_pos[0, 0, 0], 0.47 * height),
                                              wrapWidth=self.rect_pos[0, -1, 0] - self.rect_pos[0, 0, 0],
                                              alignHoriz='left')
        self._result_string = visual.TextStim(self.win,
                                              text='',
                                              font='Courier New',
                                              color='red',
                                              height=50,
                                              pos=(self.rect_pos[0, 0, 0], 0.42 * height),
                                              wrapWidth=self.rect_pos[0, -1, 0] - self.rect_pos[0, 0, 0],
                                              alignHoriz='left',
                                              italic=True)
        # left horizontal alignment
        # Update its text when getting new result

        # 6 bars or 12
        bar_num = self.events_per_trial if self._stim_direction is None else self.events_per_trial // 2
        for i in range(bar_num):
            self._bars.append(visual.Line(self.win,
                                          start=(0, 0),
                                          end=(0, -self._L_squS),
                                          lineColor=self.colors[i % len(self.colors)],
                                          lineColorSpace='rgb255',
                                          lineWidth=5))

        self._background = visual.BufferImageStim(self.win, stim=self._rects + self._texts)
Example #22
def rate_CMC(feature1, feature2):
    """Show alternative CMCs and ask participants which they prefer"""
    # Instruction
    text = texts['rate_instruct'] % (feature_reminders2[feature1].upper(),
                                     feature_reminders2[feature2].upper())
    any_auditory = 'auditory' in (modalities[feature1], modalities[feature2])
    if any_auditory:
        text += texts['rate_click']

    # Get a single trial, just to get its data structure
    trials = make_trials(task=feature1, inducer=feature2, phase='rate')
    trial = random.choice(trials)  # Use one base stimulus

    # Show a neutral placeholder, even if there is nothing visual.
    if not trial['any_visual']:
        trial['any_visual'] = 1
        trial['shape'] = trial['complexity'] = trial['hue'] = trial['lightness'] = 'neutral'

    # SCREENSHOT
    # Setting actual features and showing stimuli
    # We will address stimuli by their position[column][row]
    # Order of the features in the format: order[column][row]
    order = [[[0, 0], [1, 1]],
             [[0, 1], [1, 0]]]
    groups = [0, 1]
    examples = [0, 1]
    for group in groups:
        for example in examples:
            # Set trial features
            trial[feature1] = features[feature1][order[group][example][0]]  # Some heavy indexing here!
            trial[feature2] = features[feature2][order[group][example][1]]
            trial['flip'] = 0  # So that figure and outline match

            # Prepare figure and show it
            figure = prepare_figure(trial,
                                    pos=RATING_POSITIONS[group][example])
            figure.draw()

    # Draw the instruction, then take the screenshot (nothing has been flipped to the subject yet)
    instruction.pos = TEXT_POS
    instruction.text = text
    instruction.draw()
    rating_screen = visual.BufferImageStim(win)
    rating_screen.draw()  # First draw is slower
    win.clearBuffer()  # Preparation finished. Delete what's been drawn so far
    win.callOnFlip(clock.reset)  # Set time=0 on first draw hereafter

    # REGISTER RESPONSES WHEN AUDIO IS INVOLVED
    # Mouse clicks on trials with an auditory component
    if any_auditory:
        # Continue until all shapes are pressed
        figure_untouched = [[True, True], [True, True]]  # figure_untouched[group][example]
        awaiting_beeps = True
        awaiting_keyboard = True
        while awaiting_beeps or awaiting_keyboard:
            # Show it. Only show VAS when all sounds have been played
            rating_screen.draw()
            if not awaiting_beeps:
                VAS.draw()
                press = event.getKeys(keyList=['escape'])
                if press:
                    core.quit()

                # Continue until there is a VAS response
                if not VAS.noResponse:
                    awaiting_keyboard = False  # Do not listen for responses anymore
            win.flip()

            # Virtually loop through figures and listen for mouse presses on them
            for group in groups:
                for example in examples:
                    figure_fill.pos = RATING_POSITIONS[group][example]

                    # Play sound and register
                    if mouse.isPressedIn(figure_fill):
                        # Play sound
                        trial[feature1] = features[feature1][order[group][example][0]]  # Some heavy indexing here!
                        trial[feature2] = features[feature2][order[group][example][1]]
                        beep = prepare_beep(trial)
                        beep.play()

                        # Register click
                        figure_untouched[group][example] = False

                        # Wait for mouse release
                        while mouse.isPressedIn(figure_fill):
                            pass

            # Status: how many touched now?
            awaiting_beeps = any(figure_untouched[0]) or any(
                figure_untouched[1])

            # Simulate responses prematurely
            if DIALOGUE['simulation'] == 'yes':
                figure_untouched = [[False, False], [False, False]]
                VAS.noResponse = False
                VAS.FINISHED = True
                VAS.decisionTime = random.random()

    # REGISTER RESPONSES FOR PURELY VISUAL CMCS
    else:
        # Simulate responses prematurely
        if DIALOGUE['simulation'] == 'yes':
            clock.reset(newT=RATING_WAIT + 1)
            VAS.noResponse = False
            VAS.FINISHED = True
            VAS.decisionTime = random.random()

        # Get response: new example or rate it?
        while VAS.noResponse:
            rating_screen.draw()
            if clock.getTime() > RATING_WAIT:
                VAS.draw()
            win.flip()

    # SCORE TRIAL
    trial['key'] = VAS.getRating()
    trial['rt'] = VAS.getRT()
    VAS.reset()  # Ready for next rating

    # Trial info since we messed with these since make_block
    trial['left'] = features[feature1][order[0][0][0]] + ':' + features[feature2][order[0][0][1]]
    trial['right'] = features[feature1][order[1][0][0]] + ':' + features[feature2][order[1][0][1]]
    trial['CMC_code'] = '%s:%s' % (trial[feature1], trial[feature2])  # Update code to match
    writer.write(trial)
    writer.flush()

    # Optionally quit
    if event.getKeys(QUIT_KEYS):
        # Convoluted since we know that any response here is a quit response,
        # but this consistently runs the same code on quit
        check_quit(QUIT_KEYS[0])
Example #23
    def setup_visuals(self):
        right_hand = visual.ImageStim(self.win,
                                      image='media/hand.png',
                                      size=(0.4, 0.4),
                                      pos=(0.3, 0),
                                      ori=-90)
        left_hand = visual.ImageStim(self.win,
                                     image='media/hand.png',
                                     size=(0.4, 0.4),
                                     pos=(-0.3, 0),
                                     ori=90,
                                     flipHoriz=True)
        self.background = visual.BufferImageStim(self.win,
                                                 stim=[left_hand, right_hand])
        # self.background.autoDraw = True
        # thumb, index, middle, ring, pinky
        pos_r = [[0.3075, -0.1525], [0.1775, -0.06125], [0.14375, 0.02375],
                 [0.1775, 0.0925], [0.2475, 0.1525]]

        pos_l = [[-x[0], x[1]] for x in pos_r]
        pos_l.reverse()
        pos_l.extend(pos_r)

        self.targets = [
            visual.Circle(self.win,
                          fillColor=(0.3, -0.2, -0.2),
                          pos=x,
                          size=0.05,
                          opacity=1.0) for x in pos_l
        ]

        # push feedback
        self.push_feedback = visual.Circle(self.win,
                                           size=0.1,
                                           fillColor=[-1, -1, -1],
                                           pos=(0, 0),
                                           autoDraw=False,
                                           autoLog=False,
                                           name='push_feedback')
        # fixation
        self.fixation = visual.Circle(self.win,
                                      size=0.05,
                                      fillColor=[1, 1, 1],
                                      pos=(0, 0),
                                      autoDraw=False,
                                      name='fixation')

        # text
        self.wait_text = visual.TextStim(self.win,
                                         text='Press a key to start.',
                                         pos=(0, 0),
                                         units='norm',
                                         color=(1, 1, 1),
                                         height=0.2,
                                         alignHoriz='center',
                                         alignVert='center',
                                         name='wait_text',
                                         autoLog=False,
                                         wrapWidth=2)
        self.wait_text.autoDraw = True
        self.good = visual.TextStim(self.win,
                                    text=u'Good timing!',
                                    pos=(0, 0.4),
                                    units='norm',
                                    color=(-1, 1, 0.2),
                                    height=0.1,
                                    alignHoriz='center',
                                    alignVert='center',
                                    autoLog=True,
                                    name='good_text')
        self.too_slow = visual.TextStim(self.win,
                                        text=u'Too slow.',
                                        pos=(0, 0.4),
                                        units='norm',
                                        color=(1, -1, -1),
                                        height=0.1,
                                        alignHoriz='center',
                                        alignVert='center',
                                        autoLog=True,
                                        name='slow_text')
        self.too_fast = visual.TextStim(self.win,
                                        text=u'Too fast.',
                                        pos=(0, 0.4),
                                        units='norm',
                                        color=(1, -1, -1),
                                        height=0.1,
                                        alignHoriz='center',
                                        alignVert='center',
                                        autoLog=True,
                                        name='fast_text')
ringtext = visual.TextStim(win=task.win,
                           units='pix',
                           bold=True,
                           height=text_size,
                           text='$')  # '$' will be changed
cap_rect_norm = [
    -(text_size / 2.0) / (task.win.size[0] / 2.0),  # left
    +(text_size / 2.0) / (task.win.size[1] / 2.0),  # top
    +(text_size / 2.0) / (task.win.size[0] / 2.0),  # right
    -(text_size / 2.0) / (task.win.size[1] / 2.0)  # bottom
]
ringimg = {}
for k in ['rew', 'neu']:
    ringtext.text = cues[k]['sym']
    ringtext.color = cues[k]['color']
    buff = visual.BufferImageStim(win=task.win,
                                  stim=[ringtext],
                                  rect=cap_rect_norm)
    # img = (np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0)
    ringimg[k] = visual.ElementArrayStim(
        win=task.win,
        units="pix",
        nElements=n_in_ring,
        sizes=buff.image.size,
        xys=el_xys,
        # colors=cues[k]['color'],
        elementMask=None,
        elementTex=buff.image)

# -----  INSTRUCTIONS ------
if settings['instructions']:
    cuesym = cues['rew']['sym']
Example #25
def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - Capture each stream's pixels on each frame using bufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - pass the matrix of pixels to elementarraystim
    '''

    global cue

    nStreams = trial['nStreams']
    numTargets = trial['numToCue']

    cuedFrame = trial['cue0temporalPos']
    cuedStream = np.random.choice(np.arange(nStreams), 1)

    print('cueFrame = ' + str(cuedFrame))

    streamPositions = list()  #Might need to pass this to elementArrayStim as xys. Might not though

    streamLetterIdxs = np.empty(  #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
        #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
        #all frames for a particular stream
        shape=(nStreams, numLettersToPresent),
        dtype=int)

    streamLetterIdentities = np.empty(  #Letter identities for these streams
        shape=(nStreams, numLettersToPresent),
        dtype=str)

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream, :] = thisSequence
        streamLetterIdentities[thisStream, :] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    correctIdx = streamLetterIdxs[cuedStream, cuedFrame]

    correctLetter = alphabetHelpers.numberToLetter(
        correctIdx, potentialLetters)  #potentialLetters is global

    frameStimuli = list()  #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,
                                        thisFrame]  #The stimuli to be shown on this frame

        thisFramePixels = np.zeros(shape=(screenValues['heightPix'],
                                          screenValues['widthPix']))

        stimuliToDraw = list()  #Can pass a list to bufferimageStim!

        for thisStream in xrange(nStreams):
            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame  #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[
                thisStream]  #The letter index for this particular stream on this particular frame

            thisLtr = alphabetHelpers.numberToLetter(thisLetterIdx,
                                                     potentialLetters)

            thisStreamStimulus = streamTextObjects[
                thisStream]  #The text object for this stream
            thisStreamStimulus.text = thisLtr

            thisPos = calcStreamPos(trial=trial,
                                    cueOffsets=cueOffsets,
                                    streami=thisStream,
                                    streamOrNoise=False)

            thisStreamStimulus.pos = thisPos

            stimuliToDraw.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                cue.setPos(thisPos)
                cue = corticalMagnification.corticalMagnification(
                    cue, 0.9810000000000002, cue=True
                )  #this is the cuesize from the original experiment
                stimuliToDraw.append(cue)

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDraw)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        new_size = max(  #need to pad out the texture to a power of two to use it in elementArrayStim
            [
                int(np.power(2, np.ceil(np.log(dim_size) / np.log(2))))
                for dim_size in buff.shape
            ])

        pad_amounts = []

        for i_dim in range(2):

            first_offset = int((new_size - buff.shape[i_dim]) / 2.0)
            second_offset = new_size - buff.shape[i_dim] - first_offset

            pad_amounts.append([first_offset, second_offset])

        buff = np.pad(array=buff,
                      pad_width=pad_amounts,
                      mode="constant",
                      constant_values=0.0)

        thisFrameStimuli = visual.ElementArrayStim(  #A stimulus representing this frame
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        frameStimuli.append(thisFrameStimuli)

    ts = []
    myWin.flip()
    myWin.flip(
    )  #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    t0 = trialClock.getTime()
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)

    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts
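The power-of-two padding done inside the frame loop above can be written as a small standalone helper; this is just a sketch with illustrative names, using plain NumPy:

import numpy as np

def pad_to_power_of_two(tex, fill=0.0):
    # Pad a 2-D texture symmetrically so both dimensions reach the next power
    # of two, which keeps ElementArrayStim/OpenGL happy with the texture size.
    new_size = max(int(2 ** np.ceil(np.log2(dim))) for dim in tex.shape)
    pad_amounts = []
    for dim in tex.shape:
        first = (new_size - dim) // 2       # rows/columns added before
        pad_amounts.append((first, new_size - dim - first))
    return np.pad(tex, pad_width=pad_amounts, mode='constant', constant_values=fill)

print(pad_to_power_of_two(np.ones((600, 800))).shape)  # -> (1024, 1024)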
Example #26
0
imageList = ['face.jpg', 'beach.jpg']
imageStim = visual.SimpleImageStim(win, imageList[0])
imageStim2 = visual.SimpleImageStim(win, imageList[1], pos=(.300, .20))
wordStim = visual.TextStim(
    win,
    text=
    'Press < escape > to quit.\n\nThere should be no change after 3 seconds.\n\n'
    + 'This is a text stim that is kinda verbose and long, so if it ' +
    'were actually really long it would take a while to render completely.',
    pos=(0, -.2))
stimlist = [imageStim, imageStim2, wordStim]

# Get and save a "screen shot" of everything in stimlist:
rect = (-1, 1, 1, -1)
t0 = clock.getTime()
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab, (-1, 1, 1, -1) is whole-screen
# as a list of the edges: Left Top Right Bottom, in norm units.

captureTime = clock.getTime() - t0

instr_buffer = visual.TextStim(win, text='BufferImageStim', pos=(0, .8))
drawTimeBuffer = []  # accumulate draw times of the screenshot
for frameCounter in range(200):
    t0 = clock.getTime()
    screenshot.draw()  # draw the BufferImageStim, fast
    drawTimeBuffer.append(clock.getTime() - t0)
    instr_buffer.draw()
    win.flip()
    if len(event.getKeys(['escape'])):
        core.quit()
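A pared-down version of the comparison this example is making, as a sketch only (timings are machine-dependent and the text stims are placeholders for an arbitrary stimulus list):

from psychopy import core, visual

win = visual.Window(size=(800, 600), units='norm')
clock = core.Clock()
stims = [visual.TextStim(win, text='item %d' % i, pos=(0, 0.8 - 0.2 * i))
         for i in range(8)]

snapshot = visual.BufferImageStim(win, stim=stims)  # one-off capture cost

t0 = clock.getTime()
for s in stims:
    s.draw()                                        # draw each stim separately
perStimTime = clock.getTime() - t0

t0 = clock.getTime()
snapshot.draw()                                     # draw the single captured texture
bufferTime = clock.getTime() - t0

win.flip()
print('separate stims: %.2f ms, buffered image: %.2f ms'
      % (perStimTime * 1000, bufferTime * 1000))
win.close()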
Example #27
0
def runCSRecallGrid(trial):
    """Recall faces"""
    # PREPARATION
    # Create a list "itemOptions" of items to show as response options: all targets plus some fill-ins
    if trial['stimType'] == 'faces': allItems = faceFiles
    elif trial['stimType'] == 'lettersGrid': allItems = string.ascii_uppercase
    itemFillin = list(set(allItems) - set(
        trial['encode']))  # allowable fill-in response options (non-targets)
    itemFillin = random.sample(itemFillin,
                               len(gridCoords) -
                               trial['span'])  # selected fill-in items
    itemOptions = trial['encode'] + itemFillin  # items for this answer
    if trial['stimType'] == 'faces':
        itemOptions = random.sample(itemOptions,
                                    len(itemOptions))  # randomize order
    if trial['stimType'] == 'lettersGrid':
        itemOptions.sort()  # sort alphabetically

    # Create a BufferImageStim "grid" with an array of target and fill-in faces or letters
    for i, item in enumerate(itemOptions):
        if trial['stimType'] == 'faces':
            recallOption.pos = gridCoords[i]  # set position
            recallOption.image = item
            recallOption.draw()
        elif trial['stimType'] == 'lettersGrid':
            letterStim.pos = gridCoords[i]
            letterStim.text = item
            letterStim.draw()

    textRecall.text = 'Select ' + str(
        trial['span']
    ) + ' items in their correct position in the presented sequence.\nPress SPACE to select and deselect. Press RETURN to submit answer.'
    textRecall.draw()
    grid = visual.BufferImageStim(win)  # take screenshot of back buffer
    win.clearBuffer()  # clear back buffer

    # ACTUAL RECALL
    win.callOnFlip(clock.reset)  # reset clock on next flip
    selectedCurrent = len(
        itemOptions
    ) / 2  # placeholder for current selected face. Starts in center
    selected = []  # placeholder for current answer
    while True:
        # Draw images
        grid.draw()
        grid.draw(
        )  # because it sometimes doesn't work on first draw on bad graphics hardware!

        # Overlay current selection state
        for i, thisSelection in enumerate(selected):
            # Color overlay
            recallSelectedOverlay.pos = gridCoords[thisSelection]
            recallSelectedOverlay.draw()

            # Sequence number
            numCoord = gridCoords[thisSelection] + [
                1, -1
            ] * recallOption.size * 0.3  # location off-center
            recallSelectedNumber.pos = numCoord
            recallSelectedNumber.text = i + 1  # show 1, 2, 3, etc. and not 0, 1, 2
            recallSelectedNumber.draw()

        # Border around current selected image
        recallSelectedCurrent.pos = gridCoords[selectedCurrent]
        recallSelectedCurrent.draw()

        # Show everything!
        win.flip()

        # Wait for response. On every keypress, update the textfield until keysAns is pressed
        response = event.waitKeys(keyList=keysRecallFaces.keys() + keysSelect +
                                  keysAns + keysQuit + keysQuestionnaire)
        eventOnKey(response[0])  # exit or go to questions
        # Move current location
        if response[0] in keysRecallFaces.keys():
            if 0 <= selectedCurrent + keysRecallFaces[response[0]] < len(
                    itemOptions):
                selectedCurrent += keysRecallFaces[response[0]]
        # Add or remove an element to the current selection
        elif response[0] in keysSelect:
            if selectedCurrent in selected:
                selected.remove(selectedCurrent)
            elif len(selected
                     ) < trial['span']:  # only add if series is not filled
                selected.append(selectedCurrent)
        # Answer submitted. Record and score response
        elif response[0] in keysAns and len(selected) == trial['span']:
            trial['recallRT'] = clock.getTime()
            trial['recallAns'] = [
                itemOptions[index] for index in selected
            ]  # convert indices to image filenames for the datafile

            # Score trial and break out of while loop (finished OSPAN)
            trial = scoreCS(trial)
            break

    return trial
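The grid here is built by drawing the options once, screenshotting the back buffer with BufferImageStim(win), and then clearing the buffer so the capture is never accidentally flipped to the screen. A stripped-down sketch of that pattern (layout and letters are placeholders):

from psychopy import visual

win = visual.Window(size=(800, 600), units='norm')
letter = visual.TextStim(win, height=0.1)

for i, ch in enumerate('ABCDEF'):                # draw the options into the back buffer
    letter.text = ch
    letter.pos = (-0.5 + 0.2 * i, 0)
    letter.draw()

grid = visual.BufferImageStim(win)               # screenshot of the back buffer
win.clearBuffer()                                # discard the drawn letters before any flip

grid.draw()                                      # redrawing the whole grid is now one call
win.flip()
win.close()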
Example #28
0
def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - Capture each stream's pixels on each frame using bufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - pass the matrix of pixels to elementarraystim
    '''

    global cue
    global tracker

    nStreams = trial['nStreams']
    numTargets = trial['numToCue']

    cuedFrame = trial['cue0temporalPos']
    cuedStream = 0  #np.random.choice(np.arange(nStreams), 1)

    maxCueCount = np.ceil(float(trials.nTotal) / float(nStreams) / 2.)

    indexOfThisNStreams = nStreamsPossibilities.index(nStreams)

    theseCueCounts = countsList[
        indexOfThisNStreams]  #Get the counts of the number of times each position has been cued
    theseCueCountsNotComplete = [
        index for index, value in enumerate(theseCueCounts)
        if value < maxCueCount
    ]  #Find those counts less than the max possible
    print('theseCueCountsNotComplete')
    print(theseCueCountsNotComplete)
    cuedStream = np.random.choice(theseCueCountsNotComplete,
                                  1)[0]  #sample a position, cue it
    countsList[indexOfThisNStreams][cuedStream] += 1  #Increment the count

    print('cuedStream is ' + str(cuedStream))

    print(theseCueCounts)

    print('cueFrame = ' + str(cuedFrame))

    streamPositions = list(
    )  #Might need to pass this to elementArrayStim as xys. Might not though

    streamLetterIdxs = np.empty(  #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
        #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
        #all frames for a particular stream
        shape=(nStreams, numLettersToPresent),
        dtype=int)

    streamLetterIdentities = np.empty(  #Letter identities for these streams
        shape=(nStreams, numLettersToPresent),
        dtype=str)

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream, :] = thisSequence
        streamLetterIdentities[thisStream, :] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    print(streamLetterIdentities)

    correctIdx = streamLetterIdxs[cuedStream, cuedFrame]
    print('correctIdx')
    print(correctIdx)
    correctLetter = alphabetHelpers.numberToLetter(
        correctIdx, potentialLetters)  #potentialLetters is global

    frameStimuli = list(
    )  #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,
                                        thisFrame]  #The alphabetical indexes of stimuli to be shown on this frame

        ### IN DO RSVP STIM ###
        stimuliToDraw = list()  #Can pass a list to bufferimageStim!
        stimuliToDraw.append(fixatn)
        stimuliToDrawCounterPhase = list()
        stimuliToDrawCounterPhase.append(fixatnCounterphase)

        for thisStream in xrange(nStreams):

            thisPos = calcStreamPos(trial=trial,
                                    cueOffsets=cueOffsets,
                                    streami=thisStream,
                                    streamOrNoise=False)

            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame  #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[
                thisStream]  #The letter index for this particular stream on this particular frame

            if nStreams == 2 and max(nStreamsPossibilities) > 2:
                #print('Stream was' + str(thisStream) +', but is now' + str(trial['ring']*streamsPerRing+thisStream))
                thisStreamStimulus = streamTextObjects[trial['ring'] *
                                                       streamsPerRing +
                                                       thisStream,
                                                       thisLetterIdx]
            else:
                thisStreamStimulus = streamTextObjects[thisStream,
                                                       thisLetterIdx]

            thisStreamStimulus.pos = thisPos
            #print('For stream %(thisStream)d the height is: %(letterHeight)s' % {'thisStream':thisStream, 'letterHeight':thisStreamStimulus.height})

            stimuliToDraw.append(thisStreamStimulus)
            stimuliToDrawCounterPhase.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                cue.setPos(thisPos)
                cue = corticalMagnification.corticalMagnification(
                    cue, 0.9810000000000002, cue=True
                )  #this is the cuesize from the original experiment
                stimuliToDraw.append(cue)
                stimuliToDrawCounterPhase.append(cue)

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDraw)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        buff = np.pad(
            array=buff,
            pad_width=pad_amounts,  #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0)

        thisFrameStimuli = visual.ElementArrayStim(  #A stimulus representing this frame with the fixation at full luminance
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDrawCounterPhase)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        buff = np.pad(
            array=buff,
            pad_width=pad_amounts,  #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0)

        thisFrameStimuliCounterPhase = visual.ElementArrayStim(  #A stimulus representing this frame with the fixation phase reversed
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        frameStimuli.append([thisFrameStimuli, thisFrameStimuliCounterPhase])

    ts = []

    waiting = True
    myMouse.setVisible(waiting)

    while waiting:
        startTrialStimuli.draw()
        startTrialBox.draw()
        myWin.flip()
        if myMouse.isPressedIn(startTrialBox):
            waiting = False

    myMouse.setVisible(waiting)

    if eyetracking:
        tracker.startEyeTracking(nDone, True, widthPix,
                                 heightPix)  #start recording with eyetracker

    ts = []
    myWin.flip()
    myWin.flip(
    )  #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    fixatn.draw()
    myWin.flip()
    core.wait(1)
    t0 = trialClock.getTime()
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)

    if eyetracking:
        tracker.stopEyeTracking()
        print('stopped tracking')

    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts, cuedStream, cuedFrame
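Each frame is buffered twice above (fixation at full luminance and phase-reversed), so a drawing loop only has to decide which of the two captures to show on a given monitor refresh. A sketch of that selection, with illustrative names and an assumed refresh schedule, since oneFrameOfStim itself is not part of this listing:

def pick_frame_version(refresh_n, frame_stimuli, refreshes_per_frame=6, phase_period=2):
    # frame_stimuli[f] == [normal, counterphase] ElementArrayStims for RSVP frame f.
    frame_n = refresh_n // refreshes_per_frame               # which RSVP frame this refresh falls in
    normal, counterphase = frame_stimuli[frame_n]
    use_counterphase = (refresh_n // phase_period) % 2 == 1  # flicker the fixation point
    return counterphase if use_counterphase else normal

# The trial loop would then call pick_frame_version(n, frameStimuli).draw() before each flip.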
Example #29
0
def loadvisuals(win,
                cwd,
                masksize,
                stimsize,
                crosssize,
                factor,
                xcoo,
                ycoo,
                xyconfigs,
                trial,
                pos=4):
    #Text
    instructions = visual.TextStim(win=win,
                                   units='norm',
                                   height=0.1,
                                   wrapWidth=1.7,
                                   alignHoriz='center',
                                   text="Press space to begin")
    feedback = visual.TextStim(win=win,
                               units='norm',
                               alignHoriz='center',
                               height=0.1,
                               text="End of block ")
    infoscr = visual.TextStim(
        win=win,
        units='norm',
        height=0.1,
        wrapWidth=1.7,
        alignHoriz='center',
        text="[C]alibration\n[B]uttons\n[T]rial number\n[S]tart " +
        trial[1:].lower())

    # fixation cross
    fixation = visual.ShapeStim(win,
                                vertices=((0, -0.5 * crosssize),
                                          (0, 0.5 * crosssize), (0, 0),
                                          (-0.5 * crosssize,
                                           0), (0.5 * crosssize, 0)),
                                lineWidth=round(crosssize / 8),
                                closeShape=False,
                                lineColor="red")

    # fixation cross
    fixation2 = visual.ShapeStim(win,
                                 vertices=((0, -0.5 * crosssize),
                                           (0, 0.5 * crosssize), (0, 0),
                                           (-0.5 * crosssize,
                                            0), (0.5 * crosssize, 0)),
                                 lineWidth=round(crosssize / 8),
                                 closeShape=False,
                                 lineColor="white")
    ###############################################################################
    # prepare cues
    r = crosssize / 2.5
    cuecolors = [[1, -1, -1], [-1, -0.06, 1],
                 [-1, -1, -1]]  # test for equal luminance Blue [-1,-1,1] 6
    colorsq = [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0],
               [0, 1, 1, 0], [1, 0, 0, 1], [0, 0, 0, 0]]

    ss = 10
    xycoo0 = [[xcoo[0] / ss, ycoo[0] / ss], [xcoo[1] / ss, ycoo[1] / ss],
              [xcoo[2] / ss, ycoo[1] / ss], [xcoo[3] / ss, ycoo[0] / ss]]
    xycoo1 = [[xcoo[0] / ss, ycoo[3] / ss], [xcoo[1] / ss, ycoo[2] / ss],
              [xcoo[2] / ss, ycoo[2] / ss], [xcoo[3] / ss, ycoo[3] / ss]]

    # Placeholder lists of 7 entries each; every entry is overwritten with a real
    # BufferImageStim in the loops below, so the shared references created by "* 7" are harmless.
    clb0, clb1 = [[visual.BufferImageStim(win)]] * 7, [[visual.BufferImageStim(win)]] * 7

    for c in range(len(clb0)):
        cuelist0 = [visual.Circle(win=win)] * 4 + [fixation]
        for d in range(4):
            cuelist0[d] = visual.Circle(win=win,
                                        units='pix',
                                        radius=r,
                                        fillColor=cuecolors[colorsq[c][d]],
                                        lineColor=cuecolors[colorsq[c][d]],
                                        pos=(xycoo0[d]))
        clb0[c] = visual.BufferImageStim(win, stim=cuelist0)

    for c in range(len(clb0)):
        cuelist1 = [visual.Circle(win=win)] * 4 + [fixation]
        for d in range(4):
            cuelist1[d] = visual.Circle(win=win,
                                        units='pix',
                                        radius=r,
                                        fillColor=cuecolors[colorsq[c][d]],
                                        lineColor=cuecolors[colorsq[c][d]],
                                        pos=(xycoo1[d]))
        clb1[c] = visual.BufferImageStim(win, stim=cuelist1)

    cue_list = [clb0, clb1]

    ###############################################################################
    # load masks, stimuli, probe
    mask_imgs = []
    for i in os.listdir(cwd + "/masks"):
        if i.startswith("Mask") and i.endswith(".bmp"):
            mask_imgs.append(cwd + "/masks/" + i)

    # create visuals
    mask_display, stim_display = [], []
    for i in range(pos):
        m = visual.ImageStim(win=win, units='pix', size=masksize * factor[i])
        s = visual.TextStim(win=win,
                            units='pix',
                            text='',
                            color='red',
                            height=stimsize * 1.316 * factor[i],
                            font='Arial',
                            bold=False)
        mask_display.append(m)
        stim_display.append(s)
    stim_display.append(fixation)
    mask_display.append(fixation)

    maskcollection = [[], []]
    for c in range(2):
        for m in range(12):
            shuffle(mask_imgs)
            [mask_display[i].setImage(mask_imgs[i]) for i in range(len(xcoo))]
            [
                mask_display[i].setPos((xcoo[i], ycoo[xyconfigs[c][i]]))
                for i in range(len(xcoo))
            ]
            maskcollection[c].append(
                visual.BufferImageStim(win, stim=mask_display))

    probe_display = visual.TextStim(win=win,
                                    units='pix',
                                    color='red',
                                    height=stimsize * 1.316 * factor[-1],
                                    pos=(0, 0),
                                    font='Arial',
                                    bold=False)

    #    return instructions,feedback,infoscr,fixation,fixation2,cue_list,maskcollection,stim_display,probe_display
    return instructions, feedback, infoscr, fixation, fixation2, cue_list, mask_imgs, mask_display, stim_display, probe_display
Example #30
0
w = visual.Window(size=[800, 500], color=[-1, -1, -1], units='norm')
visual.TextStim(w, text='Waiting for a client').draw()
w.flip()  # show init screen

imgs = []  # list of 6 images
for i in range(len(f)):
    (x, y) = (-.75 + i * .3, -.2)
    imgs.append(visual.ImageStim(w, f[i], size=.3, pos=(x, y)))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', 1234))
s.listen(1)  # allow 1 client
(c, adr) = s.accept()

for t in range(nTrials):
    visual.BufferImageStim(w, stim=imgs).draw()
    visual.TextStim(w, text=instruct).draw()
    w.flip()  # show choices
    key = event.waitKeys(keyList=map(str, range(1, 7)))  # allow 1-6
    w.flip()  # clear screen
    idx = int(key[0]) - 1  # choice as the array index
    with open(f[idx], 'rb') as file:
        data = file.read()  # read in pic file
    c.sendall(
        (corAns[idx] + str(len(data))).encode(code))  # inform ans + pic size
    c.sendall(data)  # send pic file to the client
    visual.TextStim(w, text='Waiting for the client').draw()
    w.flip()  # show waiting
    print(c.recv(1).decode(code))  # waiting for a response from the client

s.close()
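Because the six choice images never change in this example, the BufferImageStim rebuilt inside the trial loop could instead be captured once up front. A self-contained sketch of that variant, with text stims standing in for the image files and the key list shortened for illustration:

from psychopy import event, visual

win = visual.Window(size=[800, 500], color=[-1, -1, -1], units='norm')
options = [visual.TextStim(win, text=str(i + 1), pos=(-.75 + i * .3, -.2))
           for i in range(6)]
choice_screen = visual.BufferImageStim(win, stim=options)  # captured once, reused each trial
prompt = visual.TextStim(win, text='Press 1-6', pos=(0, .5))

for trial in range(3):
    choice_screen.draw()
    prompt.draw()
    win.flip()                                             # show choices
    event.waitKeys(keyList=[str(n) for n in range(1, 7)])
    win.flip()                                             # clear screen
win.close()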