def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - Draw and buffer the preCues
        - Capture each stream's pixels on each frame using BufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - Pass the matrix of pixels to ElementArrayStim
    '''
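    # This snippet assumes a surrounding experiment script that defines, among others, the
    # globals myWin, myMouse, trialClock, cue, cueRadius, cueOffsets, cueType, letterColor,
    # fixatn, fixatnCounterphase, fixatnPoint, potentialLetters, numLettersToPresent,
    # streamTextObjects, preTrial, pad_amounts, trialDurFrames, eyetracking, tracker,
    # startTrialBox, startTrialStimuli, plus the helper modules alphabetHelpers and
    # corticalMagnification and the functions calcStreamPos() and oneFrameOfStim().
    # None of these are defined within this example.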
    
    global cue
    
    nStreams = trial['nStreams']
    numTargets = trial['numToCue']  
    
    cuedFrame = trial['cue0temporalPos']
    cuedStream = np.random.choice(np.arange(nStreams), 1)

    cue.pos = calcStreamPos(
                trial = trial, 
                cueOffsets = cueOffsets, 
                streami = cuedStream, 
                streamOrNoise = False
                )
    cue = corticalMagnification.corticalMagnification(cue, 0.9810000000000002, cue = True) #this is the cuesize from the original experiment

    preCues = list()
    preCues.append(cue)

    if trial['cueSpatialPossibilities'] == 2:
        preCue = visual.Circle(myWin, 
                     radius=cueRadius,#Martini used circles with diameter of 12 deg
                     lineColorSpace = 'rgb',
                     lineColor=letterColor,
                     lineWidth=2.0, #in pixels
                     units = 'deg',
                     fillColorSpace = 'rgb',
                     fillColor=None, #beware, with convex shapes fill colors don't work
                     pos= [-5,-5], #the anchor (rotation and vertices are position with respect to this)
                     interpolate=True,
                     autoLog=False)#this stim changes too much for autologging to be useful
       
        preCue.pos = calcStreamPos(
            trial = trial,
            cueOffsets = cueOffsets,
            streami = cuedStream-4,
            streamOrNoise = False
        )
        
        preCue = corticalMagnification.corticalMagnification(preCue, 0.9810000000000002, cue = True)
        
        preCues.append(preCue)

    elif trial['cueSpatialPossibilities'] == 8:
        for i in range(1,8):
            preCue = visual.Circle(myWin, 
                     radius=cueRadius,#Martini used circles with diameter of 12 deg
                     lineColorSpace = 'rgb',
                     lineColor=letterColor,
                     lineWidth=2.0, #in pixels
                     units = 'deg',
                     fillColorSpace = 'rgb',
                     fillColor=None, #beware, with convex shapes fill colors don't work
                     pos= [-5,-5], #the anchor (rotation and vertices are position with respect to this)
                     interpolate=True,
                     autoLog=False)#this stim changes too much for autologging to be useful
        
            
            preCue.pos = (calcStreamPos(
                trial = trial,
                cueOffsets = cueOffsets,
                streami = cuedStream-i,
                streamOrNoise = False
            ))
            
            preCue = corticalMagnification.corticalMagnification(preCue, 0.9810000000000002, cue = True)
            preCues.append(preCue)            

    print('cueFrame = ' + str(cuedFrame))

    preCueStim = preTrial[int(baseAngleCWfromEast/anglesMustBeMultipleOf)] + preCues + [fixatnPoint]

    preCueFrame = visual.BufferImageStim(
                    win = myWin,
                    stim = preCueStim)
    
    preCueFrame = np.flipud(np.array(preCueFrame.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
    preCueFrame = np.pad(
            array=preCueFrame,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )
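    # pad_amounts is assumed to be precomputed in the omitted 'Buffered image size
    # dimensions' section. Example #2 below computes the equivalent padding inline: pick the
    # smallest power of two that fits both image dimensions (ElementArrayStim textures need
    # power-of-two sizes), then split the leftover pixels between the two sides of each
    # dimension, roughly:
    #   new_size = max(int(np.power(2, np.ceil(np.log(d) / np.log(2)))) for d in img.shape)
    #   pad_amounts = [[(new_size - d) // 2, new_size - d - (new_size - d) // 2] for d in img.shape]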

    preCueFrame = visual.ElementArrayStim(
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=preCueFrame.shape,
            elementTex=preCueFrame,
            elementMask = 'none'
        )
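    # preCueFrame is now a single full-screen texture element; it is drawn once during the
    # pre-trial sequence below, before the RSVP frames start.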


    streamPositions = list() #Might need to pass this to elementArrayStim as xys. Might not though


    streamLetterIdxs = np.empty( #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
                                 #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
                                 #all frames for a particular stream
        shape = (nStreams,numLettersToPresent),
        dtype = int
        )

    streamLetterIdentities = np.empty( #Letter identities for these streams
        shape = (nStreams,numLettersToPresent),
        dtype = str
        )

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream,:] = thisSequence
        streamLetterIdentities[thisStream,:] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    correctIdx = streamLetterIdxs[cuedStream,cuedFrame] 
    print('correctIdx')
    print(correctIdx)
    correctLetter = alphabetHelpers.numberToLetter(correctIdx, potentialLetters) #potentialLetters is global

    frameStimuli = list() #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,thisFrame] #The alphabetical indexes of stimuli to be shown on this frame
        
        stimuliToDraw = list() #Can pass a list to bufferimageStim!
        stimuliToDraw.append(fixatn)
        stimuliToDrawCounterPhase = list()
        stimuliToDrawCounterPhase.append(fixatnCounterphase)

        for thisStream in xrange(nStreams):
            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[thisStream] #The letter index for this particular stream on this particular frame
            
            thisStreamStimulus = streamTextObjects[thisStream,thisLetterIdx] #The text object for this stream
            
            thisPos = calcStreamPos(
                trial = trial, 
                cueOffsets = cueOffsets, 
                streami = thisStream, 
                streamOrNoise = False
                )

            thisStreamStimulus.pos = thisPos

            stimuliToDraw.append(thisStreamStimulus)
            stimuliToDrawCounterPhase.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                stimuliToDraw.append(cue)
                stimuliToDrawCounterPhase.append(cue)
        
        buff = visual.BufferImageStim( #Buffer these stimuli
            win = myWin,
            stim = stimuliToDraw
            )
        
        
        buff = np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
        buff = np.pad(
            array=buff,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )
        
        thisFrameStimuli = visual.ElementArrayStim( #A stimulus representing this frame with the fixation at full luminance
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask = 'none'
            )
            
        buff = visual.BufferImageStim( #Buffer these stimuli
            win = myWin,
            stim = stimuliToDrawCounterPhase
            )
        
        
        buff = np.flipud(np.array(buff.image)[..., 0]) / 255.0 * 2.0 - 1.0 #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually
        
        buff = np.pad(
            array=buff,
            pad_width=pad_amounts, #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0
        )
        

        thisFrameStimuliCounterPhase = visual.ElementArrayStim( #A stimulus representing this frame with the fixation phase reversed
            win = myWin,
            units = 'pix',
            nElements=1,
            xys = [[0,0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask = 'none'
            )        

        frameStimuli.append([thisFrameStimuli, thisFrameStimuliCounterPhase])

    ts = []
    
    waiting = True
    myMouse.setVisible(waiting)
    
    while waiting:
        startTrialStimuli.draw()
        startTrialBox.draw()
        myWin.flip()
        if myMouse.isPressedIn(startTrialBox):
            waiting = False

    myMouse.setVisible(waiting)

    if eyetracking: 
        tracker.startEyeTracking(nDone,True,widthPix,heightPix) #start recording with eyetracker  

    myWin.flip(); myWin.flip()#Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    preCueFrame.draw()
    myWin.flip()
    core.wait(.25)
    myWin.flip()
    core.wait(.5)
    fixatn.draw()
    myWin.flip()
    core.wait(1)
    


    t0 = trialClock.getTime()
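    # oneFrameOfStim() is defined elsewhere in the script; given the refresh index n it
    # presumably selects which of this frame's two ElementArrayStim versions (full-luminance
    # or counterphase fixation) to draw on this monitor refresh.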
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)
    
    if eyetracking:
        tracker.stopEyeTracking()
        print('stopped tracking')
    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts, cuedStream, cuedFrame
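
# A minimal usage sketch (hypothetical, not from the original script): doRSVPStim() expects
# a `trial` dict from the experiment's trial handler containing at least the keys used
# above, and it returns the letter orders plus timing and cue information. The values below
# are made up for illustration.
trial = {
    'nStreams': 8,                  # number of simultaneous RSVP streams
    'numToCue': 1,                  # only one cue is supported at the moment
    'cue0temporalPos': 10,          # frame (letter position) on which the cue appears
    'cueSpatialPossibilities': 2,   # how many possible cue locations get a preCue
}
(streamLetterIdxs, streamLetterIdentities,
 correctLetter, ts, cuedStream, cuedFrame) = doRSVPStim(trial)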
Example #2
def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - Capture each stream's pixels on each frame using BufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - Pass the matrix of pixels to ElementArrayStim
    '''

    global cue

    nStreams = trial['nStreams']
    numTargets = trial['numToCue']

    cuedFrame = trial['cue0temporalPos']
    cuedStream = np.random.choice(np.arange(nStreams), 1)

    print('cueFrame = ' + str(cuedFrame))

    streamPositions = list(
    )  #Might need to pass this to elementArrayStim as xys. Might not though

    streamLetterIdxs = np.empty(  #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
        #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
        #all frames for a particular stream
        shape=(nStreams, numLettersToPresent),
        dtype=int)

    streamLetterIdentities = np.empty(  #Letter identities for these streams
        shape=(nStreams, numLettersToPresent),
        dtype=str)

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream, :] = thisSequence
        streamLetterIdentities[thisStream, :] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    correctIdx = streamLetterIdxs[cuedStream, cuedFrame]

    correctLetter = alphabetHelpers.numberToLetter(
        correctIdx, potentialLetters)  #potentialLetters is global

    frameStimuli = list(
    )  #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,
                                        thisFrame]  #The stimuli to be shown on this frame

        thisFramePixels = np.zeros(shape=(screenValues['heightPix'],
                                          screenValues['widthPix']))

        stimuliToDraw = list()  #Can pass a list to bufferimageStim!

        for thisStream in xrange(nStreams):
            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame  #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[
                thisStream]  #The letter index for this particular stream on this particular frame

            thisLtr = alphabetHelpers.numberToLetter(thisLetterIdx,
                                                     potentialLetters)

            thisStreamStimulus = streamTextObjects[
                thisStream]  #The text object for this stream
            thisStreamStimulus.text = thisLtr

            thisPos = calcStreamPos(trial=trial,
                                    cueOffsets=cueOffsets,
                                    streami=thisStream,
                                    streamOrNoise=False)

            thisStreamStimulus.pos = thisPos

            stimuliToDraw.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                cue.setPos(thisPos)
                cue = corticalMagnification.corticalMagnification(
                    cue, 0.9810000000000002, cue=True
                )  #this is the cuesize from the original experiment
                stimuliToDraw.append(cue)

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDraw)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        new_size = max(  #need to pad out the texture to a power of two to use it in elementArrayStim
            [
                int(np.power(2, np.ceil(np.log(dim_size) / np.log(2))))
                for dim_size in buff.shape
            ])

        pad_amounts = []
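        # Split the leftover pixels (new_size minus the image dimension) between the two
        # sides of each dimension so the buffered image stays centred in the square texture.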

        for i_dim in range(2):

            first_offset = int((new_size - buff.shape[i_dim]) / 2.0)
            second_offset = new_size - buff.shape[i_dim] - first_offset

            pad_amounts.append([first_offset, second_offset])

        buff = np.pad(array=buff,
                      pad_width=pad_amounts,
                      mode="constant",
                      constant_values=0.0)

        thisFrameStimuli = visual.ElementArrayStim(  #A stimulus representing this frame
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        frameStimuli.append(thisFrameStimuli)

    ts = []
    myWin.flip()
    myWin.flip(
    )  #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    t0 = trialClock.getTime()
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)

    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts
Example #3
def doRSVPStim(trial):
    '''
    ONLY DOES ONE CUE AT THE MOMENT

    This function generates the stimuli for each trial. The word "frame" here refers to a set of simultaneous RSVP stimuli. I'll use "refresh" to refer to monitor refreshes.
    Using the parameters for the current trial:
        - Work out the temporal position of the cue(s)
        - Shuffle the order of the letters
        - Calculate the position and cortically-magnified size for each stream
        - Capture each stream's pixels on each frame using BufferImageStim
        - Collate each frame's pixels so that all stimuli are represented by the same matrix of pixels. Put the cue in there if it's the cued frame
        - Pass the matrix of pixels to ElementArrayStim
    '''

    global cue
    global tracker

    nStreams = trial['nStreams']
    numTargets = trial['numToCue']

    cuedFrame = trial['cue0temporalPos']
    cuedStream = 0  #np.random.choice(np.arange(nStreams), 1)

    maxCueCount = np.ceil(float(trials.nTotal) / float(nStreams) / 2.)
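    # Counterbalance which stream is cued: countsList tracks how many times each stream
    # position has been cued for this nStreams condition, and only positions cued fewer
    # than maxCueCount times remain eligible below.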

    indexOfThisNStreams = nStreamsPossibilities.index(nStreams)

    theseCueCounts = countsList[
        indexOfThisNStreams]  #Get the counts of the number of times each position has been cued
    theseCueCountsNotComplete = [
        index for index, value in enumerate(theseCueCounts)
        if value < maxCueCount
    ]  #Find those counts less than the max possible
    print('theseCueCountsNotComplete')
    print(theseCueCountsNotComplete)
    cuedStream = np.random.choice(theseCueCountsNotComplete,
                                  1)[0]  #sample a position, cue it
    countsList[indexOfThisNStreams][cuedStream] += 1  #Increment the count

    print('cuedStream is ' + str(cuedStream))

    print(theseCueCounts)

    print('cueFrame = ' + str(cuedFrame))

    streamPositions = list(
    )  #Might need to pass this to elementArrayStim as xys. Might not though

    streamLetterIdxs = np.empty(  #use an array so I can select columns (frames) for buffering. This is an empty array that will eventually have the
        #letter indexes for each stream. Selecting a column gives us all streams at a particular frame. Selecting a row gives us
        #all frames for a particular stream
        shape=(nStreams, numLettersToPresent),
        dtype=int)

    streamLetterIdentities = np.empty(  #Letter identities for these streams
        shape=(nStreams, numLettersToPresent),
        dtype=str)

    for thisStream in xrange(nStreams):
        thisSequence = np.arange(24)
        np.random.shuffle(thisSequence)
        theseIdentities = [potentialLetters[idx] for idx in thisSequence]
        streamLetterIdxs[thisStream, :] = thisSequence
        streamLetterIdentities[thisStream, :] = theseIdentities
        #print('For stream %(streamN)d the letters are: %(theseLetters)s' % {'streamN':thisStream, 'theseLetters':''.join(theseIdentities)})

    print(streamLetterIdentities)

    correctIdx = streamLetterIdxs[cuedStream, cuedFrame]
    print('correctIdx')
    print(correctIdx)
    correctLetter = alphabetHelpers.numberToLetter(
        correctIdx, potentialLetters)  #potentialLetters is global

    frameStimuli = list(
    )  #A list of elementArrayStim objects, each represents a frame. Drawing one of these objects will draw the letters and the cue for that frame

    for thisFrame in xrange(numLettersToPresent):
        theseStimuli = streamLetterIdxs[:,
                                        thisFrame]  #The alphabetical indexes of stimuli to be shown on this frame

        ### IN DO RSVP STIM ###
        stimuliToDraw = list()  #Can pass a list to bufferimageStim!
        stimuliToDraw.append(fixatn)
        stimuliToDrawCounterPhase = list()
        stimuliToDrawCounterPhase.append(fixatnCounterphase)

        for thisStream in xrange(nStreams):

            thisPos = calcStreamPos(trial=trial,
                                    cueOffsets=cueOffsets,
                                    streami=thisStream,
                                    streamOrNoise=False)

            cueThisFrame = thisStream == cuedStream and thisFrame == cuedFrame  #If true, draw the cue and capture that too

            thisLetterIdx = theseStimuli[
                thisStream]  #The letter index for this particular stream on this particular frame

            if nStreams == 2 and max(nStreamsPossibilities) > 2:
                #print('Stream was' + str(thisStream) +', but is now' + str(trial['ring']*streamsPerRing+thisStream))
                thisStreamStimulus = streamTextObjects[trial['ring'] *
                                                       streamsPerRing +
                                                       thisStream,
                                                       thisLetterIdx]
            else:
                thisStreamStimulus = streamTextObjects[thisStream,
                                                       thisLetterIdx]

            thisStreamStimulus.pos = thisPos
            #print('For stream %(thisStream)d the height is: %(letterHeight)s' % {'thisStream':thisStream, 'letterHeight':thisStreamStimulus.height})

            stimuliToDraw.append(thisStreamStimulus)
            stimuliToDrawCounterPhase.append(thisStreamStimulus)

            if cueThisFrame and cueType == 'exogenousRing':
                cue.setPos(thisPos)
                cue = corticalMagnification.corticalMagnification(
                    cue, 0.9810000000000002, cue=True
                )  #this is the cuesize from the original experiment
                stimuliToDraw.append(cue)
                stimuliToDrawCounterPhase.append(cue)

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDraw)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        buff = np.pad(
            array=buff,
            pad_width=pad_amounts,  #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0)

        thisFrameStimuli = visual.ElementArrayStim(  #A stimulus representing this frame with the fixation at full luminance
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        buff = visual.BufferImageStim(  #Buffer these stimuli
            win=myWin, stim=stimuliToDrawCounterPhase)

        buff = np.flipud(
            np.array(buff.image)[..., 0]
        ) / 255.0 * 2.0 - 1.0  #Via djmannion. This converts the pixel values from [0,255] to [-1,1]. I think 0 is middle grey. I'll need to change this to match the background colour eventually

        buff = np.pad(
            array=buff,
            pad_width=pad_amounts,  #See 'Buffered image size dimensions' section
            mode="constant",
            constant_values=0.0)

        thisFrameStimuliCounterPhase = visual.ElementArrayStim(  #A stimulus representing this frame with the fixation phase reversed
            win=myWin,
            units='pix',
            nElements=1,
            xys=[[0, 0]],
            sizes=buff.shape,
            elementTex=buff,
            elementMask='none')

        frameStimuli.append([thisFrameStimuli, thisFrameStimuliCounterPhase])

    ts = []

    waiting = True
    myMouse.setVisible(waiting)

    while waiting:
        startTrialStimuli.draw()
        startTrialBox.draw()
        myWin.flip()
        if myMouse.isPressedIn(startTrialBox):
            waiting = False

    myMouse.setVisible(waiting)

    if eyetracking:
        tracker.startEyeTracking(nDone, True, widthPix,
                                 heightPix)  #start recording with eyetracker

    ts = []
    myWin.flip()
    myWin.flip(
    )  #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
    fixatn.draw()
    myWin.flip()
    core.wait(1)
    t0 = trialClock.getTime()
    for n in xrange(trialDurFrames):
        oneFrameOfStim(n, frameStimuli)
        myWin.flip()
        ts.append(trialClock.getTime() - t0)

    if eyetracking:
        tracker.stopEyeTracking()
        print('stopped tracking')

    return streamLetterIdxs, streamLetterIdentities, correctLetter, ts, cuedStream, cuedFrame