# Example #1
def processSTA(expName='GNS', spikeFile='GaussianNatScene.spk', dnew=None):
    '''
    Compute spike-triggered averages (STAs) for all cells of an experiment.

    Loads spikes from spikeFile, extracts the last parameters used for
    expName from parameters.txt, and optionally overrides any of those
    parameters via dnew before processing.

    inputs:
    -------
        expName:    str, experiment name used to look up parameters

        spikeFile:  str, file with the recorded spikes

        dnew:       dict or None, parameter overrides applied on top of the
                    loaded experiment dictionary

    output:
    -------
        d, stim, cells, psths, psthsX, stas, stasX, stim
        NOTE(review): stim appears twice (positions 2 and 8); kept as-is so
        existing callers that unpack 8 values keep working.
    '''
    # use None as the default to avoid the shared mutable-default pitfall
    if dnew is None:
        dnew = {}

    # load basic dictionary with experimental data and all cells
    print('Loading cells and variables')
    d, cells = divideSpikesPerCondition(*preProcess(expName, spikeFile))
    d.update(dnew)

    # compute PSTHs
    psths = ba.processNested(ba.psth, 0, cells, d['fixationLength'], d['trialsN'], returnFlag=1)
    psthsX = ba.psth(cells[0][0][0], d['fixationLength'], d['trialsN'], returnFlag=2)

    # load Gaussian noise wave to be able to correlate
    print('Loading noise')
    noise = loadNoise(d, 2)

    # create stimulus object
    stim = st.stimulus(noise, d=d)

    # upsample stim to about 5ms time bins
    stim = stim.upsamplestim(int(stim.deltaT/.005))

    # compute STA for each condition
    print('Computing stas')
    stas = ba.processNested(ft.getsta, 2, stim.tsa, stim.stim, cells, 100, returnFlag=1)

    # compute one x axis for all stas
    print('Computing stas X axis')
    stasX = ft.getsta(stim.tsa, stim.stim, cells[0][0][0], 100, returnFlag=2)

    # since in GNS filter sign is irrelevant and on/off is somewhat arbitrary
    # with this stimulus, flip all filters to have negative peaks
    # (off-like filters) so they are all of the same type
    stas = ba.processNested(flipSTA, 0, stas)

    return d, stim, cells, psths, psthsX, stas, stasX, stim
# Example #2
    def __init__(self, d, startT, endT, cells):
        '''
        Keep only the spikes falling inside a given time window.

        inputs:
        -------
            d:          dict with experimental parameters; 'fixationLength'
                        is passed through to limitSpikes

            startT:     float, defines the start of the time window

            endT:       float, defines the end of the time window

            cells:      nested lists of ndarrays with the spikes for all
                        conditions and all cells
        '''
        # restrict every cell/condition to the [startT, endT) window
        windowed = ba.processNested(limitSpikes, 0, cells, d['fixationLength'], startT, endT)
        self.startT = startT
        self.endT = endT
        self.cells = windowed
# Example #3
def processInformation(expName='GNS', spikeFile='GaussianNatScene.spk', dnew=None):
    '''
    Compute mutual information between neural responses and image identity.

    Loads spikes from spikeFile, extracts the last parameters used for
    expName from parameters.txt, optionally overrides parameters via dnew,
    then builds latency / spike-count / joint symbols per cell, computes
    their mutual information with the block sequence (plus a shuffled
    control), and plots the result.

    inputs:
    -------
        expName:    str, experiment name used to look up parameters

        spikeFile:  str, file with the recorded spikes

        dnew:       dict or None, parameter overrides applied on top of the
                    loaded experiment dictionary

    output:
    -------
        d, cells, latency, spkCnt, blockSeq, latencyInfo, spkCntInfo, spkCntLatInfo
    '''
    # use None as the default to avoid the shared mutable-default pitfall
    if dnew is None:
        dnew = {}

    # load basic dictionary with experimental data, cells and the seq of Home and Targets
    print('Loading cells and variables')
    d, cells, blockSeq, blockStartT, blockEndT = preProcess(expName, spikeFile)
    d.update(dnew)

    # Define a TW (time window) for computing basic response properties
    TW_startT = .05
    TW_endT = .20
    binsN = 4
    eventStartT = _np.array(blockStartT) + TW_startT
    eventEndT = _np.array(blockStartT) + TW_endT
    binLength = (TW_endT - TW_startT)/binsN

    # compute Latency and spike count
    print('Processing Latency for all cells')
    latency = ba.processNested(rt.getLatency, 0, cells, eventStartT, eventEndT, noSpikeSymbol = - binLength/2)
    print('Processing Spike Count for all cells')
    spkCnt = ba.processNested(rt.getSpkCnt, 0, cells, eventStartT, eventEndT)

    # convert latency into discrete symbols (for each cell); the no-spike
    # value -binLength/2 floors to -1, giving "no spike" a bin of its own
    latency = ba.processNested(lambda x: _np.floor(x/binLength), 0, latency)

    # combine latency and spkCnt into a unique joint symbol per event
    spkCntLat = ba.processNested(lambda x, y: list(zip(x, y)), 0, latency, spkCnt)

    # process information
    print('Processing Information')
    latencyInfo = ba.processNested(info.mi, 0, latency, blockSeq)
    spkCntInfo = ba.processNested(info.mi, 0, spkCnt, blockSeq)
    spkCntLatInfo = [info.mi(symbols, blockSeq) for symbols in spkCntLat]

    # as a control, process information with a shuffled version of blockSeq
    shuffled = blockSeq.copy()
    _np.random.shuffle(shuffled)
    latencyInfoSh = ba.processNested(info.mi, 0, latency, shuffled)
    spkCntInfoSh = ba.processNested(info.mi, 0, spkCnt, shuffled)
    spkCntLatInfoSh = [info.mi(symbols, shuffled) for symbols in spkCntLat]

    # make a nice plot: solid dots are real info, small dots the shuffled control
    print('Plotting information')
    _plt.close('Information')
    _plt.figure('Information')
    _plt.plot(latencyInfo, 'ro', label = 'Latency')
    _plt.plot(spkCntInfo, 'bo', label = 'Spike Count')
    _plt.plot(spkCntLatInfo, 'ko', label = 'Lat and SpkCnt')
    _plt.plot(latencyInfoSh, 'r.')
    _plt.plot(spkCntInfoSh, 'b.')
    _plt.plot(spkCntLatInfoSh, 'k.')
    _plt.ylabel('Information (Bits)')
    _plt.xlabel('Image ID')
    _plt.title('Mutual Information')
    _plt.legend()
    _plt.xlim(-.5, len(cells)+.5)
    _plt.show()

    return d, cells, latency, spkCnt, blockSeq, latencyInfo, spkCntInfo, spkCntLatInfo