Code example #1
    def calculate_noiseburst_response(self, dataframe=None):
        """Compute a z-score and p-value for each cell's noiseburst response.

        Compares spike counts in a 200 ms pre-stimulus baseline window
        against a 200 ms post-onset response window (via
        self.calculate_response_score) and stores the results in the
        'noiseZscore' and 'noisePval' columns of the dataframe.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db).
        """
        #Use the whole database by default
        if dataframe is None:
            dataframe = self.db

        #Calculate noiseburst response
        #TODO: Response to things other than noise as well??
        #Initialize with NaN so cells without spike data stay undefined
        noiseZscore = np.full(len(dataframe), np.nan)
        noisePval = np.full(len(dataframe), np.nan)
        baseRange = [-0.2, 0]  #Baseline window (sec, relative to onset)
        responseRange = [0, 0.2]  #Response window (sec, relative to onset)
        #enumerate() gives a positional index for filling the arrays;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            spikeData, eventData = dataloader.get_session_ephys(
                cell, 'noiseburst')
            eventOnsetTimes = eventData.get_event_onset_times()
            if spikeData.timestamps is not None:
                zScore, pVal = self.calculate_response_score(
                    spikeData.timestamps, eventOnsetTimes, baseRange,
                    responseRange)
                noiseZscore[indIter] = zScore
                noisePval[indIter] = pVal
        dataframe['noiseZscore'] = noiseZscore
        dataframe['noisePval'] = noisePval
Code example #2
    def calculate_laser_train_ratio(self, dataframe=None):
        """Ratio of spiking to the 3rd laser-train pulse vs. the 1st.

        For each cell, aligns spikes to laser-train onset and divides the
        average spike count 200-250 ms after onset (3rd pulse) by the
        average count 0-50 ms after onset (1st pulse). Stores the ratio in
        the 'trainRatio' column; cells without spike data stay NaN.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db).
        """
        #Use the whole database by default
        if dataframe is None:
            dataframe = self.db

        #Laser train response, ratio of pulse avg spikes
        #Initialize with NaN so cells without spike data stay undefined
        trainRatio = np.full(len(dataframe), np.nan)
        timeRange = [-0.1, 1]  #For initial alignment
        baseRange = [0, 0.05]  #Base range is response to first pulse
        responseRange = [0.2, 0.25]  #Response to 3rd pulse
        #enumerate() gives a positional index for filling the array;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            spikeData, eventData = dataloader.get_session_ephys(
                cell, 'lasertrain')
            eventOnsetTimes = eventData.get_event_onset_times()
            #Keep only train onsets: drop events closer than 0.5 s apart
            eventOnsetTimes = spikesanalysis.minimum_event_onset_diff(
                eventOnsetTimes, 0.5)
            if spikeData.timestamps is not None:
                (spikeTimesFromEventOnset, trialIndexForEachSpike,
                 indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
                     spikeData.timestamps, eventOnsetTimes, timeRange)
                avgSpikesBase = spikesanalysis.spiketimes_to_spikecounts(
                    spikeTimesFromEventOnset, indexLimitsEachTrial,
                    baseRange).mean()
                avgSpikesResp = spikesanalysis.spiketimes_to_spikecounts(
                    spikeTimesFromEventOnset, indexLimitsEachTrial,
                    responseRange).mean()
                #NOTE: yields inf/nan if there are no spikes to the 1st pulse
                trainRatio[indIter] = avgSpikesResp / avgSpikesBase
        dataframe['trainRatio'] = trainRatio
Code example #3
    def calculate_laser_pulse_response(self, dataframe=None):
        """Compute a z-score and p-value for each cell's laser-pulse response.

        Compares spike counts in a 100 ms pre-stimulus baseline window
        against a 100 ms post-onset response window (via
        self.calculate_response_score) and stores the results in the
        'pulseZscore' and 'pulsePval' columns of the dataframe.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db).
        """
        #Use the whole database by default
        if dataframe is None:
            dataframe = self.db

        #Laser pulse response
        #Initialize with NaN so cells without spike data stay undefined
        pulseZscore = np.full(len(dataframe), np.nan)
        pulsePval = np.full(len(dataframe), np.nan)
        baseRange = [-0.1, 0]  #Baseline window (sec, relative to onset)
        responseRange = [0, 0.1]  #Response window (sec, relative to onset)
        #enumerate() gives a positional index for filling the arrays;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            spikeData, eventData = dataloader.get_session_ephys(
                cell, 'laserpulse')
            eventOnsetTimes = eventData.get_event_onset_times()
            if spikeData.timestamps is not None:
                zScore, pVal = self.calculate_response_score(
                    spikeData.timestamps, eventOnsetTimes, baseRange,
                    responseRange)
                pulseZscore[indIter] = zScore
                pulsePval[indIter] = pVal

        dataframe['pulseZscore'] = pulseZscore
        dataframe['pulsePval'] = pulsePval
Code example #4
    def calculate_highest_significant_sync_rate(self, dataframe=None):
        """Find the highest AM rate each cell significantly phase-locks to.

        For every AM rate presented, computes the vector strength of the
        spike times (after discarding the first 50 ms onset response) and a
        Rayleigh-test p-value. The highest rate with p < 0.05 is stored in
        the 'highestSync' column; cells with no significant rate (or no
        spike data) get 0.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db).
        """
        #Use the whole database by default
        if dataframe is None:
            dataframe = self.db

        #Highest significant sync rate
        #TODO: I need to unit test this part
        #Initialize to 0 (= "no significant sync"), also the fallback for
        #cells without spike data
        highestSync = np.zeros(len(dataframe))
        #enumerate() gives a positional index for filling the array;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            spikeData, eventData = dataloader.get_session_ephys(cell, 'am')
            bdata = dataloader.get_session_bdata(cell, 'am')
            eventOnsetTimes = eventData.get_event_onset_times(eventChannel=5)
            eventOnsetTimes = spikesanalysis.minimum_event_onset_diff(
                eventOnsetTimes, 0.5)

            #Check timestamps (not samples): timestamps is what is used
            #below, and this matches the check in the sibling methods.
            if spikeData.timestamps is not None:

                #NOTE: This is where I am ignoring the onset response. is 50msec sufficient??
                timeRange = [0.05, 0.5]

                freqEachTrial = bdata['currentFreq']
                possibleFreq = np.unique(freqEachTrial)

                (spikeTimesFromEventOnset, trialIndexForEachSpike,
                 indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
                     spikeData.timestamps, eventOnsetTimes, timeRange)
                allRayleighPvals = np.zeros(len(possibleFreq))
                for indFreq, oneFreq in enumerate(possibleFreq):
                    trialsThisFreq = np.flatnonzero(freqEachTrial == oneFreq)
                    spikeTimesThisFreq = spikeTimesFromEventOnset[np.in1d(
                        trialIndexForEachSpike, trialsThisFreq)]

                    #Compute average vector length and angle at the stimulus
                    #period (vectorstrength wraps the spike times itself, so
                    #no manual conversion to radians is needed)
                    period = 1.0 / oneFreq
                    strength, phase = signal.vectorstrength(
                        spikeTimesThisFreq, period)

                    #Compute prob for the rayleigh statistic
                    p = self.rayleigh_test(strength, len(spikeTimesThisFreq))
                    allRayleighPvals[indFreq] = p

                if np.any(allRayleighPvals < 0.05):
                    highestSync[indIter] = np.max(
                        possibleFreq[allRayleighPvals < 0.05])

        dataframe['highestSync'] = highestSync
Code example #5
    def calculate_am_statistics(self, dataframe=None):
        """Kruskal-Wallis ANOVA on evoked spike counts across AM rates.

        For each cell, counts spikes in the first 500 ms after sound onset
        for every trial, groups the counts by AM rate, and runs a
        Kruskal-Wallis test across the groups. Results are stored in the
        'amKWstat' and 'amKWp' columns; cells without spike data (or where
        the test raises) stay NaN.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db).
        """
        #Use the whole database by default (this guard was missing, so the
        #dataframe=None default crashed on len(None))
        if dataframe is None:
            dataframe = self.db

        #AM stats - calculates KW anova on number of spikes during stimulus for each AM rate
        #Initialize with NaN so unprocessed cells stay undefined
        amKWstat = np.full(len(dataframe), np.nan)
        amKWp = np.full(len(dataframe), np.nan)

        #enumerate() gives a positional index for filling the arrays;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            spikeData, eventData = dataloader.get_session_ephys(cell, 'am')
            eventOnsetTimes = eventData.get_event_onset_times()
            bdata = dataloader.get_session_bdata(cell, 'am')

            if spikeData.timestamps is not None:
                #NOTE: bdata uses 'Freq' but this is AM so I'm calling it rate
                rateEachTrial = bdata['currentFreq']
                possibleRate = np.unique(rateEachTrial)
                timeRange = [0, 0.5]  #Response window (sec after onset)
                #One array of per-trial response spike counts per AM rate
                #(rates may have different numbers of trials)
                respSpikeArrays = []
                for thisRate in possibleRate:
                    trialsThisRate = np.flatnonzero(rateEachTrial == thisRate)
                    (spikeTimesFromEventOnset, trialIndexForEachSpike,
                     indexLimitsEachTrial
                     ) = spikesanalysis.eventlocked_spiketimes(
                         spikeData.timestamps, eventOnsetTimes[trialsThisRate],
                         timeRange)
                    nspkResp = spikesanalysis.spiketimes_to_spikecounts(
                        spikeTimesFromEventOnset, indexLimitsEachTrial,
                        timeRange)
                    respSpikeArrays.append(nspkResp.ravel())

                try:
                    statistic, pval = stats.kruskal(*respSpikeArrays)
                    amKWp[indIter] = pval
                    amKWstat[indIter] = statistic
                except ValueError:
                    #kruskal raises e.g. when all counts are identical;
                    #leave this cell as NaN
                    pass

        dataframe['amKWp'] = amKWp
        dataframe['amKWstat'] = amKWstat
Code example #6
# Per-cell spike-shape quality metric.
# NOTE(review): fragment — allShapeQuality is assumed to be a pre-allocated
# float array of len(db) created before this loop; confirm upstream.
for indCell, cell in db.iterrows():
    peakAmplitudes = cell['clusterPeakAmplitudes']
    spikeShapeSD = cell['clusterSpikeSD']
    # Quality = magnitude of the second waveform peak relative to the mean
    # per-sample SD of the spike shape.
    shapeQuality = abs(peakAmplitudes[1]/spikeShapeSD.mean())
    allShapeQuality[indCell] = shapeQuality
# A zero mean SD produces inf; treat those cells as quality 0.
allShapeQuality[allShapeQuality==inf]=0
db['shapeQuality'] = allShapeQuality

#Calculate noiseburst response
#TODO: Response to things other than noise as well??
#NOTE(review): this fragment is truncated — zScore/pVal computed at the end
#of the loop are never stored into noiseZscore/noisePval in the visible code.
noiseZscore = np.empty(len(db))
noisePval = np.empty(len(db))
baseRange = [-0.2,0]  #Baseline window (sec, relative to onset)
responseRange = [0, 0.2]  #Response window (sec, relative to onset)
for indCell, cell in db.iterrows():
    spikeData, eventData = dataloader.get_session_ephys(cell, 'noiseburst')
    if spikeData.timestamps is not None:
        eventOnsetTimes = eventData.get_event_onset_times()
        #Align over the union of baseline and response windows
        alignmentRange = [baseRange[0], responseRange[1]]
        (spikeTimesFromEventOnset,
        trialIndexForEachSpike,
        indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeData.timestamps,
                                                                        eventOnsetTimes,
                                                                        alignmentRange)
        #Per-trial spike counts in baseline and response windows
        nspkBase = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,
                                                            indexLimitsEachTrial,
                                                            baseRange)
        nspkResp = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,
                                                            indexLimitsEachTrial,
                                                            responseRange)
        #Wilcoxon rank-sum test: response vs baseline counts
        [zScore, pVal] = stats.ranksums(nspkResp,nspkBase)
Code example #7
    bins = np.arange(left_of_first_bin, right_of_last_bin + d, d)
    return bins


# Load the master cell database and plot an AM raster for one example cell.
masterdb = pandas.read_hdf(
    '/home/nick/data/database/corticostriatal_master_20170452.h5', 'database')
# Keep only well-isolated cells (low ISI violations, clean waveforms)
goodcells = masterdb.query('isiViolations<0.02 and shapeQuality>2')
# cell = masterdb.ix[9] #Not too selective for rate
exampleCell = goodcells.iloc[15]  #Selective for rate

# # To find an example cell

figDir = '/home/nick/data/database/amstats/'
# for indCell, cell in masterdb.iterrows():
for indCell, cell in enumerate([exampleCell]):
    spikeData, eventData = dataloader.get_session_ephys(cell, 'am')
    eventOnsetTimes = eventData.get_event_onset_times()
    bdata = dataloader.get_session_bdata(cell, 'am')
    rateEachTrial = bdata[
        'currentFreq']  #NOTE: bdata uses 'Freq' but this is AM so I'm calling it rate

    plt.clf()
    plt.subplot(121)
    dataplotter.plot_raster(spikeData.timestamps,
                            eventOnsetTimes,
                            sortArray=rateEachTrial,
                            timeRange=[-0.3, 0.8])
    plt.ylabel('AM rate (Hz)')
    plt.xlabel('Time from sound onset (sec)')
    plt.show()
    # Parenthesized print works under both Python 2 and 3
    # (was a Python-2-only bare print statement).
    print(indCell)
Code example #8
            peakAmplitudes = cell['clusterPeakAmplitudes']
            spikeShapeSD = cell['clusterSpikeSD']
            shapeQuality = abs(peakAmplitudes[1] / spikeShapeSD.mean())
            allShapeQuality[indCell] = shapeQuality
        allShapeQuality[allShapeQuality == inf] = 0
        db['shapeQuality'] = allShapeQuality

        #Calculate noiseburst response
        #TODO: Response to things other than noise as well??
        #DONE: Remove eventresponse dependency
        #NOTE(review): indexing the arrays with the iterrows() label (indCell)
        #assumes db has a default 0..N-1 index — verify for filtered subsets.
        noiseZscore = np.empty(len(db))
        noisePval = np.empty(len(db))
        baseRange = [-0.2, 0]  #Baseline window (sec, relative to onset)
        responseRange = [0, 0.2]  #Response window (sec, relative to onset)
        for indCell, cell in db.iterrows():
            spikeData, eventData = dataloader.get_session_ephys(
                cell, 'noiseburst')
            eventOnsetTimes = eventData.get_event_onset_times()
            #Align over the union of baseline and response windows
            alignmentRange = [baseRange[0], responseRange[1]]
            (spikeTimesFromEventOnset, trialIndexForEachSpike,
             indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
                 spikeData.timestamps, eventOnsetTimes, alignmentRange)
            #Per-trial spike counts in baseline and response windows
            nspkBase = spikesanalysis.spiketimes_to_spikecounts(
                spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
            nspkResp = spikesanalysis.spiketimes_to_spikecounts(
                spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
            #Wilcoxon rank-sum test: response vs baseline counts
            [zScore, pVal] = stats.ranksums(nspkResp, nspkBase)
            noiseZscore[indCell] = zScore
            noisePval[indCell] = pVal
        db['noiseZscore'] = noiseZscore
        db['noisePval'] = noisePval
Code example #9
    def calculate_tuning_curve_params(self, dataframe=None):
        """Estimate tuning-curve parameters for each cell.

        For every sound intensity, fits a Gaussian (in log2-frequency space)
        to the evoked spike counts, then derives:
          - threshold: lowest intensity from which every fitted curve's peak
            exceeds baseline mean + 1 SD (via self.index_all_true_after)
          - cf: characteristic frequency, the peak of the fitted curve at
            the threshold intensity
          - lowerFreq/upperFreq: frequencies where the curve fit 10 dB above
            threshold (2 intensity steps up) crosses baseline mean + 1 SD

        Results go in the 'threshold', 'cf', 'lowerFreq' and 'upperFreq'
        columns; cells that cannot be processed stay NaN.

        Args:
            dataframe (pandas.DataFrame): cells to process; defaults to the
                whole database (self.db). (A default of None was added to the
                signature: the body already handled None but callers could
                not actually omit the argument.)
        """
        #Use the whole database by default
        if dataframe is None:
            dataframe = self.db

        #Tuning curve estimation
        #DONE: Need to make this method use the continuous curves we fit

        cfs = np.full(len(dataframe), np.nan)
        thresholds = np.full(len(dataframe), np.nan)
        lowerFreqs = np.full(len(dataframe), np.nan)
        upperFreqs = np.full(len(dataframe), np.nan)

        #enumerate() gives a positional index for filling the arrays;
        #iterrows() yields index LABELS, which are wrong positions whenever
        #the dataframe is a filtered subset with a non-default index.
        for indIter, (indCell, cell) in enumerate(dataframe.iterrows()):
            try:
                spikeData, eventData = dataloader.get_session_ephys(cell, 'tc')
                bdata = dataloader.get_session_bdata(cell, 'tc')
            except IndexError:  #The cell does not have a tc
                print("No tc for cell {}".format(indCell))
                continue  #Result arrays are already NaN for this cell

            eventOnsetTimes = eventData.get_event_onset_times()

            if spikeData.timestamps is None:
                continue  #No spike data; leave this cell as NaN

            baseRange = [-0.2, 0]  #Baseline window (sec, relative to onset)
            responseRange = [0, 0.2]  #Response window (sec, relative to onset)
            alignmentRange = [baseRange[0], responseRange[1]]
            freqEachTrial = bdata['currentFreq']
            possibleFreq = np.unique(freqEachTrial)
            intensityEachTrial = bdata['currentIntensity']
            possibleIntensity = np.unique(intensityEachTrial)
            allBaselineCountArrays = []
            aboveBaseline = []
            popts = []
            fitFailed = False

            for indinten, inten in enumerate(possibleIntensity):
                #Collect, over all frequencies at this intensity, the
                #per-trial response counts (spks), matching stimulus
                #frequencies (freqs), and baseline counts (base)
                spks = np.array([])
                freqs = np.array([])
                base = np.array([])
                for indfreq, freq in enumerate(possibleFreq):
                    selectinds = np.flatnonzero((freqEachTrial == freq) & (
                        intensityEachTrial == inten))
                    selectedOnsetTimes = eventOnsetTimes[selectinds]
                    (spikeTimesFromEventOnset, trialIndexForEachSpike,
                     indexLimitsEachTrial
                     ) = spikesanalysis.eventlocked_spiketimes(
                         spikeData.timestamps, selectedOnsetTimes,
                         alignmentRange)
                    nspkBase = spikesanalysis.spiketimes_to_spikecounts(
                        spikeTimesFromEventOnset, indexLimitsEachTrial,
                        baseRange)
                    nspkResp = spikesanalysis.spiketimes_to_spikecounts(
                        spikeTimesFromEventOnset, indexLimitsEachTrial,
                        responseRange)
                    base = np.concatenate([base, nspkBase.ravel()])
                    spks = np.concatenate([spks, nspkResp.ravel()])
                    freqs = np.concatenate(
                        [freqs,
                         np.ones(len(nspkResp.ravel())) * freq])

                allBaselineCountArrays.append(base)

                #Fit a Gaussian in log2(freq) space for this intensity.
                #NOTE(review): p0 centers on possibleFreq[7], which assumes
                #at least 8 distinct frequencies were presented — confirm.
                try:
                    popt, pcov = optimize.curve_fit(
                        self.gaussian,
                        np.log2(freqs),
                        spks,
                        p0=[1, np.log2(possibleFreq[7]), 1],
                        bounds=([0, np.log2(possibleFreq[0]),
                                 0], [inf,
                                      np.log2(possibleFreq[-1]), inf]))
                    popts.append(popt)  #Save the curve parameters
                except RuntimeError:
                    print("RUNTIME ERROR, Cell {}".format(indCell))
                    #Abandon this cell entirely. (The original bare 'break'
                    #fell through to the post-loop analysis with a partial
                    #popts list and overwrote the sentinel values.)
                    fitFailed = True
                    break

                #Record whether the fitted curve's maximum — found on the
                #continuous function, not at the discrete sample points —
                #exceeds baseline mean + 1 SD
                fm = lambda x: -self.gaussian(x, *popt)
                r = optimize.minimize_scalar(
                    fm,
                    bounds=(np.log2(possibleFreq[0]),
                            np.log2(possibleFreq[-1])))
                aboveBaseline.append(
                    self.gaussian(r["x"], *popt) > (base.mean() +
                                                    base.std()))

            if fitFailed:
                continue  #Leave this cell as NaN

            aboveBaseline = np.array(aboveBaseline)
            indintenFirstAbove = self.index_all_true_after(aboveBaseline)
            #TODO: I need to find the max for THIS curve and save that as cf
            #TODO: Need to save the intensity
            if indintenFirstAbove is None:
                #No intensity's curve exceeds the baseline criterion
                continue  #threshold/cf/lowerFreq/upperFreq stay NaN

            threshold = possibleIntensity[indintenFirstAbove]
            #CF is the peak of the fitted curve at the threshold intensity
            poptFirstAbove = popts[indintenFirstAbove]
            fm = lambda x: -self.gaussian(x, *poptFirstAbove)
            r = optimize.minimize_scalar(
                fm,
                bounds=(np.log2(possibleFreq[0]),
                        np.log2(possibleFreq[-1])))
            cf = 2**r["x"]  #Peak x is log2(freq); convert back to Hz

            #Bandwidth is measured 10 dB above threshold
            #NOTE(review): "+2" assumes 5 dB intensity steps — confirm
            indinten10aboveThresh = indintenFirstAbove + 2
            baselineAllIntensities = np.concatenate(allBaselineCountArrays)

            lowerFreq = np.nan
            upperFreq = np.nan
            try:
                popt10Above = popts[indinten10aboveThresh]
                #Locate the maximum of the 10-dB-above curve so we can
                #search for a root on each side of it
                fm = lambda x: -self.gaussian(x, *popt10Above)
                r = optimize.minimize_scalar(
                    fm,
                    bounds=(np.log2(possibleFreq[0]),
                            np.log2(possibleFreq[-1])))
                xMax = r["x"]

                #The function to find roots for: curve minus (baseline
                #mean + 1 SD)
                fr = lambda x: self.gaussian(
                    x, *popt10Above) - (baselineAllIntensities.mean() +
                                        baselineAllIntensities.std())

                #brentq requires fr(a) and fr(b) to have opposite signs.
                #Lower edge: between the lowest frequency and the peak
                alower = np.log2(possibleFreq[0])
                if np.sign(fr(alower)) != np.sign(fr(xMax)):
                    lowerFreq = 2**optimize.brentq(fr, alower, xMax)

                #Upper edge: between the peak and the highest frequency
                bupper = np.log2(possibleFreq[-1])
                if np.sign(fr(xMax)) != np.sign(fr(bupper)):
                    upperFreq = 2**optimize.brentq(fr, xMax, bupper)
            except IndexError:
                #No recording 10 dB above threshold: bandwidth edges stay
                #NaN, but cf and threshold are still valid and are saved
                #below. (The original 'continue' here discarded them,
                #contradicting its own comment that they should be kept.)
                pass

            #Things to save
            thresholds[indIter] = threshold
            cfs[indIter] = cf
            lowerFreqs[indIter] = lowerFreq
            upperFreqs[indIter] = upperFreq

        dataframe['threshold'] = thresholds
        dataframe['cf'] = cfs
        dataframe['lowerFreq'] = lowerFreqs
        dataframe['upperFreq'] = upperFreqs