def freq_tuning_fit(eventOnsetTimes, spikeTimestamps, bdata, timeRange = [-0.2, 0.2], intensityInds = None):
    '''Estimate a cell's best frequency by fitting a gaussian to its tuning data.

    For each requested intensity, spike rates inside the best response window
    are fit (in log2-frequency space) with a gaussian via response_curve_fit.

    Args:
        eventOnsetTimes, spikeTimestamps: stimulus and spike times (seconds).
        bdata: behavior data with 'currentFreq' and 'currentIntensity' per trial.
        timeRange: window around each event used for event-locking.
        intensityInds: indices of intensities to fit; None means all of them.
    Returns:
        (gaussFits, bestFreqs, Rsquareds, window), one list entry per intensity.
    '''
    freqEachTrial = bdata['currentFreq']
    intensityEachTrial = bdata['currentIntensity']
    possibleIntensities = np.unique(intensityEachTrial)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    trialsEachInt = behavioranalysis.find_trials_each_type(intensityEachTrial, possibleIntensities)
    if intensityInds is None:
        intensityInds = range(len(possibleIntensities))
    spikeCountMat, window = best_window_freq_tuning(spikeTimesFromEventOnset, indexLimitsEachTrial, freqEachTrial)
    windowDuration = window[1] - window[0]
    gaussFits = []
    bestFreqs = []
    Rsquareds = []
    for intensityInd in intensityInds:
        trialsThisIntensity = trialsEachInt[:, intensityInd]
        tuningSpikeRates = spikeCountMat[trialsThisIntensity].flatten() / windowDuration
        freqsThisIntensity = freqEachTrial[trialsThisIntensity]
        # Fit in log2(frequency) space; the fitted centre is the best frequency.
        gaussFit, Rsquared = response_curve_fit(np.log2(freqsThisIntensity), tuningSpikeRates)
        if gaussFit is not None:
            bestFreq = 2**gaussFit[0]
        else:
            bestFreq = None
        gaussFits.append(gaussFit)
        bestFreqs.append(bestFreq)
        Rsquareds.append(Rsquared)
    return gaussFits, bestFreqs, Rsquareds, window
def tuning_raster(bdata, ephysdata, gs): plt.subplot(gs[0, 1]) freqEachTrial = bdata['currentFreq'] eventOnsetTimes = ephysData['events']['stimOn'] spikeTimeStamps = ephysData['spikeTimes'] timeRange = [-0.3, 0.6] possiblefreqs = np.unique(freqEachTrial) freqLabels = [round(x/1000, 1) for x in possiblefreqs] trialsEachCond = behavioranalysis.find_trials_each_type(freqEachTrial, possiblefreqs) spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes( spikeTimeStamps, eventOnsetTimes, timeRange) #print len(freqEachTrial), len(eventOnsetTimes) pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset,indexLimitsEachTrial,timeRange, trialsEachCond=trialsEachCond, labels=freqLabels) xlabel = 'time (s)' ylabel = 'Frequency (kHz)' plt.title('Tuning Curve') plt.xlabel(xlabel) plt.ylabel(ylabel) '''
def plot_raster(self, spikeTimestamps, eventOnsetTimes, sortArray = [], replace = 0, timeRange = [-0.5, 1], ms = 4):
    '''Plot a raster of spikes aligned to event onsets.

    Spike timestamps are re-referenced to the event onset times and handed to
    extraplots.raster_plot. Handles both sorted and unsorted rasters.

    sortArray (array): a parameter value for each trial; when given, output is
        sorted by the possible values of the parameter.
    replace: if truthy, clear the current axes instead of opening a new figure
        (so this works inside subplots).
    ms: marker size for the raster dots.
    '''
    if len(sortArray) > 0:
        possibleValues = np.unique(sortArray)
        trialsEachCond = behavioranalysis.find_trials_each_type(sortArray, possibleValues)
    else:
        trialsEachCond = []
    (spikeTimesFromEventOnset,
     trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimestamps, eventOnsetTimes, timeRange)
    #pdb.set_trace()
    if replace:
        # cla() (rather than figure()) so this works with subplots
        cla()
    pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset,
                                                   indexLimitsEachTrial,
                                                   timeRange,
                                                   trialsEachCond = trialsEachCond)
    setp(pRaster, ms=ms)
def at_best_freq(spikeTimeStamps, eventOnsetTimes, charFreq, frequencies, timeRange=[0.0,0.1], fullRange = [0.0, 0.7]):
    '''Decide whether charFreq lies at (or immediately adjacent to) the frequency
    evoking the strongest response, and whether that response exceeds baseline.

    Args:
        spikeTimeStamps, eventOnsetTimes: spike and stimulus times (seconds).
        charFreq: the cell's characteristic frequency to test.
        frequencies: stimulus frequency for each trial.
        timeRange: response window after onset; fullRange: event-locking window.
    Returns:
        (atBestFreq, bestFreq): boolean verdict and the frequency with the
        highest mean response.
    '''
    atBestFreq = False
    numFreqs = np.unique(frequencies)  # unique stimulus frequencies (an array of values, despite the name)
    spikeArray = np.zeros(len(numFreqs))  # mean response rate per frequency
    trialsEachCond = behavioranalysis.find_trials_each_type(frequencies, numFreqs)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    # Baseline window: 0.2 s after the response window through the end of fullRange.
    baseTimeRange = [timeRange[1]+0.2, fullRange[1]]
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    baselineSpikeRate = np.mean(baseSpikeCountMat)/(baseTimeRange[1]-baseTimeRange[0])
    baselineSpikeSDev = np.std(baseSpikeCountMat)/(baseTimeRange[1]-baseTimeRange[0])
    for freq in range(len(numFreqs)):
        trialsThisFreq = trialsEachCond[:,freq]
        # Trim one extra event so the count matrix matches the behavior trials.
        if spikeCountMat.shape[0] != len(trialsThisFreq):
            spikeCountMat = spikeCountMat[:-1,:]
            print "FIXME: Using bad hack to make event onset times equal number of trials"
        thisFreqCounts = spikeCountMat[trialsThisFreq].flatten()
        spikeArray[freq] = np.mean(thisFreqCounts)/(timeRange[1]-timeRange[0])
    bestFreqIndex = np.argmax(spikeArray)
    bestFreq = numFreqs[bestFreqIndex]
    # Accept charFreq if it falls within one frequency step of the best one
    # (clamped at the array edges).
    minIndex = bestFreqIndex-1 if bestFreqIndex>0 else 0
    maxIndex = bestFreqIndex+1 if bestFreqIndex<(len(numFreqs)-1) else len(numFreqs)-1
    bestFreqs = [numFreqs[minIndex], numFreqs[maxIndex]]
    if charFreq >= bestFreqs[0] and charFreq <= bestFreqs[1]:
        # Require the peak response to exceed baseline by one standard deviation.
        if np.max(spikeArray) > (baselineSpikeRate + baselineSpikeSDev):
            atBestFreq = True
    return atBestFreq, bestFreq
def band_select(spikeTimeStamps, eventOnsetTimes, amplitudes, bandwidths, timeRange, fullRange = [0.0, 2.0]):
    '''Compute mean spike count and SEM for each (bandwidth, amplitude) combination.

    Args:
        spikeTimeStamps, eventOnsetTimes: spike and stimulus times (seconds).
        amplitudes, bandwidths: per-trial stimulus parameters.
        timeRange: [start, stop] response window relative to onset (seconds).
        fullRange: event-locking window; baseline is taken from
            timeRange[1]+0.5 up to fullRange[1].
    Returns:
        (spikeArray, errorArray, baselineSpikeRate): mean counts and SEM with
        shape (numBandwidths, numAmplitudes), and baseline rate in spikes/s.
    '''
    numBands = np.unique(bandwidths)   # unique bandwidth values (array, despite the name)
    numAmps = np.unique(amplitudes)    # unique amplitude values
    spikeArray = np.zeros((len(numBands), len(numAmps)))
    errorArray = np.zeros_like(spikeArray)
    trialsEachCond = behavioranalysis.find_trials_each_combination(bandwidths, numBands, amplitudes, numAmps)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    # Baseline window: 0.5 s after the response window through the end of fullRange.
    baseTimeRange = [timeRange[1]+0.5, fullRange[1]]
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    baselineSpikeRate = np.mean(baseSpikeCountMat)/(baseTimeRange[1]-baseTimeRange[0])
    plt.hold(True)
    for amp in range(len(numAmps)):
        trialsThisAmp = trialsEachCond[:,:,amp]
        for band in range(len(numBands)):
            trialsThisBand = trialsThisAmp[:,band]
            # Trim one extra event so the count matrix matches the behavior trials.
            if spikeCountMat.shape[0] != len(trialsThisBand):
                spikeCountMat = spikeCountMat[:-1,:]
                print "FIXME: Using bad hack to make event onset times equal number of trials"
            thisBandCounts = spikeCountMat[trialsThisBand].flatten()
            spikeArray[band, amp] = np.mean(thisBandCounts)
            errorArray[band,amp] = stats.sem(thisBandCounts)
    return spikeArray, errorArray, baselineSpikeRate
def test_event_onsets(self):
    '''Load open-ephys events and spikes, keep only the first pulse of each
    laser train, and plot event-locked spike times.'''
    eventFile = os.path.join(testDataDir, 'all_channels.events')
    spikesFile = os.path.join(testDataDir, 'Tetrode2.spikes')
    eventData = loadopenephys.Events(eventFile)
    dataSpikes = loadopenephys.DataSpikes(spikesFile)
    # Convert sample indices to seconds.
    samplingRate = eventData.samplingRate
    spikeTimestamps = dataSpikes.timestamps / samplingRate
    eventOnsetTimes = eventData.get_event_onset_times() / samplingRate
    assert len(eventOnsetTimes)==513
    # Keep only events preceded by at least 0.5 s of silence
    # (i.e. the first pulse in each laser train).
    eventOnsetTimes = spikesanalysis.minimum_event_onset_diff(eventOnsetTimes, 0.5)
    assert len(eventOnsetTimes)==103
    timeRange = [-0.5, 1.0]
    (spikeTimesFromEventOnset,
     trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimestamps, eventOnsetTimes, timeRange)
    plt.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, '.')
def AM_vector_strength(spikeTimestamps, eventOnsetTimes, behavData, timeRange):
    '''Compute vector strength, Rayleigh statistic, and p-value for each AM rate.

    Args:
        spikeTimestamps, eventOnsetTimes: spike and stimulus times (seconds).
        behavData: behavior data with 'currentFreq' (modulation rate) per trial.
        timeRange: NOTE(review) — immediately overwritten with [0, 0.5] below;
            kept as-is since callers may rely on the fixed window. TODO confirm.
    Returns:
        (vs_array, pval_array, ral_array), one entry per unique modulation rate.
    '''
    currentFreq = behavData['currentFreq']
    possibleFreq = np.unique(currentFreq)
    vs_array = np.array([])
    ral_array = np.array([])
    pval_array = np.array([])
    timeRange = [0, 0.5]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    for freq in possibleFreq:
        select = np.flatnonzero(currentFreq==freq)
        selectspikes = spikeTimesFromEventOnset[np.in1d(trialIndexForEachSpike, select)]
        # FIX: removed the unused `squeezedinds` computation (it was O(n^2),
        # calling list.index per spike) and the unused `indsAfterFirstCycle`.
        # Ignore spikes within the first modulation cycle to skip the onset response.
        spikesAfterFirstCycle = selectspikes[selectspikes>(1.0/freq)]
        strength, phase = vectorstrength(spikesAfterFirstCycle, 1.0/freq)
        vs_array = np.concatenate((vs_array, np.array([strength])))
        # Rayleigh test on spike phases for the vector-strength p-value.
        radsPerSec = freq*2*np.pi
        spikeRads = (spikesAfterFirstCycle*radsPerSec)%(2*np.pi)
        ral_test = circstats.rayleigh_test(spikeRads)
        pval = np.array([ral_test['pvalue']])
        # Rayleigh statistic: 2 * n * R^2
        ral = np.array([2*len(spikesAfterFirstCycle)*(strength**2)])
        pval_array = np.concatenate((pval_array, pval))
        ral_array = np.concatenate((ral_array, ral))
    return vs_array, pval_array, ral_array
def average_AM_firing_rate(spikeTimestamps, eventOnsetTimes, behavData, timeRange):
    '''Return the average firing rate (spikes/s) within timeRange for each AM rate.

    Args:
        spikeTimestamps, eventOnsetTimes: spike and stimulus times (seconds).
        behavData: behavior data with 'currentFreq' (modulation rate) per trial.
        timeRange: [start, stop] window relative to stimulus onset.
    Returns:
        fr_array: one mean rate per unique value of behavData['currentFreq'].
    '''
    currentFreq = behavData['currentFreq']
    possibleFreq = np.unique(currentFreq)
    fr_array = np.array([])
    # Event-locking only needs to happen once; the loop then selects per rate.
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    for freq in possibleFreq:
        select = np.flatnonzero(currentFreq==freq)
        # FIX: removed unused `selectspikes`/`selectinds` computations (dead code).
        selectlimits = indexLimitsEachTrial[:, select]
        # Spike count per trial is the difference between its index limits.
        numSpikesEachTrial = np.squeeze(np.diff(selectlimits, axis=0))
        spikeRateEachTrial = numSpikesEachTrial / float(timeRange[1]-timeRange[0])
        averageFR = spikeRateEachTrial.mean()
        fr_array = np.concatenate((fr_array, np.array([averageFR])))
    return fr_array
def plot_separated_rasters(gridspec, xcoords, ycoord, firstSort, secondSort, spikeTimestamps, eventOnsetTimes, timeRange=[-0.2,1.5], ylabel='bandwidth (octaves)', xlabel='Time from sound onset (sec)', titles=None, duplicate=False, colours=None, plotHeight=1):
    '''Plot one raster per secondSort value (stacked vertically), trials sorted by firstSort.

    Args:
        gridspec: the GridSpec to draw into (shadows the gridspec module name).
        xcoords: [left, right] column span; ycoord: starting row.
        firstSort, secondSort: per-trial parameter arrays.
        duplicate: workaround for harmonic sessions — if a firstSort condition
            has no trials for this secondSort value, borrow from the next one.
        colours: per-raster color cycles; defaults to grey and green pairs.
        plotHeight: number of gridspec rows per raster.
    '''
    firstSortLabels = ['{}'.format(first) for first in np.unique(firstSort)]
    numFirst = np.unique(firstSort)  # unique firstSort values (array, despite the name)
    numSec = np.unique(secondSort)   # unique secondSort values
    trialsEachCond = behavioranalysis.find_trials_each_combination(firstSort, numFirst, secondSort, numSec)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    if colours is None:
        # NOTE: len(numFirst)/2+1 relies on Python 2 integer division.
        colours = [np.tile(['0.25','0.6'],len(numFirst)/2+1), np.tile(['#4e9a06','#8ae234'],len(numFirst)/2+1)]
    for ind, secondArrayVal in enumerate(numSec):
        plt.subplot(gridspec[ycoord+ind*plotHeight:ycoord+ind*plotHeight+plotHeight, xcoords[0]:xcoords[1]])
        trialsThisSecondVal = trialsEachCond[:, :, ind]
        # a dumb workaround specifically for plotting harmonic sessions
        if duplicate:
            for ind2, first in enumerate(numFirst):
                if not any(trialsThisSecondVal[:,ind2]):
                    # assumes a condition at index ind+1 exists — would raise
                    # IndexError on the last secondSort value; TODO confirm
                    trialsThisSecondVal[:,ind2]=trialsEachCond[:,ind2,ind+1]
        pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange,
                                                       trialsEachCond=trialsThisSecondVal, labels=firstSortLabels,
                                                       colorEachCond = colours[ind])
        plt.setp(pRaster, ms=4)
        plt.ylabel(ylabel)
        # Only the bottom raster gets the x-axis label.
        if ind == len(numSec) - 1:
            plt.xlabel(xlabel)
        if titles is not None:
            plt.title(titles[ind])
def plot_sorted_psth(spikeTimeStamps, eventOnsetTimes, sortArray, timeRange=[-0.5,1], binsize = 50, lw=2, plotLegend=False, *args, **kwargs):
    '''Plot a PSTH of event-locked spikes, optionally sorted by a trial parameter.

    Args:
        sortArray: per-trial parameter; empty means a single unsorted PSTH.
        binsize (float): PSTH bin size in ms.
        lw: line width; plotLegend: label each line with its sortArray value.
        Extra args/kwargs are forwarded to extraplots.plot_psth.
    '''
    binsize = binsize/1000.0  # convert ms to seconds
    # Group trials by the sorting parameter when one is supplied.
    if len(sortArray) > 0:
        trialsEachCond = behavioranalysis.find_trials_each_type(sortArray, np.unique(sortArray))
    else:
        trialsEachCond = []
    # Pad the alignment window by one bin on the left so the first bin is complete.
    alignmentRange = [timeRange[0]-binsize, timeRange[1]]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, alignmentRange)
    binEdges = np.around(np.arange(timeRange[0]-binsize, timeRange[1]+2*binsize, binsize), decimals=2)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, binEdges)
    pPSTH = extraplots.plot_psth(spikeCountMat/binsize, 1, binEdges[:-1], trialsEachCond, *args, **kwargs)
    plt.setp(pPSTH, lw=lw)
    plt.hold(True)
    zline = plt.axvline(0, color='0.75', zorder=-10)
    plt.xlim(timeRange)
    if plotLegend:
        if len(sortArray) > 0:
            for label, pln in zip(np.unique(sortArray), pPSTH):
                pln.set_label(label)
        plt.legend(ncol=3, loc='best')
def noise_raster(ephysData, gs):
    '''Plot a noise-burst raster in subplot gs[0, 1], reporting baseline firing
    rate (mean +/- SEM, spikes/s) in the title.'''
    plt.subplot(gs[0, 1])
    eventOnsetTimes = ephysData['events']['stimOn']
    spikeTimeStamps = ephysData['spikeTimes']
    timeRange = [-0.3, 0.6]
    baseTimeRange = [-0.15, -0.05]  # pre-stimulus window used for baseline rate
    trialsEachCond = []  # no sorting: single unsorted raster
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    baseSpikeMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    # Convert counts in the baseline window to rates (spikes/s).
    base_avg = np.mean(baseSpikeMat) / (baseTimeRange[1] - baseTimeRange[0])
    base_sem = stats.sem(baseSpikeMat) / (baseTimeRange[1] - baseTimeRange[0])
    pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset,indexLimitsEachTrial,timeRange, trialsEachCond=trialsEachCond)
    title = "Noise Bursts (Base FR: {} +/- {} spikes/s)".format(round(base_avg, 2), round(base_sem, 2))
    #title = "Noise Bursts (Base FR: {} spikes/s)".format(round(base_avg, 2))
    #title = "Noise Bursts"
    xlabel = 'Time from sound onset (s)'
    ylabel = 'Trial'
    plt.title(title, fontsize = 'medium')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
'''
def avg_spikes_in_event_locked_timerange_each_cond(spikeTimestamps, trialsEachCond, timeRange, eventOnsetTimes=None):
    '''Return average event-locked spike counts per condition within timeRange.

    BUG FIX: the body referenced `eventOnsetTimes`, which was never a
    parameter (a NameError unless a same-named global happened to exist).
    It is now an explicit keyword argument, appended with a default of None
    so the existing positional interface stays backward-compatible.

    Raises:
        ValueError: if eventOnsetTimes is not provided.
    '''
    if eventOnsetTimes is None:
        raise ValueError('eventOnsetTimes must be provided')
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    spikeArray = avg_locked_spikes_per_condition(indexLimitsEachTrial, trialsEachCond)
    return spikeArray
def avg_spikes_in_event_locked_timerange_each_cond(self, spikeTimestamps, trialsEachCond, eventOnsetTimes, timeRange):
    '''Return average event-locked spike counts per condition within timeRange.

    Args:
        spikeTimestamps, eventOnsetTimes: spike and event times (seconds).
        trialsEachCond: boolean matrix (trials x conditions).
        timeRange: [start, stop] window relative to each event onset.
    '''
    # The ephys can record one more event than the behavior has trials;
    # drop the last event so the two align.
    if len(eventOnsetTimes) != np.shape(trialsEachCond)[0]:
        eventOnsetTimes = eventOnsetTimes[:-1]
        print "Removing last event onset time to align with behavior data"
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    spikeArray = self.avg_locked_spikes_per_condition(indexLimitsEachTrial, trialsEachCond)
    return spikeArray
def laser_response(spikeTimeStamps, eventOnsetTimes, timeRange=[0.0, 0.1], baseRange=[0.5, 0.6]):
    '''Return True if firing during timeRange exceeds baseline with p < 1e-5.

    Uses spikesanalysis.response_score for the p-value and compares mean
    rates in the response and baseline windows.
    '''
    alignmentRange = [min(timeRange), max(baseRange)]
    (spikeTimesFromEventOnset,
     trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimeStamps, eventOnsetTimes, alignmentRange)
    zStatsEachRange, pValueEachRange, maxZvalue = spikesanalysis.response_score(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange, timeRange)
    baseCounts = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    laserCounts = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baselineSpikeRate = np.mean(baseCounts)/(baseRange[1]-baseRange[0])
    laserSpikeRate = np.mean(laserCounts)/(timeRange[1]-timeRange[0])
    isResponsive = (laserSpikeRate > baselineSpikeRate and pValueEachRange[0] < 0.00001)
    return isResponsive
def avg_spikes_in_event_locked_timerange_each_cond( spikeTimestamps, trialsEachCond, eventOnsetTimes, timeRange):
    '''Return average event-locked spike counts per condition within timeRange.

    Args:
        spikeTimestamps, eventOnsetTimes: spike and event times (seconds).
        trialsEachCond: boolean matrix (trials x conditions).
        timeRange: [start, stop] window relative to each event onset.
    '''
    # The ephys can record one more event than the behavior has trials;
    # drop the last event so the two align.
    if len(eventOnsetTimes) != np.shape(trialsEachCond)[0]:
        eventOnsetTimes = eventOnsetTimes[:-1]
        print "FIXME: Using bad hack to make event onset times equal number of trials"
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    spikeArray = avg_locked_spikes_per_condition(indexLimitsEachTrial, trialsEachCond)
    return spikeArray
def calculate_tuning_curve_inputs(spikeTimeStamps, eventOnsetTimes, firstSort, secondSort, timeRange, baseRange=[-1.1,-0.1], errorType = 'sem', info='full'):
    '''Compute mean response rate and error for each combination of two sorting variables.

    Args:
        spikeTimeStamps, eventOnsetTimes: spike and stimulus times (seconds).
        firstSort, secondSort: per-trial parameter arrays.
        timeRange: response window; baseRange: baseline window (both re onset).
        errorType: 'sem' or 'std'.
        info: 'full' also returns the raw count matrix and condition matrix;
            'plotting' returns only the summary arrays.
    Returns:
        dict with 'responseArray', 'errorArray', 'baselineSpikeRate',
        'baselineSpikeError' (and, for info='full', 'spikeCountMat',
        'trialsEachCond'). Conditions with no trials get NaN entries.
    Raises:
        ValueError: for an unknown errorType; NameError: for an unknown info.
    '''
    # FIX: validate errorType up front. Previously an unknown value left
    # `baselineError` undefined (NameError when building the dict) and
    # silently produced all-zero error arrays.
    if errorType not in ('sem', 'std'):
        raise ValueError("errorType must be 'sem' or 'std'")
    fullTimeRange = [min(min(timeRange),min(baseRange)), max(max(timeRange),max(baseRange))]
    numFirst = np.unique(firstSort)
    numSec = np.unique(secondSort)
    duration = timeRange[1]-timeRange[0]
    spikeArray = np.zeros((len(numFirst), len(numSec)))
    errorArray = np.zeros_like(spikeArray)
    trialsEachCond = behavioranalysis.find_trials_each_combination(firstSort, numFirst, secondSort, numSec)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullTimeRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    baselineDuration = baseRange[1]-baseRange[0]
    baselineSpikeRate = np.mean(baseSpikeCountMat)/baselineDuration
    if errorType == 'sem':
        baselineError = stats.sem(baseSpikeCountMat)/baselineDuration
    else:
        baselineError = np.std(baseSpikeCountMat)/baselineDuration
    for sec in range(len(numSec)):
        trialsThisSec = trialsEachCond[:,:,sec]
        for first in range(len(numFirst)):
            trialsThisFirst = trialsThisSec[:,first]
            # Trim one extra event so the count matrix matches the behavior trials.
            if spikeCountMat.shape[0] != len(trialsThisFirst):
                spikeCountMat = spikeCountMat[:-1,:]
            if any(trialsThisFirst):
                thisFirstCounts = spikeCountMat[trialsThisFirst].flatten()
                spikeArray[first,sec] = np.mean(thisFirstCounts)/duration
                if errorType == 'sem':
                    errorArray[first,sec] = stats.sem(thisFirstCounts)/duration
                else:
                    errorArray[first,sec] = np.std(thisFirstCounts)/duration
            else:
                # No trials for this combination: mark it as missing.
                spikeArray[first,sec] = np.nan
                errorArray[first,sec] = np.nan
    if info=='full':
        tuningDict = {'responseArray':spikeArray,
                      'errorArray':errorArray,
                      'baselineSpikeRate':baselineSpikeRate,
                      'baselineSpikeError':baselineError,
                      'spikeCountMat':spikeCountMat,
                      'trialsEachCond':trialsEachCond}
    elif info=='plotting':
        tuningDict = {'responseArray':spikeArray,
                      'errorArray':errorArray,
                      'baselineSpikeRate':baselineSpikeRate,
                      'baselineSpikeError':baselineError}
    else:
        # FIX: professional error message (exception type kept for compatibility).
        raise NameError('Unknown info type: {0}'.format(info))
    return tuningDict
def laser_tuning_curve(bdata, ephysData, gs):
    '''Plot frequency tuning curves in subplot gs[3, 1], one line per
    (laser, intensity) combination.'''
    plt.subplot(gs[3, 1])
    freqEachTrial = bdata['currentFreq']
    laserEachTrial = bdata['laserOn']
    intEachTrial = bdata['currentIntensity']
    eventOnsetTimes = ephysData['events']['stimOn']
    spikeTimeStamps = ephysData['spikeTimes']
    timeRange = [0.0, 0.1]  # response window after sound onset (seconds)
    possiblefreqs = np.unique(freqEachTrial)
    freqLabels = [round(x/1000, 1) for x in possiblefreqs]  # Hz -> kHz labels
    possiblelaser = np.unique(laserEachTrial)
    possibleInts = np.unique(intEachTrial)
    laserTrialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possiblefreqs, laserEachTrial, possiblelaser)
    intTrialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possiblefreqs, intEachTrial, possibleInts)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    for intInd, inten in enumerate(possibleInts):
        line = '-'
        if intInd == 0:
            line = '--'  # dashed line distinguishes the first intensity
        # NOTE(review): indLaser iterates over laser VALUES but is also used as
        # an index into the condition matrix — works only if values are 0/1; confirm.
        for indLaser in possiblelaser:
            color = 'black'
            if indLaser == 1:
                color = 'blue'
            laser = 'No Laser - '
            if indLaser == 1:
                laser = 'Laser - '
            curveName = laser + str(inten) + ' dB'
            # Trials in BOTH this laser condition and this intensity.
            trialsEachCond = laserTrialsEachCond[:,:,indLaser] & intTrialsEachCond[:,:,intInd]
            # NOTE(review): bare except — presumably catches a shape mismatch
            # when there is one more event than behavior trials; confirm.
            try:
                freq_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, indexLimitsEachTrial)
            except:
                behavIndexLimitsEachTrial = indexLimitsEachTrial[0:2,:-1]
                freq_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, behavIndexLimitsEachTrial)
            xpoints = [x for x in range(0, len(possiblefreqs))]
            #plt.semilogx(possiblefreqs, freq_avgs, linestyle = line, color = color, label = curveName)
            #plt.plot(xvalues, freq_avgs, linestyle = line, color = 'black', label = curveName, xlabels = possiblefreqs)
            plt.plot(xpoints, freq_avgs, linestyle = line, color = color, marker = 'o', label = curveName)
            plt.xticks(xpoints, freqLabels)
            plt.hold(True)
    plt.title('Frequency Tuning Curve - Laser Session')
    plt.legend(fontsize = 'x-small')
    plt.hold(False)
def plot_laser_bandwidth_summary(cell, bandIndex):
    '''Plot laser/no-laser bandwidth rasters (left) and bandwidth tuning curves
    (right) for one cell, then save the figure as an svg.

    Args:
        cell: record with at least 'subject' and the fields get_cell_info needs.
        bandIndex: index of the bandwidth session in the cell's session lists.
    '''
    cellInfo = get_cell_info(cell)
    loader = dataloader.DataLoader(cell['subject'])
    plt.clf()
    gs = gridspec.GridSpec(2, 4)
    eventData = loader.get_session_events(cellInfo['ephysDirs'][bandIndex])
    spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][bandIndex], cellInfo['tetrode'], cluster=cellInfo['cluster'])
    eventOnsetTimes = loader.get_event_onset_times(eventData)
    spikeTimestamps = spikeData.timestamps
    timeRange = [-0.2, 1.5]
    bandBData = loader.get_session_behavior(cellInfo['behavDirs'][bandIndex])
    bandEachTrial = bandBData['currentBand']
    laserTrial = bandBData['laserTrial']
    numBands = np.unique(bandEachTrial)  # unique bandwidth values
    numLas = np.unique(laserTrial)       # laser conditions
    firstSortLabels = ['{}'.format(band) for band in np.unique(bandEachTrial)]
    secondSortLabels = ['no laser','laser']
    trialsEachCond = behavioranalysis.find_trials_each_combination(bandEachTrial, numBands, laserTrial, numLas)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    # NOTE: len(numBands)/2+1 relies on Python 2 integer division.
    colours = [np.tile(['0.25','0.6'],len(numBands)/2+1), np.tile(['#4e9a06','#8ae234'],len(numBands)/2+1)]
    for ind, secondArrayVal in enumerate(numLas):
        plt.subplot(gs[ind, 0:2])
        trialsThisSecondVal = trialsEachCond[:, :, ind]
        pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange,
                                                       trialsEachCond=trialsThisSecondVal, labels=firstSortLabels,
                                                       colorEachCond = colours[ind])
        plt.setp(pRaster, ms=4)
        #plt.title(secondSortLabels[ind])
        plt.ylabel('bandwidth (octaves)')
        # Only the bottom raster gets the x-axis label.
        if ind == len(numLas) - 1:
            plt.xlabel("Time from sound onset (sec)")
    # -- plot Yashar plots for bandwidth data --
    plt.subplot(gs[0:, 2:])
    spikeArray, errorArray, baseSpikeRate = band_select(spikeTimestamps, eventOnsetTimes, laserTrial, bandEachTrial, timeRange = [0.0, 1.0])
    band_select_plot(spikeArray, errorArray, baseSpikeRate, numBands, legend=True, labels=secondSortLabels,
                     linecolours=['0.25','#4e9a06'], errorcolours=['0.6','#8ae234'])
    fig_path = '/home/jarauser/Pictures/cell reports'
    fig_name = '{0}_{1}_{2}um_TT{3}Cluster{4}.svg'.format(cellInfo['subject'], cellInfo['date'], cellInfo['depth'], cellInfo['tetrode'], cellInfo['cluster'])
    full_fig_path = os.path.join(fig_path, fig_name)
    fig = plt.gcf()
    fig.set_size_inches(16, 8)
    fig.savefig(full_fig_path, format = 'svg', bbox_inches='tight')
def onset_sustained_spike_proportion(spikeTimeStamps, eventOnsetTimes, onsetTimeRange=[0.0,0.05], sustainedTimeRange=[0.2,1.0]):
    '''Return the fraction of event-locked spikes falling in the onset and
    sustained windows, relative to the full window spanning both.

    Returns:
        (propOnset, propSustained): scalar proportions.
    '''
    fullTimeRange = [onsetTimeRange[0], sustainedTimeRange[1]]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullTimeRange)
    onsetSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, onsetTimeRange)
    sustainedSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, sustainedTimeRange)
    fullSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, fullTimeRange)
    # FIX: use np.sum over the whole count matrix; builtin sum() only reduced
    # the first axis, yielding length-1 arrays instead of scalar proportions.
    totalSpikes = np.sum(fullSpikeCountMat)
    propOnset = 1.0*np.sum(onsetSpikeCountMat)/totalSpikes
    propSustained = 1.0*np.sum(sustainedSpikeCountMat)/totalSpikes
    return propOnset, propSustained
def am_raster(bdata, ephysData, gs):
    '''Plot a rate-sorted AM raster in subplot gs[1, 1], reporting baseline
    firing rate (mean +/- SEM, spikes/s) in the title.'''
    plt.subplot(gs[1, 1])
    freqEachTrial = bdata['currentFreq']
    eventOnsetTimes = ephysData['events']['stimOn']
    spikeTimeStamps = ephysData['spikeTimes']
    timeRange = [-0.3, 0.6]
    baseTimeRange = [-0.15, -0.05]  # pre-stimulus window used for baseline rate
    possiblefreqs = np.unique(freqEachTrial)
    freqLabels = [round(x/1000, 1) for x in possiblefreqs]  # Hz -> kHz labels
    trialsEachCond = behavioranalysis.find_trials_each_type(freqEachTrial, possiblefreqs)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    #print len(freqEachTrial), len(eventOnsetTimes)
    baseSpikeMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    # Convert counts in the baseline window to rates (spikes/s).
    base_avg = np.mean(baseSpikeMat) / (baseTimeRange[1] - baseTimeRange[0])
    base_sem = stats.sem(baseSpikeMat) / (baseTimeRange[1] - baseTimeRange[0])
    # for freqInd, freq in enumerate(possiblefreqs):
    #     freq_spikecounts = spikeMat[trialsEachCond[:,freqInd]==True]
    #     freq_avg = np.mean(freq_spikecounts) / (soundTimeRange[1] - soundTimeRange[0])
    #     freq_avgs.append(freq_avg)
    '''
    try:
        base_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, baseIndexLimitsEachTrial)
    except:
        behavBaseIndexLimitsEachTrial = baseIndexLimitsEachTrial[0:2,:-1]
        base_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, behavBaseIndexLimitsEachTrial)
    base_avg = np.mean(base_avgs)
    base_frs = np.divide(base_avg, baseTimeRange[1] - baseTimeRange[0])
    #print(base_avg)
    '''
    title = "Tuning Curve (Base FR: {} +/- {} spikes/s)".format(round(base_avg, 2), round(base_sem, 2))
    #title = "Tuning Curve (Base FR: {} spikes/s)".format(round(base_avg, 2))
    pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset,indexLimitsEachTrial,timeRange, trialsEachCond=trialsEachCond, labels=freqLabels)
    xlabel = 'Time from sound onset (s)'
    ylabel = 'Frequency (kHz)'
    plt.title(title, fontsize = 'medium')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
'''
def plot_bandwidth_noise_amps_comparison(cell, bandIndex=None): cellInfo = get_cell_info(cell) loader = dataloader.DataLoader(cell['subject']) if bandIndex is None: try: bandIndex = cellInfo['bandIndex'][0] except IndexError: print 'no bandwidth session' pass plt.clf() gs = gridspec.GridSpec(1, 2) eventData = loader.get_session_events(cellInfo['ephysDirs'][bandIndex]) spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][bandIndex], cellInfo['tetrode'], cluster=cellInfo['cluster']) eventOnsetTimes = loader.get_event_onset_times(eventData) spikeTimestamps = spikeData.timestamps timeRange = [-0.2, 1.5] bandBData = loader.get_session_behavior(cellInfo['behavDirs'][bandIndex]) bandEachTrial = bandBData['currentBand'] secondSort = bandBData['currentAmp'] secondSortLabels = ['{} dB'.format(amp) for amp in np.unique(secondSort)] plt.subplot(gs[0,0]) spikeArray, errorArray, baseSpikeRate = band_select(spikeTimestamps, eventOnsetTimes, secondSort, bandEachTrial, timeRange = [0.0, 1.0]) spikeArray = spikeArray[:,-1].reshape((7,1)) errorArray = errorArray[:,-1].reshape((7,1)) band_select_plot(spikeArray, errorArray, np.unique(bandEachTrial), linecolours=['0.25'], errorcolours=['0.6']) plt.title('Bandwidth tuning') ampsBData = loader.get_session_behavior(cellInfo['behavDirs'][cellInfo['ampsIndex'][-1]]) ampEachTrial = ampsBData['currentIntensity'] eventData = loader.get_session_events(cellInfo['ephysDirs'][cellInfo['ampsIndex'][-1]]) spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][cellInfo['ampsIndex'][-1]], cellInfo['tetrode'], cluster=cellInfo['cluster']) eventOnsetTimes = loader.get_event_onset_times(eventData) spikeTimestamps = spikeData.timestamps timeRange = [-0.2, 1.0] spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes( spikeTimestamps, eventOnsetTimes, timeRange) numRates = np.unique(ampEachTrial) trialsEachCond = behavioranalysis.find_trials_each_type(ampEachTrial, numRates) 
plt.subplot(gs[0,1]) spikeArray, errorArray, baseSpikeRate = band_select(spikeTimestamps, eventOnsetTimes, ampsBData['currentFreq'], ampEachTrial, timeRange = [0.0, 0.5]) band_select_plot(spikeArray, errorArray, np.unique(ampEachTrial), linecolours=['0.25'], errorcolours=['0.6']) plt.xlabel('white noise intensity (dB)') plt.ylabel('Average num spikes') plt.title('Intensity tuning') fig_path = '/home/jarauser/Pictures/cell reports' fig_name = '{0}_{1}_{2}um_TT{3}Cluster{4}_noiseAmps.png'.format(cellInfo['subject'], cellInfo['date'], cellInfo['depth'], cellInfo['tetrode'], cellInfo['cluster']) full_fig_path = os.path.join(fig_path, fig_name) fig = plt.gcf() fig.set_size_inches(9, 4) fig.savefig(full_fig_path, format = 'png', bbox_inches='tight')
def plot_clustered_raster(self, session, tetrode, clustersToPlot, timeRange = [-0.5, 1]):
    '''FIXME: UPDATE THIS TO USE THE NEW RASTER PLOTTING METHOD

    Plot one event-locked raster per cluster (stacked subplots) for a tetrode,
    loading spikes, cluster assignments, and events directly from open-ephys
    files on disk.
    '''
    ephysSession = self.get_session_name(session)
    animalName = self.animalName
    #FIXME: These should be object methods, not just specific to this function
    spike_filename=os.path.join(settings.EPHYS_PATH, animalName, ephysSession, 'Tetrode{0}.spikes'.format(tetrode))
    sp=loadopenephys.DataSpikes(spike_filename)
    # Cluster assignments produced by KlustaKwik for this tetrode.
    clustersDir = os.path.join(settings.EPHYS_PATH,'%s/%s_kk/'%(animalName,ephysSession))
    clustersFile = os.path.join(clustersDir,'Tetrode%d.clu.1'%tetrode)
    sp.set_clusters(clustersFile)
    event_filename=os.path.join(settings.EPHYS_PATH, animalName, ephysSession, 'all_channels.events')
    ev=loadopenephys.Events(event_filename)
    eventTimes=np.array(ev.timestamps)/SAMPLING_RATE  # samples -> seconds
    evID=np.array(ev.eventID)
    evChannel = np.array(ev.eventChannel)
    # Rising edges (eventID==1) on channel 0 are taken as stimulus onsets.
    eventOnsetTimes=eventTimes[(evID==1)&(evChannel==0)]
    # Keep only onsets at least 0.5 s after the previous one
    # (first pulse of each train; the leading 1.0 keeps the first event).
    evdiff = np.r_[1.0, np.diff(eventOnsetTimes)]
    eventOnsetTimes=eventOnsetTimes[evdiff>0.5]
    #Already divided by the sampling rate in spikesorting
    allSpkTimestamps = np.array(sp.timestamps)/SAMPLING_RATE
    #allSpkTimestamps = np.array(oneTT.dataTT.timestamps)
    spkClusters = sp.clusters
    figure()
    for ind, clusterNum in enumerate(clustersToPlot):
        # Spikes belonging to this cluster only.
        clusterspikes = allSpkTimestamps[spkClusters==clusterNum]
        spkTimeStamps = clusterspikes
        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)
        subplot(len(clustersToPlot), 1, ind+1)
        plot(spikeTimesFromEventOnset, trialIndexForEachSpike, '.', ms=1)
        title('Cluster {}'.format(clusterNum))
        axvline(x=0, ymin=0, ymax=1, color='r')
    xlabel('Time (sec)')
    #tight_layout()
    draw()
    show()
def tuning_curve(bdata, ephysData, gs):
    '''Plot the frequency tuning curve in subplot gs[3, 0], one line per
    stimulus intensity.'''
    plt.subplot(gs[3, 0])
    freqEachTrial = bdata['currentFreq']
    intEachTrial = bdata['currentIntensity']
    eventOnsetTimes = ephysData['events']['stimOn']
    spikeTimeStamps = ephysData['spikeTimes']
    timeRange = [0.0, 0.1]  # response window after sound onset (seconds)
    possiblefreqs = np.unique(freqEachTrial)
    freqLabels = [round(x/1000, 1) for x in possiblefreqs]  # Hz -> kHz labels
    possibleInts = np.unique(intEachTrial)
    intTrialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possiblefreqs, intEachTrial, possibleInts)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    for intInd, inten in enumerate(possibleInts):
        line = '-'
        # Dashed line for the first intensity when several are plotted.
        if intInd == 0 and len(possibleInts) > 1:
            line = '--'
        curveName = str(inten) + ' dB'
        trialsEachCond = intTrialsEachCond[:,:,intInd]
        # NOTE(review): bare except — presumably catches a shape mismatch when
        # there is one more event than behavior trials; confirm.
        try:
            freq_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, indexLimitsEachTrial)
        except:
            behavIndexLimitsEachTrial = indexLimitsEachTrial[0:2,:-1]
            freq_avgs = spikesanalysis.avg_num_spikes_each_condition(trialsEachCond, behavIndexLimitsEachTrial)
        xpoints = [x for x in range(0, len(possiblefreqs))]
        # Tick every other frequency to keep the axis readable.
        xpointticks = [x for x in range(1, len(possiblefreqs), 2)]
        freqticks = [freqLabels[x] for x in range(1, len(freqLabels), 2)]
        #plt.semilogx(possiblefreqs, freq_avgs, linestyle = line, color = 'black', label = curveName)
        #plt.plot(log(possiblefreqs), freq_avgs, linestyle = line, color = 'black', label = curveName, xlabels = possiblefreqs)
        plt.plot(xpoints, freq_avgs, linestyle = line, color = 'black', marker = 'o', label = curveName)
        plt.xticks(xpointticks, freqticks)
        plt.hold(True)
    xlabel = 'Frequency (kHz)'
    ylabel = 'Firing Rate (spikes/s)'
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title('Frequency Tuning Curve')
    plt.legend(fontsize = 'x-small', loc = 'upper left')
    plt.hold(False)
def laser_response(eventOnsetTimes, spikeTimeStamps, timeRange=[0.0, 0.1], baseRange=[-0.2, -0.1]):
    '''Quantify how laser-evoked firing differs from baseline firing.

    Returns:
        laserpVal: rank-sum p value comparing per-trial spike counts in the
            laser window vs the baseline window.
        laserstdFromBase: laser firing rate expressed in baseline standard
            deviations above (or below) the baseline mean rate.

    NOTE: a second, different `laser_response` is defined later in this module
    and shadows this one at import time.
    '''
    alignRange = [min(min(timeRange), min(baseRange)),
                  max(max(timeRange), max(baseRange))]
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimeStamps, eventOnsetTimes, alignRange)
    baseCounts = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    laserCounts = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseDuration = baseRange[1] - baseRange[0]
    laserDuration = timeRange[1] - timeRange[0]
    baselineSpikeRate = np.mean(baseCounts) / baseDuration
    baselineSpikeRateSTD = np.std(baseCounts) / baseDuration
    laserSpikeRate = np.mean(laserCounts) / laserDuration
    laserpVal = stats.ranksums(laserCounts, baseCounts)[1]
    laserstdFromBase = (laserSpikeRate - baselineSpikeRate) / baselineSpikeRateSTD
    return laserpVal, laserstdFromBase
def bandwidth_raster_inputs(eventOnsetTimes, spikeTimestamps, bandEachTrial, ampEachTrial, timeRange = [-0.2, 1.5]):
    '''Prepare the inputs needed by a bandwidth-session raster plot.

    Returns:
        spikeTimesFromEventOnset: event-locked spike times.
        indexLimitsEachTrial: per-trial [start, stop) indices into the above.
        trialsEachCond: (trials x bandwidths x amplitudes) boolean matrix.
        firstSortLabels: string label for each bandwidth value.
    '''
    possibleBands = np.unique(bandEachTrial)
    possibleAmps = np.unique(ampEachTrial)
    firstSortLabels = []
    for band in possibleBands:
        firstSortLabels.append('{}'.format(band))
    trialsEachCond = behavioranalysis.find_trials_each_combination(
        bandEachTrial, possibleBands, ampEachTrial, possibleAmps)
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimestamps, eventOnsetTimes, timeRange)
    return spikeTimesFromEventOnset, indexLimitsEachTrial, trialsEachCond, firstSortLabels
def laser_response(ephysData, baseRange = [-0.05,-0.04], responseRange = [0.0, 0.01]):
    '''Test whether laser onset changes firing, via a Wilcoxon rank-sum test.

    Per-trial spike counts in responseRange are compared against counts in
    baseRange (both relative to laser onset).

    Returns:
        testStatistic, pVal: output of stats.ranksums(response, baseline).
    '''
    onsetTimes = ephysData['events']['laserOn']
    spikeTimes = ephysData['spikeTimes']
    alignRange = [baseRange[0], responseRange[1]]
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimes, onsetTimes, alignRange)
    countsBase = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    countsResponse = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
    testStatistic, pVal = stats.ranksums(countsResponse, countsBase)
    return testStatistic, pVal
def plot_sorted_tuning_raster(self, session, tetrode, behavFileIdentifier, cluster = None, replace=0, timeRange = [-0.5, 1], ms = 1):
    '''Plot frequency-sorted tuning rasters, one subplot per sound intensity.

    Intensities are arranged with the highest intensity in the top subplot.
    NOTE(review): the `cluster` and `replace` parameters are not used in the
    body -- confirm intended behavior.

    Args:
        session: session identifier accepted by the data-access helpers.
        tetrode (int): tetrode to load spikes from.
        behavFileIdentifier: identifier for the matching behavior file.
        timeRange (list): [start, end] in seconds relative to stimulus onset.
        ms (int): marker size for raster dots.
    '''
    bdata = self.get_session_behav_data(session,behavFileIdentifier)
    freqEachTrial = bdata['currentFreq']
    possibleFreq = np.unique(freqEachTrial)
    intensityEachTrial = bdata['currentIntensity']
    possibleIntensity = np.unique(intensityEachTrial)
    trialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possibleFreq, intensityEachTrial, possibleIntensity)
    spikeData = self.get_session_spike_data_one_tetrode(session, tetrode)
    eventData = self.get_session_event_data(session)
    eventOnsetTimes = self.get_event_onset_times(eventData)
    spikeTimestamps = spikeData.timestamps
    spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(spikeTimestamps,eventOnsetTimes,timeRange)
    freqLabels = ['{0:.1f}'.format(freq/1000.0) for freq in possibleFreq]  # kHz labels
    plotTitle = self.get_session_plot_title(session)
    fig = plt.figure()
    # Iterate intensities from highest to lowest so the loudest ends up on top.
    for indIntensity, intensity in enumerate(possibleIntensity[::-1]):
        if indIntensity == 0:
            fig.add_subplot(len(possibleIntensity), 1, indIntensity+1)
            plt.title(plotTitle)
        else:
            # Share axes with the first subplot so zoom/pan stay in sync.
            fig.add_subplot(len(possibleIntensity), 1, indIntensity+1, sharex=fig.axes[0], sharey=fig.axes[0])
        trialsThisIntensity = trialsEachCond[:, :, len(possibleIntensity)-indIntensity-1]  #FIXME: There must be a better way to flip so high intensity is on top
        pRaster,hcond,zline = extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange, trialsEachCond = trialsThisIntensity, labels = freqLabels)
        plt.setp(pRaster,ms=ms)
        plt.ylabel('{:.0f} dB'.format(intensity))
        if indIntensity == len(possibleIntensity)-1:
            plt.xlabel("time (sec)")
    fig.show()
def band_select_plot(spikeTimeStamps, eventOnsetTimes, amplitudes, bandwidths, timeRange, fullRange = [0.0, 2.0], title=None):
    '''Plot mean spike count (+/- s.e.m.) vs bandwidth for two sound amplitudes.

    Also draws the baseline spike count (estimated from the period starting
    0.5 s after the response window) as a grey horizontal line.

    Args:
        spikeTimeStamps: array of spike timestamps.
        eventOnsetTimes: array of sound-onset timestamps.
        amplitudes: per-trial sound amplitude.
        bandwidths: per-trial sound bandwidth.
        timeRange: [start, end] response window relative to sound onset (s).
        fullRange: [start, end] full alignment window; its tail is the baseline.
        title: optional plot title.
    '''
    numBands = np.unique(bandwidths)
    numAmps = np.unique(amplitudes)
    spikeArray = np.zeros((len(numBands), len(numAmps)))
    errorArray = np.zeros_like(spikeArray)
    trialsEachCond = behavioranalysis.find_trials_each_combination(
        bandwidths, numBands, amplitudes, numAmps)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseTimeRange = [timeRange[1] + 0.5, fullRange[1]]
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    baselineSpikeRate = np.mean(baseSpikeCountMat) / (baseTimeRange[1] - baseTimeRange[0])
    plt.hold(True)
    for amp in range(len(numAmps)):
        trialsThisAmp = trialsEachCond[:, :, amp]
        for band in range(len(numBands)):
            trialsThisBand = trialsThisAmp[:, band]
            if spikeCountMat.shape[0] != len(trialsThisBand):
                # Drop the last count row when ephys recorded one extra event.
                spikeCountMat = spikeCountMat[:-1, :]
                print("FIXME: Using bad hack to make event onset times equal number of trials")
            thisBandCounts = spikeCountMat[trialsThisBand].flatten()
            spikeArray[band, amp] = np.mean(thisBandCounts)
            errorArray[band, amp] = stats.sem(thisBandCounts)
    xvals = range(len(numBands))  # FIX: was named `xrange`, shadowing the builtin
    plt.plot(xvals, baselineSpikeRate * (timeRange[1] - timeRange[0]) * np.ones(len(numBands)),
             color='0.75', linewidth=2)
    # First amplitude (green) with shaded s.e.m. band.
    plt.plot(xvals, spikeArray[:, 0].flatten(), '-o', color='#4e9a06', linewidth=3)
    plt.fill_between(xvals,
                     spikeArray[:, 0].flatten() - errorArray[:, 0].flatten(),
                     spikeArray[:, 0].flatten() + errorArray[:, 0].flatten(),
                     alpha=0.2, edgecolor='#8ae234', facecolor='#8ae234')
    # Second amplitude (purple) with shaded s.e.m. band.
    plt.plot(xvals, spikeArray[:, 1].flatten(), '-o', color='#5c3566', linewidth=3)
    plt.fill_between(xvals,
                     spikeArray[:, 1].flatten() - errorArray[:, 1].flatten(),
                     spikeArray[:, 1].flatten() + errorArray[:, 1].flatten(),
                     alpha=0.2, edgecolor='#ad7fa8', facecolor='#ad7fa8')
    ax = plt.gca()
    # NOTE(review): set_xticklabels without a matching set_xticks relies on the
    # default tick positions lining up with the bandwidth values -- confirm.
    ax.set_xticklabels(numBands)
    plt.xlabel('bandwidth (octaves)')
    plt.ylabel('Average num spikes')
    if title:
        plt.title(title)
def laser_tuning_raster(bdata, ephysData, gs):
    '''Plot frequency-sorted rasters for every (intensity, laser) combination.

    One gridspec subplot per combination: rows are intensities (starting at
    gridspec row 1), columns are laser off/on.

    Args:
        bdata: behavior data with 'currentFreq', 'laserOn', 'currentIntensity'.
        ephysData (dict): uses ['events']['stimOn'] and ['spikeTimes'].
        gs: matplotlib GridSpec.
    '''
    freqEachTrial = bdata['currentFreq']
    laserEachTrial = bdata['laserOn']
    intEachTrial = bdata['currentIntensity']
    eventOnsetTimes = ephysData['events']['stimOn']
    spikeTimeStamps = ephysData['spikeTimes']
    timeRange = [-0.3, 0.6]  # plotting window relative to stim onset (s)
    possiblefreqs = np.unique(freqEachTrial)
    freqLabels = [round(x/1000, 1) for x in possiblefreqs]  # kHz labels
    possiblelaser = np.unique(laserEachTrial)
    possibleInts = np.unique(intEachTrial)
    #trialsEachCond = behavioranalysis.find_trials_each_type(freqEachTrial, possiblefreqs)
    laserTrialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possiblefreqs, laserEachTrial, possiblelaser)
    intTrialsEachCond = behavioranalysis.find_trials_each_combination(freqEachTrial, possiblefreqs, intEachTrial, possibleInts)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    #plt.figure()
    for intInd, inten in enumerate(possibleInts):
        for indLaser in possiblelaser:
            #plt.subplot(2, 1, indLaser)
            plt.subplot(gs[intInd+1, indLaser])
            if indLaser == 0:
                title = "No Laser " + str(inten) + " dB"
            else:
                title = "Laser " + str(inten) + " dB"
            # Trials matching BOTH this laser condition and this intensity.
            trialsEachCond = laserTrialsEachCond[:,:,indLaser] & intTrialsEachCond[:,:,intInd]
            pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset,indexLimitsEachTrial,timeRange, trialsEachCond=trialsEachCond, labels=freqLabels)
            xlabel = 'time (s)'
            ylabel = 'Frequency (kHz)'
            plt.title(title)
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)

# NOTE(review): the triple-quote below appears to open a commented-out section
# that continues past this chunk -- confirm its pairing before editing.
'''
def stim_response(ephysData, baseRange = [-0.05,-0.04], responseRange = [0.0, 0.01], stimType = 'laser'):
    '''Test whether stimulus onset changes firing, via a Wilcoxon rank-sum test.

    Args:
        ephysData (dict): uses ['spikeTimes'] and the event channel selected
            by stimType ('laserOn' for 'laser'; sound onsets for 'sound').
        baseRange: [start, end] baseline window relative to stimulus onset (s).
        responseRange: [start, end] response window relative to onset (s).
        stimType: 'laser' or 'sound'.

    Returns:
        testStatistic, pVal from stats.ranksums(response, baseline).

    Raises:
        ValueError: if stimType is not 'laser' or 'sound'.
    '''
    fullTimeRange = [baseRange[0], responseRange[1]]
    if stimType == 'laser':
        eventOnsetTimes = ephysData['events']['laserOn']
    elif stimType == 'sound':
        eventOnsetTimes = get_sound_onset_times(ephysData, 'bandwidth')
    else:
        # FIX: an unknown stimType previously left eventOnsetTimes unbound,
        # producing a confusing NameError below instead of a clear error.
        raise ValueError("stimType must be 'laser' or 'sound', got {!r}".format(stimType))
    spikeTimestamps = ephysData['spikeTimes']
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = \
        spikesanalysis.eventlocked_spiketimes(spikeTimestamps, eventOnsetTimes, fullTimeRange)
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    laserSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
    [testStatistic, pVal] = stats.ranksums(laserSpikeCountMat, baseSpikeCountMat)
    return testStatistic, pVal
def AM_vector_strength(spikeTimeStamps, eventOnsetTimes, behavData, timeRange):
    '''Compute vector strength of spike phase-locking for each AM rate.

    Spikes within a fixed [0, 0.5] s window after sound onset are tested for
    locking to the period of each modulation frequency.

    NOTE: the timeRange argument is overwritten below (kept for backward
    compatibility). A newer AM_vector_strength defined later in this module
    shadows this version at import time.

    Returns:
        vs_array: vector strength for each unique modulation frequency.
    '''
    currentFreq = behavData['currentFreq']
    possibleFreq = np.unique(currentFreq)
    vs_array = np.array([])  # FIX: was bare `array` (undefined without star import)
    timeRange = [0, 0.5]
    # FIX: body referenced `spikeTimestamps`, but the parameter is named
    # `spikeTimeStamps` -- this raised NameError on every call.
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, timeRange)
    for freq in possibleFreq:
        select = np.flatnonzero(currentFreq == freq)  # FIX: np-qualified (was bare flatnonzero)
        selectspikes = spikeTimesFromEventOnset[np.in1d(trialIndexForEachSpike, select)]
        selectinds = trialIndexForEachSpike[np.in1d(trialIndexForEachSpike, select)]
        # Kept from the original although unused (matches the newer version's body).
        squeezedinds = np.array([list(np.unique(selectinds)).index(x) for x in selectinds])
        strength, phase = vectorstrength(selectspikes, 1.0 / freq)
        vs_array = np.concatenate((vs_array, np.array([strength])))
    return vs_array
def sound_response_any_stimulus(eventOnsetTimes, spikeTimeStamps, trialsEachCond, timeRange=[0.0,1.0], baseRange=[-1.1,-0.1]):
    '''Check whether any stimulus condition changes the firing rate.

    For each condition, per-trial spike counts in the response window are
    compared to baseline counts with a Wilcoxon rank-sum test.

    Inputs:
        eventOnsetTimes: array of timestamps indicating sound onsets
        spikeTimeStamps: array of timestamps indicating when spikes occurred
        trialsEachCond: (N trials x N conditions) boolean array indicating the
            condition of each trial.
        timeRange: [start, end] response window relative to sound onset (s)
        baseRange: [start, end] baseline window relative to sound onset (s)

    Outputs:
        maxzscore: largest rank-sum statistic across conditions
        minpVal: smallest p value across conditions, NOT CORRECTED FOR
            MULTIPLE COMPARISONS
    '''
    alignRange = [min(min(timeRange), min(baseRange)),
                  max(max(timeRange), max(baseRange))]
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimeStamps, eventOnsetTimes, alignRange)
    stimCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    minpVal = np.inf
    maxzscore = -np.inf
    for indCond in range(trialsEachCond.shape[1]):
        trialsThisCond = trialsEachCond[:, indCond]
        # Hack inherited from data collection: drop the extra count row when
        # ephys recorded one more event than behavior has trials.
        if stimCountMat.shape[0] == len(trialsThisCond) + 1:
            stimCountMat = stimCountMat[:-1, :]
        if not any(trialsThisCond):
            continue
        condStimCounts = stimCountMat[trialsThisCond].flatten()
        condBaseCounts = baseCountMat[trialsThisCond].flatten()
        zscoreThisCond, pValThisCond = stats.ranksums(condStimCounts, condBaseCounts)
        if pValThisCond < minpVal:
            minpVal = pValThisCond
        if zscoreThisCond > maxzscore:
            maxzscore = zscoreThisCond
    return maxzscore, minpVal
def plot_psth(spikeTimestamps, eventOnsetTimes, sortArray=None, timeRange=None, binsize = 50, lw=2, plotLegend=1, *args, **kwargs):
    '''Plot a PSTH from raw spike timestamps and event onset times, optionally
    sorted by a per-trial value.

    This function does not replicate functionality. It allows you to pass spike
    timestamps and event onset times, which are simple to get.

    Args:
        spikeTimestamps: array of spike timestamps.
        eventOnsetTimes: array of event onset timestamps.
        sortArray: optional per-trial values; if given, one PSTH line is drawn
            per unique value (defaults to no sorting).
        timeRange: [start, end] in seconds relative to event onset
            (defaults to [-0.5, 1]).
        binsize (float): size of PSTH bins, in ms.
        lw: line width for the PSTH lines.
        plotLegend: if truthy (and sortArray given), label lines and draw a legend.
    '''
    # FIX: mutable default arguments ([] and a list literal) replaced with
    # None sentinels; behavior for callers is unchanged.
    if sortArray is None:
        sortArray = []
    if timeRange is None:
        timeRange = [-0.5, 1]
    binsize = binsize / 1000.0  # ms -> s
    # If a sort array is supplied, find the trials for each value of the array.
    if len(sortArray) > 0:
        trialsEachCond = behavioranalysis.find_trials_each_type(
            sortArray, np.unique(sortArray))
    else:
        trialsEachCond = []
    # Align spike timestamps to the event onset times (window padded by one
    # bin on the left so the first plotted bin is complete).
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, [timeRange[0] - binsize, timeRange[1]])
    binEdges = np.around(np.arange(timeRange[0] - binsize, timeRange[1] + 2 * binsize, binsize), decimals=2)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, binEdges)
    pPSTH = extraplots.plot_psth(spikeCountMat / binsize, 1, binEdges[:-1], trialsEachCond, *args, **kwargs)
    plt.setp(pPSTH, lw=lw)
    plt.hold(True)  # NOTE: removed in matplotlib >= 3.0; kept for this codebase's version
    zline = plt.axvline(0, color='0.75', zorder=-10)
    plt.xlim(timeRange)
    if plotLegend:
        if len(sortArray) > 0:
            sortElems = np.unique(sortArray)
            for ind, pln in enumerate(pPSTH):
                pln.set_label(sortElems[ind])
            plt.legend(ncol=3, loc='best')
def onset_sustained_spike_proportion(spikeTimeStamps, eventOnsetTimes, onsetTimeRange=[0.0,0.05], sustainedTimeRange=[0.2,1.0]):
    '''Calculates proportion of spikes that occur at sound onset. Averaged across all bandwidths.

    Inputs:
        spikeTimeStamps: array of timestamps indicating when spikes occurred
        eventOnsetTimes: array of timestamps indicating sound onsets
        onsetTimeRange: [start, end] (relative to sound onset) used for the
            onset response
        sustainedTimeRange: [start, end] (relative to sound onset) used for
            the sustained response

    Outputs:
        propOnset: float, proportion of the cell's total spikes occurring in
            the onset window
        propSustained: float, proportion of the cell's total spikes occurring
            in the sustained window
    '''
    fullTimeRange = [onsetTimeRange[0], sustainedTimeRange[1]]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullTimeRange)
    onsetSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, onsetTimeRange)
    sustainedSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, sustainedTimeRange)
    fullSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, fullTimeRange)
    # FIX: use np.sum so the proportions are scalar floats as documented; the
    # builtin sum() over a 2-D count matrix returns an array of column sums.
    totalSpikes = np.sum(fullSpikeCountMat)
    propOnset = 1.0 * np.sum(onsetSpikeCountMat) / totalSpikes
    propSustained = 1.0 * np.sum(sustainedSpikeCountMat) / totalSpikes
    return propOnset, propSustained
def AM_vector_strength(spikeTimestamps, eventOnsetTimes, behavData, timeRange, ignoreBefore=0.03):
    '''Compute vector strength and Rayleigh statistics for each AM rate.

    For every modulation frequency, spikes inside a fixed [0, 0.5] s window
    (excluding the first `ignoreBefore` seconds, i.e. roughly the first
    stimulus cycle) are tested for phase locking to the stimulus period.

    Returns:
        vs_array: vector strength per frequency.
        pval_array: Rayleigh-test p value per frequency.
        ral_array: Rayleigh statistic (2 * n * VS^2) per frequency.
    '''
    freqEachTrial = behavData['currentFreq']
    possibleFreq = np.unique(freqEachTrial)
    vs_array = np.array([])
    ral_array = np.array([])
    pval_array = np.array([])
    timeRange = [0, 0.5]  # analysis window is fixed regardless of the argument
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimestamps, eventOnsetTimes, timeRange)
    for thisFreq in possibleFreq:
        trialsThisFreq = np.flatnonzero(freqEachTrial == thisFreq)
        spikeMask = np.in1d(trialIndexForEachSpike, trialsThisFreq)
        spikesThisFreq = spikeTimesFromEventOnset[spikeMask]
        indsThisFreq = trialIndexForEachSpike[spikeMask]
        # Kept from the original although unused downstream.
        squeezedinds = np.array([list(np.unique(indsThisFreq)).index(x) for x in indsThisFreq])
        lateSpikes = spikesThisFreq[spikesThisFreq > ignoreBefore]
        lateInds = indsThisFreq[spikesThisFreq > ignoreBefore]
        strength, phase = vectorstrength(lateSpikes, 1.0 / thisFreq)
        vs_array = np.append(vs_array, strength)
        # Rayleigh test for non-uniformity of spike phases.
        radsPerSec = thisFreq * 2 * np.pi
        spikeRads = (lateSpikes * radsPerSec) % (2 * np.pi)
        ral_test = circstats.rayleigh_test(spikeRads)
        pval_array = np.append(pval_array, ral_test['pvalue'])
        ral_array = np.append(ral_array, 2 * len(lateSpikes) * (strength ** 2))
    return vs_array, pval_array, ral_array
def freq_tuning_fit(eventOnsetTimes, spikeTimestamps, bdata, timeRange=[-0.2, 0.2], intensityInds=None):
    '''Estimate best frequency by fitting a Gaussian to the tuning curve at
    each sound intensity.

    Args:
        eventOnsetTimes: array of sound-onset timestamps.
        spikeTimestamps: array of spike timestamps.
        bdata: behavior data with 'currentFreq' and 'currentIntensity'.
        timeRange: [start, end] alignment window relative to sound onset (s).
        intensityInds: indices of intensities to fit (default: all).

    Returns:
        gaussFits: fit parameters (or None) for each requested intensity.
        bestFreqs: best frequency (2 ** fitted center) or None per intensity.
        Rsquareds: goodness of fit per intensity.
        window: the response window chosen by best_window_freq_tuning.
    '''
    freqEachTrial = bdata['currentFreq']
    intensityEachTrial = bdata['currentIntensity']
    possibleIntensities = np.unique(intensityEachTrial)
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimestamps, eventOnsetTimes, timeRange)
    trialsEachInt = behavioranalysis.find_trials_each_type(intensityEachTrial, possibleIntensities)
    if intensityInds is None:
        intensityInds = range(len(possibleIntensities))
    spikeCountMat, window = best_window_freq_tuning(
        spikeTimesFromEventOnset, indexLimitsEachTrial, freqEachTrial)
    windowDuration = window[1] - window[0]
    gaussFits = []
    bestFreqs = []
    Rsquareds = []
    for intensityInd in intensityInds:
        trialsThisIntensity = trialsEachInt[:, intensityInd]
        ratesThisIntensity = spikeCountMat[trialsThisIntensity].flatten() / windowDuration
        freqsThisIntensity = freqEachTrial[trialsThisIntensity]
        # Fit in log2(frequency) space, where tuning is roughly Gaussian.
        gaussFit, Rsquared = response_curve_fit(np.log2(freqsThisIntensity), ratesThisIntensity)
        if gaussFit is not None:
            bestFreq = 2 ** gaussFit[0]
        else:
            bestFreq = None
        gaussFits.append(gaussFit)
        bestFreqs.append(bestFreq)
        Rsquareds.append(Rsquared)
    return gaussFits, bestFreqs, Rsquareds, window
def inactivated_cells_baselines(spikeTimeStamps, eventOnsetTimes, laserEachTrial, baselineRange=[-0.05, 0.0]):
    '''Baseline firing rates with and without laser, for inactivation sessions.

    Inputs:
        spikeTimeStamps: array of timestamps indicating when spikes occurred
        eventOnsetTimes: array of timestamps indicating sound onsets
        laserEachTrial: array of length N trials indicating the laser
            condition of each trial
        baselineRange: [start, end] (relative to sound onset) used as baseline

    Outputs:
        baselineSpikeRates: array of length (N laser trial types) with the
            baseline firing rate for each laser condition
        baselineSEMs: s.e.m. for each value in baselineSpikeRates
    '''
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spikeTimeStamps, eventOnsetTimes, baselineRange)
    possibleLaser = np.unique(laserEachTrial)
    trialsEachLaser = behavioranalysis.find_trials_each_type(laserEachTrial, possibleLaser)
    countMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baselineRange)
    duration = baselineRange[1] - baselineRange[0]
    baselineSpikeRates = np.zeros(len(possibleLaser))
    baselineSEMs = np.zeros_like(baselineSpikeRates)
    for indLaser in range(len(possibleLaser)):
        countsThisLaser = countMat[trialsEachLaser[:, indLaser]].flatten()
        baselineSpikeRates[indLaser] = np.mean(countsThisLaser) / duration
        baselineSEMs[indLaser] = stats.sem(countsThisLaser) / duration
    return baselineSpikeRates, baselineSEMs
def plot_switching_raster(oneCell, freqToPlot='middle', alignment='sound', timeRange=[-0.5, 1], byBlock=True):
    '''
    Plots raster for 2afc switching task with different alignment at time 0
    and different choices for what frequencies to include in the plot.

    Arguments:
        oneCell: a CellInfo object.
        freqToPlot: 'middle' plots only the middle frequency; 'all' plots all
            three frequencies.
        alignment: 'sound', 'center-out', or 'side-in'; the event placed at
            time zero.
        timeRange: [start, end] of the plotted window, in seconds.
        byBlock: whether to split the plot into behavior blocks.

    Raises:
        ValueError: if alignment is not one of the recognized values.
    '''
    # -- calls load_remote_2afc_data(oneCell) to get the data, then plot raster -- #
    (eventData, spikeData, oldBdata) = load_remote_2afc_data(oneCell)
    spikeTimestamps = spikeData.timestamps
    # -- Get trialsEachCond and colorEachCond for plotting -- #
    (bdata, trialsEachCond, colorEachCond) = get_trials_each_cond_switching(
        oneCell=oneCell, freqToPlot=freqToPlot, byBlock=byBlock)  #bdata generated here removed missing trials
    # -- Calculate onset times based on the alignment parameter -- #
    eventOnsetTimes = np.array(eventData.timestamps)
    # All alignments start from the sound-onset events; hoisted out of the
    # branches (it was computed identically in each one).
    soundOnsetEvents = (eventData.eventID == 1) & (eventData.eventChannel == soundTriggerChannel)
    alignedOnsetTimes = eventOnsetTimes[soundOnsetEvents]
    if alignment == 'sound':
        pass
    elif alignment == 'center-out':
        alignedOnsetTimes += bdata['timeCenterOut'] - bdata['timeTarget']
    elif alignment == 'side-in':
        alignedOnsetTimes += bdata['timeSideIn'] - bdata['timeTarget']
    else:
        # FIX: an unrecognized alignment previously left the onset-times
        # variable unbound, crashing later with a NameError.
        raise ValueError("alignment must be 'sound', 'center-out', or 'side-in'")
    # -- Calculate matrix of spikes per trial for plotting -- #
    (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = \
        spikesanalysis.eventlocked_spiketimes(spikeTimestamps, alignedOnsetTimes, timeRange)
    # -- Plot raster -- #
    extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange,
                           trialsEachCond=trialsEachCond, colorEachCond=colorEachCond,
                           fillWidth=None, labels=None)
    plt.xlabel('Time from {} (s)'.format(alignment), fontsize=18)
    plt.ylabel('Trials', fontsize=18)
    plt.xlim(timeRange[0] + 0.1, timeRange[1])
    plt.title('{0}_{1}_T{2}c{3}_{4}_{5} frequency'.format(
        oneCell.animalName, oneCell.behavSession, oneCell.tetrode,
        oneCell.cluster, alignment, freqToPlot))
    extraplots.set_ticks_fontsize(plt.gca(), fontSize=16)
def plot_tuning_PSTH_one_intensity(oneCell, intensity=50.0, timeRange=[-0.5, 1], binWidth=0.010, halfFreqs=True):
    '''Plot a PSTH (one colored line per frequency) for tuning trials at one
    sound intensity.

    Calls load_remote_tuning_data(oneCell) to get the data, then plots.

    Args:
        oneCell: cell object accepted by load_remote_tuning_data.
        intensity: intensity (dB) of trials to keep; 50 dB is the intensity
            used in the 2afc task.
        timeRange: [start, end] in seconds relative to sound onset.
        binWidth: PSTH bin width in seconds.
        halfFreqs: if True, keep only a subset of the presented frequencies.
    '''
    #calls load_remote_tuning_data(oneCell) to get the data, then plot raster
    eventOnsetTimes, spikeTimestamps, bdata = load_remote_tuning_data(
        oneCell, BEHAVDIR_MOUNTED, EPHYSDIR_MOUNTED)
    freqEachTrial = bdata['currentFreq']
    intensityEachTrial = bdata['currentIntensity']
    possibleIntensity = np.unique(intensityEachTrial)
    # Only filter by intensity when the session used more than one intensity.
    if len(possibleIntensity) != 1:
        intensity = intensity  #50dB is the stimulus intensity used in 2afc task
        ###Just select the trials with a given intensity###
        # NOTE(review): boolean mask wrapped in a list; indexing works, but a
        # bare array would be clearer -- confirm before changing.
        trialsThisIntensity = [intensityEachTrial == intensity]
        freqEachTrial = freqEachTrial[trialsThisIntensity]
        intensityEachTrial = intensityEachTrial[trialsThisIntensity]
        eventOnsetTimes = eventOnsetTimes[trialsThisIntensity]
    possibleFreq = np.unique(freqEachTrial)
    if halfFreqs:
        possibleFreq = possibleFreq[1::3]  #slice to get every other frequence presented
    numFreqs = len(possibleFreq)
    #print len(intensityEachTrial),len(eventOnsetTimes),len(spikeTimestamps)
    trialsEachFreq = behavioranalysis.find_trials_each_type(
        freqEachTrial, possibleFreq)
    #pdb.set_trace()  #for debug
    #colormap = plt.cm.gist_ncar
    #colorEachFreq = [colormap(i) for i in np.linspace(0, 0.9, numFreqs)]
    #from jaratoolbox.colorpalette import TangoPalette as Tango
    #colorEachFreq = [Tango['Aluminium3'], Tango['Orange2'],Tango['Chameleon1'],Tango['Plum1'],Tango['Chocolate1'],Tango['SkyBlue2'],Tango['ScarletRed1'],'k']
    #Create colorEachCond from color map
    from matplotlib import cm
    cm_subsection = np.linspace(0.0, 1.0, numFreqs)
    colorEachFreq = [cm.winter(x) for x in cm_subsection]
    #Create legend: one patch per frequency, colored like its PSTH line.
    import matplotlib.patches as mpatches
    handles = []
    for (freq, color) in zip(possibleFreq, colorEachFreq):
        patch = mpatches.Patch(color=color, label=str(freq) + ' Hz')
        handles.append(patch)
    (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = \
        spikesanalysis.eventlocked_spiketimes(spikeTimestamps, eventOnsetTimes, timeRange)
    timeVec = np.arange(timeRange[0], timeRange[-1], binWidth)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, timeVec)
    smoothWinSize = 3  # smoothing window (in bins) passed to plot_psth
    #plt.figure()
    extraplots.plot_psth(spikeCountMat / binWidth, smoothWinSize, timeVec,
                         trialsEachCond=trialsEachFreq, colorEachCond=colorEachFreq,
                         linestyle=None, linewidth=2, downsamplefactor=1)
    extraplots.set_ticks_fontsize(ax=plt.gca(), fontSize=14)
    plt.xlim(timeRange[0] + 0.1, timeRange[1])
    plt.legend(handles=handles, loc=2)
    plt.xlabel('Time from sound onset (s)', fontsize=18)
    plt.ylabel('Firing rate (spk/sec)', fontsize=18)
def bandwidth_suppression_from_peak(spikeTimeStamps, eventOnsetTimes, firstSort, secondSort, timeRange=[0.2, 1.0], baseRange=[-1.0, -0.2], subtractBaseline=False, zeroBWBaseline=True):
    '''Calculates suppression stats from raw data (no model).

    Inputs:
        spikeTimeStamps: array of timestamps indicating when spikes occurred
        eventOnsetTimes: array of timestamps indicating sound onsets
        firstSort: array of length N trials indicating value of first
            parameter for each trial (ex. bandwidths)
        secondSort: array of length N trials indicating value of second
            parameter for each trial. Second parameter should be the
            manipulation being done (ex. laser), as it is used to calculate
            separate indices and baselines.
        timeRange: time period over which to calculate cell responses
        baseRange: time period used as baseline
        subtractBaseline: boolean, whether baseline firing rate should be
            subtracted from responses when calculating stats
        zeroBWBaseline: if True, the zero-bandwidth response is replaced by
            the baseline rate for this condition

    Output:
        suppressionIndex: suppression index for cell for each condition
            (e.g. for each amplitude, for each laser trial type)
        suppressionpVal: p value for each value in suppressionIndex
        facilitationIndex: facilitation index for cell for each condition
        facilitationpVal: p value for each value in facilitationIndex
        peakInds: index of responseArray containing largest firing rate
            (to calculate preferred bandwidth)
        spikeArray: array of size N condition 1 x N condition 2, average spike
            rates for each condition used to calculate suppression stats
    '''
    fullTimeRange = [baseRange[0], timeRange[1]]
    trialsEachCond = behavioranalysis.find_trials_each_combination(
        firstSort, np.unique(firstSort), secondSort, np.unique(secondSort))
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimeStamps, eventOnsetTimes, fullTimeRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    trialsEachSecondSort = behavioranalysis.find_trials_each_type(
        secondSort, np.unique(secondSort))
    spikeArray, errorArray = calculate_tuning_curve_inputs(
        spikeCountMat, firstSort, secondSort)
    spikeArray = spikeArray / (timeRange[1] - timeRange[0])  #convert spike counts to firing rate
    suppressionIndex = np.zeros(spikeArray.shape[1])
    facilitationIndex = np.zeros_like(suppressionIndex)
    peakInds = np.zeros_like(suppressionIndex)
    suppressionpVal = np.zeros_like(suppressionIndex)
    facilitationpVal = np.zeros_like(suppressionIndex)
    # One pass per second-sort condition (e.g. per laser state).
    for ind in range(len(suppressionIndex)):
        trialsThisSecondVal = trialsEachSecondSort[:, ind]
        # Column of average rates for this condition (rows = first-sort values,
        # assumed ordered smallest to largest bandwidth; last = widest/white noise).
        thisCondResponse = spikeArray[:, ind]
        thisCondBaseline = np.mean(
            baseSpikeCountMat[trialsThisSecondVal].flatten()) / (baseRange[1] - baseRange[0])
        if zeroBWBaseline:
            # Replace the zero-bandwidth response with the baseline rate.
            # NOTE: this mutates spikeArray in place (written back below).
            thisCondResponse[0] = thisCondBaseline
        if not subtractBaseline:
            thisCondBaseline = 0
        spikeArray[:, ind] = thisCondResponse
        # Suppression: drop from the peak to the widest-bandwidth response.
        suppressionIndex[ind] = (max(thisCondResponse) - thisCondResponse[-1]
                                 ) / (max(thisCondResponse) - thisCondBaseline)
        # Facilitation: rise from the zero-bandwidth response to the peak.
        facilitationIndex[ind] = (max(thisCondResponse) - thisCondResponse[0]) / (
            max(thisCondResponse) - thisCondBaseline)
        peakInd = np.argmax(thisCondResponse)
        peakInds[ind] = peakInd
        fullTrialsThisSecondVal = trialsEachCond[:, :, ind]
        # Pick the per-trial counts used for the rank-sum significance tests.
        if zeroBWBaseline:
            if peakInd == 0:
                # Peak IS the (baseline-substituted) zero-bandwidth point.
                peakSpikeCounts = baseSpikeCountMat[
                    trialsThisSecondVal].flatten()
            else:
                peakSpikeCounts = spikeCountMat[
                    fullTrialsThisSecondVal[:, peakInd]].flatten()
            zeroBWSpikeCounts = baseSpikeCountMat[trialsThisSecondVal].flatten(
            )
        else:
            peakSpikeCounts = spikeCountMat[
                fullTrialsThisSecondVal[:, peakInd]].flatten()
            zeroBWSpikeCounts = spikeCountMat[
                fullTrialsThisSecondVal[:, 0]].flatten()
        whiteNoiseSpikeCounts = spikeCountMat[
            fullTrialsThisSecondVal[:, -1]].flatten()
        suppressionpVal[ind] = stats.ranksums(peakSpikeCounts,
                                              whiteNoiseSpikeCounts)[1]
        facilitationpVal[ind] = stats.ranksums(peakSpikeCounts,
                                               zeroBWSpikeCounts)[1]
    return suppressionIndex, suppressionpVal, facilitationIndex, facilitationpVal, peakInds, spikeArray
outputDir = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME, 'reports') # -- Load the database of cells -- celldb = celldatabase.load_hdf(dbFilename) number_of_clusters = len(celldb) - 1 for indRow, dbRow in celldb[266:267].iterrows(): oneCell = ephyscore.Cell(dbRow) timeRange = [-0.3, 0.8] # In seconds ephysDataTuning, bdataTuning = oneCell.load('tc') spikeTimes = ephysDataTuning['spikeTimes'] eventOnsetTimes = ephysDataTuning['events']['stimOn'] (spikeTimesFromEventOnsetTuning, trialIndexForEachSpikeTuning, indexLimitsEachTrialTuning) = spikesanalysis.eventlocked_spiketimes( spikeTimes, eventOnsetTimes, timeRange) frequenciesEachTrialTuning = bdataTuning['currentFreq'] numberOfTrialsTuning = len(frequenciesEachTrialTuning) arrayOfFrequenciesTuning = np.unique(bdataTuning['currentFreq']) arrayOfFrequenciesTuningkHz = arrayOfFrequenciesTuning / 1000 labelsForYaxis = [ '%.1f' % f for f in arrayOfFrequenciesTuningkHz ] # Generating a label of the behavior data for the y-axis trialsEachCondTuning = behavioranalysis.find_trials_each_type( frequenciesEachTrialTuning, arrayOfFrequenciesTuning) extraplots.raster_plot(spikeTimesFromEventOnsetTuning, indexLimitsEachTrialTuning, timeRange,
plt.show()

# -- Script fragment: gather per-trial spike counts across frequencies for
#    each intensity, then scatter-plot response counts vs frequency index. -- #
# NOTE(review): this fragment relies on names defined earlier in the full
# script (possibleIntensity, possibleFreq, freqEachTrial, intensityEachTrial,
# eventOnsetTimes, clusterSpikeTimes, alignmentRange, baseRange,
# responseRange) -- confirm against the complete file.
aboveBaseline = []
popts = []
for indinten, inten in enumerate(possibleIntensity):
    spks = np.array([])   # response spike counts, pooled over frequencies
    inds = np.array([])   # frequency index for each entry in spks
    base = np.array([])   # baseline spike counts, pooled over frequencies
    for indfreq, freq in enumerate(possibleFreq):
        selectinds = np.flatnonzero((freqEachTrial==freq)&(intensityEachTrial==inten))
        selectedOnsetTimes = eventOnsetTimes[selectinds]
        (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(clusterSpikeTimes, selectedOnsetTimes, alignmentRange)
        nspkBase = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
        nspkResp = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
        base = np.concatenate([base, nspkBase.ravel()])
        spks = np.concatenate([spks, nspkResp.ravel()])
        inds = np.concatenate([inds, np.ones(len(nspkResp.ravel()))*indfreq])
    # NOTE(review): plotting placed after the inner loop (per intensity);
    # the collapsed source makes the nesting ambiguous -- confirm.
    plt.subplot(212)
    plt.plot(inds, spks, 'k.')
def evaluate_movement_selectivity_celldb(cellDb, movementTimeRange, removeSideIn=False): ''' Analyse 2afc data: calculate movement selectivity index and significance; movementTimeRange should be a list (in seconds, time from center-out). ''' print 'Calculating movement selectivity index for 2afc session in time range: ' + str( movementTimeRange) movementModI = pd.Series(np.zeros(len(cellDb)), index=cellDb.index) #default value 0 movementModS = pd.Series(np.ones(len(cellDb)), index=cellDb.index) #default value 1 for indCell, cell in cellDb.iterrows(): cellObj = ephyscore.Cell(cell) sessiontype = 'behavior' #2afc behavior #ephysData, bata = cellObj.load(sessiontype) sessionInd = cellObj.get_session_inds(sessiontype)[0] bdata = cellObj.load_behavior_by_index(sessionInd) possibleFreq = np.unique(bdata['targetFrequency']) numFreqs = len(possibleFreq) try: ephysData = cellObj.load_ephys_by_index(sessionInd) except ValueError: continue eventsDict = ephysData['events'] spikeTimestamps = ephysData['spikeTimes'] if spikeTimestamps.ndim == 0: #There is only one spike, ! spikesanalysis.eventlocked_spiketimes cannot handle only one spike ! 
continue soundOnsetTimes = eventsDict['{}On'.format(soundChannelType)] soundOnsetTimeBehav = bdata['timeTarget'] # -- Check to see if ephys and behav recordings have same number of trials, remove missing trials from behav file -- # # Find missing trials missingTrials = behavioranalysis.find_missing_trials( soundOnsetTimes, soundOnsetTimeBehav) # Remove missing trials bdata.remove_trials(missingTrials) if len(soundOnsetTimes) != len( bdata['timeTarget'] ): #some error not handled by remove missing trials continue # -- calculate movement selectivity -- # rightward = bdata['choice'] == bdata.labels['choice']['right'] leftward = bdata['choice'] == bdata.labels['choice']['left'] diffTimes = bdata['timeCenterOut'] - bdata['timeTarget'] movementOnsetTimes = soundOnsetTimes + diffTimes responseTimesEachTrial = bdata['timeSideIn'] - bdata['timeCenterOut'] responseTimesEachTrial[np.isnan(responseTimesEachTrial)] = 0 sideInTrials = (responseTimesEachTrial <= movementTimeRange[-1]) if removeSideIn: trialsToUseRight = rightward & (~sideInTrials) trialsToUseLeft = leftward & (~sideInTrials) else: trialsToUseRight = rightward trialsToUseLeft = leftward trialsEachCond = [trialsToUseLeft, trialsToUseRight] (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \ spikesanalysis.eventlocked_spiketimes(spikeTimestamps,movementOnsetTimes,timeRange) spikeCountMat = spikesanalysis.spiketimes_to_spikecounts( spikeTimesFromEventOnset, indexLimitsEachTrial, movementTimeRange) spikeCountEachTrial = spikeCountMat.flatten() spikeAvgLeftward = np.mean(spikeCountEachTrial[leftward]) spikeAvgRightward = np.mean(spikeCountEachTrial[rightward]) if ((spikeAvgRightward + spikeAvgLeftward) == 0): movementModI[indCell] = 0.0 movementModS[indCell] = 1.0 else: movementModI[indCell] = ((spikeAvgRightward - spikeAvgLeftward) / (spikeAvgRightward + spikeAvgLeftward)) movementModS[indCell] = spikesanalysis.evaluate_modulation( spikeTimesFromEventOnset, indexLimitsEachTrial, 
movementTimeRange, trialsEachCond)[1] return movementModI, movementModS
missingTrials = behavioranalysis.find_missing_trials( soundOnsetTimeEphys, soundOnsetTimeBehav) # Remove missing trials bdata.remove_trials(missingTrials) # -- Calculate and store intermediate data for tuning raster -- # freqEachTrial = bdata['targetFrequency'] valid = bdata['valid'].astype('bool') possibleFreq = np.unique(freqEachTrial) trialsEachFreq = behavioranalysis.find_trials_each_type( freqEachTrial, possibleFreq) trialsEachFreq = trialsEachFreq & valid[:, np. newaxis] #Use only valid trials where sound is played in full (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \ spikesanalysis.eventlocked_spiketimes(spikeTimestamps,soundOnsetTimeEphys,timeRange) ### Save raster data ### #outputDir = os.path.join(settings.FIGURESDATA, figparams.STUDY_NAME) outputFile = 'example_freq_tuning_2afc_raster_{}_{}_T{}_c{}.npz'.format( oneCell.animalName, oneCell.behavSession, oneCell.tetrode, oneCell.cluster) outputFullPath = os.path.join(dataDir, outputFile) np.savez(outputFullPath, spikeTimestamps=spikeTimestamps, eventOnsetTimes=eventOnsetTimes, possibleFreq=possibleFreq, spikeTimesFromEventOnset=spikeTimesFromEventOnset, movementTimesFromEventOnset=diffTimes, indexLimitsEachTrial=indexLimitsEachTrial, timeRange=timeRange,
def plot_rew_change_per_cell(oneCell, trialLimit=None, alignment='sound'):
    '''
    Plots raster and PSTH for one cell during reward_change_freq_dis task, split by block;
    alignment parameter should be set to either 'sound', 'center-out', or 'side-in'.

    Args:
        oneCell: cell-info object used to load behavior and ephys data.
        trialLimit: optional [start, stop] pair restricting which trials are used;
            None (or empty) means use all trials. (Fix: was a mutable default `[]`,
            a shared-mutable-default pitfall; the None sentinel keeps behavior identical.)
        alignment: event to align spikes to: 'sound', 'center-out', or 'side-in'.

    Side effects: opens a new matplotlib figure with raster, PSTH and cluster-quality
    panels. Relies on module-level globals: soundTriggerChannel, timeRange, binWidth.
    '''
    if trialLimit is None:
        trialLimit = []
    bdata = load_behav_per_cell(oneCell)
    (spikeTimestamps, waveforms, eventOnsetTimes, eventData) = load_ephys_per_cell(oneCell)
    # -- Check to see if ephys has skipped trials, if so remove trials from behav data
    soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
    soundOnsetTimeEphys = eventOnsetTimes[soundOnsetEvents]
    soundOnsetTimeBehav = bdata['timeTarget']
    # Find missing trials
    missingTrials = behavioranalysis.find_missing_trials(soundOnsetTimeEphys, soundOnsetTimeBehav)
    # Remove missing trials
    bdata.remove_trials(missingTrials)

    currentBlock = bdata['currentBlock']
    blockTypes = [bdata.labels['currentBlock']['same_reward'],
                  bdata.labels['currentBlock']['more_left'],
                  bdata.labels['currentBlock']['more_right']]
    #blockLabels = ['more_left', 'more_right']
    # Restrict to the requested trial window, or use all trials.
    if(not len(trialLimit)):
        validTrials = np.ones(len(currentBlock), dtype=bool)
    else:
        validTrials = np.zeros(len(currentBlock), dtype=bool)
        validTrials[trialLimit[0]:trialLimit[1]] = 1
    trialsEachType = behavioranalysis.find_trials_each_type(currentBlock, blockTypes)

    # Choose the alignment event; center-out and side-in are derived by shifting
    # the sound-onset times by the within-trial behavior time differences.
    if alignment == 'sound':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
    elif alignment == 'center-out':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
        diffTimes = bdata['timeCenterOut']-bdata['timeTarget']
        EventOnsetTimes += diffTimes
    elif alignment == 'side-in':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
        diffTimes = bdata['timeSideIn']-bdata['timeTarget']
        EventOnsetTimes += diffTimes

    freqEachTrial = bdata['targetFrequency']
    possibleFreq = np.unique(freqEachTrial)
    rightward = bdata['choice']==bdata.labels['choice']['right']
    leftward = bdata['choice']==bdata.labels['choice']['left']
    invalid = bdata['outcome']==bdata.labels['outcome']['invalid']
    correct = bdata['outcome']==bdata.labels['outcome']['correct']
    incorrect = bdata['outcome']==bdata.labels['outcome']['error']

    ######Split left and right trials into correct and incorrect categories to look at error trials#########
    rightcorrect = rightward&correct&validTrials
    leftcorrect = leftward&correct&validTrials
    #righterror = rightward&incorrect&validTrials
    #lefterror = leftward&incorrect&validTrials

    rightcorrectBlockSameReward = rightcorrect&trialsEachType[:,0]
    rightcorrectBlockMoreLeft = rightcorrect&trialsEachType[:,1]
    rightcorrectBlockMoreRight = rightcorrect&trialsEachType[:,2]
    leftcorrectBlockSameReward = leftcorrect&trialsEachType[:,0]
    leftcorrectBlockMoreLeft = leftcorrect&trialsEachType[:,1]
    leftcorrectBlockMoreRight = leftcorrect&trialsEachType[:,2]

    trialsEachCond = np.c_[leftcorrectBlockMoreLeft, rightcorrectBlockMoreLeft,
                           leftcorrectBlockMoreRight, rightcorrectBlockMoreRight,
                           leftcorrectBlockSameReward, rightcorrectBlockSameReward]
    colorEachCond = ['g','r','m','b','y','darkgray']
    #trialsEachCond = np.c_[invalid,leftcorrect,rightcorrect,lefterror,righterror]
    #colorEachCond = ['0.75','g','r','b','m']

    (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = \
        spikesanalysis.eventlocked_spiketimes(spikeTimestamps, EventOnsetTimes, timeRange)

    ###########Plot raster and PSTH#################
    plt.figure()
    ax1 = plt.subplot2grid((8,5), (0, 0), rowspan=4, colspan=5)
    extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange,
                           trialsEachCond=trialsEachCond, colorEachCond=colorEachCond,
                           fillWidth=None, labels=None)
    plt.ylabel('Trials')
    plt.xlim(timeRange)
    plt.title('{0}_{1}_TT{2}_c{3}_{4}'.format(oneCell.animalName, oneCell.behavSession,
                                              oneCell.tetrode, oneCell.cluster, alignment))

    timeVec = np.arange(timeRange[0], timeRange[-1], binWidth)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, timeVec)
    smoothWinSize = 3
    ax2 = plt.subplot2grid((8,5), (4, 0), colspan=5, rowspan=2, sharex=ax1)
    extraplots.plot_psth(spikeCountMat/binWidth, smoothWinSize, timeVec,
                         trialsEachCond=trialsEachCond, colorEachCond=colorEachCond,
                         linestyle=None, linewidth=3, downsamplefactor=1)
    plt.xlabel('Time from {0} onset (s)'.format(alignment))
    plt.ylabel('Firing rate (spk/sec)')

    # -- Plot ISI histogram --
    plt.subplot2grid((8,5), (6,0), rowspan=1, colspan=2)
    spikesorting.plot_isi_loghist(spikeTimestamps)
    plt.ylabel('c%d'%oneCell.cluster, rotation=0, va='center', ha='center')
    plt.xlabel('')

    # -- Plot waveforms --
    plt.subplot2grid((8,5), (7,0), rowspan=1, colspan=3)
    spikesorting.plot_waveforms(waveforms)

    # -- Plot projections --
    plt.subplot2grid((8,5), (6,2), rowspan=1, colspan=3)
    spikesorting.plot_projections(waveforms)

    # -- Plot events in time --
    plt.subplot2grid((8,5), (7,3), rowspan=1, colspan=2)
    spikesorting.plot_events_in_time(spikeTimestamps)

    plt.subplots_adjust(wspace = 0.7)
    #plt.show()
    #fig_path =
    #fig_name = 'TT{0}Cluster{1}{2}.png'.format(tetrode, cluster, '_2afc plot_each_type')
    #full_fig_path = os.path.join(fig_path, fig_name)
    #print full_fig_path
    plt.gcf().set_size_inches((8.5,11))
def main():
    '''Generate per-cell switch-tuning report figures for all cells in allcells.cellDB.

    For each cell: load behavior/ephys (reloaded only when the behavior session
    changes), align spikes to sound onset and to center-out, then lay out tuning
    raster, block-switching rasters/histograms, cluster-quality panels, and save
    the figure as a PNG. Behavior sessions that raise any error are collected in
    badSessionList and printed at the end. Relies on many module-level globals
    (numOfCells, allcells, paradigm, timeRange, numRows, numCols, etc.).
    '''
    global behavSession
    global subject
    global tetrode
    global cluster
    global tuningBehavior  #behavior file name of tuning curve
    global tuningEphys  #ephys session name of tuning curve
    global bdata
    global eventOnsetTimes
    global spikeTimesFromEventOnset
    global indexLimitsEachTrial
    global spikeTimesFromMovementOnset
    global indexLimitsEachMovementTrial
    global titleText
    print "switch_tuning_block_allfreq_report"
    for cellID in range(0, numOfCells):
        oneCell = allcells.cellDB[cellID]
        try:
            # Reload session-level data only when the behavior session changes.
            if (behavSession != oneCell.behavSession):
                subject = oneCell.animalName
                behavSession = oneCell.behavSession
                ephysSession = oneCell.ephysSession
                tuningSession = oneCell.tuningSession
                ephysRoot = os.path.join(ephysRootDir, subject)
                tuningBehavior = oneCell.tuningBehavior
                tuningEphys = oneCell.tuningSession
                print behavSession
                # -- Load Behavior Data --
                behaviorFilename = loadbehavior.path_to_behavior_data(
                    subject=subject, paradigm=paradigm, sessionstr=behavSession)
                bdata = loadbehavior.FlexCategBehaviorData(behaviorFilename)
                #bdata = loadbehavior.BehaviorData(behaviorFilename)
                numberOfTrials = len(bdata['choice'])
                # -- Load event data and convert event timestamps to ms --
                ephysDir = os.path.join(ephysRoot, ephysSession)
                eventFilename = os.path.join(ephysDir, 'all_channels.events')
                events = loadopenephys.Events(eventFilename)  # Load events data
                eventTimes = np.array(events.timestamps) / SAMPLING_RATE  #get array of timestamps for each event and convert to seconds by dividing by sampling rate (Hz). matches with eventID and
                soundOnsetEvents = (events.eventID == 1) & (events.eventChannel == soundTriggerChannel)
                eventOnsetTimes = eventTimes[soundOnsetEvents]
                soundOnsetTimeBehav = bdata['timeTarget']
                # Find missing trials
                missingTrials = behavioranalysis.find_missing_trials(eventOnsetTimes, soundOnsetTimeBehav)
                # Remove missing trials
                bdata.remove_trials(missingTrials)
                bdata.find_trials_each_block()
                ###############################################################################################
                centerOutTimes = bdata['timeCenterOut']  #This is the times that the mouse goes out of the center port
                soundStartTimes = bdata['timeTarget']  #This gives an array with the times in seconds from the start of the behavior paradigm of when the sound was presented for each trial
                timeDiff = centerOutTimes - soundStartTimes
                # Build center-out-aligned event times, trimming one trial when
                # the ephys and behavior arrays differ in length by one.
                if (len(eventOnsetTimes) < len(timeDiff)):
                    timeDiff = timeDiff[:-1]
                    eventOnsetTimesCenter = eventOnsetTimes + timeDiff
                elif (len(eventOnsetTimes) > len(timeDiff)):
                    eventOnsetTimesCenter = eventOnsetTimes[:-1] + timeDiff
                else:
                    eventOnsetTimesCenter = eventOnsetTimes + timeDiff
                ###############################################################################################
            tetrode = oneCell.tetrode
            cluster = oneCell.cluster
            # -- Load Spike Data From Certain Cluster --
            spkData = ephyscore.CellData(oneCell)
            spkTimeStamps = spkData.spikes.timestamps
            (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = \
                spikesanalysis.eventlocked_spiketimes(spkTimeStamps, eventOnsetTimes, timeRange)
            (spikeTimesFromMovementOnset, movementTrialIndexForEachSpike, indexLimitsEachMovementTrial) = \
                spikesanalysis.eventlocked_spiketimes(spkTimeStamps, eventOnsetTimesCenter, timeRange)
            plt.clf()
            # -- Cluster-quality panels (bottom row) --
            if (len(spkTimeStamps) > 0):
                ax1 = plt.subplot2grid((numRows, numCols),
                                       ((numRows - sizeClusterPlot), 0),
                                       colspan=(numCols / 3))
                spikesorting.plot_isi_loghist(spkData.spikes.timestamps)
                ax3 = plt.subplot2grid((numRows, numCols),
                                       ((numRows - sizeClusterPlot), (numCols / 3) * 2),
                                       colspan=(numCols / 3))
                spikesorting.plot_events_in_time(spkTimeStamps)
                # Convert raw samples to microvolts using the recorded gain.
                samples = spkData.spikes.samples.astype(float) - 2**15
                samples = (1000.0 / spkData.spikes.gain[0, 0]) * samples
                ax2 = plt.subplot2grid((numRows, numCols),
                                       ((numRows - sizeClusterPlot), (numCols / 3)),
                                       colspan=(numCols / 3))
                spikesorting.plot_waveforms(samples)
            ###############################################################################
            # -- Tuning raster (left column) --
            ax4 = plt.subplot2grid((numRows, numCols), (0, 0),
                                   colspan=(numCols / 2), rowspan=3 * sizeRasters)
            #plt.setp(ax4.get_xticklabels(), visible=False)
            #fig.axes.get_xaxis().set_visible(False)
            raster_tuning(ax4)
            axvline(x=0, ymin=0, ymax=1, color='r')
            plt.gca().set_xlim(tuning_timeRange)
            # -- Sound-aligned block-switching raster + histogram (right column) --
            ax6 = plt.subplot2grid((numRows, numCols), (0, (numCols / 2)),
                                   colspan=(numCols / 2), rowspan=sizeRasters)
            plt.setp(ax6.get_xticklabels(), visible=False)
            plt.setp(ax6.get_yticklabels(), visible=False)
            raster_sound_block_switching()
            plt.title('sound aligned, Top: middle freq in blocks, Bottom: all freqs')
            ax7 = plt.subplot2grid((numRows, numCols), (sizeRasters, (numCols / 2)),
                                   colspan=(numCols / 2), rowspan=sizeHists, sharex=ax6)
            hist_sound_block_switching(ax7)
            #plt.setp(ax7.get_yticklabels(), visible=False)
            ax7.yaxis.tick_right()
            ax7.yaxis.set_ticks_position('both')
            plt.setp(ax7.get_xticklabels(), visible=False)
            plt.gca().set_xlim(timeRange)
            # -- All-frequency switching raster + histogram --
            ax10 = plt.subplot2grid((numRows, numCols),
                                    ((sizeRasters + sizeHists), (numCols / 2)),
                                    colspan=(numCols / 2), rowspan=sizeRasters)
            plt.setp(ax10.get_xticklabels(), visible=False)
            plt.setp(ax10.get_yticklabels(), visible=False)
            raster_sound_allFreq_switching()
            ax11 = plt.subplot2grid((numRows, numCols),
                                    ((2 * sizeRasters + sizeHists), (numCols / 2)),
                                    colspan=(numCols / 2), rowspan=sizeHists, sharex=ax10)
            hist_sound_allFreq_switching(ax11)
            ax11.yaxis.tick_right()
            ax11.yaxis.set_ticks_position('both')
            ax11.set_xlabel('Time (sec)')
            #plt.setp(ax11.get_yticklabels(), visible=False)
            plt.gca().set_xlim(timeRange)
            ###############################################################################
            #plt.tight_layout()
            modulation_index_switching()
            plt.suptitle(titleText)
            tetrodeClusterName = 'T' + str(oneCell.tetrode) + 'c' + str(oneCell.cluster)
            plt.gcf().set_size_inches((8.5, 11))
            figformat = 'png'  #'png' #'pdf' #'svg'
            filename = reportname + '_%s_%s_%s.%s' % (subject, behavSession, tetrodeClusterName, figformat)
            fulloutputDir = outputDir + subject + '/'
            fullFileName = os.path.join(fulloutputDir, filename)
            directory = os.path.dirname(fulloutputDir)
            if not os.path.exists(directory):  #makes sure output folder exists
                os.makedirs(directory)
            #print 'saving figure to %s'%fullFileName
            plt.gcf().savefig(fullFileName, format=figformat)
        except:
            # NOTE(review): bare except silently swallows all errors for a cell;
            # only the behavior session name is recorded for later reporting.
            if (oneCell.behavSession not in badSessionList):
                badSessionList.append(oneCell.behavSession)
    print 'error with sessions: '
    for badSes in badSessionList:
        print badSes
# -- Script fragment: build a (intensity x frequency) matrix of mean response
#    and baseline spike counts, then prepare a 3D surface plot of the response --
baseRange = [-0.1, 0]      # seconds relative to stimulus onset (baseline)
responseRange = [0, 0.1]   # seconds relative to stimulus onset (response)
alignmentRange = [baseRange[0], responseRange[1]]
response = np.empty((len(possibleIntensity), len(possibleFreq)))
base = np.empty((len(possibleIntensity), len(possibleFreq)))
for indfreq, freq in enumerate(possibleFreq):
    for indinten, inten in enumerate(possibleIntensity):
        # Trials with this exact (frequency, intensity) combination.
        selectinds = np.flatnonzero((freqEachTrial == freq) & (intensityEachTrial == inten))
        selectedOnsetTimes = eventOnsetTimes[selectinds]
        (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = \
            spikesanalysis.eventlocked_spiketimes(clusterSpikeTimes, selectedOnsetTimes, alignmentRange)
        nspkBase = spikesanalysis.spiketimes_to_spikecounts(
            spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
        nspkResp = spikesanalysis.spiketimes_to_spikecounts(
            spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
        response[indinten, indfreq] = nspkResp.mean()
        base[indinten, indfreq] = nspkBase.mean()
fig = plt.figure()
# NOTE(review): gca(projection='3d') was removed in matplotlib 3.6;
# modern code would use fig.add_subplot(projection='3d').
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(np.arange(len(possibleFreq)), np.arange(len(possibleIntensity)))
Z = response
# -- Loop-body fragment (enclosing loop not visible; `continue` targets it):
#    load 'standard' and 'oddball' sessions for one cell and compute
#    event-locked spike times for the standard session --
oneCell = ephyscore.Cell(dbRow)
numOfCellsOddballParadigm = numOfCellsOddballParadigm + 1
if oneCell.get_session_inds('standard') != []:
    try:
        ephysDataStd, bdataStd = oneCell.load('standard')
    except ValueError as verror:
        continue  # session unusable; skip this cell
    spikeTimesStd = ephysDataStd['spikeTimes']
    eventOnsetTimesStd = ephysDataStd['events']['stimOn']
    # Drop the extra trailing onset when ephys recorded one more event
    # than the behavior file has trials.
    if len(eventOnsetTimesStd) == len(bdataStd['currentFreq']) + 1:
        eventOnsetTimesStd = eventOnsetTimesStd[:-1]
    (spikeTimesFromEventOnsetStd, trialIndexForEachSpikeStd, indexLimitsEachTrialStd) = \
        spikesanalysis.eventlocked_spiketimes(spikeTimesStd, eventOnsetTimesStd, timeRange)
    frequenciesEachTrialStd = bdataStd['currentFreq']
    arrayOfFrequenciesStd = np.unique(bdataStd['currentFreq'])
    trialsEachCondStd = behavioranalysis.find_trials_each_type(
        frequenciesEachTrialStd, arrayOfFrequenciesStd)
# -- Oddball session --
if oneCell.get_session_inds('oddball') != []:
    try:
        ephysDataOdd, bdataOdd = oneCell.load('oddball')
    except ValueError as verror:
        continue  # session unusable; skip this cell
    spikeTimesOdd = ephysDataOdd['spikeTimes']
    eventOnsetTimesOdd = ephysDataOdd['events']['stimOn']
def inactivation_base_stats(db, filename=''):
    '''Compute laser responsiveness, sound responsiveness, and frequency tuning
    for every cell in a cell database.

    For each row of db: tests laser response ('lasernoisebursts' session), sound
    response during (laser)bandwidth sessions with a multiple-comparison-corrected
    p-value, baseline firing rates with/without laser, and a Gaussian fit to the
    tuning curve at the highest intensity. Results are written back into db as
    new columns; db is mutated in place and also returned. If filename is
    non-empty the database is saved via celldatabase.save_hdf.
    '''
    # Per-cell accumulators, one entry per row of db.
    laserTestStatistic = np.empty(len(db))
    laserPVal = np.empty(len(db))
    soundResponseTestStatistic = np.empty(len(db))
    soundResponsePVal = np.empty(len(db))
    onsetSoundResponseTestStatistic = np.empty(len(db))
    onsetSoundResponsePVal = np.empty(len(db))
    sustainedSoundResponseTestStatistic = np.empty(len(db))
    sustainedSoundResponsePVal = np.empty(len(db))
    gaussFit = []
    tuningTimeRange = []
    Rsquared = np.empty(len(db))
    prefFreq = np.empty(len(db))
    octavesFromPrefFreq = np.empty(len(db))
    bestBandSession = np.empty(len(db))
    for indRow, (dbIndex, dbRow) in enumerate(db.iterrows()):
        cellObj = ephyscore.Cell(dbRow)
        print "Now processing", dbRow['subject'], dbRow['date'], dbRow['depth'], dbRow['tetrode'], dbRow['cluster']
        # --- Determine laser responsiveness of each cell (using first 100 ms of noise-in-laser trials) ---
        try:
            laserEphysData, noBehav = cellObj.load('lasernoisebursts')
        except IndexError:
            print "No laser pulse session for this cell"
            testStatistic = np.nan
            pVal = np.nan
            changeFR = np.nan
        else:
            testStatistic, pVal, changeFR = funcs.laser_response(
                laserEphysData, baseRange=[-0.3, -0.2], responseRange=[0.0, 0.1])
        laserTestStatistic[indRow] = testStatistic
        laserPVal[indRow] = pVal
        # --- Determine sound responsiveness during bandwidth sessions and calculate baseline firing rates with and without laser---
        #done in a kind of stupid way because regular and control sessions are handled the same way
        if any(session in dbRow['sessionType'] for session in ['laserBandwidth', 'laserBandwidthControl']):
            if 'laserBandwidth' in dbRow['sessionType']:
                bandEphysData, bandBehavData = cellObj.load('laserBandwidth')
                behavSession = 'laserBandwidth'
                db.at[dbIndex, 'controlSession'] = 0
            elif 'laserBandwidthControl' in dbRow['sessionType']:
                bandEphysData, bandBehavData = cellObj.load('laserBandwidthControl')
                behavSession = 'laserBandwidthControl'
                db.at[dbIndex, 'controlSession'] = 1
            bandEventOnsetTimes = funcs.get_sound_onset_times(bandEphysData, 'bandwidth')
            bandSpikeTimestamps = bandEphysData['spikeTimes']
            bandEachTrial = bandBehavData['currentBand']
            secondSort = bandBehavData['laserTrial']
            numBands = np.unique(bandEachTrial)
            numSec = np.unique(secondSort)
            trialsEachComb = behavioranalysis.find_trials_each_combination(
                bandEachTrial, numBands, secondSort, numSec)
            trialsEachBaseCond = trialsEachComb[:, :, 0]  #using no laser trials to determine sound responsiveness
            # Whole-trial, onset-only, and sustained-only response tests.
            testStatistic, pVal = funcs.sound_response_any_stimulus(
                bandEventOnsetTimes, bandSpikeTimestamps, trialsEachBaseCond,
                [0.0, 1.0], [-1.2, -0.2])
            onsetTestStatistic, onsetpVal = funcs.sound_response_any_stimulus(
                bandEventOnsetTimes, bandSpikeTimestamps, trialsEachBaseCond,
                [0.0, 0.05], [-0.25, 0.2])
            sustainedTestStatistic, sustainedpVal = funcs.sound_response_any_stimulus(
                bandEventOnsetTimes, bandSpikeTimestamps, trialsEachBaseCond,
                [0.2, 1.0], [-1.0, 0.2])
            pVal *= len(numBands)  #correction for multiple comparisons
            onsetpVal *= len(numBands)
            sustainedpVal *= len(numBands)
            #pdb.set_trace()
            #find baselines with and without laser
            baselineRange = [-0.05, 0.0]
            baselineRates, baselineSEMs = funcs.inactivated_cells_baselines(
                bandSpikeTimestamps, bandEventOnsetTimes, secondSort, baselineRange)
            db.at[dbIndex, 'baselineFRnoLaser'] = baselineRates[0]
            db.at[dbIndex, 'baselineFRLaser'] = baselineRates[1]
            db.at[dbIndex, 'baselineFRnoLaserSEM'] = baselineSEMs[0]
            db.at[dbIndex, 'baselineFRLaserSEM'] = baselineSEMs[1]
            db.at[dbIndex, 'baselineChangeFR'] = baselineRates[1] - baselineRates[0]
        else:
            print "No bandwidth session for this cell"
            testStatistic = np.nan
            pVal = np.nan
            onsetTestStatistic = np.nan
            onsetpVal = np.nan
            sustainedTestStatistic = np.nan
            sustainedpVal = np.nan
            #pdb.set_trace()
        soundResponseTestStatistic[indRow] = testStatistic
        soundResponsePVal[indRow] = pVal
        onsetSoundResponseTestStatistic[indRow] = onsetTestStatistic
        onsetSoundResponsePVal[indRow] = onsetpVal
        sustainedSoundResponseTestStatistic[indRow] = sustainedTestStatistic
        sustainedSoundResponsePVal[indRow] = sustainedpVal
        # --- Determine frequency tuning of cells ---
        try:
            tuningEphysData, tuningBehavData = cellObj.load('tuningCurve')
        except IndexError:
            print "No tuning session for this cell"
            freqFit = np.full(4, np.nan)
            thisRsquared = np.nan
            bestFreq = np.nan
            tuningWindow = np.full(2, np.nan)
            octavesFromBest = np.nan
            bandIndex = np.nan
        else:
            tuningEventOnsetTimes = funcs.get_sound_onset_times(tuningEphysData, 'tuningCurve')
            tuningSpikeTimestamps = tuningEphysData['spikeTimes']
            freqEachTrial = tuningBehavData['currentFreq']
            intensityEachTrial = tuningBehavData['currentIntensity']
            numFreqs = np.unique(freqEachTrial)
            numIntensities = np.unique(intensityEachTrial)
            timeRange = [-0.2, 0.2]
            spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
                tuningSpikeTimestamps, tuningEventOnsetTimes, timeRange)
            trialsEachType = behavioranalysis.find_trials_each_type(
                intensityEachTrial, numIntensities)
            trialsHighInt = trialsEachType[:, -1]  # highest intensity only
            trialsEachComb = behavioranalysis.find_trials_each_combination(
                freqEachTrial, numFreqs, intensityEachTrial, numIntensities)
            trialsEachFreqHighInt = trialsEachComb[:, :, -1]
            # Pick the response window that best captures tuning, then fit a
            # Gaussian to log2(frequency) vs firing rate.
            tuningWindow = funcs.best_window_freq_tuning(
                spikeTimesFromEventOnset, indexLimitsEachTrial, trialsEachFreqHighInt)
            tuningWindow = np.array(tuningWindow)
            spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
                spikeTimesFromEventOnset, indexLimitsEachTrial, tuningWindow)
            tuningSpikeRates = (spikeCountMat[trialsHighInt].flatten()) / (tuningWindow[1] - tuningWindow[0])
            freqsThisIntensity = freqEachTrial[trialsHighInt]
            freqFit, thisRsquared = funcs.gaussian_tuning_fit(
                np.log2(freqsThisIntensity), tuningSpikeRates)
            if freqFit is not None:
                bestFreq = 2**freqFit[0]  # fit center back to Hz
                bandIndex, octavesFromBest = funcs.best_index(cellObj, bestFreq, behavSession)
            else:
                freqFit = np.full(4, np.nan)
                bestFreq = np.nan
                bandIndex = np.nan
                octavesFromBest = np.nan
        gaussFit.append(freqFit)
        tuningTimeRange.append(tuningWindow)
        Rsquared[indRow] = thisRsquared
        prefFreq[indRow] = bestFreq
        octavesFromPrefFreq[indRow] = octavesFromBest
        bestBandSession[indRow] = bandIndex
    # Write all per-cell results back into the database.
    db['laserPVal'] = laserPVal
    db['laserUStat'] = laserTestStatistic
    db['soundResponseUStat'] = soundResponseTestStatistic
    db['soundResponsePVal'] = soundResponsePVal
    db['onsetSoundResponseUStat'] = onsetSoundResponseTestStatistic
    db['onsetSoundResponsePVal'] = onsetSoundResponsePVal
    db['sustainedSoundResponseUStat'] = sustainedSoundResponseTestStatistic
    db['sustainedSoundResponsePVal'] = sustainedSoundResponsePVal
    db['gaussFit'] = gaussFit
    db['tuningTimeRange'] = tuningTimeRange
    db['tuningFitR2'] = Rsquared
    db['prefFreq'] = prefFreq
    db['octavesFromPrefFreq'] = octavesFromPrefFreq
    db['bestBandSession'] = bestBandSession
    if len(filename) != 0:
        celldatabase.save_hdf(db, filename)
        print filename + " saved"
    return db
baseRange = [-0.1, -0.05] # responseRange = [0, 0.05, 0.1] responseRange = [cellLatency, cellLatency + 0.05, 0.1 + cellLatency] # if dbRow['brainArea']=='rightAC': # # responseRange = [0.02, 0.07, 0.12] # responseRange = [0.02, 0.07, 0.1] # elif dbRow['brainArea']=='rightThal': # # responseRange = [0.005, 0.015, 0.105] # responseRange = [0.005, 0.015, 0.1] alignmentRange = [baseRange[0], responseRange[-1]] # Align spikes just to the selected event onset times (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes( spikeTimes, eventsThisFreqHighIntensity, alignmentRange) nspkBase = spikesanalysis.spiketimes_to_spikecounts( spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange) nspkResp = spikesanalysis.spiketimes_to_spikecounts( spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange) avgResponse = nspkResp.mean(axis=0) onsetSpikes = avgResponse[0] sustainedSpikes = avgResponse[1] onsetRate = onsetSpikes / (responseRange[1] - responseRange[0]) sustainedRate = sustainedSpikes / (responseRange[2] - responseRange[1]) baseSpikes = nspkBase.mean() baseRate = baseSpikes / (baseRange[1] - baseRange[0])
def raster_tuning(ax):
    '''Plot a frequency-sorted tuning raster for the current cell onto axis ax.

    Loads the tuning-curve behavior and ephys data for the cell identified by
    module-level globals (subject, tuningBehavior, tuningEphys, tetrode,
    cluster), sorts trials by presented frequency, and draws the spike raster
    with one horizontal band per frequency. Relies on globals: behaviorDir,
    settings, SAMPLING_RATE, tuning_timeRange, and pylab-style names
    (argsort, cumsum, mean, plot, ylabel, xlabel).
    '''
    fullbehaviorDir = behaviorDir + subject + '/'
    behavName = subject + '_tuning_curve_' + tuningBehavior + '.h5'
    tuningBehavFileName = os.path.join(fullbehaviorDir, behavName)
    tuning_bdata = loadbehavior.BehaviorData(tuningBehavFileName, readmode='full')
    freqEachTrial = tuning_bdata['currentFreq']
    possibleFreq = np.unique(freqEachTrial)
    numberOfTrials = len(freqEachTrial)
    # -- The old way of sorting (useful for plotting sorted raster) --
    sortedTrials = []
    numTrialsEachFreq = []  #Used to plot lines after each group of sorted trials
    for indf, oneFreq in enumerate(possibleFreq):  #indf is index of this freq and oneFreq is the frequency
        indsThisFreq = np.flatnonzero(freqEachTrial == oneFreq)  #this gives indices of this frequency
        sortedTrials = np.concatenate((sortedTrials, indsThisFreq))  #adds all indices to a list called sortedTrials
        numTrialsEachFreq.append(len(indsThisFreq))  #finds number of trials each frequency has
    sortingInds = argsort(sortedTrials)  #gives array of indices that would sort the sortedTrials
    # -- Load event data and convert event timestamps to ms --
    tuning_ephysDir = os.path.join(settings.EPHYS_PATH, subject, tuningEphys)
    tuning_eventFilename = os.path.join(tuning_ephysDir, 'all_channels.events')
    tuning_ev = loadopenephys.Events(tuning_eventFilename)  #load ephys data (like bdata structure)
    tuning_eventTimes = np.array(tuning_ev.timestamps) / SAMPLING_RATE  #get array of timestamps for each event and convert to seconds by dividing by sampling rate (Hz). matches with eventID and
    tuning_evID = np.array(tuning_ev.eventID)  #loads the onset times of events (matches up with eventID to say if event 1 went on (1) or off (0)
    tuning_eventOnsetTimes = tuning_eventTimes[tuning_evID == 1]  #array that is a time stamp for when the chosen event happens.
    #ev.eventChannel woul load array of events like trial start and sound start and finish times (sound event is 0 and trial start is 1 for example). There is only one event though and its sound start
    # Trim extra trailing ephys events until counts match the behavior trials.
    while (numberOfTrials < len(tuning_eventOnsetTimes)):
        tuning_eventOnsetTimes = tuning_eventOnsetTimes[:-1]
    #######################################################################################################
    ###################THIS IS SUCH A HACK TO GET SPKDATA FROM EPHYSCORE###################################
    #######################################################################################################
    # Build a minimal CellInfo just so ephyscore.CellData can locate the spikes.
    thisCell = celldatabase.CellInfo(
        animalName=subject,  ############################################
        ephysSession=tuningEphys,
        tuningSession='DO NOT NEED THIS',
        tetrode=tetrode,
        cluster=cluster,
        quality=1,
        depth=0,
        tuningBehavior='DO NOT NEED THIS',
        behavSession=tuningBehavior)
    tuning_spkData = ephyscore.CellData(thisCell)
    tuning_spkTimeStamps = tuning_spkData.spikes.timestamps
    (tuning_spikeTimesFromEventOnset, tuning_trialIndexForEachSpike,
     tuning_indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         tuning_spkTimeStamps, tuning_eventOnsetTimes, tuning_timeRange)
    #print 'numTrials ',max(tuning_trialIndexForEachSpike)#####################################
    '''
        Create a vector with the spike timestamps w.r.t. events onset.
        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = eventlocked_spiketimes(timeStamps,eventOnsetTimes,timeRange)
        timeStamps: (np.array) the time of each spike.
        eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
        timeRange: (list or np.array) two-element array specifying time-range to extract around event.
        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        o trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial.
           Note that the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''
    tuning_sortedIndexForEachSpike = sortingInds[tuning_trialIndexForEachSpike]  #Takes values of trialIndexForEachSpike and finds value of sortingInds at that index and makes array. This array gives an array with the sorted index of each trial for each spike
    # -- Calculate tuning --
    #nSpikes = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,indexLimitsEachTrial,responseRange) #array of the number of spikes in range for each trial
    '''Count number of spikes on each trial in a given time range.
           spikeTimesFromEventOnset: vector of spikes timestamps with respect to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.
           returns nSpikes
    '''
    '''
    meanSpikesEachFrequency = np.empty(len(possibleFreq)) #make empty array of same size as possibleFreq
    # -- This part will be replace by something like behavioranalysis.find_trials_each_type --
    trialsEachFreq = []
    for indf,oneFreq in enumerate(possibleFreq):
        trialsEachFreq.append(np.flatnonzero(freqEachTrial==oneFreq)) #finds indices of each frequency. Appends them to get an array of indices of trials sorted by freq

    # -- Calculate average firing for each freq --
    for indf,oneFreq in enumerate(possibleFreq):
        meanSpikesEachFrequency[indf] = np.mean(nSpikes[trialsEachFreq[indf]])
    '''
    #clf()
    #if (len(tuning_spkTimeStamps)>0):
    #ax1 = plt.subplot2grid((4,4), (3, 0), colspan=1)
    #spikesorting.plot_isi_loghist(spkData.spikes.timestamps)
    #ax3 = plt.subplot2grid((4,4), (3, 3), colspan=1)
    #spikesorting.plot_events_in_time(tuning_spkTimeStamps)
    #samples = tuning_spkData.spikes.samples.astype(float)-2**15
    #samples = (1000.0/tuning_spkData.spikes.gain[0,0]) *samples
    #ax2 = plt.subplot2grid((4,4), (3, 1), colspan=2)
    #spikesorting.plot_waveforms(samples)
    #ax4 = plt.subplot2grid((4,4), (0, 0), colspan=3,rowspan = 3)
    # Raster: one dot per spike, at its event-locked time and sorted trial index.
    plot(tuning_spikeTimesFromEventOnset, tuning_sortedIndexForEachSpike, '.', ms=3)
    #axvline(x=0, ymin=0, ymax=1, color='r')
    #The cumulative sum of the list of specific frequency presentations,
    #used below for plotting the lines across the figure.
    numTrials = cumsum(numTrialsEachFreq)
    #Plot the lines across the figure in between each group of sorted trials
    for indf, num in enumerate(numTrials):
        ax.axhline(y=num, xmin=0, xmax=1, color='0.90', zorder=0)
    # Center a tick (labeled in kHz) within each frequency band.
    tickPositions = numTrials - mean(numTrialsEachFreq) / 2
    tickLabels = ["%0.2f" % (possibleFreq[indf] / 1000) for indf in range(len(possibleFreq))]
    ax.set_yticks(tickPositions)
    ax.set_yticklabels(tickLabels)
    ax.set_ylim([-1, numberOfTrials])
    ylabel('Frequency Presented (kHz), {} total trials'.format(numTrials[-1]))
    #title(ephysSession+' T{}c{}'.format(tetrodeID,clusterID))
    xlabel('Time (sec)')
    '''
    ax5 = plt.subplot2grid((4,4), (0, 3), colspan=1,rowspan=3)
    ax5.set_xscale('log')
    plot(possibleFreq,meanSpikesEachFrequency,'o-')
    ylabel('Avg spikes in window {0}-{1} sec'.format(*responseRange))
    xlabel('Frequency')
    '''
    #show()
    # NOTE(review): stray unpaired triple-quote below, preserved from the source chunking.
    '''
#Find the event onset times. This method converts the times to seconds unless you tell it not to. #This method also excludes event onset times that are seperated by less than 0.5sec, and this is #hard coded for now. eventOnsetTimes = ex0624.get_event_onset_times(eventDataNB) #Plot a raster plot for the noise burst session spikeTimestamps = spikeDataNB.timestamps #The spikeData object will have clusters if clustering has been performed, so we can limit to a single cluster here. #Lets look at the sound and laser responsive cluster TT6c3 spikeTimestamps = spikeTimestamps[spikeDataNB.clusters == 3] timeRange = [-0.5, 1] spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes( spikeTimestamps, eventOnsetTimes, timeRange) plt.figure() plt.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, 'k.', ms=1) plt.show() #Will plot the waveforms of spikes in a certain time range after an event from jaratoolbox import spikesorting indexTR = [0, 0.1] spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial, spikeIndex = spikesanalysis.eventlocked_spiketimes( spikeTimestamps, eventOnsetTimes, indexTR, spikeindex=True) waves = spikeDataNB.samples[spikeIndex] figure()
# --- Example script: inspect tuning data for one cell from figdb (relies on
# figdb, clusterfuncs, loader, and spikesanalysis defined earlier in the file). ---
figdb[0].get_session_types()
clusterfuncs.plot_cluster_tuning(figdb[1], 3)
spikeData, eventData, behavData = loader.get_cluster_data(figdb[1], 3)
currentFreq = behavData['currentFreq']
possibleFreq = np.unique(currentFreq)
currentIntensity = behavData['currentIntensity']
possibleIntensity = np.unique(currentIntensity)
spikeTimestamps = spikeData.timestamps
eventOnsetTimes = loader.get_event_onset_times(eventData)
timeRange = [-0.5, 1]  # seconds around each event onset
spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
    spikeTimestamps, eventOnsetTimes, timeRange)


def num_spikes_in_timerange_select_trials(spikeTimestamps, eventOnsetTimes, timeRange, selectInds):
    '''Count event-locked spikes per trial, keeping only the selected trials.

    Args:
        spikeTimestamps: spike times for one cell (seconds).
        eventOnsetTimes: time of each event instance to lock to (seconds).
        timeRange: two-element [start, stop] window around each event (seconds).
        selectInds: boolean mask or index array choosing which trials to keep.

    Returns:
        Array of spike counts, one per selected trial.
    '''
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    # Each column of indexLimitsEachTrial is [firstInd, lastInd+1], so the
    # column-wise difference is the spike count for that trial.
    # NOTE(review): bare squeeze/diff assume a pylab/numpy star import -- confirm.
    numSpikesEachTrial = squeeze(diff(indexLimitsEachTrial, axis=0))
    numSpikesSelectTrials = numSpikesEachTrial[selectInds]
    return numSpikesSelectTrials


#####################################################
### Code to plot the auroc versus baseline for all times, all freqs
from sklearn.metrics import roc_curve, auc
def plot_bandwidth_report(cell, bandIndex):
    '''Build and save a multi-panel summary figure for one recorded cell.

    Panels: bandwidth rasters (one per sound amplitude), bandwidth tuning
    ("Yashar") plot, frequency-tuning heatmap and raster, AM raster/PSTH,
    optional laser pulse/train raster+PSTH (when the cell has laser sessions),
    and cluster-quality plots (ISI histogram, waveforms, events in time).
    The figure is written as a PNG under /home/jarauser/Pictures/cell reports.

    Args:
        cell: database row / dict with at least a 'subject' key, consumed by
            get_cell_info() and dataloader.DataLoader().
        bandIndex: index into cellInfo['ephysDirs']/'behavDirs' selecting which
            bandwidth session to plot.

    Returns:
        None. Side effects: clears the current figure, draws into it, saves PNG.
    '''
    cellInfo = get_cell_info(cell)
    loader = dataloader.DataLoader(cell['subject'])
    # Laser sessions add 4 extra grid rows at the top of the figure.
    if len(cellInfo['laserIndex'])>0:
        laser = True
        gs = gridspec.GridSpec(13, 6)
    else:
        laser = False
        gs = gridspec.GridSpec(9, 6)
    offset = 4*laser  # row offset for all non-laser panels (0 or 4)
    gs.update(left=0.15, right=0.85, top = 0.96, wspace=0.7, hspace=1.0)

    # -- plot bandwidth rasters --
    plt.clf()
    eventData = loader.get_session_events(cellInfo['ephysDirs'][bandIndex])
    spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][bandIndex], cellInfo['tetrode'], cluster=cellInfo['cluster'])
    eventOnsetTimes = loader.get_event_onset_times(eventData)
    spikeTimestamps = spikeData.timestamps
    timeRange = [-0.2, 1.5]  # seconds around sound onset
    bandBData = loader.get_session_behavior(cellInfo['behavDirs'][bandIndex])
    bandEachTrial = bandBData['currentBand']
    ampEachTrial = bandBData['currentAmp']
    charfreq = str(np.unique(bandBData['charFreq'])[0]/1000)
    modrate = str(np.unique(bandBData['modRate'])[0])
    numBands = np.unique(bandEachTrial)
    numAmps = np.unique(ampEachTrial)
    firstSortLabels = ['{}'.format(band) for band in np.unique(bandEachTrial)]
    secondSortLabels = ['Amplitude: {}'.format(amp) for amp in np.unique(ampEachTrial)]
    spikeTimesFromEventOnset, indexLimitsEachTrial, trialsEachCond, firstSortLabels = bandwidth_raster_inputs(eventOnsetTimes, spikeTimestamps, bandEachTrial, ampEachTrial)
    # Alternate two colours per amplitude. FIX: np.tile requires an integer
    # repeat count; use floor division so this also works under Python 3
    # (plain '/' would produce a float and raise TypeError).
    colours = [np.tile(['#4e9a06','#8ae234'],len(numBands)//2+1),
               np.tile(['#5c3566','#ad7fa8'],len(numBands)//2+1)]
    for ind, secondArrayVal in enumerate(numAmps):
        plt.subplot(gs[5+2*ind+offset:7+2*ind+offset, 0:3])
        trialsThisSecondVal = trialsEachCond[:, :, ind]
        pRaster, hcond, zline = extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange,
                                                       trialsEachCond=trialsThisSecondVal,
                                                       labels=firstSortLabels,
                                                       colorEachCond = colours[ind])
        plt.setp(pRaster, ms=4)
        plt.title(secondSortLabels[ind])
        plt.ylabel('bandwidth (octaves)')
        # Only label the x axis on the bottom-most raster.
        if ind == len(np.unique(ampEachTrial)) - 1:
            plt.xlabel("Time from sound onset (sec)")

    # -- plot Yashar plots for bandwidth data --
    plt.subplot(gs[5+offset:, 3:])
    spikeArray, errorArray, baseSpikeRate = band_select(spikeTimestamps, eventOnsetTimes, ampEachTrial, bandEachTrial, timeRange = [0.0, 1.0])
    band_select_plot(spikeArray, errorArray, baseSpikeRate, numBands, legend=True)

    # -- plot frequency tuning heat map --
    tuningBData = loader.get_session_behavior(cellInfo['behavDirs'][cellInfo['tuningIndex'][-1]])
    freqEachTrial = tuningBData['currentFreq']
    intEachTrial = tuningBData['currentIntensity']
    eventData = loader.get_session_events(cellInfo['ephysDirs'][cellInfo['tuningIndex'][-1]])
    spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][cellInfo['tuningIndex'][-1]], cellInfo['tetrode'], cluster=cellInfo['cluster'])
    eventOnsetTimes = loader.get_event_onset_times(eventData)
    spikeTimestamps = spikeData.timestamps
    plt.subplot(gs[2+offset:4+offset, 0:3])
    dataplotter.two_axis_heatmap(spikeTimestamps=spikeTimestamps,
                                 eventOnsetTimes=eventOnsetTimes,
                                 firstSortArray=intEachTrial,
                                 secondSortArray=freqEachTrial,
                                 firstSortLabels=["%.0f" % inten for inten in np.unique(intEachTrial)],
                                 secondSortLabels=["%.1f" % freq for freq in np.unique(freqEachTrial)/1000.0],
                                 xlabel='Frequency (kHz)',
                                 ylabel='Intensity (dB SPL)',
                                 plotTitle='Frequency Tuning Curve',
                                 flipFirstAxis=False,
                                 flipSecondAxis=False,
                                 timeRange=[0, 0.1])
    plt.ylabel('Intensity (dB SPL)')
    plt.xlabel('Frequency (kHz)')
    plt.title('Frequency Tuning Curve')

    # -- plot frequency tuning raster --
    plt.subplot(gs[0+offset:2+offset, 0:3])
    freqLabels = ["%.1f" % freq for freq in np.unique(freqEachTrial)/1000.0]
    dataplotter.plot_raster(spikeTimestamps, eventOnsetTimes, sortArray=freqEachTrial, timeRange=[-0.1, 0.5], labels=freqLabels)
    plt.xlabel('Time from sound onset (sec)')
    plt.ylabel('Frequency (kHz)')
    plt.title('Frequency Tuning Raster')

    # -- plot AM PSTH --
    amBData = loader.get_session_behavior(cellInfo['behavDirs'][cellInfo['amIndex'][-1]])
    rateEachTrial = amBData['currentFreq']  # modulation rate is stored as 'currentFreq' in AM sessions
    eventData = loader.get_session_events(cellInfo['ephysDirs'][cellInfo['amIndex'][-1]])
    spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][cellInfo['amIndex'][-1]], cellInfo['tetrode'], cluster=cellInfo['cluster'])
    eventOnsetTimes = loader.get_event_onset_times(eventData)
    spikeTimestamps = spikeData.timestamps
    timeRange = [-0.2, 1.5]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)
    colourList = ['b', 'g', 'y', 'orange', 'r']  # one colour per modulation rate
    numRates = np.unique(rateEachTrial)
    trialsEachCond = behavioranalysis.find_trials_each_type(rateEachTrial, numRates)
    plt.subplot(gs[2+offset:4+offset, 3:])
    dataplotter.plot_psth(spikeTimestamps, eventOnsetTimes, rateEachTrial, timeRange = [-0.2, 0.8], binsize = 25, colorEachCond = colourList)
    plt.xlabel('Time from sound onset (sec)')
    plt.ylabel('Firing rate (Hz)')
    plt.title('AM PSTH')

    # -- plot AM raster --
    plt.subplot(gs[0+offset:2+offset, 3:])
    rateLabels = ["%.0f" % rate for rate in np.unique(rateEachTrial)]
    dataplotter.plot_raster(spikeTimestamps, eventOnsetTimes, sortArray=rateEachTrial, timeRange=[-0.2, 0.8], labels=rateLabels, colorEachCond=colourList)
    plt.xlabel('Time from sound onset (sec)')
    plt.ylabel('Modulation Rate (Hz)')
    plt.title('AM Raster')

    # -- plot laser pulse and laser train data (if available) --
    if laser:
        # -- plot laser pulse raster --
        plt.subplot(gs[0:2, 0:3])
        eventData = loader.get_session_events(cellInfo['ephysDirs'][cellInfo['laserIndex'][-1]])
        spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][cellInfo['laserIndex'][-1]], cellInfo['tetrode'], cluster=cellInfo['cluster'])
        eventOnsetTimes = loader.get_event_onset_times(eventData)
        spikeTimestamps = spikeData.timestamps
        timeRange = [-0.1, 0.4]
        dataplotter.plot_raster(spikeTimestamps, eventOnsetTimes, timeRange=timeRange)
        plt.xlabel('Time from sound onset (sec)')
        plt.title('Laser Pulse Raster')
        # -- plot laser pulse psth --
        plt.subplot(gs[2:4, 0:3])
        dataplotter.plot_psth(spikeTimestamps, eventOnsetTimes, timeRange = timeRange, binsize = 10)
        plt.xlabel('Time from sound onset (sec)')
        plt.ylabel('Firing Rate (Hz)')
        plt.title('Laser Pulse PSTH')
        # -- didn't record laser trains for some earlier sessions --
        if len(cellInfo['laserTrainIndex']) > 0:
            # -- plot laser train raster --
            plt.subplot(gs[0:2, 3:])
            eventData = loader.get_session_events(cellInfo['ephysDirs'][cellInfo['laserTrainIndex'][-1]])
            spikeData = loader.get_session_spikes(cellInfo['ephysDirs'][cellInfo['laserTrainIndex'][-1]], cellInfo['tetrode'], cluster=cellInfo['cluster'])
            eventOnsetTimes = loader.get_event_onset_times(eventData)
            spikeTimestamps = spikeData.timestamps
            timeRange = [-0.2, 1.0]
            dataplotter.plot_raster(spikeTimestamps, eventOnsetTimes, timeRange=timeRange)
            plt.xlabel('Time from sound onset (sec)')
            plt.title('Laser Train Raster')
            # -- plot laser train psth --
            plt.subplot(gs[2:4, 3:])
            dataplotter.plot_psth(spikeTimestamps, eventOnsetTimes, timeRange = timeRange, binsize = 10)
            plt.xlabel('Time from sound onset (sec)')
            plt.ylabel('Firing Rate (Hz)')
            plt.title('Laser Train PSTH')

    # -- show cluster analysis --
    tsThisCluster, wavesThisCluster = load_cluster_waveforms(cellInfo)
    # -- Plot ISI histogram --
    plt.subplot(gs[4+offset, 0:2])
    spikesorting.plot_isi_loghist(tsThisCluster)
    plt.ylabel('c%d'%cellInfo['cluster'],rotation=0,va='center',ha='center')
    plt.xlabel('')
    # -- Plot waveforms --
    plt.subplot(gs[4+offset, 2:4])
    spikesorting.plot_waveforms(wavesThisCluster)
    # -- Plot events in time --
    plt.subplot(gs[4+offset, 4:6])
    spikesorting.plot_events_in_time(tsThisCluster)

    plt.suptitle('{0}, {1}, {2}um, Tetrode {3}, Cluster {4}, {5}kHz, {6}Hz modulation'.format(cellInfo['subject'], cellInfo['date'], cellInfo['depth'], cellInfo['tetrode'], cellInfo['cluster'], charfreq, modrate))
    # Save the assembled report figure.
    fig_path = '/home/jarauser/Pictures/cell reports'
    fig_name = '{0}_{1}_{2}um_TT{3}Cluster{4}.png'.format(cellInfo['subject'], cellInfo['date'], cellInfo['depth'], cellInfo['tetrode'], cellInfo['cluster'])
    full_fig_path = os.path.join(fig_path, fig_name)
    fig = plt.gcf()
    fig.set_size_inches(20, 25)
    fig.savefig(full_fig_path, format = 'png', bbox_inches='tight')
oneEvent = eventChannel == eventID #This picks out which channel you care about if there is more that one event eventOnset = multipleEventOnset * oneEvent #This keeps the correct size of the array to match eventTimes and picks out the onset of the channel you want while (numberOfTrials < np.sum(eventOnset)): eventOnset = eventOnset[:-1] eventOnsetTimes = eventTimes[ eventOnset == 1] #This gives only the times of the onset of the channel you want eventOnsetTimesTrials1 = eventOnsetTimes[trialsToUse1 == 1] eventOnsetTimesTrials2 = eventOnsetTimes[trialsToUse2 == 1] # -- Convert spike data into np.array's -- (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes( spkTimeStamps, eventOnsetTimes, timeRange) (spikeTimesFromEventOnsetTrials1, trialIndexForEachSpikeTrials1, indexLimitsEachTrialTrials1) = spikesanalysis.eventlocked_spiketimes( spkTimeStamps, eventOnsetTimesTrials1, timeRange) (spikeTimesFromEventOnsetTrials2, trialIndexForEachSpikeTrials2, indexLimitsEachTrialTrials2) = spikesanalysis.eventlocked_spiketimes( spkTimeStamps, eventOnsetTimesTrials2, timeRange) ''' spikesanalysis.eventlocked_spiketimes Create a vector with the spike timestamps w.r.t. events onset. (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = eventlocked_spiketimes(timeStamps,eventOnsetTimes,timeRange) timeStamps: (np.array) the time of each spike. eventOnsetTimes: (np.array) the time of each instance of the event to lock to. timeRange: (list or np.array) two-element array specifying time-range to extract around event.
# --- Fragment (truncated): this chunk starts inside the branch of an `if`
# whose condition lies outside the visible region; indentation reconstructed. ---
# Placeholder outputs used when no usable tuning data is available.
freqFit = np.zeros(4)   # gaussian fit parameters (unset)
thisRsquared = np.nan
bestFreq = np.nan
tuningWindow = [0, 0]
octavesFromBest = np.nan
bandIndex = np.nan
# NOTE(review): this `else:` pairs with an `if` outside this chunk.
else:
    # Tuning data available: compute spike rates at the highest intensity.
    tuningEventOnsetTimes = get_sound_onset_times(tuningEphysData, 'tuningCurve')
    tuningSpikeTimestamps = tuningEphysData['spikeTimes']
    freqEachTrial = tuningBehavData['currentFreq']
    intensityEachTrial = tuningBehavData['currentIntensity']
    numFreqs = np.unique(freqEachTrial)
    numIntensities = np.unique(intensityEachTrial)
    timeRange = [-0.2, 0.2]  # seconds around sound onset
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        tuningSpikeTimestamps, tuningEventOnsetTimes, timeRange)
    trialsEachType = behavioranalysis.find_trials_each_type(intensityEachTrial, numIntensities)
    trialsHighInt = trialsEachType[:, -1]  # trials at the highest intensity only
    trialsEachComb = behavioranalysis.find_trials_each_combination(
        freqEachTrial, numFreqs, intensityEachTrial, numIntensities)
    trialsEachFreqHighInt = trialsEachComb[:, :, -1]
    # Choose the response window from the high-intensity trials, then count
    # spikes in that window and convert counts to rates (spikes/sec).
    tuningWindow = best_window_freq_tuning(spikeTimesFromEventOnset, indexLimitsEachTrial, trialsEachFreqHighInt)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, tuningWindow)
    tuningSpikeRates = (spikeCountMat[trialsHighInt].flatten()) / (tuningWindow[1] - tuningWindow[0])
# --- Fragment (truncated at the end, mid-`try`): per-intensity spike counts
# for one frequency. Relies on possibleIntensity, intenThisFreq,
# eventsThisFreq, spikeTimes defined before this chunk. ---
baseRange = [-0.1, 0]      # seconds before onset (baseline)
responseRange = [0, 0.1]   # seconds after onset (evoked)
alignmentRange = [baseRange[0], responseRange[1]]
meanSpikesAllInten = np.empty(len(possibleIntensity))
maxSpikesAllInten = np.empty(len(possibleIntensity))
baseSpikesAllInten = np.empty(len(possibleIntensity))
for indInten, inten in enumerate(possibleIntensity):
    # print inten
    trialsThisIntensity = intenThisFreq==inten
    eventsThisCombo = eventsThisFreq[trialsThisIntensity]
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes, eventsThisCombo, alignmentRange)
    # Per-trial counts in the baseline and response windows.
    nspkBase = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange)
    nspkResp = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange)
    spikesThisInten = nspkResp[:,0]
    baselineThisInten = nspkBase[:,0]
    # print spikesThisInten
    # NOTE(review): the try-block (and its except handler) is cut off by the
    # chunk boundary; presumably it guards against empty trial sets.
    try:
        meanSpikesThisInten = np.mean(spikesThisInten)
        meanBaselineSpikesThisInten = np.mean(baselineThisInten)
        maxSpikesThisInten = np.max(spikesThisInten)
#Calculate noiseburst response #TODO: Response to things other than noise as well?? noiseZscore = np.empty(len(db)) noisePval = np.empty(len(db)) baseRange = [-0.2,0] responseRange = [0, 0.2] for indCell, cell in db.iterrows(): spikeData, eventData = dataloader.get_session_ephys(cell, 'noiseburst') if spikeData.timestamps is not None: eventOnsetTimes = eventData.get_event_onset_times() alignmentRange = [baseRange[0], responseRange[1]] (spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeData.timestamps, eventOnsetTimes, alignmentRange) nspkBase = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, baseRange) nspkResp = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset, indexLimitsEachTrial, responseRange) [zScore, pVal] = stats.ranksums(nspkResp,nspkBase) else: zScore=0 pVal=0 noiseZscore[indCell] = zScore noisePval[indCell] = pVal db['noiseZscore'] = noiseZscore db['noisePval'] = noisePval
# -- Load Spike Data From Certain Cluster -- spkData = ephyscore.CellData(oneCell) spkTimeStamps = spkData.spikes.timestamps clusterNumber = (oneCell.tetrode-1)*clusNum+(oneCell.cluster-1) for Freq in possibleFreq: oneFreq = targetFreqs == Freq trialsToUseRight = rightward & oneFreq trialsToUseLeft = leftward & oneFreq #print 'behavior ',behavSession,' tetrode ',oneCell.tetrode,' cluster ',oneCell.cluster,'freq',Freq (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \ spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange) spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,indexLimitsEachTrial,countTimeRange) spikeCountEachTrial = spikeCountMat.flatten() spikeAvgRight = sum(spikeCountEachTrial[trialsToUseRight])/float(sum(trialsToUseRight)) spikeAvgLeft = sum(spikeCountEachTrial[trialsToUseLeft])/float(sum(trialsToUseLeft)) if ((spikeAvgRight + spikeAvgLeft) == 0): modIDict[behavSession][Freq][clusterNumber]=0.0 else: modIDict[behavSession][Freq][clusterNumber]=((spikeAvgRight - spikeAvgLeft)/(spikeAvgRight + spikeAvgLeft)) #print spikeAvgRight,' ', spikeAvgLeft, ' ',modIDict[behavSession][Freq][clusterNumber] except:
def plot_rew_change_byblock_per_cell(oneCell,trialLimit=[],alignment='sound',choiceSide='both'):
    '''
    Plot a raster and PSTH for one cell recorded during the
    reward_change_freq_discrim paradigm, with trials split by behavior block
    and by choice (left/right). Only correct trials are plotted.

    Args:
        oneCell: a CellInfo object (as in celldatabase).
        trialLimit: optional [start, stop] trial indices to restrict plotting;
            empty list (default) means use all trials.
            NOTE(review): mutable default argument -- safe here because it is
            only read, never mutated, but `None` would be the safer idiom.
        alignment: event to align spikes to: 'sound', 'center-out', or 'side-in'.
        choiceSide: 'left', 'right', or 'both' -- which choice trials to plot.

    Returns:
        None. Side effects: creates a new matplotlib figure with raster + PSTH.
    '''
    SAMPLING_RATE=30000.0
    soundTriggerChannel = 0 # channel 0 is the sound presentation, 1 is the trial
    binWidth = 0.010 # Size of each bin in histogram in seconds
    #timeRange = [-0.2,0.8] # In seconds. Time range for rastor plot to plot spikes (around some event onset as 0)
    #timeRange = [-0.25,1.0]
    timeRange = [-0.4,1.2]  # seconds around the alignment event
    bdata = load_behav_per_cell(oneCell)
    (spikeTimestamps,waveforms,eventOnsetTimes,eventData)=load_ephys_per_cell(oneCell)

    # -- Check to see if ephys has skipped trials, if so remove trials from behav data
    soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
    soundOnsetTimeEphys = eventOnsetTimes[soundOnsetEvents]
    soundOnsetTimeBehav = bdata['timeTarget']
    # Find missing trials
    missingTrials = behavioranalysis.find_missing_trials(soundOnsetTimeEphys,soundOnsetTimeBehav)
    # Remove missing trials
    bdata.remove_trials(missingTrials)
    currentBlock = bdata['currentBlock']

    # Valid-trial mask: everything, or only the requested trial range.
    if(not len(trialLimit)):
        validTrials = np.ones(len(currentBlock),dtype=bool)
    else:
        validTrials = np.zeros(len(currentBlock),dtype=bool)
        validTrials[trialLimit[0]:trialLimit[1]] = 1

    bdata.find_trials_each_block()
    trialsEachBlock = bdata.blocks['trialsEachBlock']
    #print trialsEachBlock
    nBlocks = bdata.blocks['nBlocks']
    #blockLabels = ['more_left', 'more_right']
    rightward = bdata['choice']==bdata.labels['choice']['right']
    leftward = bdata['choice']==bdata.labels['choice']['left']
    invalid = bdata['outcome']==bdata.labels['outcome']['invalid']
    correct = bdata['outcome']==bdata.labels['outcome']['correct']
    incorrect = bdata['outcome']==bdata.labels['outcome']['error']

    ######Split left and right trials into correct and incorrect categories to look at error trials#########
    rightcorrect = rightward&correct&validTrials
    leftcorrect = leftward&correct&validTrials
    #righterror = rightward&incorrect&validTrials
    #lefterror = leftward&incorrect&validTrials

    colorEachCond=[]
    ####construct trialsEachCond and colorEachCond for ploting####
    # One (or two, for choiceSide='both') condition column per block; the
    # colour encodes the block type (more_left / more_right / same_reward)
    # combined with the choice side. Block type is read from the first trial
    # of each block.
    for block in range(nBlocks):
        rightcorrectThisBlock = rightcorrect&trialsEachBlock[:,block]
        leftcorrectThisBlock = leftcorrect&trialsEachBlock[:,block]
        #trialTypeVec = leftcorrect*1+rightcorrect*2
        #trialTypePossibleValues = [1,2] #1 stands for left correct, 2 stands for right correct
        firstIndexThisBlock=np.nonzero(trialsEachBlock[:,block])[0][0]
        if currentBlock[firstIndexThisBlock]==bdata.labels['currentBlock']['more_left']:
            if choiceSide=='right':
                colorThisCond='r'
            elif choiceSide=='left':
                colorThisCond='g'
            elif choiceSide=='both':
                colorThisCond=['g','r']
        if currentBlock[firstIndexThisBlock]==bdata.labels['currentBlock']['more_right']:
            if choiceSide=='right':
                colorThisCond='b'
            elif choiceSide=='left':
                colorThisCond='m'
            elif choiceSide=='both':
                colorThisCond=['m','b']
        if currentBlock[firstIndexThisBlock]==bdata.labels['currentBlock']['same_reward']:
            if choiceSide=='right':
                colorThisCond='darkgray'
            elif choiceSide=='left':
                colorThisCond='y'
            elif choiceSide=='both':
                colorThisCond=['y','darkgray']
        #trialsEachTypeEachBlock = behavioranalysis.find_trials_each_type_each_block(trialTypeVec, trialTypePossibleValues,currentBlock,blockTypes)
        # Append this block's condition column(s); np.c_ stacks boolean trial
        # masks column-wise in block order.
        if block==0:
            #trialsEachCond=np.c_[leftcorrectThisBlock,rightcorrectThisBlock]
            if choiceSide=='right':
                trialsEachCond=np.c_[rightcorrectThisBlock]
            elif choiceSide=='left':
                trialsEachCond=np.c_[leftcorrectThisBlock]
            elif choiceSide=='both':
                trialsEachCond=np.c_[leftcorrectThisBlock,rightcorrectThisBlock]
        else:
            if choiceSide=='right':
                trialsEachCond=np.c_[trialsEachCond,rightcorrectThisBlock]
            elif choiceSide=='left':
                trialsEachCond=np.c_[trialsEachCond,leftcorrectThisBlock]
            elif choiceSide=='both':
                trialsEachCond=np.c_[trialsEachCond,leftcorrectThisBlock,rightcorrectThisBlock]
        colorEachCond.append(colorThisCond)

    # -- Select the alignment event. 'center-out' and 'side-in' shift the
    # sound-onset times by the behavioral latency recorded in bdata. --
    if alignment == 'sound':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
    elif alignment == 'center-out':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
        diffTimes=bdata['timeCenterOut']-bdata['timeTarget']
        EventOnsetTimes+=diffTimes
    elif alignment == 'side-in':
        soundOnsetEvents = (eventData.eventID==1) & (eventData.eventChannel==soundTriggerChannel)
        EventOnsetTimes = eventOnsetTimes[soundOnsetEvents]
        diffTimes=bdata['timeSideIn']-bdata['timeTarget']
        EventOnsetTimes+=diffTimes

    freqEachTrial = bdata['targetFrequency']
    possibleFreq = np.unique(freqEachTrial)
    (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \
        spikesanalysis.eventlocked_spiketimes(spikeTimestamps,EventOnsetTimes,timeRange)

    plt.figure()
    ###########Plot raster and PSTH#################
    ax1 = plt.subplot2grid((3,1), (0, 0), rowspan=2)
    pRaster,hcond,zline =extraplots.raster_plot(spikeTimesFromEventOnset,indexLimitsEachTrial,timeRange,trialsEachCond=trialsEachCond,
                                                colorEachCond=colorEachCond,fillWidth=None,labels=None)
    #plt.setp(pRaster, ms=0.8)
    plt.ylabel('Trials')
    plt.xlim(timeRange)
    fig_title='{0}_{1}_TT{2}_c{3}_{4}_{5}'.format(oneCell.animalName,oneCell.behavSession,oneCell.tetrode,oneCell.cluster,alignment,choiceSide)
    plt.title(fig_title)

    # PSTH: bin the event-locked spikes and plot smoothed firing rate per condition.
    timeVec = np.arange(timeRange[0],timeRange[-1],binWidth)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnset,indexLimitsEachTrial,timeVec)
    smoothWinSize = 3
    ax2 = plt.subplot2grid((3,1), (2, 0), sharex=ax1)
    extraplots.plot_psth(spikeCountMat/binWidth,smoothWinSize,timeVec,trialsEachCond=trialsEachCond,
                         colorEachCond=colorEachCond,linestyle=None,linewidth=1.5,downsamplefactor=1)
    plt.xlabel('Time from sound onset (s)')
    plt.ylabel('Firing rate (spk/sec)')
    #plt.show()
    #fig_path=
    #full_fig_path = os.path.join(fig_path, fig_title)
    #print full_fig_path
    #plt.tight_layout()
    plt.gcf().set_size_inches((8.5,11))