def plot(self, nbins=None, rate=False, figsize=(7.5, 6.5)):
    """Plot ISI histogram. rate=True normalizes counts by bin width to plot a rate
    instead of a count"""
    if nbins == None:
        nbins = intround(np.sqrt(len(self.dts))) # good heuristic
    dts = self.dts / 1000 # in ms, converts to float64 array
    trange = self.trange / 1000 # in ms, converts to float64 array
    nbins = max(20, nbins) # enforce min nbins
    nbins = min(200, nbins) # enforce max nbins
    t = np.linspace(start=trange[0], stop=trange[1], num=nbins, endpoint=True)
    n = np.histogram(dts, bins=t, density=False)[0]
    binwidth = t[1] - t[0] # all bins should be equal width
    if rate: # normalize by binwidth and convert to float:
        n = n / float(binwidth)
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.bar(left=t[:-1], height=n, width=binwidth) # omit last right edge in t
    a.set_xlim(t[0], t[-1])
    a.set_xlabel('ISI (ms)')
    if rate:
        a.set_ylabel('spike rate (Hz)')
    else:
        a.set_ylabel('count')
    #a.set_title('n%d spikes relative to n%d spikes' % (self.n1.id, self.n0.id))
    title = lastcmd() + ', binwidth: %.2f ms' % binwidth
    a.set_title(title)
    gcfm().window.setWindowTitle(title)
    f.tight_layout(pad=0.3) # crop figure to contents
    self.f = f
    return self
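# A minimal standalone sketch of the same ISI-histogram idea, for readers without the
# surrounding class: `spiketimes` (in microseconds) and the function name are
# illustrative only, not part of the code above.
import numpy as np
import matplotlib.pyplot as plt

def plot_isi_hist(spiketimes, rate=False):
    """Histogram inter-spike intervals, using the same sqrt(N) bin-count heuristic."""
    dts = np.diff(np.sort(spiketimes)) / 1000.0 # ISIs in ms
    nbins = min(200, max(20, int(round(np.sqrt(len(dts)))))) # clamp to [20, 200]
    edges = np.linspace(dts.min(), dts.max(), nbins)
    n, edges = np.histogram(dts, bins=edges)
    binwidth = edges[1] - edges[0]
    if rate:
        n = n / binwidth # counts per ms instead of raw counts
    plt.bar(edges[:-1], n, width=binwidth, align='edge', color='k')
    plt.xlabel('ISI (ms)')
    plt.ylabel('rate' if rate else 'count')
    plt.show()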
def pospdf(self, neurons=None, dim='y', nbins=10, a=None, stats=False, figsize=(7.5, 6.5)):
    """Plot PDF of cell positions ('x' or 'y') along the polytrode
    to get an idea of how cells are distributed in space"""
    if neurons == 'all':
        neurons = self.alln.values()
    elif neurons == 'quiet':
        neurons = self.qn.values()
    else:
        neurons = self.n.values()
    dimi = {'x':0, 'y':1}[dim]
    pos = [ n.pos[dimi] for n in neurons ] # all position values
    nbins = max(nbins, 2*intround(np.sqrt(self.nneurons)))
    n, p = np.histogram(pos, bins=nbins) # p includes rightmost bin edge
    binwidth = p[1] - p[0] # take width of first bin in p
    if stats:
        # stats of the position values themselves, not of the bin edges:
        mean = np.mean(pos)
        median = np.median(pos)
        argmode = n.argmax()
        mode = p[argmode] + binwidth / 2 # middle of tallest bin
        stdev = np.std(pos)
    if a == None:
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
    else: # add to existing axes
        a.hold(True)
        f = pl.gcf()
    # use CLUSTERCOLOURDICT for familiarity with len 10 1-based id to colour mapping
    #color = CLUSTERCOLOURDICT[int(self.id)]
    color = 'k'
    # exclude rightmost bin edge in p
    a.bar(left=p[:-1], height=n, width=binwidth, bottom=0, color=color, ec=color,
          yerr=None, xerr=None, capsize=3)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    a.set_xlabel('neuron %s position (um)' % dim)
    a.set_ylabel('neuron count')
    if stats:
        # add stuff to top right of plot:
        uns = get_ipython().user_ns
        a.text(0.99, 0.99, 'mean = %.3f\n'
                           'median = %.3f\n'
                           'mode = %.3f\n'
                           'stdev = %.3f\n'
                           'minrate = %.2f Hz\n'
                           'nneurons = %d\n'
                           'dt = %d min'
                           % (mean, median, mode, stdev, uns['MINRATE'], self.nneurons,
                              intround(self.dtmin)),
               transform=a.transAxes,
               horizontalalignment='right',
               verticalalignment='top')
    f.tight_layout(pad=0.3) # crop figure to contents
    f.canvas.draw() # this is needed if a != None when passed as arg
    return a
def mua_si_lfp_si(source, layers=False, ms=1, figsize=(7.5, 6.5)): """Pool recording.mua_si_lfp_si() results across recordings specified by source, plot the result""" uns = get_ipython().user_ns recs, tracks = parse_source(source) lfpsis, muasis = [], [] for rec in recs: print(rec.absname) lfpsi, muasi, t = rec.mua_si_lfp_si(ms=ms, layers=layers, plot=False, plotseries=False, figsize=figsize) lfpsis.append(lfpsi) muasis.append(muasi) lfpsi = np.hstack(lfpsis) muasi = np.hstack(muasis) # plot: f = pl.figure(figsize=figsize) a = f.add_subplot(111) a.plot([-1, 1], [-1, 1], 'e--') # underplot y=x line a.plot(lfpsi, muasi[0], 'e.', ms=ms) if layers: a.plot(lfpsi, muasi[1], 'r.', ms=ms) a.plot(lfpsi, muasi[2], 'g.', ms=ms) a.plot(lfpsi, muasi[3], 'b.', ms=ms) a.set_xlabel('LFP SI (%s)' % uns['LFPSIKIND']) a.set_ylabel('MUA SI (%s)' % uns['MUASIKIND']) a.set_xlim(-1, 1) a.set_ylim(-1, 1) titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) f.tight_layout(pad=0.3) # crop figure to contents
def plot_psth(psthparams, nid, fmt='k-', alpha=0.8, ms=6, mew=2, ymax=None, yticks=None, figsize=(24, 7)): """Plot nid's corresponding PSTH in psthparams""" t, psth, thresh, baseline, peakis, lis, ris = psthparams[nid] pl.figure(figsize=figsize) pl.plot(t, psth, fmt) # plot thresh and baseline levels: pl.axhline(y=thresh, c='r', ls='--') pl.axhline(y=baseline, c='e', ls='--') # mark peaks and their edges: if len(lis) > 0: pl.plot(t[lis], psth[lis], 'g+', alpha=alpha, ms=ms * 1.5, mew=mew) # left edges if len(ris) > 0: pl.plot(t[ris - 1], psth[ris - 1], 'mx', alpha=alpha, ms=ms, mew=mew) # right edges if len(peakis) > 0: pl.plot(t[peakis], psth[peakis], 'ko', alpha=alpha, ms=ms, mec='none') # peaks pl.xlim(xmax=t[-1]) pl.ylim(ymin=0, ymax=ymax) if yticks: pl.yticks(yticks) titlestr = 'n%d, thresh=%g, baseline=%g' % (nid, thresh, baseline) gcfm().window.setWindowTitle(titlestr) pl.gcf().tight_layout(pad=0.3) # crop figure to contents
def __init__(self, odsfname, i=[]):
    print('============================================')
    print('=== Mouse left click: Profiles             ')
    print('=== Mouse right click: Horizontal scatter  ')
    print('============================================')
    self.ods = ods(odsfname)
    self.odsfname = odsfname
    self.fignum = 2
    self.figure = pl.figure(num=1, figsize=(18, 10))
    self.axis = self.figure.add_subplot(111)
    self.tooltip = wx.ToolTip(
        tip='tip with a long %s line and a newline\n' % (' ' * 100))
    gcfm().canvas.SetToolTip(self.tooltip)
    self.tooltip.Enable(False)
    self.tooltip.SetDelay(0)
    self.figure.canvas.mpl_connect('motion_notify_event', self._onMotion)
    self.figure.canvas.mpl_connect('button_press_event', self._onClick)
    self.dataX = np.squeeze(self.ods.lon)
    self.dataY = np.squeeze(self.ods.lat)
    map0 = draw_map(lonl=-180, lonr=180)
    x, y = map0(self.dataX, self.dataY)
    self.X = x
    self.Y = y
    kxi = 0
    for KX in np.unique(self.ods.kx):
        I = np.where(self.ods.kx == KX)
        if KX in (507, 509, 514, 515, 516, 517, 518, 521, 522, 523, 525):
            STD = np.std(self.ods.omf[I])
            self.axis.scatter(x[I], y[I], 5, c=self.ods.omf[I], cmap=cm.bwr,
                              vmin=-2 * STD, vmax=2 * STD, edgecolor=None, lw=0)
        elif KX == 520:
            pass
        else:
            self.axis.plot(x[I], y[I], linestyle='None', marker='.',
                           markersize=3, label='myplot', color=COLORS[kxi])
        kxi += 1
def __init__(self, iodafname): #,i=[]): self.ioda = ioda(iodafname) self.iodafname = iodafname self.fignum = 2 self.figure = pl.figure(num=1, figsize=(18, 10)) self.axis = self.figure.add_subplot(111) self.tooltip = wx.ToolTip( tip='tip with a long %s line and a newline\n' % (' ' * 100)) gcfm().canvas.SetToolTip(self.tooltip) self.tooltip.Enable(False) self.tooltip.SetDelay(0) self.figure.canvas.mpl_connect('motion_notify_event', self._onMotion) self.figure.canvas.mpl_connect('button_press_event', self._onClick) self.dataX = np.squeeze(self.ioda.lon) self.dataY = np.squeeze(self.ioda.lat) map0 = draw_map(lonl=-180, lonr=180) x, y = map0(self.dataX, self.dataY) self.X = x self.Y = y cnt = 0 alpha = 1.0 for inst in [508, 517, 520, 521, 525]: msize = 1.0 if (inst == 520): alpha = 0.5 msize = 1.0 elif (inst == 521): alpha = 0.5 msize = 1.0 elif (inst == 525): alpha = 0.5 msize = 1.0 elif (inst == 517): alpha = 1.0 msize = 2.0 else: alpha = 1.0 msize = 5.0 # Plot obs loc #I=np.where( np.logical_and( (self.ioda.instid==inst), (self.ioda.postqc==0), (self.ioda.preqc==0) ) ) #I=np.where( self.ioda.instid==inst ) #print('++++++++++++++ ',x) #I=np.where( (self.ioda.postqc==0) ) #I=np.where( np.logical_and( (self.ioda.instid==inst) , (self.ioda.postqc==0) )) I = np.where(self.ioda.instid == inst) #I=np.where(self.ioda.postqc==0) self.axis.plot(x[I], y[I], linestyle='None', marker='.', markersize=msize, label='myplot', color=COLORS[cnt], alpha=alpha) cnt += 1
def plot_psd(datas, cs=None, ylims=None, titlestr=''): f = figure(figsize=figsize) for data, c in zip(datas, cs): data = filter.notch(data)[ 0] # remove 60 Hz mains noise, as for SI calc # convert data from uV to mV. I think P is in mV^2?: P, freqs = mpl.mlab.psd(data / 1e3, NFFT=NFFT, Fs=SAMPFREQ, noverlap=NOVERLAP) # keep only freqs between F0 and F1: f0, f1 = F0, F1 # need to set different local names, since they're not read-only if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where( P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo( np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? # for better visualization, clip power values to within (P0, P1) dB if P0 != None: P[P < P0] = P0 if P1 != None: P[P > P1] = P1 plot(freqs, P, c=c, ls='-', marker=None) # demarcate SI power ratio frequency bands with horizontal lines: hlines(y=-50, xmin=LFPPRLOBAND[0], xmax=LFPPRLOBAND[1], colors='e', linestyles='-', lw=5) hlines(y=-30, xmin=LFPPRHIBAND[0], xmax=LFPPRHIBAND[1], colors='e', linestyles='-', lw=5) axis('tight') xscale(XSCALE) ylim(ylims) xlabel("frequency (Hz)") ylabel("power (dB)") gcfm().window.setWindowTitle(titlestr + ' ' + XSCALE) f.tight_layout(pad=0.3) # crop figure to contents
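# The zero-power replacement used above, as a standalone sketch: gaps in the recording
# can leave bins with P == 0, which would become -inf after 10*log10(P), so zeros are
# swapped for the smallest nonzero power first. `P` here is any 1D power array.
import numpy as np

def power_to_db(P):
    P = np.asarray(P, dtype=np.float64).copy()
    zis = P == 0.0 # boolean mask of zero-power bins
    if zis.any() and (~zis).any():
        P[zis] = P[~zis].min() # replace zeros with the minimum nonzero power
    return 10. * np.log10(P) # power in dB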
def scstim(self, method='mean', width=None, tres=None, figsize=(7.5, 6.5)): """Scatter plot some summary statistic of spike correlations of each recording, classified by the stimulus group each recording falls into. width and tres dictate tranges to split recordings up into, if any""" ## TODO: for each pair of recordings, find common subset of active neurons and calculate ## pairwise corrs for each recording in that pair using just those neurons ## TODO: maybe limit to visually responsive cells uns = get_ipython().user_ns if width == None: width = uns['SCWIDTH'] if tres == None: tres = width blankmseqrids = uns['BSRIDS'][self.absname] + uns['MSRIDS'][self.absname] movdriftrids = uns['NSRIDS'][self.absname] + uns['DBRIDS'][self.absname] blankmseqcorrs = [] movdriftcorrs = [] for rid in (blankmseqrids + movdriftrids): r = self.r[rid] print('%s: %s' % (r.absname, r.name)) spikecorr = r.sc(width=width, tres=tres) sc = spikecorr.sct(method=method)[0] sc = sc[0] # pull out the spike correlation values that span all laminae if rid in blankmseqrids: blankmseqcorrs.append(sc) else: movdriftcorrs.append(sc) blankmseqcorrs = np.hstack(blankmseqcorrs) movdriftcorrs = np.hstack(movdriftcorrs) # repeat each element in blankmseqcorrs len(movdriftcorrs) times: x = np.repeat(blankmseqcorrs, len(movdriftcorrs)) # tile movdriftcorrs len(blankmseqcorrs) times: y = np.tile(movdriftcorrs, len(blankmseqcorrs)) f = pl.figure(figsize=figsize) a = f.add_subplot(111) lim = min([x.min(), y.min(), 0]), max([x.max(), y.max()]) a.plot(lim, lim, c='e', ls='--', marker=None) # y=x line a.plot(x, y, 'k.') #a.set_xlim(lim) #a.set_ylim(lim) a.set_xlabel('%s spike correlations: blankscreen and mseq' % method) a.set_ylabel('%s spike correlations: movie and drift bar' % method) titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) f.tight_layout(pad=0.3) # crop figure to contents f.show()
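# The np.repeat/np.tile trick above pairs every element of one vector with every element
# of the other, so the scatter plot shows all combinations. A tiny demonstration:
import numpy as np
a = np.array([1, 2, 3])  # e.g. blankmseqcorrs
b = np.array([10, 20])   # e.g. movdriftcorrs
x = np.repeat(a, len(b)) # [ 1,  1,  2,  2,  3,  3]
y = np.tile(b, len(a))   # [10, 20, 10, 20, 10, 20]
# zip(x, y) now enumerates all len(a) * len(b) pairs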
def __init__(self):
    self.figure = pl.figure()
    self.axis = self.figure.add_subplot(111)
    # create a long tooltip with newline to get around wx bug (in v2.6.3.3)
    # where newlines aren't recognized on subsequent self.tooltip.SetTip() calls
    self.tooltip = wx.ToolTip(tip="tip with a long %s line and a newline\n" % (" " * 100))
    gcfm().canvas.SetToolTip(self.tooltip)
    self.tooltip.Enable(False)
    self.tooltip.SetDelay(0)
    self.figure.canvas.mpl_connect("motion_notify_event", self._onMotion)
    self.dataX = np.arange(0, 100)
    # range instead of Python 2-only xrange:
    self.dataY = [random.random() * 100.0 for x in range(len(self.dataX))]
    self.axis.plot(self.dataX, self.dataY, linestyle="-", marker="o", markersize=10,
                   label="myplot")
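# A rough sketch of what a motion handler like self._onMotion above typically does:
# find the plotted point nearest the cursor and report it. The real handler would
# update the wx tooltip; print() is used here as a stand-in, and the names are
# illustrative only.
import numpy as np

def on_motion(event, xs, ys):
    """Report the data point nearest the cursor; xs, ys are the plotted coordinates."""
    if event.inaxes is None or event.xdata is None:
        return # cursor is outside the axes
    i = int(np.argmin(np.hypot(np.asarray(xs) - event.xdata,
                               np.asarray(ys) - event.ydata)))
    print('nearest point: x=%g, y=%g' % (xs[i], ys[i]))

# hooked up roughly like:
# figure.canvas.mpl_connect('motion_notify_event', lambda ev: on_motion(ev, dataX, dataY))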
def __init__(self): #Setting properties of graph self.figure = pl.figure() self.axis = self.figure.add_subplot(111) self.tooltip = wx.ToolTip(tip='') gcfm().canvas.SetToolTip(self.tooltip) self.tooltip.Enable(False) self.tooltip.SetDelay(0) self.figure.canvas.mpl_connect('button_press_event', self._onClick) self.figure.canvas.mpl_connect('motion_notify_event', self._onMotion) self.dataX = range(1,53) self.dataY = average self.axis.set_xlabel('Weeks') self.axis.set_ylabel('Ratings') self.axis.plot(self.dataX, self.dataY, linestyle='-', marker='o', markersize=10, label='ratingsPlot')
def meanratepdf(self, bins=None, figsize=(7.5, 6.5)): """Plot histogram of mean firing rates""" f = pl.figure(figsize=figsize) a = f.add_subplot(111) if bins == None: bins = np.arange(0, 1, 0.05) n, mr = np.histogram(self.meanrates, bins=bins, density=False) binwidth = mr[1] - mr[0] # take width of first bin a.bar(left=mr[:-1], height=n, width=binwidth, bottom=0, color='k', ec='k') titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) a.set_xlabel('mean firing rate (Hz)') a.set_ylabel('neuron count') f.tight_layout(pad=0.3) # crop figure to contents
def sc_ising_vs_cch(source, ms=5, figsize=(7.5, 6.5)):
    """Scatter plot spike corrs calculated from Ising matrix against those calculated
    from CCH. INCOMPLETE.
    - find tracks in common, get allnids from each track
    - how to deal with big time gaps between experiments in a single recording?
      I constrain to the set of tranges of each experiment in rec.codes()
    - maybe I can convert the core.SpikeCorr object to take a source argument instead
      of recording/experiment objects
    - do all the spikecorr analyses make sense for multiple recordings, or for
      recordings from different tracks?
    - for each track absname
    """
    # for each rec, find out which track it's from:
    recs, tracks = parse_source(source)
    isingscs, cchscs = [], []
    for rec in recs:
        print(rec.absname)
        sc = rec.sc()
        sc.calc()
        isingscs.append(sc.corrs)
        cchscs.append(rec.sc_cch())
    isingsc = np.hstack(isingscs)
    cchsc = np.hstack(cchscs)
    # plot:
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.plot(isingsc, cchsc, 'e.', ms=ms)
    a.set_xlabel('Ising spike corrs')
    a.set_ylabel('CCH spike corrs')
    a.set_xlim(-0.05, 0.2)
    a.set_ylim(-0.5, 1)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    f.tight_layout(pad=0.3) # crop figure to contents
def std(self, t0=None, t1=None, chani=-1, width=None, tres=None, fmt='k-', title=True, figsize=(20, 3.5)): """Plot standard deviation of LFP signal from t0 to t1 on chani, using bins of width and tres""" uns = get_ipython().user_ns self.get_data() data = self.data[chani] ts = self.get_tssec() if t0 == None: t0 = ts[0] if t1 == None: t1 = ts[-1] if width == None: width = uns['LFPSIWIDTH'] # sec if tres == None: tres = uns['LFPSITRES'] # sec tranges = split_tranges([(t0, t1)], width, tres) stds = [] for trange in tranges: ti0, ti1 = ts.searchsorted(trange) stds.append(data[ti0:ti1].std()) stds = np.hstack(stds) f = pl.figure(figsize=figsize) a = f.add_subplot(111) a.plot(tranges[:, 0], stds, fmt) a.autoscale(enable=True, tight=True) a.set_xlim(xmin=0) # ADC clock starts at t=0 a.set_xlabel('time (s)') a.set_ylabel('LFP $\sigma$ ($\mu$V)') titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) if title: a.set_title(titlestr) f.tight_layout(pad=0.3) self.f = f return stds
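# split_tranges() is a project helper; assuming it yields windows of length `width`
# spaced every `tres` seconds, the binned-sigma calculation above boils down to this
# standalone sketch (ts in seconds, sorted; data in uV):
import numpy as np

def binned_std(ts, data, width=2.0, tres=0.5):
    t0s = np.arange(ts[0], ts[-1] - width, tres) # window start times
    stds = []
    for t0 in t0s:
        i0, i1 = ts.searchsorted([t0, t0 + width])
        stds.append(data[i0:i1].std())
    return t0s, np.array(stds)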
def __init__(self, m, ctrlStatus, labls, colors, Td=False):
    self.figure = pl.figure()
    self.axis = self.figure.add_subplot(111) if not Td else \
                self.figure.add_subplot(111, projection='3d') # WARNING: does not work in 3D
    # create a long tooltip with newline to get around wx bug (in v2.6.3.3)
    # where newlines aren't recognized on subsequent self.tooltip.SetTip() calls
    self.tooltip = wx.ToolTip(tip='No points here\n')
    gcfm().canvas.SetToolTip(self.tooltip)
    self.tooltip.Enable(False)
    self.tooltip.SetDelay(0)
    self.figure.canvas.mpl_connect('motion_notify_event', self._onMotion)
    self.dataX = m[:, 0]
    self.dataY = m[:, 1]
    self.dataZ = m[:, 2]
    self.text = labls
    if not Td:
        self.axis.scatter(self.dataX, self.dataY, color=colors, label='myplot')
    else:
        self.axis.scatter(self.dataX, self.dataY, self.dataZ, color=colors, label='myplot')
def __init__(self, dataY, dataFrame, plotType):
    import matplotlib
    matplotlib.use("WXAgg") # select the wx backend
    matplotlib.interactive(False)
    self.plotType = plotType
    self.dataFrame = dataFrame
    self.figure = pl.figure()
    self.axis = self.figure.add_subplot(111)
    # create a long tooltip with newline to get around wx bug (in v2.6.3.3)
    # where newlines aren't recognized on subsequent self.tooltip.SetTip() calls
    self.tooltip = wx.ToolTip(tip="tip with a long %s line and a newline\n" % (" " * 100))
    gcfm().canvas.SetToolTip(self.tooltip)
    self.tooltip.Enable(False)
    self.tooltip.SetDelay(0)
    self.figure.canvas.mpl_connect("motion_notify_event", self._onMotion)
    self.dataX = range(len(dataY))
    self.dataY = dataY
    self.xTicks = dataFrame.index
    pl.xticks(self.dataX, self.xTicks)
    self.axis.plot(self.dataX, self.dataY, linestyle="-", marker="o", markersize=15,
                   label="myplot")
def plot_psd(datas, cs=None, ylims=None, titlestr=''): f = figure(figsize=figsize) for data, c in zip(datas, cs): data = filter.notch(data)[0] # remove 60 Hz mains noise, as for SI calc # convert data from uV to mV. I think P is in mV^2?: P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=SAMPFREQ, noverlap=NOVERLAP) # keep only freqs between F0 and F1: f0, f1 = F0, F1 # need to set different local names, since they're not read-only if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where(P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? # for better visualization, clip power values to within (P0, P1) dB if P0 != None: P[P < P0] = P0 if P1 != None: P[P > P1] = P1 plot(freqs, P, c=c, ls='-', marker=None) # demarcate SI frequency bands with horizontal lines: hlines(y=-50, xmin=LFPSILOWBAND[0], xmax=LFPSILOWBAND[1], colors='e', linestyles='-', lw=5) hlines(y=-30, xmin=LFPSIHIGHBAND[0], xmax=LFPSIHIGHBAND[1], colors='e', linestyles='-', lw=5) axis('tight') xscale(XSCALE) ylim(ylims) xlabel("frequency (Hz)") ylabel("power (dB)") gcfm().window.setWindowTitle(titlestr+' '+XSCALE) f.tight_layout(pad=0.3) # crop figure to contents
def plot_psd(data, titlestr): data = filter.notch(data)[0] # remove 60 Hz mains noise, as for SI calc # convert data from uV to mV. I think P is in mV^2?: P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=SAMPFREQ, noverlap=NOVERLAP) # keep only freqs between F0 and F1: f0, f1 = F0, F1 # need to set different local names, since they're not read-only if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where(P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? # for better visualization, clip power values to within (P0, P1) dB if P0 != None: P[P < P0] = P0 if P1 != None: P[P > P1] = P1 f = pl.figure(figsize=figsize) a = f.add_subplot(111) a.plot(freqs, P, 'k-') # add SI frequency band limits: a.axvline(x=LFPSILOWBAND[0], c='r', ls='--') a.axvline(x=LFPSILOWBAND[1], c='r', ls='--') a.axvline(x=LFPSIHIGHBAND[0], c='b', ls='--') a.axvline(x=LFPSIHIGHBAND[1], c='b', ls='--') a.axis('tight') a.set_xscale(XSCALE) a.set_ylim(ymin=P[-1]) # use last power value to set ymin a.set_xlabel("frequency (Hz)") a.set_ylabel("power (dB)") gcfm().window.setWindowTitle(titlestr+' '+XSCALE) f.tight_layout(pad=0.3) # crop figure to contents
def si_plot(self, t, si, t0=None, t1=None, xlim=None, ylim=None, ylabel=None, showxlabel=True, showylabel=True, showtitle=True, title=None, showtext=True, text=None, hlines=[0], states=False, desynchsi=0.2, synchsi=0.9, lw=4, alpha=1, relative2t0=False, swapaxes=False, figsize=(20, 6.5)): """Plot synchrony index as a function of time, with hopefully the same temporal scale as some of the other plots in self""" uns = get_ipython().user_ns if figsize == None: f = pl.gcf() a = pl.gca() else: f = pl.figure(figsize=figsize) a = f.add_subplot(111) xlabel = "time (s)" if ylabel == None: ylabel = "synchrony index (AU?)" if swapaxes: t, si = si, t # swap t and si xlim, ylim = ylim, xlim ylim = ylim[1], ylim[0] # swap new ylimits so t=0 is at top xlabel, ylabel = ylabel, xlabel # swap labels showxlabel, showylabel = showylabel, showxlabel # swap flags # underplot vertical lines: for hline in hlines: a.axvline(x=hline, c='e', ls='--', marker=None) else: # underplot horizontal lines: for hline in hlines: a.axhline(y=hline, c='e', ls='--', marker=None) # plot lines over time demarcating desynched and synched periods: REC2STATETRANGES = uns['REC2STATETRANGES'] if states: dtrange, strange = np.asarray(REC2STATETRANGES[self.r.absname]) / 1e6 dtrange = max(dtrange[0], t0), min(dtrange[1], t1) # clip desynch trange to t0, t1 strange = max(strange[0], t0), min(strange[1], t1) # clip synch trange to t0, t1 if relative2t0: dtrange = dtrange - t0 strange = strange - t0 if swapaxes: si0, si1 = xlim tlines = a.vlines else: si0, si1 = ylim tlines = a.hlines dsi = abs(si1 - si0) tlines(si0+dsi*desynchsi, dtrange[0], dtrange[1], colors='b', lw=lw, alpha=alpha) tlines(si0+dsi*synchsi, strange[0], strange[1], colors='r', lw=lw, alpha=alpha) a.plot(t, si, 'k-') # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started: a.set_xlim(xlim) # low/high limits are unchanged if None a.set_ylim(ylim) if showxlabel: a.set_xlabel(xlabel) if showylabel: a.set_ylabel(ylabel) #a.autoscale(axis='x', enable=True, tight=True) # turn off annoying "+2.41e3" type offset on x axis: formatter = mpl.ticker.ScalarFormatter(useOffset=False) a.xaxis.set_major_formatter(formatter) if title: gcfm().window.setWindowTitle(title) if showtitle: a.set_title(title) if showtext: a.text(0.998, 0.01, '%s' % text, color='k', transform=a.transAxes, horizontalalignment='right', verticalalignment='bottom') f.tight_layout(pad=0.3) # crop figure to contents
def scsistim(self, method='mean', width=None, tres=None, timeaverage=False, plottime=False, s=5, figsize=(7.5, 6.5)): """Scatter plot some summary statistic of spike correlations of each recording vs LFP synchrony index SI. Colour each point according to stimulus type. width and tres (sec) dictate tranges to split recordings up into. timeaverage averages across time values of both sc and si for each recording. s is point size""" ## TODO: maybe limit to visually responsive cells ## TODO: add linear regression of si vs log(sc) uns = get_ipython().user_ns if width == None: width = uns['LFPSIWIDTH'] if tres == None: tres = width bsrids = uns['BSRIDS'][self.absname] msrids = uns['MSRIDS'][self.absname] mvrids = uns['NSRIDS'][self.absname] dbrids = uns['DBRIDS'][self.absname] rids = sorted(bsrids + msrids + mvrids + dbrids) # do everything in rid order print('blankscreen: %r' % [self.r[rid].name for rid in bsrids]) print('mseq: %r' % [self.r[rid].name for rid in msrids]) print('movie: %r' % [self.r[rid].name for rid in mvrids]) print('driftbar: %r' % [self.r[rid].name for rid in dbrids]) isect = core.intersect1d([msrids, bsrids, mvrids, dbrids]) if len(isect) != 0: raise RuntimeError("some rids were classified into more than one type: %r" % isect) scs, sis, c = [], [], [] for rid in rids: r = self.r[rid] print('%s: %s' % (r.absname, r.name)) spikecorr = r.sc(width=width, tres=tres) """ TODO: not sure if this is the right way to do this. A different set of neurons for each recording are chosen, then mean sc(t) across all pairs for each recording is found, and pooled across recordings. This pooling is maybe a bit dodgy. Is it valid to pool sc(t) values across recordings when the included neurons are different for each recording? The alternative is to deal only with neurons which exceed MINTHRESH track-wide, but the problem with that is that for much of the time, such neurons are completely silent, and therefore don't deserve to be included in sc calculations for those durations. 
""" sc, si = spikecorr.si(method=method, plot=False) # calls sc.sct() and sc.si() sc = sc[0] # pull out the spike correlation values that span all laminae if timeaverage: # average across all time values of sc and si to get a single coordinate # per recording sc = sc.mean() si = si.mean() scs.append(sc) sis.append(si) if rid in bsrids: color = 'e' elif rid in msrids: color = 'k' elif rid in mvrids: color = 'r' elif rid in dbrids: color = 'b' else: raise ValueError("unclassified recording: %r" % r.name) c.append(np.tile(color, len(sc))) scs = np.hstack(scs) sis = np.hstack(sis) c = np.hstack(c) f = pl.figure(figsize=figsize) a = f.add_subplot(111) if plottime: # underplot lines connecting points adjacent in time a.plot(scs, sis, 'e--') a.scatter(scs, sis, c=c, edgecolors='none', s=s) a.set_ylim(0, 1) a.set_xlabel('%s spike correlations' % method) a.set_ylabel('synchrony index') titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) # make proxy line artists for legend: bs = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='e', mec='e') ms = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='k', mec='k') mv = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='r', mec='r') db = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='b', mec='b') # add legend: a.legend([bs, ms, mv, db], ['blank screen', 'mseq', 'movie', 'drift bar'], numpoints=1, loc='lower right', handlelength=1, handletextpad=0.5, labelspacing=0.1) f.tight_layout(pad=0.3) # crop figure to contents return scs, sis, c
def plot(self, t0=None, t1=None, chanis=None, gain=1, c='k', alpha=1.0, yunits='um', yticks=None, title=True, xlabel=True, relative2t0=False, lim2stim=False, scalebar=True, lw=4, figsize=(20, 6.5)): """Plot chanis of LFP data between t0 and t1 in sec. Unfortunatley, setting an alpha < 1 doesn't seem to reveal detail when a line obscures itself, such as when plotting a very long time series. relative2t0 controls whether to plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din. If only one chan is requested, it's plotted on a mV scale instead of a spatial scale.""" self.get_data() ts = self.get_tssec() # full set of timestamps, in sec if t0 == None: t0, t1 = ts[0], ts[-1] if t1 == None: t1 = t0 + 10 # 10 sec window if chanis == None: chanis = range(len(self.chans)) # all chans if lim2stim: t0, t1 = self.apply_lim2stim(t0, t1) t0i, t1i = ts.searchsorted((t0, t1)) ts = ts[t0i:t1i] # constrained set of timestamps, in sec chanis = tolist(chanis) nchans = len(chanis) # grab desired channels and time range: data = self.data[chanis][:, t0i:t1i] if nchans > 1: # convert uV to um: totalgain = self.UV2UM * gain data = data * totalgain else: # convert uV to mV: data = data / 1000 yunits = 'mV' nt = len(ts) assert nt == data.shape[1] if relative2t0: # convert ts to time from t0, otherwise plot time from start of ADC clock: ts -= t0 x = np.tile(ts, nchans) x.shape = nchans, nt segments = np.zeros((nchans, nt, 2)) # x vals in col 0, yvals in col 1 segments[:, :, 0] = x if nchans > 1: segments[:, :, 1] = -data # set to -ve here because of invert_yaxis() below else: segments[:, :, 1] = data if nchans > 1: # add y offsets: maxypos = 0 for chanii, chani in enumerate(chanis): chan = self.chans[chani] ypos = self.chanpos[chan][1] # in um segments[chanii, :, 1] += ypos # vertical distance below top of probe maxypos = max(maxypos, ypos) if yunits == 'mm': # convert from um to mm segments[:, :, 1] /= 1000 maxypos = maxypos / 1000 # convert from int to float totalgain = totalgain / 1000 lc = LineCollection(segments, linewidth=1, linestyle='-', colors=c, alpha=alpha, antialiased=True, visible=True) f = pl.figure(figsize=figsize) a = f.add_subplot(111) a.add_collection(lc) # add to axes' pool of LCs if scalebar: # add vertical scale bar at end of last channel to represent 1 mV: if nchans > 1: ymin, ymax = maxypos - 500 * totalgain, maxypos + 500 * totalgain # +/- 0.5 mV else: ymin, ymax = -0.5, 0.5 # mV a.vlines(ts.max() * 0.99, ymin, ymax, lw=lw, colors='e') a.autoscale(enable=True, tight=True) # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started: a.set_xlim(xmin=0) if nchans > 1: a.invert_yaxis() # for spatial scale if yticks != None: a.set_yticks(yticks) # turn off annoying "+2.41e3" type offset on x axis: formatter = mpl.ticker.ScalarFormatter(useOffset=False) a.xaxis.set_major_formatter(formatter) if xlabel: a.set_xlabel("time (s)") if yunits == 'um': a.set_ylabel("depth ($\mu$m)") elif yunits == 'mm': a.set_ylabel("depth (mm)") elif yunits == 'mV': a.set_ylabel("LFP (mV)") titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) if title: a.set_title(titlestr) a.text(0.998, 0.99, '%s' % self.r.name, transform=a.transAxes, horizontalalignment='right', verticalalignment='top') f.tight_layout(pad=0.3) # crop figure to contents self.f = f return self
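# A minimal sketch of the LineCollection layout used above: each channel's trace is
# offset vertically by its probe depth, and the y axis is inverted so depth increases
# downward. The channel depths and data here are fabricated for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

ts = np.linspace(0, 1, 1000)                       # time in s
depths = np.array([0, 100, 200, 300])              # channel depths in um
data = np.random.randn(len(depths), len(ts)) * 20  # fake LFP, already scaled to um
segments = np.zeros((len(depths), len(ts), 2))     # x vals in col 0, y vals in col 1
segments[:, :, 0] = ts
segments[:, :, 1] = -data + depths[:, None]        # negate, then offset by depth
fig, ax = plt.subplots()
ax.add_collection(LineCollection(segments, colors='k', linewidth=1))
ax.autoscale(enable=True, tight=True)
ax.invert_yaxis()                                  # superficial channels at top
ax.set_xlabel('time (s)')
ax.set_ylabel('depth ($\\mu$m)')
plt.show()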
sis = np.hstack(sis) Vpps = np.hstack(Vpps) / 1e3 # convert from uV to mV stds = np.hstack(stds) figure(figsize=figsize) #plot(Vpps, sis, 'k.', ms=4, alpha=ALPHA) Vppedges = np.arange(0.25, VPPMAX+0.25, 0.25) # mV Vppmeans, sismeans, sisstds = scatterbin(Vpps, sis, Vppedges, xaverage=None, yaverage=np.mean) errorbar(Vppmeans, sismeans, yerr=sisstds, fmt='k.-', ms=6, lw=1, zorder=9999) xlim(xmin=0, xmax=VPPMAX) ylim(0.2, 1) yticks([0.2, 0.4, 0.6, 0.8, 1]) xlabel('LFP $V_{pp}$ (mV)') ylabel('SI (L/(L+H))') gcfm().window.setWindowTitle('SI vs Vpp lfpwidth=%g lfptres=%g' % (lfpwidth, lfptres)) tight_layout(pad=0.3) figure(figsize=figsize) #plot(stds, sis, 'k.', ms=4, alpha=ALPHA) stdedges = np.arange(25, STDMAX+25, 25) # uV stdmeans, sismeans, sisstds = scatterbin(stds, sis, stdedges, xaverage=None, yaverage=np.mean) errorbar(stdmeans, sismeans, yerr=sisstds, fmt='k.-', ms=6, lw=1, zorder=9999) xlim(xmin=0, xmax=STDMAX) ylim(0.2, 1) yticks([0.2, 0.4, 0.6, 0.8, 1]) xlabel('LFP $\sigma$ ($\mu$V)') ylabel('SI (L/(L+H))') gcfm().window.setWindowTitle('SI vs sigma lfpwidth=%g lfptres=%g' % (lfpwidth, lfptres)) tight_layout(pad=0.3)
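# scatterbin() is a project helper; assuming it bins the x values at the given edges and
# averages the y values falling in each bin, the errorbar points plotted above come from
# something like this standalone sketch:
import numpy as np

def scatterbin_sketch(x, y, edges):
    """Return bin centres, and mean and std of y within each x bin."""
    bis = np.digitize(x, edges) - 1 # 0-based bin index of each x value
    centres = (edges[:-1] + edges[1:]) / 2
    ymeans, ystds = [], []
    for i in range(len(centres)):
        yi = y[bis == i]
        ymeans.append(yi.mean() if len(yi) else np.nan)
        ystds.append(yi.std() if len(yi) else np.nan)
    return centres, np.array(ymeans), np.array(ystds)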
"""Create a SpikeCorr.shifts() plot for lots of recordings. Run by calling `%run -i scripts/shifts.py` within neuropy""" from pylab import get_current_fig_manager as gcfm from animal import Animal try: ptc22; except NameError: ptc22 = Animal('/home/mspacek/data/ptc22') ptc22.load('tr1') for i in range(4, 23): s = 'ptc22.tr1.r%02d.sc().shifts(-12000, 12000, 50)' % i exec(s) title(s) gcfm().window.setWindowTitle(s) print(s) show() ptc22.load('tr2') for i in range(23, 37): s = 'ptc22.tr2.r%02d.sc().shifts(-12000, 12000, 50)' % i exec(s) title(s) gcfm().window.setWindowTitle(s) print(s) show()
def psd(self, t0=None, t1=None, f0=0.2, f1=110, p0=None, p1=None, chanis=-1, width=None, tres=None, xscale='log', figsize=(5, 5)): """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of specified chanis. width and tres are in sec.""" uns = get_ipython().user_ns self.get_data() ts = self.get_tssec() # full set of timestamps, in sec if t0 == None: t0, t1 = ts[0], ts[-1] # full duration if t1 == None: t1 = t0 + 10 # 10 sec window if width == None: width = uns['LFPSPECGRAMWIDTH'] # sec if tres == None: tres = uns['LFPSPECGRAMTRES'] # sec assert tres <= width NFFT = intround(width * self.sampfreq) noverlap = intround(NFFT - tres * self.sampfreq) t0i, t1i = ts.searchsorted((t0, t1)) #ts = ts[t0i:t1i] # constrained set of timestamps, in sec data = self.data[:, t0i:t1i] # slice data f = pl.figure(figsize=figsize) a = f.add_subplot(111) if iterable(chanis): data = data[chanis].mean(axis=0) # take mean of data on chanis else: data = data[chanis] # get single row of data at chanis #data = filter.notch(data)[0] # remove 60 Hz mains noise # convert data from uV to mV. I think P is in mV^2?: P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap) # keep only freqs between f0 and f1: if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where(P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? # for better visualization, clip power values to within (p0, p1) dB if p0 != None: P[P < p0] = p0 if p1 != None: P[P > p1] = p1 #self.P = P a.plot(freqs, P, 'k-') # add SI frequency band limits: LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND'] a.axvline(x=LFPPRLOBAND[0], c='r', ls='--') a.axvline(x=LFPPRLOBAND[1], c='r', ls='--') a.axvline(x=LFPPRHIBAND[0], c='b', ls='--') a.axvline(x=LFPPRHIBAND[1], c='b', ls='--') a.axis('tight') a.set_xscale(xscale) a.set_xlabel("frequency (Hz)") a.set_ylabel("power (dB)") titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) a.text(0.998, 0.99, '%s' % self.r.name, color='k', transform=a.transAxes, horizontalalignment='right', verticalalignment='top') f.tight_layout(pad=0.3) # crop figure to contents self.f = f return P, freqs
def specgram(self, t0=None, t1=None, f0=0.1, f1=100, p0=-60, p1=None, chanis=-1, width=None, tres=None, cm='jet', colorbar=False, showstates=False, lw=4, alpha=1, relative2t0=False, lim2stim=False, title=True, reclabel=True, swapaxes=False, figsize=None): """Plot a spectrogram from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of specified chanis. width and tres are in sec. As an alternative to cm.jet (the default), cm.gray, cm.hsv cm.terrain, and cm.cubehelix_r colormaps seem to bring out the most structure in the spectrogram. showstates controls whether to plot lines demarcating desynchronized and synchronized periods. relative2t0 controls whether to plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din""" uns = get_ipython().user_ns self.get_data() ts = self.get_tssec() # full set of timestamps, in sec if t0 == None: t0, t1 = ts[0], ts[-1] # full duration if t1 == None: t1 = t0 + 10 # 10 sec window if lim2stim: t0, t1 = self.apply_lim2stim(t0, t1) dt = t1 - t0 if width == None: width = uns['LFPSPECGRAMWIDTH'] # sec if tres == None: tres = uns['LFPSPECGRAMTRES'] # sec assert tres <= width NFFT = intround(width * self.sampfreq) noverlap = intround(NFFT - tres * self.sampfreq) t0i, t1i = ts.searchsorted((t0, t1)) #ts = ts[t0i:t1i] # constrained set of timestamps, in sec data = self.data[:, t0i:t1i] # slice data if figsize == None: # convert from recording duration time to width in inches, 0.87 accommodates # padding around the specgram: figwidth = (dt / 1000) * 5 + 0.87 figheight = 2.5 # inches figsize = figwidth, figheight f = pl.figure(figsize=figsize) a = f.add_subplot(111) if iterable(chanis): data = data[chanis].mean(axis=0) # take mean of data on chanis else: data = data[chanis] # get single row of data at chanis #data = filter.notch(data)[0] # remove 60 Hz mains noise # convert data from uV to mV, returned t is midpoints of time bins in sec from # start of data. I think P is in mV^2?: P, freqs, t = mpl.mlab.specgram(data/1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap) if not relative2t0: t += t0 # convert t to time from start of ADC clock: # keep only freqs between f0 and f1: if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] df = f1 - f0 lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where(P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? 
# for better visualization, clip power values to within (p0, p1) dB if p0 != None: P[P < p0] = p0 if p1 != None: P[P > p1] = p1 #self.P = P # plot horizontal bars over time demarcating different ranges of SI values, # or manually defined desynched and synched periods: statelinepos = f0 - df*0.015 # plot horizontal bars just below x axis if showstates: if showstates in [True, 'auto']: print("TODO: there's an offset plotting bug for 'auto', compare with 'manual'") si, t = self.si(plot=False) stranges, states = self.si_split(si, t) # sec STATECOLOURS = uns['LFPPRBINCOLOURS'] elif showstates == 'manual': stranges, states = [], [] for state in uns['MANUALSTATES']: for strange in uns['REC2STATE2TRANGES'][self.r.absname][state]: stranges.append(strange) states.append(state) stranges = np.vstack(stranges) # 2D array STATECOLOURS = uns['MANUALSTATECOLOURS'] else: raise ValueError('invalid value showstates=%r' % showstates) # clip stranges to t0, t1: stranges[0, 0] = max(stranges[0, 0], t0) stranges[-1, 1] = min(stranges[-1, 1], t1) if swapaxes: lines = a.vlines else: lines = a.hlines for strange, state in zip(stranges, states): clr = STATECOLOURS[state] lines(statelinepos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha, clip_on=False) # Label far left, right, top and bottom edges of imshow image. imshow interpolates # between these to place the axes ticks. Time limits are # set from midpoints of specgram time bins extent = t[0], t[-1], freqs[0], freqs[-1] #print('specgram extent: %r' % (extent,)) # flip P vertically for compatibility with imshow: im = a.imshow(P[::-1], extent=extent, cmap=cm) a.autoscale(enable=True, tight=True) a.axis('tight') # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started: a.set_xlim(xmin=0, xmax=t[-1]) a.set_ylim(ymin=freqs[0], ymax=freqs[-1]) # turn off annoying "+2.41e3" type offset on x axis: formatter = mpl.ticker.ScalarFormatter(useOffset=False) a.xaxis.set_major_formatter(formatter) a.set_xlabel("time (s)") a.set_ylabel("frequency (Hz)") titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) if title: a.set_title(titlestr) if reclabel: a.text(0.994, 0.95, '%s' % self.r.absname, color='w', transform=a.transAxes, horizontalalignment='right', verticalalignment='top') f.tight_layout(pad=0.3) # crop figure to contents if colorbar: f.colorbar(im, pad=0) # creates big whitespace to the right for some reason self.f = f return P, freqs, t
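# A standalone sketch of the core specgram call above: width and tres (in seconds) are
# converted to NFFT and noverlap in samples, power is converted to dB, and the result is
# shown with imshow. The sample rate and data here are fabricated for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab

sampfreq = 1000.0                          # Hz
lfp = np.random.randn(60 * int(sampfreq))  # 60 s of fake LFP
width, tres = 2.0, 0.5                     # window width and step, in s
NFFT = int(round(width * sampfreq))
noverlap = int(round(NFFT - tres * sampfreq))
P, freqs, t = mlab.specgram(lfp, NFFT=NFFT, Fs=sampfreq, noverlap=noverlap)
P = 10. * np.log10(P)                      # convert power to dB
extent = t[0], t[-1], freqs[0], freqs[-1]
plt.imshow(P[::-1], extent=extent, aspect='auto', cmap='jet') # flip rows for imshow
plt.xlabel('time (s)')
plt.ylabel('frequency (Hz)')
plt.show()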
def sc_si(source, method='mean', sisource='lfp', kind=None, chani=-1, sirange=None, layers=False, ms=1, figsize=(7.5, 6.5)): """Pool recording.sc().si() results across recordings specified by source, plot the result""" uns = get_ipython().user_ns if layers == False: layers = ['all'] elif layers == True: layers = ['sup', 'deep'] LAYER2I = {'all': 0, 'sup': 1, 'mid': 2, 'deep': 3, 'other': 4} layeris = [LAYER2I[layer] for layer in layers] recs, tracks = parse_source(source) if sisource not in ['lfp', 'mua']: raise ValueError('unknown sisource %r' % sisource) if kind == None: if sisource == 'lfp': kind = uns['LFPSIKIND'] else: kind = uns['MUASIKIND'] # calculate corrss, sis = [], [] for rec in recs: print(rec.absname) corrs, si, ylabel = rec.sc().si(method=method, sisource=sisource, kind=kind, chani=chani, sirange=sirange, plot=False) corrss.append(corrs) sis.append(si) corrs = np.hstack(corrss) si = np.hstack(sis) # plot f = pl.figure(figsize=figsize) a = f.add_subplot(111) #ylim = corrs[layeris].min(), corrs[layeris].max() #yrange = ylim[1] - ylim[0] #extra = yrange*0.03 # 3 % #ylim = ylim[0]-extra, ylim[1]+extra ylim = uns['SCLIMITS'] # keep only those points whose synchrony index falls within sirange: if sirange == None: finitesi = si[np.isfinite(si)] sirange = finitesi.min(), finitesi.max() sirange = np.asarray(sirange) keepis = (sirange[0] <= si[0]) * (si[0] <= sirange[1] ) # boolean index array si = si[:, keepis] corrs = corrs[:, keepis] # plot linear regressions of corrs vs si[0]: if 'all' in layers: m0, b0, r0, p0, stderr0 = linregress(si[0], corrs[0]) a.plot(sirange, m0 * sirange + b0, 'e--') if 'sup' in layers: m1, b1, r1, p1, stderr1 = linregress(si[0], corrs[1]) a.plot(sirange, m1 * sirange + b1, 'r--') if 'mid' in layers: m2, b2, r2, p2, stderr2 = linregress(si[0], corrs[2]) a.plot(sirange, m2 * sirange + b2, 'g--') if 'deep' in layers: m3, b3, r3, p3, stderr3 = linregress(si[0], corrs[3]) a.plot(sirange, m3 * sirange + b3, 'b--') if 'other' in layers: m4, b4, r4, p4, stderr4 = linregress(si[0], corrs[4]) a.plot(sirange, m4 * sirange + b4, 'y--', zorder=0) # scatter plot corrs vs si, one colour per laminarity: if 'all' in layers: a.plot(si[0], corrs[0], 'e.', ms=ms, label='all, m=%.3f, r=%.3f' % (m0, r0)) if 'sup' in layers: a.plot(si[0], corrs[1], 'r.', ms=ms, label='superficial, m=%.3f, r=%.3f' % (m1, r1)) if 'mid' in layers: a.plot(si[0], corrs[2], 'g.', ms=ms, label='middle, m=%.3f, r=%.3f' % (m2, r2)) if 'deep' in layers: a.plot(si[0], corrs[3], 'b.', ms=ms, label='deep, m=%.3f, r=%.3f' % (m3, r3)) if 'other' in layers: a.plot(si[0], corrs[4], 'y.', ms=ms, label='other, m=%.3f, r=%.3f' % (m4, r4), zorder=0) #a.set_xlim(sirange) if kind[0] == 'n': a.set_xlim(-1, 1) a.set_ylim(ylim) #a.autoscale(enable=True, axis='y', tight=True) a.set_xlabel('%s SI (%s)' % (sisource.upper(), kind)) a.set_ylabel(ylabel) titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) a.legend(loc='upper left', handlelength=1, handletextpad=0.5, labelspacing=0.1) f.tight_layout(pad=0.3) # crop figure to contents
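# The per-layer regression lines above come from scipy.stats.linregress; a minimal sketch
# of the overlay for a single set of points, with fabricated data:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress

si = np.random.uniform(-1, 1, 200)               # fake synchrony index values
corrs = 0.05 * si + 0.02 * np.random.randn(200)  # fake spike correlations
m, b, r, p, stderr = linregress(si, corrs)
sirange = np.array([si.min(), si.max()])
plt.plot(sirange, m * sirange + b, 'k--')        # regression line
plt.plot(si, corrs, 'k.', ms=2, label='m=%.3f, r=%.3f' % (m, r))
plt.legend(loc='upper left')
plt.show()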
def si_plot(self, si, t, t0=None, t1=None, xlim=None, ylim=None, yticks=None, ylabel=None, showxlabel=True, showylabel=True, showtitle=True, title=None, reclabel=True, hlines=[0], showstates=False, statelinepos=None, lw=4, alpha=1, swapaxes=False, figsize=None): """Plot synchrony index as a function of time, with hopefully the same temporal scale as some of the other plots in self""" uns = get_ipython().user_ns f = pl.figure(figsize=figsize) a = f.add_subplot(111) xlabel = "time (s)" if ylabel == None: ylabel = "synchrony index (AU?)" if swapaxes: t, si = si, t # swap t and si xlim, ylim = ylim, xlim ylim = ylim[1], ylim[0] # swap new ylimits so t=0 is at top xlabel, ylabel = ylabel, xlabel # swap labels showxlabel, showylabel = showylabel, showxlabel # swap flags # underplot vertical lines: for hline in hlines: a.axvline(x=hline, c='e', ls='--', marker=None) else: # underplot horizontal lines: for hline in hlines: a.axhline(y=hline, c='e', ls='--', marker=None) # plot horizontal bars over time demarcating different ranges of SI values, # or manually defined desynched and synched periods: if showstates in [True, 'auto']: stranges, states = self.si_split(si, t) if swapaxes: lines = a.vlines else: lines = a.hlines slposs = statelinepos if len(slposs) == 1: # use same statelinepos for all states nstranges = len(stranges) slposs = slposs * nstranges for strange, state, slpos in zip(stranges, states, slposs): clr = uns['LFPPRBINCOLOURS'][state] lines(slpos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha) elif showstates == 'manual': REC2STATETRANGES = uns['REC2STATETRANGES'] dtrange, strange = np.asarray(REC2STATETRANGES[self.r.absname]) / 1e6 dtrange = max(dtrange[0], t0), min(dtrange[1], t1) # clip desynch trange to t0, t1 strange = max(strange[0], t0), min(strange[1], t1) # clip synch trange to t0, t1 if swapaxes: lines = a.vlines else: lines = a.hlines slposs = statelinepos if len(slposs) == 1: # use same statelinepos for both states slposs = slposs * 2 lines(slposs[0], dtrange[0], dtrange[1], colors='b', lw=lw, alpha=alpha) lines(slposs[1], strange[0], strange[1], colors='r', lw=lw, alpha=alpha) a.plot(t, si, 'k-') # depending on relative2t0 in si(), x=0 represents either t0 or time ADC clock started: a.set_xlim(xlim) # low/high limits are unchanged if None a.set_ylim(ylim) if yticks != None: a.set_yticks(yticks) if showxlabel: a.set_xlabel(xlabel) if showylabel: a.set_ylabel(ylabel) #a.autoscale(axis='x', enable=True, tight=True) # turn off annoying "+2.41e3" type offset on x axis: formatter = mpl.ticker.ScalarFormatter(useOffset=False) a.xaxis.set_major_formatter(formatter) if title == None: title = lastcmd() gcfm().window.setWindowTitle(title) if showtitle: a.set_title(title) if reclabel: a.text(0.994, 0.01, '%s' % self.r.absname, color='k', transform=a.transAxes, horizontalalignment='right', verticalalignment='bottom') f.tight_layout(pad=0.3) # crop figure to contents
def cch(self, nid0, nid1=None, trange=50, binw=None, shift=None, nshifts=10,
        rate=False, norm=False, c='k', title=True, figsize=(7.5, 6.5)):
    """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
    If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
    (in ms) is set, calculate the average of +/- nshift CCHs shifted by shift, and then
    subtract that from the unshifted CCH to get the shift corrected CCH"""
    if nid1 == None:
        nid1 = nid0
    autocorr = nid0 == nid1
    n0 = self.alln[nid0]
    n1 = self.alln[nid1]
    calctrange = trange * 1000 # calculation trange, in us
    if shift:
        assert nshifts > 0
        shift *= 1000 # convert to us
        maxshift = nshifts * shift
        calctrange += maxshift # expand calculated trange (us) to encompass shifts
    calctrange = np.array([-calctrange, calctrange]) # convert to a +/- array, in us
    dts = util.xcorr(n0.spikes, n1.spikes, calctrange) # in us
    if autocorr:
        dts = dts[dts != 0] # remove 0s for autocorr
    if shift: # calculate dts for shift corrector
        shiftis = list(range(-nshifts, nshifts+1)) # list() so remove() works in Python 3
        shiftis.remove(0) # don't shift by 0, that's the original which we'll subtract from
        shifts = np.asarray(shiftis) * shift
        shiftdts = np.hstack([ dts+s for s in shifts ]) # in us
        print('shifts =', shifts / 1000)
    if not binw:
        nbins = intround(np.sqrt(len(dts))) # good heuristic
        nbins = max(20, nbins) # enforce min nbins
        nbins = min(200, nbins) # enforce max nbins
    else:
        nbins = intround(2 * trange / binw)
    dts = dts / 1000 # in ms, converts to float64 array
    t = np.linspace(start=-trange, stop=trange, num=nbins+1, endpoint=True) # ms
    binw = t[1] - t[0] # all should be equal width, ms
    n = np.histogram(dts, bins=t, density=False)[0]
    if shift: # subtract shift corrector
        shiftdts = shiftdts / 1000 # in ms, converts to float64 array
        shiftn = np.histogram(shiftdts, bins=t, density=False)[0] / (nshifts*2)
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(left=t[:-1], height=shiftn, width=binw) # omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        n -= shiftn
    if norm: # normalize and convert to float:
        n = n / n.max()
    elif rate: # normalize by binw and convert to float:
        n = n / binw
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.bar(left=t[:-1], height=n, width=binw, color=c, ec=c) # omit last right edge in t
    a.set_xlim(t[0], t[-1])
    a.set_xlabel('spike interval (ms)')
    if norm:
        a.set_ylabel('coincidence rate (AU)')
        a.set_yticks([0, 1])
    elif rate:
        a.set_ylabel('coincidence rate (Hz)')
    else:
        a.set_ylabel('count')
    if title:
        a.set_title('spike times of n%d wrt n%d' % (nid1, nid0))
    wtitlestr = lastcmd()# + ', binw=%.1f ms' % binw
    gcfm().window.setWindowTitle(wtitlestr)
    f.tight_layout(pad=0.3) # crop figure to contents
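# A standalone sketch of the shift-predictor correction used above: the CCH of the
# original spike trains minus the average CCH of time-shifted copies. xcorr() here is a
# simple brute-force stand-in, not the util.xcorr implementation.
import numpy as np

def xcorr(spikes0, spikes1, trange):
    """Return all pairwise time differences spikes1 - spikes0 within trange=(lo, hi)."""
    dts = []
    for t0 in spikes0:
        d = spikes1 - t0
        dts.append(d[(d >= trange[0]) & (d <= trange[1])])
    return np.hstack(dts)

def shift_corrected_cch(spikes0, spikes1, trange=50.0, nbins=100, shift=100.0, nshifts=10):
    """CCH counts minus the mean CCH of +/- nshifts copies of spikes1 shifted by `shift`.
    All times are in the same unit (e.g. ms)."""
    edges = np.linspace(-trange, trange, nbins + 1)
    n = np.histogram(xcorr(spikes0, spikes1, (-trange, trange)), bins=edges)[0]
    shifts = np.array([s for s in range(-nshifts, nshifts + 1) if s != 0]) * shift
    shiftn = np.zeros(nbins)
    for s in shifts:
        shiftn += np.histogram(xcorr(spikes0, spikes1 + s, (-trange, trange)), bins=edges)[0]
    shiftn /= len(shifts)
    return edges, n - shiftn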
def pospdf(self, neurons='all', dim='y', edges=None, nbins=10, stats=False, labels=True,
           a=None, figsize=(7.5, 6.5)):
    """Plot PDF of cell positions ('x' or 'y') along the polytrode to get an idea of how
    cells are distributed in space"""
    if neurons == 'all':
        neurons = list(self.alln.values())
    elif neurons == 'quiet':
        neurons = list(self.qn.values())
    elif neurons == 'active':
        neurons = list(self.n.values())
    dimi = {'x': 0, 'y': 1}[dim]
    p = [ n.pos[dimi] for n in neurons ] # all position values
    if edges != None:
        nbins = len(edges) - 1
        bins = edges # assume it includes rightmost bin edge
    else:
        nbins = max(nbins, 2 * intround(np.sqrt(self.nneurons)))
        bins = nbins
    if stats: # compute these on the raw positions, before p is rebound to bin edges below
        mean = np.mean(p)
        median = np.median(p)
        stdev = np.std(p)
    n, p = np.histogram(p, bins=bins) # p now holds bin edges, including rightmost edge
    binwidth = p[1] - p[0] # take width of first bin in p
    if stats:
        argmode = n.argmax()
        mode = p[argmode] + binwidth / 2 # middle of tallest bin
    if a == None:
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
    else: # add to existing axes
        a.hold(True)
        f = pl.gcf()
    # use CCWHITEDICT1 for familiarity with len 10 1-based id to colour mapping
    #color = CCWHITEDICT1[int(self.id)]
    color = 'k'
    # exclude rightmost bin edge in p:
    a.bar(left=p[:-1], height=n, width=binwidth, bottom=0, color=color, ec=color)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    if labels:
        a.set_title(titlestr)
        a.set_xlabel('neuron %s position (um)' % dim)
        a.set_ylabel('neuron count')
    if stats:
        # add stuff to top right of plot:
        uns = get_ipython().user_ns
        a.text(0.99, 0.99, 'mean = %.3f\n'
                           'median = %.3f\n'
                           'mode = %.3f\n'
                           'stdev = %.3f\n'
                           'minrate = %.2f Hz\n'
                           'nneurons = %d\n'
                           'dt = %d min'
                           % (mean, median, mode, stdev, uns['MINRATE'], self.nneurons,
                              intround(self.dtmin)),
               transform=a.transAxes,
               horizontalalignment='right',
               verticalalignment='top')
    f.tight_layout(pad=0.3) # crop figure to contents
    f.canvas.draw() # this is needed if a != None when passed as arg
    return a
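# Toy check of the pospdf() stats above: mean/median/stdev are taken from the raw positions,
# while the mode is reported as the middle of the tallest histogram bin. The depth values
# below are made up, purely for illustration.
import numpy as np

pos = np.array([55, 60, 62, 300, 310, 320, 330, 900], dtype=float) # um
n, edges = np.histogram(pos, bins=10) # edges includes rightmost bin edge
binwidth = edges[1] - edges[0]
mode = edges[n.argmax()] + binwidth / 2 # middle of tallest bin
print('mean=%.1f median=%.1f mode=%.1f stdev=%.1f'
      % (pos.mean(), np.median(pos), mode, pos.std()))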
def npos(self, colour='active', inchespermicron=0.007, legend=False, alpha=0.6):
    """Plot (x, y) cell positions over top of polytrode channel positions, to get an idea of
    how cells are distributed in space. Colour cells by 'active', 'rftype', 'spiketype' or
    'sigma'."""
    uns = get_ipython().user_ns
    npos = np.asarray([ neuron.pos for neuron in self.alln.values() ])
    chanpos = self.chanpos
    chanxs, chanys = chanpos[:, 0], chanpos[:, 1]
    uchanxs = np.unique(chanxs)
    xspace = np.diff(uchanxs).max() # max spacing of consecutive unique x chan positions
    hsw = uns['PTSHANKWIDTHS'][self.pttype] / 2 # half shank width
    xs = np.hstack((npos[:, 0], chanxs, [-hsw, hsw]))
    ys = np.hstack((npos[:, 1], chanys))
    ymin = min(min(ys), 0)
    xlim = min(xs.min(), uchanxs[0]-xspace/2), max(xs.max(), uchanxs[-1]+xspace/2)
    ylim = ys.max()+xspace, ymin # inverted y axis
    figwidth = inchespermicron * np.ptp(xlim) * 2 + 3*legend # make space for y axis labels
    figheight = inchespermicron * np.ptp(ylim)
    f = pl.figure(figsize=(figwidth, figheight))
    a = f.add_subplot(111, aspect='equal')
    a.set_frame_on(False)
    # plot rectangle representing shank width and length, excluding the tip:
    sl = ylim[0]
    # starting from bottom left, going clockwise:
    shankxs = -hsw, -hsw, hsw, hsw
    shankys = sl, ymin, ymin, sl
    a.fill(shankxs, shankys, color='lightgrey', ec='none')
    # plot electrode sites:
    a.plot(chanpos[:, 0], chanpos[:, 1], 'k.', ms=5)
    if colour == 'active':
        # plot active and quiet cell positions in red and blue, respectively:
        anpos = np.asarray([ neuron.pos for neuron in self.n.values() ])
        qnpos = np.asarray([ neuron.pos for neuron in self.qn.values() ])
        na = len(anpos)
        nq = len(qnpos)
        # layer in inverse order of importance, guarding each plot by its own class count:
        if nq: a.plot(qnpos[:, 0], qnpos[:, 1], 'b.', ms=10, alpha=alpha, label='quiet')
        if na: a.plot(anpos[:, 0], anpos[:, 1], 'r.', ms=10, alpha=alpha, label='active')
    elif colour == 'rftype':
        # plot simple, complex, LGN afferent and None in red, blue, green and grey:
        spos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.rftype == 'simple' ])
        cpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.rftype == 'complex' ])
        Lpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.rftype == 'LGN' ])
        Npos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.rftype == None ])
        ns = len(spos)
        nc = len(cpos)
        nL = len(Lpos)
        nN = len(Npos)
        # layer in inverse order of importance:
        if nN: a.plot(Npos[:, 0], Npos[:, 1], 'e.', ms=10, alpha=alpha, label='unknown')
        if nL: a.plot(Lpos[:, 0], Lpos[:, 1], 'g.', ms=10, alpha=alpha, label='LGN afferent')
        if nc: a.plot(cpos[:, 0], cpos[:, 1], 'b.', ms=10, alpha=alpha, label='complex')
        if ns: a.plot(spos[:, 0], spos[:, 1], 'r.', ms=10, alpha=alpha, label='simple')
    elif colour == 'spiketype':
        # plot fast, slow, fastasym and slowasym in red, blue, green and grey:
        fpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.spiketype == 'fast' ])
        spos = np.asarray([ neuron.pos for neuron in self.alln.values()
                            if neuron.spiketype == 'slow' ])
        fapos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.spiketype == 'fastasym' ])
        sapos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.spiketype == 'slowasym' ])
        nf = len(fpos)
        ns = len(spos)
        nfa = len(fapos)
        nsa = len(sapos)
        # layer in inverse order of frequency:
        if nf: a.plot(fpos[:, 0], fpos[:, 1], 'r.', ms=10, alpha=alpha, label='fast')
        if ns: a.plot(spos[:, 0], spos[:, 1], 'b.', ms=10, alpha=alpha, label='slow')
        if nfa: a.plot(fapos[:, 0], fapos[:, 1], 'g.', ms=10, alpha=alpha,
                       label='fast asymmetric')
        if nsa: a.plot(sapos[:, 0], sapos[:, 1], 'e.', ms=10, alpha=alpha,
                       label='slow asymmetric')
    elif colour == 'sigma':
        sigmas = np.asarray([ neuron.sigma for neuron in self.alln.values() ])
        cmap = mpl.cm.hot_r
        # best to fully saturate alpha because colour indicates value, not just class:
        sc = a.scatter(npos[:, 0], npos[:, 1], edgecolor='none', c=sigmas, cmap=cmap,
                       alpha=1.0, s=30, zorder=10)
    else:
        raise RuntimeError("unknown colour kwarg %r" % colour)
    a.set_xlim(xlim)
    a.set_ylim(ylim)
    a.set_xticks(uchanxs)
    a.set_yticks(np.arange(0, ylim[0], 200))
    #a.xaxis.set_ticks_position('bottom')
    #a.yaxis.set_ticks_position('left')
    # put legend to right of the axes:
    if legend:
        if colour == 'sigma':
            f.colorbar(sc, ax=a, shrink=0.1, pad=0.1, aspect=10,
                       ticks=[min(sigmas), max(sigmas)], format='%d', label='sigma')
        else:
            a.legend(loc='center left', bbox_to_anchor=(1.2, 0.5), frameon=False)
    bbox = a.get_position()
    wh = bbox.width / bbox.height # w:h ratio of axes, includes all ticks and labels?
    w, h = gcfm().canvas.get_width_height()
    gcfm().resize(w*wh, h)
    titlestr = lastcmd()
    gcfm().set_window_title(titlestr)
    a.set_title(self.absname)
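# Minimal, hypothetical reduction of the npos() grouping logic above: collect (x, y)
# positions per class and scatter each class in its own colour, skipping empty classes
# (indexing pos[:, 0] on an empty array would raise IndexError, hence the per-class count
# guard). The Neuron objects, attribute name and colours here are stand-ins for the original
# data model, for illustration only.
import numpy as np
import matplotlib.pyplot as pl

def plot_positions_by_class(neurons, attr='rftype', colours=None):
    if colours is None:
        colours = {'simple': 'r', 'complex': 'b', None: 'grey'}
    f, a = pl.subplots()
    for cls, colour in colours.items():
        pos = np.asarray([ n.pos for n in neurons if getattr(n, attr) == cls ])
        if len(pos): # only plot non-empty classes
            a.plot(pos[:, 0], pos[:, 1], '.', color=colour, ms=10, label=str(cls))
    a.invert_yaxis() # depth increases downward
    a.legend(frameon=False)
    return a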
def scsistim(self, method='weighted mean', width=None, tres=None, timeaverage=False, plottime=False, s=5, figsize=(7.5, 6.5)): """Scatter plot some summary statistic of spike correlations of each recording vs synchrony index SI. Colour each point according to stimulus type. width and tres dictate tranges to split recordings up into. timeaverage means average across time values of both sc and si for each recording""" ## TODO: maybe limit to visually responsive cells ## TODO: add linear regression of si vs log(sc) uns = get_ipython().user_ns if width == None: width = uns['SIWIDTH'] # want powers of two for efficient FFT if tres == None: tres = width rids = sorted(self.r) # do everything in rid order recs = [ self.r[rid] for rid in rids ] msrids, bsrids, mvrids, dbrids = [], [], [], [] for rid in rids: r = self.r[rid] rname = r.name if 'mseq' in rname: msrids.append(rid) elif 'blank' in rname or 'spont' in rname: bsrids.append(rid) elif 'MVI' in rname: mvrids.append(rid) elif 'driftbar' in rname: dbrids.append(rid) print('mseq: %r' % [self.r[rid].name for rid in msrids]) print('blankscreen: %r' % [self.r[rid].name for rid in bsrids]) print('movie: %r' % [self.r[rid].name for rid in mvrids]) print('driftbar: %r' % [self.r[rid].name for rid in dbrids]) isect = core.intersect1d([msrids, bsrids, mvrids, dbrids]) if len(isect) != 0: raise RuntimeError("some rids were classified into more than one type: %r" % isect) rids = np.unique(np.hstack([msrids, bsrids, mvrids, dbrids])) scs, sis, c = [], [], [] for rid in rids: r = self.r[rid] print('%s: %s' % (r.absname, r.name)) spikecorr = r.sc(width=width, tres=tres) sc, si = spikecorr.si(method=method, plot=False) # calls sc.sct() and sc.si() sc = sc[0] # pull out the spike correlation values that span all laminae if timeaverage: # average across all time values of sc and si to get a single coordinate # per recording sc = sc.mean() si = si.mean() scs.append(sc) sis.append(si) if rid in msrids: color = 'k' elif rid in bsrids: color = 'e' elif rid in mvrids: color = 'r' elif rid in dbrids: color = 'b' else: raise ValueError("unclassified recording: %r" % r.name) c.append(np.tile(color, len(sc))) scs = np.hstack(scs) sis = np.hstack(sis) c = np.hstack(c) f = pl.figure(figsize=figsize) a = f.add_subplot(111) if plottime: # underplot lines connecting points adjacent in time a.plot(scs, sis, 'e--') a.scatter(scs, sis, c=c, edgecolors='none', s=s) a.set_ylim(0, 1) a.set_xlabel('%s spike correlations' % method) a.set_ylabel('synchrony index') titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) # make proxy line artists for legend: ms = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='k', mec='k') bs = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='e', mec='e') mv = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='r', mec='r') db = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='b', mec='b') # add legend: a.legend([ms, bs, mv, db], ['mseq', 'blank screen', 'movie', 'drift bar'], numpoints=1, loc='lower right', handlelength=1, handletextpad=0.5, labelspacing=0.1) f.tight_layout(pad=0.3) # crop figure to contents return scs, sis, c
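# The scsistim() above classifies recordings by substring matching on their names ('mseq',
# 'blank'/'spont', 'MVI', 'driftbar') before pooling spike correlations per stimulus type.
# A standalone sketch of just that classification step; the recording names below are
# invented examples.
def classify_rnames(rnames):
    groups = {'mseq': [], 'blankscreen': [], 'movie': [], 'driftbar': []}
    for rname in rnames:
        if 'mseq' in rname: groups['mseq'].append(rname)
        elif 'blank' in rname or 'spont' in rname: groups['blankscreen'].append(rname)
        elif 'MVI' in rname: groups['movie'].append(rname)
        elif 'driftbar' in rname: groups['driftbar'].append(rname)
    return groups

print(classify_rnames(['08-MVI_1400', '09-mseq32', '10-blankscreen', '11-driftbar_longbar']))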
ai2s.append(ai2) aais.append(aai) if plotwaves: figure() plot(wave, 'k') # plot fwhm of primary and secondary peaks: nit = len(t1) # number of interpolated timepoints minri1 = min(ri1, nit) minri2 = min(ri2, nit) plot(np.arange(li1, minri1), wave[li1:minri1], 'r') plot(np.arange(li2, minri2), wave[li2:minri2], 'b') # plot points used for ipi: plot(exti1, wave[exti1], 'g', ms=10) plot(exti2, wave[exti2], 'g', ms=10) titlestr = 'wave %d (%s)' % (len(fwhm1s)-1, track.absname + '.n%d' % nid) gcfm().window.setWindowTitle(titlestr) ''' absslope = abs(np.diff(wave)) / newtres # uV/us maxslopes.append(max(absslope)) nabsslope = abs(np.diff(nwave)) / newtres # 1/us maxnslopes.append(max(nabsslope)) # another way to measure waveform duration is to see over what duration the abs(slope) # is greater than something close to 0. Starting from each end, at what timepoint does # the slope exceed this minimum threshold? Difference between timepoints is duration # of waveform slopeis = np.where(absslope > absslopethresh)[0] if len(slopeis) < 2: # exclude cells whose slope isn't above threshold for at least two timepoints: duration = 0 slopeiss.append([0, 0]) else:
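# Sketch of the duration measure described in the comment above: find the first and last
# timepoints where |slope| exceeds a small threshold, and take their separation as the
# waveform duration. The waveform, sample interval and threshold below are made up for
# illustration.
import numpy as np

def slope_duration(wave, tres_us, absslopethresh=0.04):
    """Duration (us) over which abs(slope) of wave (uV) exceeds absslopethresh (uV/us)"""
    absslope = np.abs(np.diff(wave)) / tres_us # uV/us
    slopeis = np.where(absslope > absslopethresh)[0]
    if len(slopeis) < 2:
        return 0 # slope never clearly exceeds threshold
    return (slopeis[-1] - slopeis[0]) * tres_us

t = np.arange(0, 1000, 20) # us
wave = 80 * np.exp(-((t - 400) / 120.0) ** 2) - 40 * np.exp(-((t - 650) / 180.0) ** 2) # uV
print(slope_duration(wave, tres_us=20))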
def plot(self, t0=None, t1=None, chanis=None, gain=1, c='k', alpha=1.0, yunits='um', yticks=None, title=True, xlabel=True, relative2t0=False, lim2stim=False, scalebar=True, lw=4, figsize=(20, 6.5)): """Plot chanis of LFP data between t0 and t1 in sec. Unfortunatley, setting an alpha < 1 doesn't seem to reveal detail when a line obscures itself, such as when plotting a very long time series. relative2t0 controls whether to plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din. If only one chan is requested, it's plotted on a mV scale instead of a spatial scale.""" self.get_data() ts = self.get_tssec() # full set of timestamps, in sec if t0 == None: t0, t1 = ts[0], ts[-1] if t1 == None: t1 = t0 + 10 # 10 sec window if chanis == None: chanis = range(len(self.chans)) # all chans if lim2stim: t0, t1 = self.apply_lim2stim(t0, t1) t0i, t1i = ts.searchsorted((t0, t1)) ts = ts[t0i:t1i] # constrained set of timestamps, in sec chanis = tolist(chanis) nchans = len(chanis) # grab desired channels and time range: data = self.data[chanis][:, t0i:t1i] if nchans > 1: # convert uV to um: totalgain = self.UV2UM * gain data = data * totalgain else: # convert uV to mV: data = data / 1000 yunits = 'mV' nt = len(ts) assert nt == data.shape[1] if relative2t0: # convert ts to time from t0, otherwise plot time from start of ADC clock: ts -= t0 x = np.tile(ts, nchans) x.shape = nchans, nt segments = np.zeros((nchans, nt, 2)) # x vals in col 0, yvals in col 1 segments[:, :, 0] = x if nchans > 1: segments[:, :, 1] = -data # set to -ve here because of invert_yaxis() below else: segments[:, :, 1] = data if nchans > 1: # add y offsets: maxypos = 0 for chanii, chani in enumerate(chanis): chan = self.chans[chani] ypos = self.chanpos[chan][1] # in um segments[chanii, :, 1] += ypos # vertical distance below top of probe maxypos = max(maxypos, ypos) if yunits == 'mm': # convert from um to mm segments[:, :, 1] /= 1000 maxypos = maxypos / 1000 # convert from int to float totalgain = totalgain / 1000 lc = LineCollection(segments, linewidth=1, linestyle='-', colors=c, alpha=alpha, antialiased=True, visible=True) f = pl.figure(figsize=figsize) a = f.add_subplot(111) a.add_collection(lc) # add to axes' pool of LCs if scalebar: # add vertical scale bar at end of last channel to represent 1 mV: if nchans > 1: ymin, ymax = maxypos-500*totalgain, maxypos+500*totalgain # +/- 0.5 mV else: ymin, ymax = -0.5, 0.5 # mV a.vlines(ts.max()*0.99, ymin, ymax, lw=lw, colors='e') a.autoscale(enable=True, tight=True) # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started: a.set_xlim(xmin=0) if nchans > 1: a.invert_yaxis() # for spatial scale if yticks != None: a.set_yticks(yticks) # turn off annoying "+2.41e3" type offset on x axis: formatter = mpl.ticker.ScalarFormatter(useOffset=False) a.xaxis.set_major_formatter(formatter) if xlabel: a.set_xlabel("time (s)") if yunits == 'um': a.set_ylabel("depth ($\mu$m)") elif yunits == 'mm': a.set_ylabel("depth (mm)") elif yunits == 'mV': a.set_ylabel("LFP (mV)") titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) if title: a.set_title(titlestr) a.text(0.998, 0.99, '%s' % self.r.name, transform=a.transAxes, horizontalalignment='right', verticalalignment='top') f.tight_layout(pad=0.3) # crop figure to contents self.f = f return self
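# The LFP plot() above draws all channels as a single LineCollection, offsetting each trace
# by its channel depth and negating the data so that invert_yaxis() puts superficial channels
# at the top. A small self-contained sketch of that construction; the timestamps, depths,
# data and uV-to-um display gain below are fake.
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.collections import LineCollection

ts = np.linspace(0, 10, 2500) # s
depths = np.array([0, 100, 200, 300]) # um
data = 50 * np.random.randn(len(depths), len(ts)) # fake LFP, uV
uv2um = 0.05 # fake uV -> um display gain

segments = np.zeros((len(depths), len(ts), 2)) # x vals in col 0, y vals in col 1
segments[:, :, 0] = ts
segments[:, :, 1] = -data * uv2um + depths[:, None] # offset each trace by its depth

f, a = pl.subplots(figsize=(10, 4))
a.add_collection(LineCollection(segments, colors='k', linewidth=1))
a.autoscale(enable=True, tight=True)
a.invert_yaxis() # superficial channels at top
a.set_xlabel('time (s)')
a.set_ylabel('depth (um)')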
def psd(self, t0=None, t1=None, f0=0.2, f1=110, p0=None, p1=None, chanis=-1, width=None, tres=None, xscale='log', figsize=(5, 5)): """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of specified chanis. width and tres are in sec.""" uns = get_ipython().user_ns self.get_data() ts = self.get_tssec() # full set of timestamps, in sec if t0 == None: t0, t1 = ts[0], ts[-1] # full duration if t1 == None: t1 = t0 + 10 # 10 sec window if width == None: width = uns['LFPSPECGRAMWIDTH'] # sec if tres == None: tres = uns['LFPSPECGRAMTRES'] # sec assert tres <= width NFFT = intround(width * self.sampfreq) noverlap = intround(NFFT - tres * self.sampfreq) t0i, t1i = ts.searchsorted((t0, t1)) #ts = ts[t0i:t1i] # constrained set of timestamps, in sec data = self.data[:, t0i:t1i] # slice data f = pl.figure(figsize=figsize) a = f.add_subplot(111) if iterable(chanis): data = data[chanis].mean(axis=0) # take mean of data on chanis else: data = data[chanis] # get single row of data at chanis #data = filter.notch(data)[0] # remove 60 Hz mains noise # convert data from uV to mV. I think P is in mV^2?: P, freqs = mpl.mlab.psd(data / 1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap) # keep only freqs between f0 and f1: if f0 == None: f0 = freqs[0] if f1 == None: f1 = freqs[-1] lo, hi = freqs.searchsorted([f0, f1]) P, freqs = P[lo:hi], freqs[lo:hi] # check for and replace zero power values (ostensibly due to gaps in recording) # before attempting to convert to dB: zis = np.where( P == 0.0) # row and column indices where P has zero power if len(zis[0]) > 0: # at least one hit P[zis] = np.finfo( np.float64).max # temporarily replace zeros with max float minnzval = P.min() # get minimum nonzero value P[zis] = minnzval # replace with min nonzero values P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2? # for better visualization, clip power values to within (p0, p1) dB if p0 != None: P[P < p0] = p0 if p1 != None: P[P > p1] = p1 #self.P = P a.plot(freqs, P, 'k-') # add SI frequency band limits: LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND'] a.axvline(x=LFPPRLOBAND[0], c='r', ls='--') a.axvline(x=LFPPRLOBAND[1], c='r', ls='--') a.axvline(x=LFPPRHIBAND[0], c='b', ls='--') a.axvline(x=LFPPRHIBAND[1], c='b', ls='--') a.axis('tight') a.set_xscale(xscale) a.set_xlabel("frequency (Hz)") a.set_ylabel("power (dB)") titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) a.text(0.998, 0.99, '%s' % self.r.name, color='k', transform=a.transAxes, horizontalalignment='right', verticalalignment='top') f.tight_layout(pad=0.3) # crop figure to contents self.f = f return P, freqs
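# Sketch of the PSD-to-dB step in psd() above: zero power values (e.g. from gaps in the
# recording) are replaced with the smallest nonzero value before taking 10*log10, so the dB
# conversion never sees log(0). The synthetic signal and sampling rate are made up;
# matplotlib.mlab.psd is used as in the original, though scipy.signal.welch would serve
# equally well.
import numpy as np
from matplotlib import mlab

sampfreq = 1000 # Hz
x = np.sin(2 * np.pi * 7 * np.arange(20000) / sampfreq) + 0.1 * np.random.randn(20000)
P, freqs = mlab.psd(x, NFFT=2048, Fs=sampfreq, noverlap=1024)
zis = np.where(P == 0.0) # indices of zero power values, if any
if len(zis[0]) > 0:
    P[zis] = P[P > 0].min() # replace zeros with min nonzero power
P_dB = 10 * np.log10(P)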
def scsistim(self, method='mean', width=None, tres=None, timeaverage=False, plottime=False, s=5, figsize=(7.5, 6.5)): """Scatter plot some summary statistic of spike correlations of each recording vs LFP synchrony index SI. Colour each point according to stimulus type. width and tres (sec) dictate tranges to split recordings up into. timeaverage averages across time values of both sc and si for each recording. s is point size""" ## TODO: maybe limit to visually responsive cells ## TODO: add linear regression of si vs log(sc) uns = get_ipython().user_ns if width == None: width = uns['LFPSIWIDTH'] if tres == None: tres = width bsrids = uns['BSRIDS'][self.absname] msrids = uns['MSRIDS'][self.absname] mvrids = uns['NSRIDS'][self.absname] dbrids = uns['DBRIDS'][self.absname] rids = sorted(bsrids + msrids + mvrids + dbrids) # do everything in rid order print('blankscreen: %r' % [self.r[rid].name for rid in bsrids]) print('mseq: %r' % [self.r[rid].name for rid in msrids]) print('movie: %r' % [self.r[rid].name for rid in mvrids]) print('driftbar: %r' % [self.r[rid].name for rid in dbrids]) isect = core.intersect1d([msrids, bsrids, mvrids, dbrids]) if len(isect) != 0: raise RuntimeError( "some rids were classified into more than one type: %r" % isect) scs, sis, c = [], [], [] for rid in rids: r = self.r[rid] print('%s: %s' % (r.absname, r.name)) spikecorr = r.sc(width=width, tres=tres) """ TODO: not sure if this is the right way to do this. A different set of neurons for each recording are chosen, then mean sc(t) across all pairs for each recording is found, and pooled across recordings. This pooling is maybe a bit dodgy. Is it valid to pool sc(t) values across recordings when the included neurons are different for each recording? The alternative is to deal only with neurons which exceed MINTHRESH track-wide, but the problem with that is that for much of the time, such neurons are completely silent, and therefore don't deserve to be included in sc calculations for those durations. 
""" sc, si = spikecorr.si(method=method, plot=False) # calls sc.sct() and sc.si() sc = sc[ 0] # pull out the spike correlation values that span all laminae if timeaverage: # average across all time values of sc and si to get a single coordinate # per recording sc = sc.mean() si = si.mean() scs.append(sc) sis.append(si) if rid in bsrids: color = 'e' elif rid in msrids: color = 'k' elif rid in mvrids: color = 'r' elif rid in dbrids: color = 'b' else: raise ValueError("unclassified recording: %r" % r.name) c.append(np.tile(color, len(sc))) scs = np.hstack(scs) sis = np.hstack(sis) c = np.hstack(c) f = pl.figure(figsize=figsize) a = f.add_subplot(111) if plottime: # underplot lines connecting points adjacent in time a.plot(scs, sis, 'e--') a.scatter(scs, sis, c=c, edgecolors='none', s=s) a.set_ylim(0, 1) a.set_xlabel('%s spike correlations' % method) a.set_ylabel('synchrony index') titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) # make proxy line artists for legend: bs = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='e', mec='e') ms = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='k', mec='k') mv = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='r', mec='r') db = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='b', mec='b') # add legend: a.legend([bs, ms, mv, db], ['blank screen', 'mseq', 'movie', 'drift bar'], numpoints=1, loc='lower right', handlelength=1, handletextpad=0.5, labelspacing=0.1) f.tight_layout(pad=0.3) # crop figure to contents return scs, sis, c
def sc_si(source, method='mean', sisource='lfp', kind=None, chani=-1, sirange=None, layers=False, ms=1, figsize=(7.5, 6.5)): """Pool recording.sc().si() results across recordings specified by source, plot the result""" uns = get_ipython().user_ns if layers == False: layers = ['all'] elif layers == True: layers = ['sup', 'deep'] LAYER2I = {'all':0, 'sup':1, 'mid':2, 'deep':3, 'other':4} layeris = [ LAYER2I[layer] for layer in layers ] recs, tracks = parse_source(source) if sisource not in ['lfp', 'mua']: raise ValueError('unknown sisource %r' % sisource) if kind == None: if sisource == 'lfp': kind = uns['LFPSIKIND'] else: kind = uns['MUASIKIND'] # calculate corrss, sis = [], [] for rec in recs: print(rec.absname) corrs, si, ylabel = rec.sc().si(method=method, sisource=sisource, kind=kind, chani=chani, sirange=sirange, plot=False) corrss.append(corrs) sis.append(si) corrs = np.hstack(corrss) si = np.hstack(sis) # plot f = pl.figure(figsize=figsize) a = f.add_subplot(111) #ylim = corrs[layeris].min(), corrs[layeris].max() #yrange = ylim[1] - ylim[0] #extra = yrange*0.03 # 3 % #ylim = ylim[0]-extra, ylim[1]+extra ylim = uns['SCLIMITS'] # keep only those points whose synchrony index falls within sirange: if sirange == None: finitesi = si[np.isfinite(si)] sirange = finitesi.min(), finitesi.max() sirange = np.asarray(sirange) keepis = (sirange[0] <= si[0]) * (si[0] <= sirange[1]) # boolean index array si = si[:, keepis] corrs = corrs[:, keepis] # plot linear regressions of corrs vs si[0]: if 'all' in layers: m0, b0, r0, p0, stderr0 = linregress(si[0], corrs[0]) a.plot(sirange, m0*sirange+b0, 'e--') if 'sup' in layers: m1, b1, r1, p1, stderr1 = linregress(si[0], corrs[1]) a.plot(sirange, m1*sirange+b1, 'r--') if 'mid' in layers: m2, b2, r2, p2, stderr2 = linregress(si[0], corrs[2]) a.plot(sirange, m2*sirange+b2, 'g--') if 'deep' in layers: m3, b3, r3, p3, stderr3 = linregress(si[0], corrs[3]) a.plot(sirange, m3*sirange+b3, 'b--') if 'other' in layers: m4, b4, r4, p4, stderr4 = linregress(si[0], corrs[4]) a.plot(sirange, m4*sirange+b4, 'y--', zorder=0) # scatter plot corrs vs si, one colour per laminarity: if 'all' in layers: a.plot(si[0], corrs[0], 'e.', ms=ms, label='all, m=%.3f, r=%.3f' % (m0, r0)) if 'sup' in layers: a.plot(si[0], corrs[1], 'r.', ms=ms, label='superficial, m=%.3f, r=%.3f' % (m1, r1)) if 'mid' in layers: a.plot(si[0], corrs[2], 'g.', ms=ms, label='middle, m=%.3f, r=%.3f' % (m2, r2)) if 'deep' in layers: a.plot(si[0], corrs[3], 'b.', ms=ms, label='deep, m=%.3f, r=%.3f' % (m3, r3)) if 'other' in layers: a.plot(si[0], corrs[4], 'y.', ms=ms, label='other, m=%.3f, r=%.3f' % (m4, r4), zorder=0) #a.set_xlim(sirange) if kind[0] == 'n': a.set_xlim(-1, 1) a.set_ylim(ylim) #a.autoscale(enable=True, axis='y', tight=True) a.set_xlabel('%s SI (%s)' % (sisource.upper(), kind)) a.set_ylabel(ylabel) titlestr = lastcmd() gcfm().window.setWindowTitle(titlestr) a.set_title(titlestr) a.legend(loc='upper left', handlelength=1, handletextpad=0.5, labelspacing=0.1) f.tight_layout(pad=0.3) # crop figure to contents
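# sc_si() above fits one linear regression per layer with scipy.stats.linregress and reports
# slope m and correlation r in the legend labels. A compact sketch of that per-layer loop on
# fake data; the layer names and values are placeholders.
import numpy as np
from scipy.stats import linregress

si = np.random.uniform(-1, 1, 200)
corrs = {'all': 0.05 * si + 0.02 + 0.01 * np.random.randn(200),
         'sup': 0.03 * si + 0.01 + 0.01 * np.random.randn(200)}
for layer, y in corrs.items():
    m, b, r, p, stderr = linregress(si, y)
    print('%s: m=%.3f, r=%.3f, p=%.2g' % (layer, m, r, p))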
figure(figsize=figsize) #plot(Vpps, sis, 'k.', ms=4, alpha=ALPHA) Vppedges = np.arange(0.25, VPPMAX + 0.25, 0.25) # mV Vppmeans, sismeans, sisstds = scatterbin(Vpps, sis, Vppedges, xaverage=None, yaverage=np.mean) errorbar(Vppmeans, sismeans, yerr=sisstds, fmt='k.-', ms=6, lw=1, zorder=9999) xlim(xmin=0, xmax=VPPMAX) ylim(0.2, 1) yticks([0.2, 0.4, 0.6, 0.8, 1]) xlabel('LFP $V_{pp}$ (mV)') ylabel('SI (L/(L+H))') gcfm().window.setWindowTitle('SI vs Vpp lfpwidth=%g lfptres=%g' % (lfpwidth, lfptres)) tight_layout(pad=0.3) figure(figsize=figsize) #plot(stds, sis, 'k.', ms=4, alpha=ALPHA) stdedges = np.arange(25, STDMAX + 25, 25) # uV stdmeans, sismeans, sisstds = scatterbin(stds, sis, stdedges, xaverage=None, yaverage=np.mean) errorbar(stdmeans, sismeans, yerr=sisstds, fmt='k.-', ms=6, lw=1, zorder=9999) xlim(xmin=0, xmax=STDMAX) ylim(0.2, 1) yticks([0.2, 0.4, 0.6, 0.8, 1]) xlabel('LFP $\sigma$ ($\mu$V)')
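# The SI vs Vpp figure above relies on a scatterbin() helper (not shown in this file) to
# average the scattered SI values within Vpp bins before drawing errorbars. A hypothetical
# minimal equivalent using np.digitize; the bin edges and data below are invented.
import numpy as np

def binned_mean(x, y, edges):
    """Mean and std of y within each bin of x defined by edges, plus the bin centres"""
    bis = np.digitize(x, edges) - 1 # 0-based bin index of each x
    centres = (edges[:-1] + edges[1:]) / 2
    means = np.array([ y[bis == i].mean() if np.any(bis == i) else np.nan
                       for i in range(len(centres)) ])
    stds = np.array([ y[bis == i].std() if np.any(bis == i) else np.nan
                      for i in range(len(centres)) ])
    return centres, means, stds

vpps = np.random.uniform(0, 2, 500) # mV
sis = 0.3 + 0.3 * vpps / 2 + 0.1 * np.random.randn(500)
centres, means, stds = binned_mean(vpps, sis, np.arange(0, 2.25, 0.25))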