Example #1
def mua_si_lfp_si(source, layers=False, ms=1, figsize=(7.5, 6.5)):
    """Pool recording.mua_si_lfp_si() results across recordings specified by source,
    plot the result"""
    uns = get_ipython().user_ns
    recs, tracks = parse_source(source)
    lfpsis, muasis = [], []
    for rec in recs:
        print(rec.absname)
        lfpsi, muasi, t = rec.mua_si_lfp_si(ms=ms, layers=layers, plot=False, plotseries=False,
                                            figsize=figsize)
        lfpsis.append(lfpsi)
        muasis.append(muasi)
    lfpsi = np.hstack(lfpsis)
    muasi = np.hstack(muasis)
    # plot:
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.plot([-1, 1], [-1, 1], 'e--') # underplot y=x line
    a.plot(lfpsi, muasi[0], 'e.', ms=ms)
    if layers:
        a.plot(lfpsi, muasi[1], 'r.', ms=ms)
        a.plot(lfpsi, muasi[2], 'g.', ms=ms)
        a.plot(lfpsi, muasi[3], 'b.', ms=ms)
    a.set_xlabel('LFP SI (%s)' % uns['LFPSIKIND'])
    a.set_ylabel('MUA SI (%s)' % uns['MUASIKIND'])
    a.set_xlim(-1, 1)
    a.set_ylim(-1, 1)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    f.tight_layout(pad=0.3) # crop figure to contents
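A minimal, self-contained sketch of the pooling-and-scatter pattern above, using only numpy and matplotlib; the function name pooled_scatter and its arguments are illustrative, not part of the codebase:

import numpy as np
import matplotlib.pyplot as plt

def pooled_scatter(lfpsis, muasis, ms=1):
    """lfpsis, muasis: lists of 1D arrays, one per recording (toy stand-ins)"""
    lfpsi = np.hstack(lfpsis)  # pool per-recording results into one flat array
    muasi = np.hstack(muasis)
    f, a = plt.subplots(figsize=(7.5, 6.5))
    a.plot([-1, 1], [-1, 1], 'k--')  # underplot y=x reference line
    a.plot(lfpsi, muasi, 'k.', ms=ms)
    a.set_xlabel('LFP SI')
    a.set_ylabel('MUA SI')
    a.set_xlim(-1, 1)
    a.set_ylim(-1, 1)
    f.tight_layout(pad=0.3)
    return f

# usage with random data:
# pooled_scatter([np.random.uniform(-1, 1, 100)], [np.random.uniform(-1, 1, 100)])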
Example #2
    def pospdf(self, neurons=None, dim='y', nbins=10, a=None, stats=False, figsize=(7.5, 6.5)):
        """Plot PDF of cell positions ('x' or 'y') along the polytrode
        to get an idea of how cells are distributed in space"""
        if neurons == 'all':
            neurons = self.alln.values()
        elif neurons == 'quiet':
            neurons = self.qn.values()
        else:
            neurons = self.n.values()
        dimi = {'x':0, 'y':1}[dim]
        pos = [ n.pos[dimi] for n in neurons ] # all position values
        nbins = max(nbins, 2*intround(np.sqrt(self.nneurons)))
        n, p = np.histogram(pos, bins=nbins) # p includes rightmost bin edge
        binwidth = p[1] - p[0] # take width of first bin in p

        if stats:
            # compute stats on the raw positions, not on the bin edges in p:
            mean = np.mean(pos)
            median = np.median(pos)
            argmode = n.argmax()
            mode = p[argmode] + binwidth / 2 # middle of tallest bin
            stdev = np.std(pos)

        if a == None:
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
        else: # add to existing axes
            a.hold(True)
            f = pl.gcf()

        # use CLUSTERCOLOURDICT for familiarity with len 10 1-based id to colour mapping
        #color = CLUSTERCOLOURDICT[int(self.id)]
        color = 'k'

        # exclude rightmost bin edge in p
        a.bar(left=p[:-1], height=n, width=binwidth, bottom=0, color=color, ec=color,
              yerr=None, xerr=None, capsize=3)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        a.set_xlabel('neuron %s position (um)' % dim)
        a.set_ylabel('neuron count')

        if stats:
            # add stuff to top right of plot:
            uns = get_ipython().user_ns
            a.text(0.99, 0.99, 'mean = %.3f\n'
                               'median = %.3f\n'
                               'mode = %.3f\n'
                               'stdev = %.3f\n'
                               'minrate = %.2f Hz\n'
                               'nneurons = %d\n'
                               'dt = %d min'
                               % (mean, median, mode, stdev,
                                  uns['MINRATE'], self.nneurons, intround(self.dtmin)),
                               transform = a.transAxes,
                               horizontalalignment='right',
                               verticalalignment='top')
        f.tight_layout(pad=0.3) # crop figure to contents
        f.canvas.draw() # this is needed if a != None when passed as arg
        return a
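A minimal sketch of the same histogram-plus-stats pattern with plain numpy and matplotlib, assuming modern matplotlib's bar(x, height, align='edge') signature; pos_hist and its argument are illustrative names:

import numpy as np
import matplotlib.pyplot as plt

def pos_hist(pos, nbins=10):
    """pos: 1D sequence of positions (um)"""
    pos = np.asarray(pos)
    nbins = max(nbins, 2 * int(round(np.sqrt(len(pos)))))
    n, edges = np.histogram(pos, bins=nbins)  # edges includes the rightmost bin edge
    binwidth = edges[1] - edges[0]
    f, a = plt.subplots(figsize=(7.5, 6.5))
    a.bar(edges[:-1], n, width=binwidth, align='edge', color='k', ec='k')
    a.set_xlabel('position (um)')
    a.set_ylabel('count')
    # stats are computed on the raw positions, not on the bin edges:
    txt = 'mean = %.3f\nmedian = %.3f\nstdev = %.3f' % (np.mean(pos), np.median(pos), np.std(pos))
    a.text(0.99, 0.99, txt, transform=a.transAxes, ha='right', va='top')
    f.tight_layout(pad=0.3)
    return a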
Example #3
 def plot(self, nbins=None, rate=False, figsize=(7.5, 6.5)):
     """style can be 'rate', but defaults to count"""
     if nbins == None:
         nbins = intround(np.sqrt(len(self.dts))) # good heuristic
     dts = self.dts / 1000 # in ms, converts to float64 array
     trange = self.trange / 1000 # in ms, converts to float64 array
     nbins = max(20, nbins) # enforce min nbins
     nbins = min(200, nbins) # enforce max nbins
     t = np.linspace(start=trange[0], stop=trange[1], num=nbins, endpoint=True)
     n = np.histogram(dts, bins=t, density=False)[0]
     binwidth = t[1] - t[0] # all should be equal width
     if rate: # normalize by binwidth and convert to float:
         n = n / float(binwidth)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     a.bar(left=t[:-1], height=n, width=binwidth) # omit last right edge in t
     a.set_xlim(t[0], t[-1])
     a.set_xlabel('ISI (ms)')
     if rate:
         a.set_ylabel('spike rate (Hz)')
     else:
         a.set_ylabel('count')
     #a.set_title('n%d spikes relative to n%d spikes' % (self.n1.id, self.n0.id))
     title = lastcmd() + ', binwidth: %.2f ms' % binwidth
     a.set_title(title)
     gcfm().window.setWindowTitle(title)
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     return self
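A minimal sketch of the ISI histogram above starting from raw spike times, using the same sqrt(N) bin heuristic and the [20, 200] bin clamp; isi_hist is an illustrative name:

import numpy as np
import matplotlib.pyplot as plt

def isi_hist(spiketimes_sec):
    """spiketimes_sec: 1D array of spike times in seconds"""
    dts = np.diff(np.sort(spiketimes_sec)) * 1000  # inter-spike intervals, in ms
    nbins = int(round(np.sqrt(len(dts))))  # sqrt(N) heuristic, as above
    nbins = min(200, max(20, nbins))       # clamp to [20, 200]
    n, t = np.histogram(dts, bins=nbins)
    f, a = plt.subplots(figsize=(7.5, 6.5))
    a.bar(t[:-1], n, width=t[1] - t[0], align='edge')
    a.set_xlabel('ISI (ms)')
    a.set_ylabel('count')
    f.tight_layout(pad=0.3)
    return f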
Example #4
def mua_si_lfp_si(source, layers=False, ms=1, figsize=(7.5, 6.5)):
    """Pool recording.mua_si_lfp_si() results across recordings specified by source,
    plot the result"""
    uns = get_ipython().user_ns
    recs, tracks = parse_source(source)
    lfpsis, muasis = [], []
    for rec in recs:
        print(rec.absname)
        lfpsi, muasi, t = rec.mua_si_lfp_si(ms=ms,
                                            layers=layers,
                                            plot=False,
                                            plotseries=False,
                                            figsize=figsize)
        lfpsis.append(lfpsi)
        muasis.append(muasi)
    lfpsi = np.hstack(lfpsis)
    muasi = np.hstack(muasis)
    # plot:
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.plot([-1, 1], [-1, 1], 'e--')  # underplot y=x line
    a.plot(lfpsi, muasi[0], 'e.', ms=ms)
    if layers:
        a.plot(lfpsi, muasi[1], 'r.', ms=ms)
        a.plot(lfpsi, muasi[2], 'g.', ms=ms)
        a.plot(lfpsi, muasi[3], 'b.', ms=ms)
    a.set_xlabel('LFP SI (%s)' % uns['LFPSIKIND'])
    a.set_ylabel('MUA SI (%s)' % uns['MUASIKIND'])
    a.set_xlim(-1, 1)
    a.set_ylim(-1, 1)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    f.tight_layout(pad=0.3)  # crop figure to contents
Example #5
 def plot(self, normed=True, scale=2.0, MPL=False, margins=True):
     win = RevCorrs.plot(self,
                         normed=normed,
                         title=lastcmd(),
                         scale=scale,
                         MPL=MPL,
                         margins=margins)
     return win  # necessary in IPython
Example #6
    def scstim(self, method='mean', width=None, tres=None, figsize=(7.5, 6.5)):
        """Scatter plot some summary statistic of spike correlations of each recording,
        classified by the stimulus group each recording falls into. width and tres dictate
        tranges to split recordings up into, if any"""

        ## TODO: for each pair of recordings, find common subset of active neurons and
        ## calculate pairwise corrs for each recording in that pair using just those neurons

        ## TODO: maybe limit to visually responsive cells

        uns = get_ipython().user_ns
        if width == None:
            width = uns['SCWIDTH']
        if tres == None:
            tres = width
        blankmseqrids = uns['BSRIDS'][self.absname] + uns['MSRIDS'][
            self.absname]
        movdriftrids = uns['NSRIDS'][self.absname] + uns['DBRIDS'][
            self.absname]

        blankmseqcorrs = []
        movdriftcorrs = []
        for rid in (blankmseqrids + movdriftrids):
            r = self.r[rid]
            print('%s: %s' % (r.absname, r.name))
            spikecorr = r.sc(width=width, tres=tres)
            sc = spikecorr.sct(method=method)[0]
            sc = sc[
                0]  # pull out the spike correlation values that span all laminae
            if rid in blankmseqrids:
                blankmseqcorrs.append(sc)
            else:
                movdriftcorrs.append(sc)
        blankmseqcorrs = np.hstack(blankmseqcorrs)
        movdriftcorrs = np.hstack(movdriftcorrs)
        # repeat each element in blankmseqcorrs len(movdriftcorrs) times:
        x = np.repeat(blankmseqcorrs, len(movdriftcorrs))
        # tile movdriftcorrs len(blankmseqcorrs) times:
        y = np.tile(movdriftcorrs, len(blankmseqcorrs))

        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        lim = min([x.min(), y.min(), 0]), max([x.max(), y.max()])
        a.plot(lim, lim, c='e', ls='--', marker=None)  # y=x line
        a.plot(x, y, 'k.')
        #a.set_xlim(lim)
        #a.set_ylim(lim)
        a.set_xlabel('%s spike correlations: blankscreen and mseq' % method)
        a.set_ylabel('%s spike correlations: movie and drift bar' % method)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        f.tight_layout(pad=0.3)  # crop figure to contents
        f.show()
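The np.repeat/np.tile step above enumerates every (blankmseq, movdrift) pair so the scatter compares all combinations; a small self-contained check of that pairing pattern:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([10, 20])
x = np.repeat(a, len(b))         # [ 1  1  2  2  3  3]
y = np.tile(b, len(a))           # [10 20 10 20 10 20]
pairs = np.column_stack([x, y])  # every element of a paired with every element of b
print(pairs)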
Example #7
    def scstim(self, method='mean', width=None, tres=None, figsize=(7.5, 6.5)):
        """Scatter plot some summary statistic of spike correlations of each recording,
        classified by the stimulus group each recording falls into. width and tres dictate
        tranges to split recordings up into, if any"""

        ## TODO: for each pair of recordings, find common subset of active neurons and calculate
        ## pairwise corrs for each recording in that pair using just those neurons

        ## TODO: maybe limit to visually responsive cells

        uns = get_ipython().user_ns
        if width == None:
            width = uns['SCWIDTH']
        if tres == None:
            tres = width
        blankmseqrids = uns['BSRIDS'][self.absname] + uns['MSRIDS'][self.absname]
        movdriftrids = uns['NSRIDS'][self.absname] + uns['DBRIDS'][self.absname]

        blankmseqcorrs = []
        movdriftcorrs = []
        for rid in (blankmseqrids + movdriftrids):
            r = self.r[rid]
            print('%s: %s' % (r.absname, r.name))
            spikecorr = r.sc(width=width, tres=tres)
            sc = spikecorr.sct(method=method)[0]
            sc = sc[0] # pull out the spike correlation values that span all laminae
            if rid in blankmseqrids:
                blankmseqcorrs.append(sc)
            else:
                movdriftcorrs.append(sc)
        blankmseqcorrs = np.hstack(blankmseqcorrs)
        movdriftcorrs = np.hstack(movdriftcorrs)
        # repeat each element in blankmseqcorrs len(movdriftcorrs) times:
        x = np.repeat(blankmseqcorrs, len(movdriftcorrs))
        # tile movdriftcorrs len(blankmseqcorrs) times:
        y = np.tile(movdriftcorrs, len(blankmseqcorrs))

        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        lim = min([x.min(), y.min(), 0]), max([x.max(), y.max()])
        a.plot(lim, lim, c='e', ls='--', marker=None) # y=x line
        a.plot(x, y, 'k.')
        #a.set_xlim(lim)
        #a.set_ylim(lim)
        a.set_xlabel('%s spike correlations: blankscreen and mseq' % method)
        a.set_ylabel('%s spike correlations: movie and drift bar' % method)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        f.tight_layout(pad=0.3) # crop figure to contents
        f.show()
Example #8
 def meanratepdf(self, bins=None, figsize=(7.5, 6.5)):
     """Plot histogram of mean firing rates"""
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if bins == None:
         bins = np.arange(0, 1, 0.05)
     n, mr = np.histogram(self.meanrates, bins=bins, density=False)
     binwidth = mr[1] - mr[0] # take width of first bin
     a.bar(left=mr[:-1], height=n, width=binwidth, bottom=0, color='k', ec='k')
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
     a.set_xlabel('mean firing rate (Hz)')
     a.set_ylabel('neuron count')
     f.tight_layout(pad=0.3) # crop figure to contents
Example #9
def sc_ising_vs_cch(source, ms=5, figsize=(7.5, 6.5)):
    """Scatter plot spike corrs calculated from Ising matrix against those calculated
    from CCH. INCOMPLETE.

    - find tracks in common, get allnids from each track

    - how to deal with big time gaps between experiments in a single recording? I constrain
    to the set of tranges of each experiment in rec.codes()

    - maybe I can convert the core.SpikeCorr object to take a source argument instead of
    recording/experiment objects
        - do all the spikecorr analyses make sense for multiple recordings, or for recordings
        from different tracks?

    - for each track absname
    """

    isingscs = {}
    cchscs = {}
    # init a dict
    # for each rec, find out which track it's from

    recs, tracks = parse_source(source)
    isingscs, cchscs = [], []
    for rec in recs:
        print(rec.absname)
        sc = rec.sc()
        sc.calc()
        isingscs.append(sc.corrs)
        cchscs.append(rec.sc_cch())
    # for something...

    isingsc = np.hstack(isingscs)
    cchsc = np.hstack(cchscs)

    # plot:
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.plot(isingsc, cchsc, 'e.', ms=ms)
    a.set_xlabel('Ising spike corrs')
    a.set_ylabel('CCH spike corrs')
    a.set_xlim(-0.05, 0.2)
    a.set_ylim(-0.5, 1)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    f.tight_layout(pad=0.3)  # crop figure to contents
Example #10
def sc_ising_vs_cch(source, ms=5, figsize=(7.5, 6.5)):
    """Scatter plot spike corrs calculated from Ising matrix against those calculated
    from CCH. INCOMPLETE.

    - find tracks in common, get allnids from each track

    - how to deal with big time gaps between experiments in a single recording? I constrain
    to the set of tranges of each experiment in rec.codes()

    - maybe I can convert the core.SpikeCorr object to take a source argument instead of
    recording/experiment objects
        - do all the spikecorr analyses make sense for multiple recordings, or for recordings
        from different tracks?

    - for each track absname
    """

    isingscs = {}
    cchscs = {}
    # init a dict
    # for each rec, find out which track it's from

    recs, tracks = parse_source(source)
    isingscs, cchscs = [], []
    for rec in recs:
        print(rec.absname)
        sc = rec.sc()
        sc.calc()
        isingscs.append(sc.corrs)
        cchscs.append(rec.sc_cch())
    # for something...

    isingsc = np.hstack(isingscs)
    cchsc = np.hstack(cchscs)

    # plot:
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.plot(isingsc, cchsc, 'e.', ms=ms)
    a.set_xlabel('Ising spike corrs')
    a.set_ylabel('CCH spike corrs')
    a.set_xlim(-0.05, 0.2)
    a.set_ylim(-0.5, 1)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    f.tight_layout(pad=0.3) # crop figure to contents
Example #11
    def plot(self, var='ori', fixed=None, figsize=(4, 4), title=False):
        """var: string name of variable you want to plot a tuning curve for
        fixed: dict with keys containing names of vars to keep fixed when building tuning
        curve, and values containing each var's value(s) to fix at
        
        Ex: r71.n[1].tune().plot('phase0', fixed={'ori':138, 'sfreqCycDeg':[0.4, 0.8]})
        """
        if var == 'ori':
            theta, r, z, p = self.pref(var=var, fixed=fixed)
            txt = ('pref=%.2f\n'
                   'r=%.2f\n'
                   'z=%.2f\n'
                   'p=%.6f' % (theta, r, z, p))
        else:
            self.calc(var=var, fixed=fixed)
            ysum = self.y.sum()
            if ysum != 0:
                r = self.y.max() / ysum  # fraction of spikes at max
            else:
                r = 0.0
            txt = 'peak=%.2f\nr=%.2f' % (self.peak, r)

        # create a new figure:
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        fontsize = get_ipython().user_ns['fontsize']  # function
        fs = fontsize()  # save original font size
        a.plot(self.x, self.y, c='e', ls='--', mew=0, mfc='k', ms=10)
        a.set_ylim(ymin=0)
        a.set_xlabel(var)
        a.set_ylabel('spike count')
        titlestr = lastcmd()
        titlestr += ' nid%d' % self.neuron.id
        if title:
            a.set_title(titlestr)
        f.canvas.window().setWindowTitle(titlestr)
        a.text(0.99,
               0.99,
               txt,
               transform=a.transAxes,
               horizontalalignment='right',
               verticalalignment='top')
        f.tight_layout(pad=0.3)  # crop figure to contents
        return self
Example #12
 def std(self,
         t0=None,
         t1=None,
         chani=-1,
         width=None,
         tres=None,
         fmt='k-',
         title=True,
         figsize=(20, 3.5)):
     """Plot standard deviation of LFP signal from t0 to t1 on chani, using bins of width
     and tres"""
     uns = get_ipython().user_ns
     self.get_data()
     data = self.data[chani]
     ts = self.get_tssec()
     if t0 == None:
         t0 = ts[0]
     if t1 == None:
         t1 = ts[-1]
     if width == None:
         width = uns['LFPSIWIDTH']  # sec
     if tres == None:
         tres = uns['LFPSITRES']  # sec
     tranges = split_tranges([(t0, t1)], width, tres)
     stds = []
     for trange in tranges:
         ti0, ti1 = ts.searchsorted(trange)
         stds.append(data[ti0:ti1].std())
     stds = np.hstack(stds)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     a.plot(tranges[:, 0], stds, fmt)
     a.autoscale(enable=True, tight=True)
     a.set_xlim(xmin=0)  # ADC clock starts at t=0
     a.set_xlabel('time (s)')
     a.set_ylabel('LFP $\sigma$ ($\mu$V)')
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     if title:
         a.set_title(titlestr)
     f.tight_layout(pad=0.3)
     self.f = f
     return stds
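A minimal sketch of the windowed standard deviation above, with a simple stand-in for the codebase's split_tranges() helper (overlapping windows of length width stepped by tres, both in seconds); window_std is an illustrative name:

import numpy as np

def window_std(ts, data, width=2.0, tres=0.5):
    """ts: sorted sample times in sec, data: 1D signal; return window start times and stds"""
    starts = np.arange(ts[0], ts[-1] - width, tres)  # overlapping window start times
    stds = []
    for t0 in starts:
        i0, i1 = np.searchsorted(ts, (t0, t0 + width))
        stds.append(data[i0:i1].std())
    return starts, np.asarray(stds)

# usage with a toy signal:
# ts = np.arange(0, 60, 0.001); data = np.random.randn(len(ts))
# starts, stds = window_std(ts, data)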
Example #13
 def meanratepdf(self, bins=None, figsize=(7.5, 6.5)):
     """Plot histogram of mean firing rates"""
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if bins == None:
         bins = np.arange(0, 1, 0.05)
     n, mr = np.histogram(self.meanrates, bins=bins, density=False)
     binwidth = mr[1] - mr[0]  # take width of first bin
     a.bar(left=mr[:-1],
           height=n,
           width=binwidth,
           bottom=0,
           color='k',
           ec='k')
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
     a.set_xlabel('mean firing rate (Hz)')
     a.set_ylabel('neuron count')
     f.tight_layout(pad=0.3)  # crop figure to contents
Example #14
    def plot(self, var='ori', fixed=None, figsize=(4, 4), title=False):
        """var: string name of variable you want to plot a tuning curve for
        fixed: dict with keys containing names of vars to keep fixed when building tuning
        curve, and values containing each var's value(s) to fix at
        
        Ex: r71.n[1].tune().plot('phase0', fixed={'ori':138, 'sfreqCycDeg':[0.4, 0.8]})
        """
        if var == 'ori':
            theta, r, z, p = self.pref(var=var, fixed=fixed)
            txt = ('pref=%.2f\n'
                   'r=%.2f\n'
                   'z=%.2f\n'
                   'p=%.6f' % (theta, r, z, p))
        else:
            self.calc(var=var, fixed=fixed)
            ysum = self.y.sum()
            if ysum != 0:
                r = self.y.max() / ysum # fraction of spikes at max
            else:
                r = 0.0
            txt = 'peak=%.2f\nr=%.2f' % (self.peak, r)

        # create a new figure:
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        fontsize = get_ipython().user_ns['fontsize'] # function
        fs = fontsize() # save original font size
        a.plot(self.x, self.y, c='e', ls='--', mew=0, mfc='k', ms=10)
        a.set_ylim(ymin=0)
        a.set_xlabel(var)
        a.set_ylabel('spike count')
        titlestr = lastcmd()
        titlestr += ' nid%d' % self.neuron.id
        if title:
            a.set_title(titlestr)
        f.canvas.window().setWindowTitle(titlestr)
        a.text(0.99, 0.99, txt, transform=a.transAxes,
               horizontalalignment='right', verticalalignment='top')
        f.tight_layout(pad=0.3) # crop figure to contents
        return self
Example #15
 def std(self, t0=None, t1=None, chani=-1, width=None, tres=None, fmt='k-', title=True,
         figsize=(20, 3.5)):
     """Plot standard deviation of LFP signal from t0 to t1 on chani, using bins of width
     and tres"""
     uns = get_ipython().user_ns
     self.get_data()
     data = self.data[chani]
     ts = self.get_tssec()
     if t0 == None:
         t0 = ts[0]
     if t1 == None:
         t1 = ts[-1]
     if width == None:
         width = uns['LFPSIWIDTH'] # sec
     if tres == None:
         tres = uns['LFPSITRES'] # sec
     tranges = split_tranges([(t0, t1)], width, tres)
     stds = []
     for trange in tranges:
         ti0, ti1 = ts.searchsorted(trange)
         stds.append(data[ti0:ti1].std())
     stds = np.hstack(stds)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     a.plot(tranges[:, 0], stds, fmt)
     a.autoscale(enable=True, tight=True)
     a.set_xlim(xmin=0) # ADC clock starts at t=0
     a.set_xlabel('time (s)')
     a.set_ylabel('LFP $\sigma$ ($\mu$V)')
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     if title:
         a.set_title(titlestr)
     f.tight_layout(pad=0.3)
     self.f = f
     return stds
Example #16
    def npos(self,
             colour='active',
             inchespermicron=0.007,
             legend=False,
             alpha=0.6):
        """Plot (x, y) cell positions over top of polytrode channel positions, to get an idea
        of how cells are distributed in space. Colour cells by 'active', 'rftype',
        'spiketype' or 'sigma'."""
        uns = get_ipython().user_ns
        npos = np.asarray([neuron.pos for neuron in self.alln.values()])
        chanpos = self.chanpos
        chanxs, chanys = chanpos[:, 0], chanpos[:, 1]
        uchanxs = np.unique(chanxs)
        xspace = np.diff(uchanxs).max(
        )  # max spacing of consecutive unique x chan positions
        hsw = uns['PTSHANKWIDTHS'][self.pttype] / 2  # half shank width
        xs = np.hstack((npos[:, 0], chanxs, [-hsw, hsw]))
        ys = np.hstack((npos[:, 1], chanys))
        ymin = min(min(ys), 0)
        xlim = min(xs.min(),
                   uchanxs[0] - xspace / 2), max(xs.max(),
                                                 uchanxs[-1] + xspace / 2)
        ylim = ys.max() + xspace, ymin  # inverted y axis

        figwidth = inchespermicron * np.ptp(
            xlim) * 2 + 3 * legend  # make space for y axis labels
        figheight = inchespermicron * np.ptp(ylim)
        f = pl.figure(figsize=(figwidth, figheight))
        a = f.add_subplot(111, aspect='equal')
        a.set_frame_on(False)
        # plot rectangle representing shank width and length, excluding the tip:
        sl = ylim[0]
        # starting from bottom left, going clockwise:
        shankxs = -hsw, -hsw, hsw, hsw
        shankys = sl, ymin, ymin, sl
        a.fill(shankxs, shankys, color='lightgrey', ec='none')
        # plot electrode sites:
        a.plot(chanpos[:, 0], chanpos[:, 1], 'k.', ms=5)
        if colour == 'active':
            # plot active and quiet cell positions in red and blue, respectively:
            anpos = np.asarray([neuron.pos for neuron in self.n.values()])
            qnpos = np.asarray([neuron.pos for neuron in self.qn.values()])
            na = len(anpos)
            nq = len(qnpos)
            # layer in inverse order of importance:
            if nq:  # plot quiet cells only if there are any
                a.plot(qnpos[:, 0],
                       qnpos[:, 1],
                       'b.',
                       ms=10,
                       alpha=alpha,
                       label='quiet')
            if na:  # plot active cells only if there are any
                a.plot(anpos[:, 0],
                       anpos[:, 1],
                       'r.',
                       ms=10,
                       alpha=alpha,
                       label='active')
        elif colour == 'rftype':
            # plot simple, complex, LGN afferent and None in red, blue, green and grey:
            spos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.rftype == 'simple'
            ])
            cpos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.rftype == 'complex'
            ])
            Lpos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.rftype == 'LGN'
            ])
            Npos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.rftype == None
            ])
            ns = len(spos)
            nc = len(cpos)
            nL = len(Lpos)
            nN = len(Npos)
            # layer in inverse order of importance:
            if nN:
                a.plot(Npos[:, 0],
                       Npos[:, 1],
                       'e.',
                       ms=10,
                       alpha=alpha,
                       label='unknown')
            if nL:
                a.plot(Lpos[:, 0],
                       Lpos[:, 1],
                       'g.',
                       ms=10,
                       alpha=alpha,
                       label='LGN afferent')
            if nc:
                a.plot(cpos[:, 0],
                       cpos[:, 1],
                       'b.',
                       ms=10,
                       alpha=alpha,
                       label='complex')
            if ns:
                a.plot(spos[:, 0],
                       spos[:, 1],
                       'r.',
                       ms=10,
                       alpha=alpha,
                       label='simple')
        elif colour == 'spiketype':
            # plot fast, slow, fastasym and slowasym in red, blue, green and grey:
            fpos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.spiketype == 'fast'
            ])
            spos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.spiketype == 'slow'
            ])
            fapos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.spiketype == 'fastasym'
            ])
            sapos = np.asarray([
                neuron.pos for neuron in self.alln.values()
                if neuron.spiketype == 'slowasym'
            ])
            nf = len(fpos)
            ns = len(spos)
            nfa = len(fapos)
            nsa = len(sapos)
            # layer in inverse order of frequency:
            if nf:
                a.plot(fpos[:, 0],
                       fpos[:, 1],
                       'r.',
                       ms=10,
                       alpha=alpha,
                       label='fast')
            if ns:
                a.plot(spos[:, 0],
                       spos[:, 1],
                       'b.',
                       ms=10,
                       alpha=alpha,
                       label='slow')
            if nfa:
                a.plot(fapos[:, 0],
                       fapos[:, 1],
                       'g.',
                       ms=10,
                       alpha=alpha,
                       label='fast asymmetric')
            if nsa:
                a.plot(sapos[:, 0],
                       sapos[:, 1],
                       'e.',
                       ms=10,
                       alpha=alpha,
                       label='slow asymmetric')
        elif colour == 'sigma':
            sigmas = np.asarray(
                [neuron.sigma for neuron in self.alln.values()])
            cmap = mpl.cm.hot_r
            # best to fully saturate alpha because colour indicates value, not just class:
            sc = a.scatter(npos[:, 0],
                           npos[:, 1],
                           edgecolor='none',
                           c=sigmas,
                           cmap=cmap,
                           alpha=1.0,
                           s=30,
                           zorder=10)
        else:
            raise RuntimeError("unknown colour kwarg %r" % colour)
        a.set_xlim(xlim)
        a.set_ylim(ylim)
        a.set_xticks(uchanxs)
        a.set_yticks(np.arange(0, ylim[0], 200))
        #a.xaxis.set_ticks_position('bottom')
        #a.yaxis.set_ticks_position('left')
        # put legend to right of the axes:
        if legend:
            if colour == 'sigma':
                f.colorbar(sc,
                           ax=a,
                           shrink=0.1,
                           pad=0.1,
                           aspect=10,
                           ticks=[min(sigmas), max(sigmas)],
                           format='%d',
                           label='sigma')
            else:
                a.legend(loc='center left',
                         bbox_to_anchor=(1.2, 0.5),
                         frameon=False)
        bbox = a.get_position()
        wh = bbox.width / bbox.height  # w:h ratio of axes, includes all ticks and labels?
        w, h = gcfm().canvas.get_width_height()
        gcfm().resize(w * wh, h)
        titlestr = lastcmd()
        gcfm().set_window_title(titlestr)
        a.set_title(self.absname)
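A minimal, self-contained sketch of the 'sigma' branch above: scatter (x, y) positions coloured by a continuous value, with a colorbar; the toy data are random stand-ins:

import numpy as np
import matplotlib.pyplot as plt

xy = np.random.uniform(0, 1000, size=(30, 2))  # toy cell positions (um)
sigmas = np.random.uniform(20, 80, size=30)    # toy per-cell sigma values
f, a = plt.subplots()
sc = a.scatter(xy[:, 0], xy[:, 1], c=sigmas, cmap=plt.cm.hot_r, edgecolors='none', s=30)
a.invert_yaxis()                               # depth increases downward, as above
f.colorbar(sc, ax=a, label='sigma')
plt.show()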
Example #17
    def cch(self,
            nid0,
            nid1=None,
            trange=50,
            binw=None,
            shift=None,
            nshifts=10,
            rate=False,
            norm=False,
            c='k',
            title=True,
            figsize=(7.5, 6.5)):
        """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
        If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
        (in ms) is set, calculate the average of +/- nshift CCHs shifted by shift, and then
        subtract that from the unshifted CCH to get the shift corrected CCH"""
        if nid1 == None:
            nid1 = nid0
        autocorr = nid0 == nid1
        n0 = self.alln[nid0]
        n1 = self.alln[nid1]
        calctrange = trange * 1000  # calculation trange, in us
        if shift:
            assert nshifts > 0
            shift *= 1000  # convert to us
            maxshift = nshifts * shift
            calctrange += maxshift  # expand calculated trange (still in us) to encompass shifts
        calctrange = np.array([-calctrange,
                               calctrange])  # convert to a +/- array, in us
        dts = util.xcorr(n0.spikes, n1.spikes, calctrange)  # in us
        if autocorr:
            dts = dts[dts != 0]  # remove 0s for autocorr
        if shift:  # calculate dts for shift corrector
            shiftis = list(range(-nshifts, nshifts + 1))  # need a list so 0 can be removed
            # don't shift by 0, that's the original which we'll subtract from:
            shiftis.remove(0)
            shifts = np.asarray(shiftis) * shift
            shiftdts = np.hstack([dts + s for s in shifts])  # in us
            print('shifts =', shifts / 1000)

        if not binw:
            nbins = intround(np.sqrt(len(dts)))  # good heuristic
            nbins = max(20, nbins)  # enforce min nbins
            nbins = min(200, nbins)  # enforce max nbins
        else:
            nbins = intround(2 * trange / binw)

        dts = dts / 1000  # in ms, converts to float64 array
        t = np.linspace(start=-trange,
                        stop=trange,
                        num=nbins + 1,
                        endpoint=True)  # ms
        binw = t[1] - t[0]  # all should be equal width, ms
        n = np.histogram(dts, bins=t, density=False)[0]
        if shift:  # subtract shift corrector
            shiftdts = shiftdts / 1000  # in ms, converts to float64 array
            shiftn = np.histogram(shiftdts, bins=t,
                                  density=False)[0] / (nshifts * 2)
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
            a.bar(left=t[:-1], height=shiftn,
                  width=binw)  # omit last right edge in t
            a.set_xlim(t[0], t[-1])
            a.set_xlabel('spike interval (ms)')
            n -= shiftn
        if norm:  # normalize and convert to float:
            n = n / n.max()
        elif rate:  # normalize by binw and convert to float:
            n = n / binw
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(left=t[:-1], height=n, width=binw, color=c,
              ec=c)  # omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        if norm:
            a.set_ylabel('coincidence rate (AU)')
            a.set_yticks([0, 1])
        elif rate:
            a.set_ylabel('coincidence rate (Hz)')
        else:
            a.set_ylabel('count')
        if title:
            a.set_title('spike times of n%d wrt n%d' %
                        (self.n1.id, self.n0.id))
        wtitlestr = lastcmd()  # + ', binw=%.1f ms' % binw
        gcfm().window.setWindowTitle(wtitlestr)
        f.tight_layout(pad=0.3)  # crop figure to contents
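A minimal numpy stand-in for the util.xcorr() step above: collect all pairwise spike-time differences within +/- trange and histogram them. Function and argument names are illustrative:

import numpy as np
import matplotlib.pyplot as plt

def xcorr_dts(spikes0, spikes1, trange_ms=50.0):
    """spikes0, spikes1: sorted spike times in ms; return all (t1 - t0) within +/- trange_ms"""
    spikes1 = np.asarray(spikes1)
    dts = []
    for t0 in spikes0:
        lo, hi = np.searchsorted(spikes1, (t0 - trange_ms, t0 + trange_ms))
        dts.append(spikes1[lo:hi] - t0)
    return np.hstack(dts) if dts else np.array([])

def plot_cch(spikes0, spikes1, trange_ms=50.0, nbins=50):
    dts = xcorr_dts(np.sort(spikes0), np.sort(spikes1), trange_ms)
    t = np.linspace(-trange_ms, trange_ms, nbins + 1)  # bin edges, in ms
    n, _ = np.histogram(dts, bins=t)
    f, a = plt.subplots()
    a.bar(t[:-1], n, width=t[1] - t[0], align='edge', color='k')
    a.set_xlabel('spike interval (ms)')
    a.set_ylabel('count')
    return f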
Example #18
    def specgram(self, t0=None, t1=None, f0=0.1, f1=100, p0=-60, p1=None, chanis=-1,
                 width=None, tres=None, cm='jet', colorbar=False,
                 showstates=False, lw=4, alpha=1, relative2t0=False, lim2stim=False,
                 title=True, reclabel=True, swapaxes=False, figsize=None):
        """Plot a spectrogram from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values
        from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most
        superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of
        specified chanis. width and tres are in sec. As an alternative to cm.jet (the
        default), cm.gray, cm.hsv, cm.terrain, and cm.cubehelix_r colormaps seem to bring out
        the most structure in the spectrogram. showstates controls whether to plot lines
        demarcating desynchronized and synchronized periods. relative2t0 controls whether to
        plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range
        only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din"""
        uns = get_ipython().user_ns
        self.get_data()
        ts = self.get_tssec() # full set of timestamps, in sec
        if t0 == None:
            t0, t1 = ts[0], ts[-1] # full duration
        if t1 == None:
            t1 = t0 + 10 # 10 sec window
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if width == None:
            width = uns['LFPSPECGRAMWIDTH'] # sec
        if tres == None:
            tres = uns['LFPSPECGRAMTRES'] # sec
        assert tres <= width
        NFFT = intround(width * self.sampfreq)
        noverlap = intround(NFFT - tres * self.sampfreq)
        t0i, t1i = ts.searchsorted((t0, t1))
        #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
        data = self.data[:, t0i:t1i] # slice data
        if figsize == None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the specgram:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5 # inches
            figsize = figwidth, figheight
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if iterable(chanis):
            data = data[chanis].mean(axis=0) # take mean of data on chanis
        else:
            data = data[chanis] # get single row of data at chanis
        #data = filter.notch(data)[0] # remove 60 Hz mains noise
        # convert data from uV to mV, returned t is midpoints of time bins in sec from
        # start of data. I think P is in mV^2?:
        P, freqs, t = mpl.mlab.specgram(data/1e3, NFFT=NFFT, Fs=self.sampfreq,
                                        noverlap=noverlap)
        if not relative2t0:
            t += t0 # convert t to time from start of ADC clock:
        # keep only freqs between f0 and f1:
        if f0 == None:
            f0 = freqs[0]
        if f1 == None:
            f1 = freqs[-1]
        df = f1 - f0
        lo, hi = freqs.searchsorted([f0, f1])
        P, freqs = P[lo:hi], freqs[lo:hi]
        # check for and replace zero power values (ostensibly due to gaps in recording)
        # before attempting to convert to dB:
        zis = np.where(P == 0.0) # row and column indices where P has zero power
        if len(zis[0]) > 0: # at least one hit
            P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
            minnzval = P.min() # get minimum nonzero value
            P[zis] = minnzval # replace with min nonzero values
        P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2?
        # for better visualization, clip power values to within (p0, p1) dB
        if p0 != None:
            P[P < p0] = p0
        if p1 != None:
            P[P > p1] = p1
        #self.P = P

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        statelinepos = f0 - df*0.015 # plot horizontal bars just below x axis
        if showstates:
            if showstates in [True, 'auto']:
                print("TODO: there's an offset plotting bug for 'auto', compare with 'manual'")
                si, t = self.si(plot=False)
                stranges, states = self.si_split(si, t) # sec
                STATECOLOURS = uns['LFPPRBINCOLOURS']
            elif showstates == 'manual':
                stranges, states = [], []
                for state in uns['MANUALSTATES']:
                    for strange in uns['REC2STATE2TRANGES'][self.r.absname][state]:
                        stranges.append(strange)
                        states.append(state)
                stranges = np.vstack(stranges) # 2D array
                STATECOLOURS = uns['MANUALSTATECOLOURS']
            else:
                raise ValueError('invalid value showstates=%r' % showstates)
            # clip stranges to t0, t1:
            stranges[0, 0] = max(stranges[0, 0], t0)
            stranges[-1, 1] = min(stranges[-1, 1], t1)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            for strange, state in zip(stranges, states):
                clr = STATECOLOURS[state]
                lines(statelinepos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha,
                      clip_on=False)

        # Label far left, right, top and bottom edges of imshow image. imshow interpolates
        # between these to place the axes ticks. Time limits are
        # set from midpoints of specgram time bins
        extent = t[0], t[-1], freqs[0], freqs[-1]
        #print('specgram extent: %r' % (extent,))
        # flip P vertically for compatibility with imshow:
        im = a.imshow(P[::-1], extent=extent, cmap=cm)
        a.autoscale(enable=True, tight=True)
        a.axis('tight')
        # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xmin=0, xmax=t[-1])
        a.set_ylim(ymin=freqs[0], ymax=freqs[-1])
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        a.set_xlabel("time (s)")
        a.set_ylabel("frequency (Hz)")
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if title:
            a.set_title(titlestr)
        if reclabel:
            a.text(0.994, 0.95, '%s' % self.r.absname, color='w', transform=a.transAxes,
                   horizontalalignment='right', verticalalignment='top')
        f.tight_layout(pad=0.3) # crop figure to contents
        if colorbar:
            f.colorbar(im, pad=0) # creates big whitespace to the right for some reason
        self.f = f
        return P, freqs, t
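A minimal sketch of the spectrogram-in-dB pattern above, using scipy.signal.spectrogram in place of mpl.mlab.specgram; zero-power bins are floored before the log, as in the gap handling above. The toy signal and parameters are illustrative:

import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import spectrogram

fs = 1000.0                                  # toy sample rate, Hz
t = np.arange(0, 30, 1 / fs)
lfp = np.sin(2 * np.pi * 7 * t) + 0.5 * np.random.randn(len(t))  # toy LFP
nperseg = int(2.0 * fs)                      # 2 s windows
noverlap = nperseg - int(0.5 * fs)           # step by 0.5 s
freqs, times, P = spectrogram(lfp, fs=fs, nperseg=nperseg, noverlap=noverlap)
keep = (freqs >= 0.1) & (freqs <= 100)       # keep only f0 <= f <= f1
P, freqs = P[keep], freqs[keep]
P[P == 0] = P[P > 0].min()                   # replace zeros before converting to dB
P = 10 * np.log10(P)                         # power in dB
f, a = plt.subplots()
a.imshow(P[::-1], extent=(times[0], times[-1], freqs[0], freqs[-1]),
         aspect='auto', cmap='jet')          # flip vertically for imshow
a.set_xlabel('time (s)')
a.set_ylabel('frequency (Hz)')
plt.show()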
Example #19
    def pospdf(self,
               neurons='all',
               dim='y',
               edges=None,
               nbins=10,
               stats=False,
               labels=True,
               a=None,
               figsize=(7.5, 6.5)):
        """Plot PDF of cell positions ('x' or 'y') along the polytrode
        to get an idea of how cells are distributed in space"""
        if neurons == 'all':
            neurons = list(self.alln.values())
        elif neurons == 'quiet':
            neurons = list(self.qn.values())
        elif neurons == 'active':
            neurons = list(self.n.values())
        dimi = {'x': 0, 'y': 1}[dim]
        pos = [n.pos[dimi] for n in neurons]  # all position values
        if edges is not None:  # use "is not None": edges may be an array
            nbins = len(edges) - 1
            bins = edges  # assume it includes rightmost bin edge
        else:
            nbins = max(nbins, 2 * intround(np.sqrt(self.nneurons)))
            bins = nbins
        n, p = np.histogram(pos, bins=bins)  # p includes rightmost bin edge
        binwidth = p[1] - p[0]  # take width of first bin in p

        if stats:
            # compute stats on the raw positions, not on the bin edges in p:
            mean = np.mean(pos)
            median = np.median(pos)
            argmode = n.argmax()
            mode = p[argmode] + binwidth / 2  # middle of tallest bin
            stdev = np.std(pos)

        if a == None:
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
        else:  # add to existing axes
            a.hold(True)
            f = pl.gcf()

        # use CCWHITEDICT1 for familiarity with len 10 1-based id to colour mapping
        #color = CCWHITEDICT1[int(self.id)]
        color = 'k'

        # exclude rightmost bin edge in p
        a.bar(left=p[:-1],
              height=n,
              width=binwidth,
              bottom=0,
              color=color,
              ec=color)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if labels:
            a.set_title(titlestr)
            a.set_xlabel('neuron %s position (um)' % dim)
            a.set_ylabel('neuron count')

        if stats:
            # add stuff to top right of plot:
            uns = get_ipython().user_ns
            a.text(0.99,
                   0.99,
                   'mean = %.3f\n'
                   'median = %.3f\n'
                   'mode = %.3f\n'
                   'stdev = %.3f\n'
                   'minrate = %.2f Hz\n'
                   'nneurons = %d\n'
                   'dt = %d min' % (mean, median, mode, stdev, uns['MINRATE'],
                                    self.nneurons, intround(self.dtmin)),
                   transform=a.transAxes,
                   horizontalalignment='right',
                   verticalalignment='top')
        f.tight_layout(pad=0.3)  # crop figure to contents
        f.canvas.draw()  # this is needed if a != None when passed as arg
        return a
Example #20
    def si_plot(self,
                si,
                t,
                t0=None,
                t1=None,
                xlim=None,
                ylim=None,
                yticks=None,
                ylabel=None,
                showxlabel=True,
                showylabel=True,
                showtitle=True,
                title=None,
                reclabel=True,
                hlines=[0],
                showstates=False,
                statelinepos=None,
                lw=4,
                alpha=1,
                swapaxes=False,
                figsize=None):
        """Plot synchrony index as a function of time, with hopefully the same
        temporal scale as some of the other plots in self"""
        uns = get_ipython().user_ns
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)

        xlabel = "time (s)"
        if ylabel == None:
            ylabel = "synchrony index (AU?)"

        if swapaxes:
            t, si = si, t  # swap t and si
            xlim, ylim = ylim, xlim
            ylim = ylim[1], ylim[0]  # swap new ylimits so t=0 is at top
            xlabel, ylabel = ylabel, xlabel  # swap labels
            showxlabel, showylabel = showylabel, showxlabel  # swap flags
            # underplot vertical lines:
            for hline in hlines:
                a.axvline(x=hline, c='e', ls='--', marker=None)
        else:
            # underplot horizontal lines:
            for hline in hlines:
                a.axhline(y=hline, c='e', ls='--', marker=None)

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        if showstates in [True, 'auto']:
            stranges, states = self.si_split(si, t)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            slposs = statelinepos
            if len(slposs) == 1:  # use same statelinepos for all states
                nstranges = len(stranges)
                slposs = slposs * nstranges
            for strange, state, slpos in zip(stranges, states, slposs):
                clr = uns['LFPPRBINCOLOURS'][state]
                lines(slpos,
                      strange[0],
                      strange[1],
                      colors=clr,
                      lw=lw,
                      alpha=alpha)
        elif showstates == 'manual':
            REC2STATETRANGES = uns['REC2STATETRANGES']
            dtrange, strange = np.asarray(
                REC2STATETRANGES[self.r.absname]) / 1e6
            dtrange = max(dtrange[0],
                          t0), min(dtrange[1],
                                   t1)  # clip desynch trange to t0, t1
            strange = max(strange[0],
                          t0), min(strange[1],
                                   t1)  # clip synch trange to t0, t1
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            slposs = statelinepos
            if len(slposs) == 1:  # use same statelinepos for both states
                slposs = slposs * 2
            lines(slposs[0],
                  dtrange[0],
                  dtrange[1],
                  colors='b',
                  lw=lw,
                  alpha=alpha)
            lines(slposs[1],
                  strange[0],
                  strange[1],
                  colors='r',
                  lw=lw,
                  alpha=alpha)

        a.plot(t, si, 'k-')
        # depending on relative2t0 in si(), x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xlim)  # low/high limits are unchanged if None
        a.set_ylim(ylim)
        if yticks != None:
            a.set_yticks(yticks)
        if showxlabel:
            a.set_xlabel(xlabel)
        if showylabel:
            a.set_ylabel(ylabel)
        #a.autoscale(axis='x', enable=True, tight=True)
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        if title == None:
            title = lastcmd()
        gcfm().window.setWindowTitle(title)
        if showtitle:
            a.set_title(title)
        if reclabel:
            a.text(0.994,
                   0.01,
                   '%s' % self.r.absname,
                   color='k',
                   transform=a.transAxes,
                   horizontalalignment='right',
                   verticalalignment='bottom')
        f.tight_layout(pad=0.3)  # crop figure to contents
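A minimal sketch of the state-bar idea above: horizontal bars drawn with Axes.hlines() demarcate time ranges beneath a synchrony-index trace; the trace, ranges and colours are toy values:

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 100, 1000)
si = 0.5 * np.sin(t / 10)              # toy synchrony index trace
stranges = [(0, 40), (40, 100)]        # toy state time ranges, in sec
colors = ['b', 'r']                    # one colour per state
f, a = plt.subplots()
a.axhline(y=0, c='k', ls='--')         # underplot y=0 reference line
a.plot(t, si, 'k-')
for (t0, t1), c in zip(stranges, colors):
    a.hlines(-0.6, t0, t1, colors=c, lw=4)  # bar just below the trace
a.set_xlabel('time (s)')
a.set_ylabel('synchrony index')
plt.show()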
Example #21
    def cch(self, nid0, nid1=None, trange=50, binw=None, shift=None, nshifts=10,
            rate=False, norm=False, c='k', title=True, figsize=(7.5, 6.5)):
        """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
        If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
        (in ms) is set, calculate the average of +/- nshift CCHs shifted by shift, and then
        subtract that from the unshifted CCH to get the shift corrected CCH"""
        if nid1 == None:
            nid1 = nid0
        autocorr = nid0 == nid1
        n0 = self.alln[nid0]
        n1 = self.alln[nid1]
        calctrange = trange * 1000 # calculation trange, in us
        if shift:
            assert nshifts > 0
            shift *= 1000 # convert to us
            maxshift = nshifts * shift
            calctrange += maxshift # expand calculated trange (still in us) to encompass shifts
        calctrange = np.array([-calctrange, calctrange]) # convert to a +/- array, in us
        dts = util.xcorr(n0.spikes, n1.spikes, calctrange) # in us
        if autocorr:
            dts = dts[dts != 0] # remove 0s for autocorr
        if shift: # calculate dts for shift corrector
            shiftis = list(range(-nshifts, nshifts+1)) # need a list so 0 can be removed
            shiftis.remove(0) # don't shift by 0, that's the original which we'll subtract from
            shifts = np.asarray(shiftis) * shift
            shiftdts = np.hstack([ dts+s for s in shifts ]) # in us
            print('shifts =', shifts / 1000)

        if not binw:
            nbins = intround(np.sqrt(len(dts))) # good heuristic
            nbins = max(20, nbins) # enforce min nbins
            nbins = min(200, nbins) # enforce max nbins
        else:
            nbins = intround(2 * trange / binw)

        dts = dts / 1000 # in ms, converts to float64 array
        t = np.linspace(start=-trange, stop=trange, num=nbins+1, endpoint=True) # ms
        binw = t[1] - t[0] # all should be equal width, ms
        n = np.histogram(dts, bins=t, density=False)[0]
        if shift: # subtract shift corrector
            shiftdts = shiftdts / 1000 # in ms, converts to float64 array
            shiftn = np.histogram(shiftdts, bins=t, density=False)[0] / (nshifts*2)
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
            a.bar(left=t[:-1], height=shiftn, width=binw) # omit last right edge in t
            a.set_xlim(t[0], t[-1])
            a.set_xlabel('spike interval (ms)')
            n -= shiftn
        if norm: # normalize and convert to float:
            n = n / n.max()
        elif rate: # normalize by binw and convert to float:
            n = n / binw
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(left=t[:-1], height=n, width=binw, color=c, ec=c) # omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        if norm:
            a.set_ylabel('coincidence rate (AU)')
            a.set_yticks([0, 1])
        elif rate:
            a.set_ylabel('coincidence rate (Hz)')
        else:
            a.set_ylabel('count')
        if title:
            a.set_title('spike times of n%d wrt n%d' % (self.n1.id, self.n0.id))
        wtitlestr = lastcmd()# + ', binw=%.1f ms' % binw
        gcfm().window.setWindowTitle(wtitlestr)
        f.tight_layout(pad=0.3) # crop figure to contents
Example #22
 def plot(self, t0=None, t1=None, chanis=None, gain=1, c='k', alpha=1.0, yunits='um',
          yticks=None, title=True, xlabel=True, relative2t0=False, lim2stim=False,
          scalebar=True, lw=4, figsize=(20, 6.5)):
     """Plot chanis of LFP data between t0 and t1 in sec. Unfortunatley, setting an alpha <
     1 doesn't seem to reveal detail when a line obscures itself, such as when plotting a
     very long time series. relative2t0 controls whether to plot relative to t0, or
     relative to start of ADC clock. lim2stim limits the time range only to when a stimulus
     was on screen, i.e. to the outermost times of non-NULL din. If only one chan is
     requested, it's plotted on a mV scale instead of a spatial scale."""
     self.get_data()
     ts = self.get_tssec() # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1]
     if t1 == None:
         t1 = t0 + 10 # 10 sec window
     if chanis == None:
         chanis = range(len(self.chans)) # all chans
     if lim2stim:
         t0, t1 = self.apply_lim2stim(t0, t1)
     t0i, t1i = ts.searchsorted((t0, t1))
     ts = ts[t0i:t1i] # constrained set of timestamps, in sec
     chanis = tolist(chanis)
     nchans = len(chanis)
     # grab desired channels and time range:
     data = self.data[chanis][:, t0i:t1i]
     if nchans > 1: # convert uV to um:
         totalgain = self.UV2UM * gain
         data = data * totalgain
     else: # convert uV to mV:
         data = data / 1000
         yunits = 'mV'
     nt = len(ts)
     assert nt == data.shape[1]
     if relative2t0:
         # convert ts to time from t0, otherwise plot time from start of ADC clock:
         ts -= t0
     x = np.tile(ts, nchans)
     x.shape = nchans, nt
     segments = np.zeros((nchans, nt, 2)) # x vals in col 0, yvals in col 1
     segments[:, :, 0] = x
     if nchans > 1:
         segments[:, :, 1] = -data # set to -ve here because of invert_yaxis() below
     else:
         segments[:, :, 1] = data
     if nchans > 1: # add y offsets:
         maxypos = 0
         for chanii, chani in enumerate(chanis):
             chan = self.chans[chani]
             ypos = self.chanpos[chan][1] # in um
             segments[chanii, :, 1] += ypos # vertical distance below top of probe
             maxypos = max(maxypos, ypos)
         if yunits == 'mm': # convert from um to mm
             segments[:, :, 1] /= 1000
             maxypos = maxypos / 1000 # convert from int to float
             totalgain = totalgain / 1000
     lc = LineCollection(segments, linewidth=1, linestyle='-', colors=c, alpha=alpha,
                         antialiased=True, visible=True)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     a.add_collection(lc) # add to axes' pool of LCs
     if scalebar: # add vertical scale bar at end of last channel to represent 1 mV:
         if nchans > 1:
             ymin, ymax = maxypos-500*totalgain, maxypos+500*totalgain # +/- 0.5 mV
         else:
             ymin, ymax = -0.5, 0.5 # mV
         a.vlines(ts.max()*0.99, ymin, ymax, lw=lw, colors='e')
     a.autoscale(enable=True, tight=True)
     # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
     a.set_xlim(xmin=0)
     if nchans > 1:
         a.invert_yaxis() # for spatial scale
     if yticks != None:
         a.set_yticks(yticks)
     # turn off annoying "+2.41e3" type offset on x axis:
     formatter = mpl.ticker.ScalarFormatter(useOffset=False)
     a.xaxis.set_major_formatter(formatter)
     if xlabel:
         a.set_xlabel("time (s)")
     if yunits == 'um':
         a.set_ylabel("depth ($\mu$m)")
     elif yunits == 'mm':
         a.set_ylabel("depth (mm)")
     elif yunits == 'mV':
         a.set_ylabel("LFP (mV)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     if title:
         a.set_title(titlestr)
         a.text(0.998, 0.99, '%s' % self.r.name, transform=a.transAxes,
                horizontalalignment='right', verticalalignment='top')
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     return self
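A minimal sketch of the LineCollection layout above: offset each channel's trace to its depth and add all traces in one collection rather than one plot() call per channel; the toy data replace real LFP:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

nchans, nt = 8, 2000
ts = np.linspace(0, 2, nt)                 # 2 s of time, in sec
data = np.random.randn(nchans, nt) * 20    # toy LFP, in um after gain
ypos = np.arange(nchans) * 100             # channel depths, in um

segments = np.zeros((nchans, nt, 2))       # x vals in col 0, y vals in col 1
segments[:, :, 0] = ts
segments[:, :, 1] = -data + ypos[:, None]  # negate so invert_yaxis() reads as depth

f, a = plt.subplots(figsize=(12, 4))
a.add_collection(LineCollection(segments, linewidth=1, colors='k'))
a.autoscale(enable=True, tight=True)
a.invert_yaxis()                           # deeper channels lower on the plot
a.set_xlabel('time (s)')
a.set_ylabel('depth (um)')
plt.show()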
Example #23
def sc_si(source, method='mean', sisource='lfp', kind=None, chani=-1, sirange=None,
          layers=False, ms=1, figsize=(7.5, 6.5)):
    """Pool recording.sc().si() results across recordings specified by source,
    plot the result"""
    uns = get_ipython().user_ns
    if layers == False:
        layers = ['all']
    elif layers == True:
        layers = ['sup', 'deep']
    LAYER2I = {'all':0, 'sup':1, 'mid':2, 'deep':3, 'other':4}
    layeris = [ LAYER2I[layer] for layer in layers ]

    recs, tracks = parse_source(source)

    if sisource not in ['lfp', 'mua']:
        raise ValueError('unknown sisource %r' % sisource)

    if kind == None:
        if sisource == 'lfp':
            kind = uns['LFPSIKIND']
        else:
            kind = uns['MUASIKIND']

    # calculate
    corrss, sis = [], []
    for rec in recs:
        print(rec.absname)
        corrs, si, ylabel = rec.sc().si(method=method, sisource=sisource, kind=kind,
                                        chani=chani, sirange=sirange, plot=False)
        corrss.append(corrs)
        sis.append(si)
    corrs = np.hstack(corrss)
    si = np.hstack(sis)

    # plot
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)

    #ylim = corrs[layeris].min(), corrs[layeris].max()
    #yrange = ylim[1] - ylim[0]
    #extra = yrange*0.03 # 3 %
    #ylim = ylim[0]-extra, ylim[1]+extra
    ylim = uns['SCLIMITS']

    # keep only those points whose synchrony index falls within sirange:
    if sirange == None:
        finitesi = si[np.isfinite(si)]
        sirange = finitesi.min(), finitesi.max()
    sirange = np.asarray(sirange)
    keepis = (sirange[0] <= si[0]) * (si[0] <= sirange[1]) # boolean index array
    si = si[:, keepis]
    corrs = corrs[:, keepis]
    # plot linear regressions of corrs vs si[0]:
    if 'all' in layers:
        m0, b0, r0, p0, stderr0 = linregress(si[0], corrs[0])
        a.plot(sirange, m0*sirange+b0, 'e--')
    if 'sup' in layers:
        m1, b1, r1, p1, stderr1 = linregress(si[0], corrs[1])
        a.plot(sirange, m1*sirange+b1, 'r--')
    if 'mid' in layers:
        m2, b2, r2, p2, stderr2 = linregress(si[0], corrs[2])
        a.plot(sirange, m2*sirange+b2, 'g--')
    if 'deep' in layers:
        m3, b3, r3, p3, stderr3 = linregress(si[0], corrs[3])
        a.plot(sirange, m3*sirange+b3, 'b--')
    if 'other' in layers:
        m4, b4, r4, p4, stderr4 = linregress(si[0], corrs[4])
        a.plot(sirange, m4*sirange+b4, 'y--', zorder=0)

    # scatter plot corrs vs si, one colour per laminarity:
    if 'all' in layers:
        a.plot(si[0], corrs[0], 'e.', ms=ms, label='all, m=%.3f, r=%.3f'
                                                   % (m0, r0))
    if 'sup' in layers:
        a.plot(si[0], corrs[1], 'r.', ms=ms, label='superficial, m=%.3f, r=%.3f'
                                                   % (m1, r1))
    if 'mid' in layers:
        a.plot(si[0], corrs[2], 'g.', ms=ms, label='middle, m=%.3f, r=%.3f'
                                                   % (m2, r2))
    if 'deep' in layers:
        a.plot(si[0], corrs[3], 'b.', ms=ms, label='deep, m=%.3f, r=%.3f'
                                                   % (m3, r3))
    if 'other' in layers:
        a.plot(si[0], corrs[4], 'y.', ms=ms, label='other, m=%.3f, r=%.3f'
                                                   % (m4, r4), zorder=0)
    #a.set_xlim(sirange)
    if kind[0] == 'n':
        a.set_xlim(-1, 1)
    a.set_ylim(ylim)
    #a.autoscale(enable=True, axis='y', tight=True)
    a.set_xlabel('%s SI (%s)' % (sisource.upper(), kind))
    a.set_ylabel(ylabel)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    a.legend(loc='upper left', handlelength=1, handletextpad=0.5, labelspacing=0.1)
    f.tight_layout(pad=0.3) # crop figure to contents
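
The per-layer regression-then-scatter pattern above is easy to reproduce in isolation. A minimal sketch with synthetic SI and correlation values, assuming only numpy, matplotlib and scipy.stats.linregress:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress

si = np.random.uniform(0, 1, 200)                # synthetic synchrony index values
corrs = 0.1 * si + 0.02 * np.random.randn(200)   # synthetic spike correlations

m, b, r, p, stderr = linregress(si, corrs)
sirange = np.array([si.min(), si.max()])

f, a = plt.subplots()
a.plot(sirange, m * sirange + b, 'k--')          # underplot regression line
a.plot(si, corrs, 'k.', ms=3, label='all, m=%.3f, r=%.3f' % (m, r))
a.set_xlabel('LFP SI')
a.set_ylabel('spike correlation')
a.legend(loc='upper left', handlelength=1, handletextpad=0.5, labelspacing=0.1)
f.tight_layout(pad=0.3)
plt.show()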
Example #24
 def plot(self, normed=True, scale=2.0):
     win = RevCorr.plot(self, normed=normed, title=lastcmd(), scale=scale)
     return win  # necessary in IPython
Example #25
    def si_plot(self, si, t, t0=None, t1=None, xlim=None, ylim=None, yticks=None,
                ylabel=None, showxlabel=True, showylabel=True, showtitle=True,
                title=None, reclabel=True, hlines=[0], showstates=False,
                statelinepos=None, lw=4, alpha=1,
                swapaxes=False, figsize=None):
        """Plot synchrony index as a function of time, with hopefully the same
        temporal scale as some of the other plots in self"""
        uns = get_ipython().user_ns
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)

        xlabel = "time (s)"
        if ylabel == None:
            ylabel = "synchrony index (AU?)"

        if swapaxes:
            t, si = si, t # swap t and si
            xlim, ylim = ylim, xlim
            ylim = ylim[1], ylim[0] # swap new ylimits so t=0 is at top
            xlabel, ylabel = ylabel, xlabel # swap labels
            showxlabel, showylabel = showylabel, showxlabel # swap flags
            # underplot vertical lines:
            for hline in hlines:
                a.axvline(x=hline, c='e', ls='--', marker=None)
        else:
            # underplot horizontal lines:
            for hline in hlines:
                a.axhline(y=hline, c='e', ls='--', marker=None)

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        if showstates in [True, 'auto']:
            stranges, states = self.si_split(si, t)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            slposs = statelinepos
            if len(slposs) == 1: # use same statelinepos for all states
                nstranges = len(stranges)
                slposs = slposs * nstranges
            for strange, state, slpos in zip(stranges, states, slposs):
                clr = uns['LFPPRBINCOLOURS'][state]
                lines(slpos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha)
        elif showstates == 'manual':
            REC2STATETRANGES = uns['REC2STATETRANGES']
            dtrange, strange = np.asarray(REC2STATETRANGES[self.r.absname]) / 1e6
            dtrange = max(dtrange[0], t0), min(dtrange[1], t1) # clip desynch trange to t0, t1
            strange = max(strange[0], t0), min(strange[1], t1) # clip synch trange to t0, t1
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            slposs = statelinepos
            if len(slposs) == 1: # use same statelinepos for both states
                slposs = slposs * 2
            lines(slposs[0], dtrange[0], dtrange[1], colors='b', lw=lw, alpha=alpha)
            lines(slposs[1], strange[0], strange[1], colors='r', lw=lw, alpha=alpha)

        a.plot(t, si, 'k-')
        # depending on relative2t0 in si(), x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xlim) # low/high limits are unchanged if None
        a.set_ylim(ylim)
        if yticks != None:
            a.set_yticks(yticks)
        if showxlabel:
            a.set_xlabel(xlabel)
        if showylabel:
            a.set_ylabel(ylabel)
        #a.autoscale(axis='x', enable=True, tight=True)
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        if title == None:
            title = lastcmd()
        gcfm().window.setWindowTitle(title)
        if showtitle:
            a.set_title(title)
        if reclabel:
            a.text(0.994, 0.01, '%s' % self.r.absname, color='k', transform=a.transAxes,
                   horizontalalignment='right', verticalalignment='bottom')
        f.tight_layout(pad=0.3) # crop figure to contents
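
The state bars above rely on Axes.hlines/vlines accepting a line position plus start/stop extents. A standalone sketch with a synthetic SI trace and hypothetical desynchronized/synchronized time ranges (not taken from REC2STATETRANGES):

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 600, 1000)                    # time in s
si = 0.5 + 0.4 * np.sin(t / 60)                  # synthetic synchrony index

# hypothetical manually defined state time ranges, in s:
desynched, synched = (0, 250), (300, 600)

f, a = plt.subplots(figsize=(20, 3.5))
a.axhline(y=0.5, c='grey', ls='--')              # underplot a threshold line
a.plot(t, si, 'k-')
slpos = -0.05                                    # state line position, just below the trace
a.hlines(slpos, desynched[0], desynched[1], colors='b', lw=4)
a.hlines(slpos, synched[0], synched[1], colors='r', lw=4)
a.set_xlabel('time (s)')
a.set_ylabel('synchrony index')
f.tight_layout(pad=0.3)
plt.show()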
Example #26
 def npos(self, colour='active', inchespermicron=0.007, legend=False, alpha=0.6):
     """Plot (x, y) cell positions over top of polytrode channel positions, to get an idea
     of how cells are distributed in space. Colour cells by 'active', 'rftype',
     'spiketype' or 'sigma'."""
     uns = get_ipython().user_ns
     npos = np.asarray([ neuron.pos for neuron in self.alln.values() ])
     chanpos = self.chanpos
     chanxs, chanys = chanpos[:, 0], chanpos[:, 1]
     uchanxs = np.unique(chanxs)
     xspace = np.diff(uchanxs).max() # max spacing of consecutive unique x chan positions
     hsw = uns['PTSHANKWIDTHS'][self.pttype] / 2 # half shank width
     xs = np.hstack((npos[:, 0], chanxs, [-hsw, hsw]))
     ys = np.hstack((npos[:, 1], chanys))
     ymin = min(min(ys), 0)
     xlim = min(xs.min(), uchanxs[0]-xspace/2), max(xs.max(), uchanxs[-1]+xspace/2)
     ylim = ys.max()+xspace, ymin # inverted y axis
     
     figwidth = inchespermicron * np.ptp(xlim) * 2 + 3*legend # make space for y axis labels
     figheight = inchespermicron * np.ptp(ylim)
     f = pl.figure(figsize=(figwidth, figheight))
     a = f.add_subplot(111, aspect='equal')
     a.set_frame_on(False)
     # plot rectangle representing shank width and length, excluding the tip:
     sl = ylim[0]
     # starting from bottom left, going clockwise:
     shankxs = -hsw, -hsw, hsw, hsw
     shankys = sl, ymin, ymin, sl
     a.fill(shankxs, shankys, color='lightgrey', ec='none')
     # plot electrode sites:
     a.plot(chanpos[:, 0], chanpos[:, 1], 'k.', ms=5)
     if colour == 'active':
         # plot active and quiet cell positions in red and blue, respectively:
         anpos = np.asarray([ neuron.pos for neuron in self.n.values() ])
         qnpos = np.asarray([ neuron.pos for neuron in self.qn.values() ])
         na = len(anpos)
         nq = len(qnpos)
         # layer in inverse order of importance:
         if nq: a.plot(qnpos[:, 0], qnpos[:, 1], 'b.', ms=10, alpha=alpha, label='quiet')
         if na: a.plot(anpos[:, 0], anpos[:, 1], 'r.', ms=10, alpha=alpha, label='active')
     elif colour == 'rftype':
         # plot simple, complex, LGN afferent and None in red, blue, green and grey:
         spos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.rftype == 'simple' ])
         cpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.rftype == 'complex' ])
         Lpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.rftype == 'LGN' ])
         Npos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.rftype == None ])
         ns = len(spos)
         nc = len(cpos)
         nL = len(Lpos)
         nN = len(Npos)
         # layer in inverse order of importance:
         if nN: a.plot(Npos[:, 0], Npos[:, 1], 'e.', ms=10, alpha=alpha, label='unknown')
         if nL: a.plot(Lpos[:, 0], Lpos[:, 1], 'g.', ms=10, alpha=alpha, label='LGN afferent')
         if nc: a.plot(cpos[:, 0], cpos[:, 1], 'b.', ms=10, alpha=alpha, label='complex')
         if ns: a.plot(spos[:, 0], spos[:, 1], 'r.', ms=10, alpha=alpha, label='simple')
     elif colour == 'spiketype':
         # plot fast, slow, fastasym and slowasym in red, blue, green and grey:
         fpos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.spiketype == 'fast' ])
         spos = np.asarray([ neuron.pos for neuron in self.alln.values()
                             if neuron.spiketype == 'slow' ])
         fapos = np.asarray([ neuron.pos for neuron in self.alln.values()
                              if neuron.spiketype == 'fastasym' ])
         sapos = np.asarray([ neuron.pos for neuron in self.alln.values()
                              if neuron.spiketype == 'slowasym' ])
         nf = len(fpos)
         ns = len(spos)
         nfa = len(fapos)
         nsa = len(sapos)
         # layer in inverse order of frequency:
         if nf: a.plot(fpos[:, 0], fpos[:, 1], 'r.', ms=10, alpha=alpha, label='fast')
         if ns: a.plot(spos[:, 0], spos[:, 1], 'b.', ms=10, alpha=alpha, label='slow')
         if nfa: a.plot(fapos[:, 0], fapos[:, 1], 'g.', ms=10, alpha=alpha,
                        label='fast asymmetric')
         if nsa: a.plot(sapos[:, 0], sapos[:, 1], 'e.', ms=10, alpha=alpha,
                        label='slow asymmetric')
     elif colour == 'sigma':
         sigmas = np.asarray([ neuron.sigma for neuron in self.alln.values() ])
         cmap = mpl.cm.hot_r
         # best to fully saturate alpha because colour indicates value, not just class:
         sc = a.scatter(npos[:, 0], npos[:, 1], edgecolor='none', c=sigmas, cmap=cmap,
                        alpha=1.0, s=30, zorder=10)
     else:
         raise RuntimeError("unknown colour kwarg %r" % colour)
     a.set_xlim(xlim)
     a.set_ylim(ylim)
     a.set_xticks(uchanxs)
     a.set_yticks(np.arange(0, ylim[0], 200))
     #a.xaxis.set_ticks_position('bottom')
     #a.yaxis.set_ticks_position('left')
     # put legend to right of the axes:
     if legend:
         if colour == 'sigma':
             f.colorbar(sc, ax=a, shrink=0.1, pad=0.1, aspect=10,
                        ticks=[min(sigmas), max(sigmas)], format='%d', label='sigma')
         else:
             a.legend(loc='center left', bbox_to_anchor=(1.2, 0.5), frameon=False)
     bbox = a.get_position()
     wh = bbox.width / bbox.height # w:h ratio of axes, includes all ticks and labels?
     w, h = gcfm().canvas.get_width_height()
     gcfm().resize(w*wh, h)
     titlestr = lastcmd()
     gcfm().set_window_title(titlestr)
     a.set_title(self.absname)
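
The layering rule used above (plot the less important class first so the more important one lands on top) can be sketched with synthetic positions; the probe geometry below is hypothetical:

import numpy as np
import matplotlib.pyplot as plt

# hypothetical 2-column polytrode channel positions, in um:
chanpos = np.array([(x, y) for y in range(0, 1500, 100) for x in (-28, 28)])
na, nq = 20, 10                                  # fake numbers of active and quiet cells
actx, acty = np.random.uniform(-25, 25, na), np.random.uniform(0, 1500, na)
qx, qy = np.random.uniform(-25, 25, nq), np.random.uniform(0, 1500, nq)

f, a = plt.subplots(figsize=(2, 8))
a.set_aspect('equal')
a.plot(chanpos[:, 0], chanpos[:, 1], 'k.', ms=5) # electrode sites
# layer in inverse order of importance, so active cells land on top:
if nq: a.plot(qx, qy, 'b.', ms=10, alpha=0.6, label='quiet')
if na: a.plot(actx, acty, 'r.', ms=10, alpha=0.6, label='active')
a.invert_yaxis()                                 # depth increases downward
a.legend(loc='center left', bbox_to_anchor=(1.2, 0.5), frameon=False)
plt.show()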
Example #27
    def templates(self, chans='max', cindex='nidi'):
        """Plot cell templates in their polytrode layout. chans can be 'max', 'nneigh', 'all'.
        cindex can be 'nidi' or 'nid', but best to colour cells by nidi to maximize
        alternation."""
        from colour import CCBLACKDICT0, CCBLACKDICT1
        from matplotlib.collections import LineCollection

        HUMPERINCH = 80 # for setting figure size in inches
        VUMPERINCH = 160 # for setting figure size in inches
        USPERUM = 15
        UVPERUM = 3
        HBORDERUS = 50 # us, horizontal border around chans
        VBORDERUV = 150 # uV, vertical border around plots
        HBORDER = HBORDERUS / USPERUM
        VBORDER = VBORDERUV / UVPERUM
        BG = 'black'
        SCALE = 500, 100 # scalebar size in (us, uV)
        SCALE = SCALE[0]/USPERUM, SCALE[1]/UVPERUM # um
        SCALEXOFFSET = 2 # um
        SCALEYOFFSET = 4 # um

        if chans not in ['max', 'nneigh', 'all',]:
            raise ValueError('unknown chans arg %r' % chans)
        if cindex == 'nidi':
            ccdict = CCBLACKDICT0 # use nidi to maximize colour alternation
        elif cindex == 'nid':
            ccdict = CCBLACKDICT1 # use nid to have colours that correspond to those in spyke
        else:
            raise ValueError('unknown cindex arg %r' % cindex)

        # for mpl, convert probe chanpos to center bottom origin instead of center top,
        # i.e. invert the y values:
        chanpos = self.sort.chanpos.copy()
        maxy = chanpos[:, 1].max()
        for chan, (x, y) in enumerate(chanpos):
            chanpos[chan, 1] = maxy - y

        if chans == 'nneigh': # generate dict of nearest neighbours indexed by maxchan
            dm = core.eucd(chanpos) # distance matrix
            minspace = dm[dm!=0].min()
            rincl = minspace * 1.1 # inclusion radius
            nneighs = {}
            for maxchan, pos in enumerate(chanpos):
                d = dm[maxchan]
                nnchans = np.where(d < rincl)[0]
                nneighs[maxchan] = nnchans

        colxs = np.unique(chanpos[:, 0]) # unique column x positions, sorted
        rowys = np.unique(chanpos[:, 1]) # unique row y positions, sorted
        ncols = len(colxs)
        nrows = len(rowys)
        hspace = (colxs[-1]-colxs[0]) / (ncols-1)
        vspace = (rowys[-1]-rowys[0]) / (nrows-1)

        # setting figure size actually sets window size, including toolbar and statusbar
        figwidth = (ncols*hspace + 2*HBORDER) / HUMPERINCH # inches
        figheight = (nrows*vspace + 2*VBORDER) / VUMPERINCH # inches
        dpi = mpl.rcParams['figure.dpi']
        #figwidth = (ncols*hspace) / HUMPERINCH # inches
        #figheight = (nrows*vspace) / VUMPERINCH # inches
        figwidth = intround(figwidth * dpi) / dpi # inches, rounded to nearest pixel
        figheight = intround(figheight * dpi) / dpi # inches, rounded to nearest pixel
        figsize = figwidth, figheight
        f = pl.figure(figsize=figsize, facecolor=BG, edgecolor=BG)
        a = f.add_subplot(111)

        # plot chan lines? maybe just the vertical lines?
        #for pos in chanpos:

        tres = self.sort.tres # time resolution, in us
        nts = np.unique([ neuron.nt for neuron in self.alln.values() ])
        if len(nts) != 1:
            raise RuntimeError("Not all neuron templates have the same number of timepoints. "
                               "That's probably bad.")
        nt = nts[0]
        ts = np.arange(0, nt*tres, tres) # time values in us; nt is shared by all templates

        nids = sorted(self.alln)
        for nidi, nid in enumerate(nids):
            colour = ccdict[eval(cindex)]
            neuron = self.alln[nid]
            # ncs (neuron channels) should be 0-based channel IDs:
            if chans == 'max':
                ncs = [neuron.maxchan]
            elif chans == 'nneigh':
                ncs = nneighs[neuron.maxchan]
            elif chans == 'all':
                ncs = neuron.chans
            # exclude channels of data within neigh that are missing from wavedata
            ncs = [ nc for nc in ncs if nc in neuron.chans ]
            # indices into neuron.chans, use to index into wavedata:
            ncis = np.hstack([ np.where(neuron.chans == nc)[0] for nc in ncs ])
            #import pdb; pdb.set_trace()
            wavedata = neuron.wavedata[ncis]
            # much less efficient, but much simpler than spyke code:
            for c, wd in zip(ncs, wavedata):
                x = chanpos[c, 0] + ts / USPERUM # um
                y = chanpos[c, 1] + wd / UVPERUM # um
                a.plot(x, y, ls='-', marker=None, lw=1, c=colour)

        a.set_axis_bgcolor(BG)
        a.set_xlabel('')
        a.set_ylabel('')
        a.xaxis.set_ticks([])
        a.yaxis.set_ticks([]) # if displayed, y ticks would be distance from bottom chan

        a.set_xlim(colxs[0]-HBORDER, colxs[-1]+nt*tres/USPERUM+HBORDER) # um
        a.set_ylim(rowys[0]-VBORDER, rowys[-1]+VBORDER) # um

        # add scale bars:
        r, b = a.get_xlim()[1]-SCALEXOFFSET, a.get_ylim()[0]+SCALEYOFFSET # um
        hbar = (r-SCALE[0], b), (r, b) # um
        vbar = (r, b), (r, b+SCALE[1]) # um
        scale = LineCollection([hbar, vbar], lw=1, colors='white', zorder=-1,
                               antialiased=True, visible=True)
        a.add_collection(scale) # add to axes' pool of LCs

        f.tight_layout(pad=0)
        #f.canvas.toolbar.hide()
        #f.canvas.window().statusBar().hide()
        f.canvas.set_window_title(lastcmd())
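
The template layout above is purely geometric: each waveform is drawn at x = chan_x + t/USPERUM and y = chan_y + V/UVPERUM. A minimal sketch with fake waveforms and a hypothetical two-column probe; set_facecolor stands in for the older set_axis_bgcolor used above:

import numpy as np
import matplotlib.pyplot as plt

USPERUM, UVPERUM = 15, 3                         # horizontal and vertical scale factors
# hypothetical 2-column probe channel positions, in um:
chanpos = np.array([(x, y) for y in range(0, 800, 100) for x in (0, 56)])
nt, tres = 50, 20                                # timepoints per template, us per timepoint
ts = np.arange(0, nt * tres, tres)               # time values in us

f, a = plt.subplots(facecolor='black')
a.set_facecolor('black')                         # newer-mpl equivalent of set_axis_bgcolor
for cx, cy in chanpos:
    wd = 60 * np.sin(2 * np.pi * np.arange(nt) / nt) * np.hanning(nt)  # fake waveform, uV
    x = cx + ts / USPERUM                        # um
    y = cy + wd / UVPERUM                        # um
    a.plot(x, y, '-', lw=1, c='red')
a.set_xticks([])
a.set_yticks([])
f.tight_layout(pad=0)
plt.show()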
Example #28
    def scsistim(self, method='mean', width=None, tres=None, timeaverage=False,
                 plottime=False, s=5, figsize=(7.5, 6.5)):
        """Scatter plot some summary statistic of spike correlations of each recording vs
        LFP synchrony index SI. Colour each point according to stimulus type. width and tres
        (sec) dictate tranges to split recordings up into. timeaverage averages across time
        values of both sc and si for each recording. s is point size"""
        ## TODO: maybe limit to visually responsive cells
        ## TODO: add linear regression of si vs log(sc)

        uns = get_ipython().user_ns
        if width == None:
            width = uns['LFPSIWIDTH']
        if tres == None:
            tres = width
        bsrids = uns['BSRIDS'][self.absname]
        msrids = uns['MSRIDS'][self.absname]
        mvrids = uns['NSRIDS'][self.absname]
        dbrids = uns['DBRIDS'][self.absname]
        rids = sorted(bsrids + msrids + mvrids + dbrids) # do everything in rid order
        print('blankscreen: %r' % [self.r[rid].name for rid in bsrids])
        print('mseq: %r' % [self.r[rid].name for rid in msrids])
        print('movie: %r' % [self.r[rid].name for rid in mvrids])
        print('driftbar: %r' % [self.r[rid].name for rid in dbrids])
        isect = core.intersect1d([msrids, bsrids, mvrids, dbrids])
        if len(isect) != 0:
            raise RuntimeError("some rids were classified into more than one type: %r" % isect)

        scs, sis, c = [], [], []
        for rid in rids:
            r = self.r[rid]
            print('%s: %s' % (r.absname, r.name))
            spikecorr = r.sc(width=width, tres=tres)
            """
            TODO: not sure if this is the right way to do this. A different set of neurons for
            each recording are chosen, then mean sc(t) across all pairs for each recording is
            found, and pooled across recordings. This pooling is maybe a bit dodgy. Is it
            valid to pool sc(t) values across recordings when the included neurons are
            different for each recording? The alternative is to deal only with neurons which
            exceed MINTHRESH track-wide, but the problem with that is that for much of the
            time, such neurons are completely silent, and therefore don't deserve to be
            included in sc calculations for those durations.
            """
            sc, si = spikecorr.si(method=method, plot=False) # calls sc.sct() and sc.si()
            sc = sc[0] # pull out the spike correlation values that span all laminae
            if timeaverage:
                # average across all time values of sc and si to get a single coordinate
                # per recording
                sc = sc.mean()
                si = si.mean()
            scs.append(sc)
            sis.append(si)
            if rid in bsrids: color = 'e'
            elif rid in msrids: color = 'k'
            elif rid in mvrids: color = 'r'
            elif rid in dbrids: color = 'b'
            else: raise ValueError("unclassified recording: %r" % r.name)
            c.append(np.tile(color, np.size(sc))) # np.size handles scalar sc when timeaverage
        scs = np.hstack(scs)
        sis = np.hstack(sis)
        c = np.hstack(c)
        
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if plottime: # underplot lines connecting points adjacent in time
            a.plot(scs, sis, 'e--')
        a.scatter(scs, sis, c=c, edgecolors='none', s=s)
        a.set_ylim(0, 1)
        a.set_xlabel('%s spike correlations' % method)
        a.set_ylabel('synchrony index')
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        # make proxy line artists for legend:
        bs = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='e', mec='e')
        ms = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='k', mec='k')
        mv = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='r', mec='r')
        db = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='b', mec='b')
        # add legend:
        a.legend([bs, ms, mv, db],
                 ['blank screen', 'mseq', 'movie', 'drift bar'],
                 numpoints=1, loc='lower right',
                 handlelength=1, handletextpad=0.5, labelspacing=0.1)
        f.tight_layout(pad=0.3) # crop figure to contents
        return scs, sis, c
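
The colourless-scatter-plus-proxy-legend pattern above is generic matplotlib. A minimal sketch with synthetic correlations, synthetic SI and two hypothetical stimulus classes:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

sc = np.random.rand(50) * 0.2                    # synthetic spike correlations
si = np.random.rand(50)                          # synthetic synchrony index
c = np.where(np.random.rand(50) < 0.5, 'k', 'r') # per-point colour by stimulus class

f, a = plt.subplots()
a.scatter(sc, si, c=c, edgecolors='none', s=5)
a.set_ylim(0, 1)
a.set_xlabel('mean spike correlations')
a.set_ylabel('synchrony index')
# proxy line artists so the legend shows one marker per class:
ms = Line2D([1], [1], color='white', marker='o', mfc='k', mec='k')
mv = Line2D([1], [1], color='white', marker='o', mfc='r', mec='r')
a.legend([ms, mv], ['mseq', 'movie'], numpoints=1, loc='lower right')
f.tight_layout(pad=0.3)
plt.show()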
Example #29
 def plot(self, normed=True, scale=2.0, MPL=False):
     super(STCs, self).plot(normed=normed, title=lastcmd(), scale=scale, MPL=MPL)
Example #30
    def scsistim(self, method='weighted mean', width=None, tres=None, timeaverage=False,
                 plottime=False, s=5, figsize=(7.5, 6.5)):
        """Scatter plot some summary statistic of spike correlations of each recording vs
        synchrony index SI. Colour each point according to stimulus type. width and tres
        dictate tranges to split recordings up into. timeaverage means average across time
        values of both sc and si for each recording"""
        ## TODO: maybe limit to visually responsive cells
        ## TODO: add linear regression of si vs log(sc)

        uns = get_ipython().user_ns
        if width == None:
            width = uns['SIWIDTH'] # want powers of two for efficient FFT
        if tres == None:
            tres = width
        rids = sorted(self.r) # do everything in rid order
        recs = [ self.r[rid] for rid in rids ]
        msrids, bsrids, mvrids, dbrids = [], [], [], []
        for rid in rids:
            r = self.r[rid]
            rname = r.name
            if 'mseq' in rname:
                msrids.append(rid)
            elif 'blank' in rname or 'spont' in rname:
                bsrids.append(rid)
            elif 'MVI' in rname:
                mvrids.append(rid)
            elif 'driftbar' in rname:
                dbrids.append(rid)

        print('mseq: %r' % [self.r[rid].name for rid in msrids])
        print('blankscreen: %r' % [self.r[rid].name for rid in bsrids])
        print('movie: %r' % [self.r[rid].name for rid in mvrids])
        print('driftbar: %r' % [self.r[rid].name for rid in dbrids])
        isect = core.intersect1d([msrids, bsrids, mvrids, dbrids])
        if len(isect) != 0:
            raise RuntimeError("some rids were classified into more than one type: %r" % isect)
        rids = np.unique(np.hstack([msrids, bsrids, mvrids, dbrids]))

        scs, sis, c = [], [], []
        for rid in rids:
            r = self.r[rid]
            print('%s: %s' % (r.absname, r.name))
            spikecorr = r.sc(width=width, tres=tres)
            sc, si = spikecorr.si(method=method, plot=False) # calls sc.sct() and sc.si()
            sc = sc[0] # pull out the spike correlation values that span all laminae
            if timeaverage:
                # average across all time values of sc and si to get a single coordinate
                # per recording
                sc = sc.mean()
                si = si.mean()
            scs.append(sc)
            sis.append(si)
            if rid in msrids: color = 'k'
            elif rid in bsrids: color = 'e'
            elif rid in mvrids: color = 'r'
            elif rid in dbrids: color = 'b'
            else: raise ValueError("unclassified recording: %r" % r.name)
            c.append(np.tile(color, np.size(sc))) # np.size handles scalar sc when timeaverage
        scs = np.hstack(scs)
        sis = np.hstack(sis)
        c = np.hstack(c)
        
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if plottime: # underplot lines connecting points adjacent in time
            a.plot(scs, sis, 'e--')
        a.scatter(scs, sis, c=c, edgecolors='none', s=s)
        a.set_ylim(0, 1)
        a.set_xlabel('%s spike correlations' % method)
        a.set_ylabel('synchrony index')
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        # make proxy line artists for legend:
        ms = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='k', mec='k')
        bs = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='e', mec='e')
        mv = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='r', mec='r')
        db = mpl.lines.Line2D([1], [1], color='white', marker='o', mfc='b', mec='b')
        # add legend:
        a.legend([ms, bs, mv, db],
                 ['mseq', 'blank screen', 'movie', 'drift bar'],
                 numpoints=1, loc='lower right',
                 handlelength=1, handletextpad=0.5, labelspacing=0.1)
        f.tight_layout(pad=0.3) # crop figure to contents
        return scs, sis, c
Example #31
    def scsistim(self,
                 method='mean',
                 width=None,
                 tres=None,
                 timeaverage=False,
                 plottime=False,
                 s=5,
                 figsize=(7.5, 6.5)):
        """Scatter plot some summary statistic of spike correlations of each recording vs
        LFP synchrony index SI. Colour each point according to stimulus type. width and tres
        (sec) dictate tranges to split recordings up into. timeaverage averages across time
        values of both sc and si for each recording. s is point size"""
        ## TODO: maybe limit to visually responsive cells
        ## TODO: add linear regression of si vs log(sc)

        uns = get_ipython().user_ns
        if width == None:
            width = uns['LFPSIWIDTH']
        if tres == None:
            tres = width
        bsrids = uns['BSRIDS'][self.absname]
        msrids = uns['MSRIDS'][self.absname]
        mvrids = uns['NSRIDS'][self.absname]
        dbrids = uns['DBRIDS'][self.absname]
        rids = sorted(bsrids + msrids + mvrids +
                      dbrids)  # do everything in rid order
        print('blankscreen: %r' % [self.r[rid].name for rid in bsrids])
        print('mseq: %r' % [self.r[rid].name for rid in msrids])
        print('movie: %r' % [self.r[rid].name for rid in mvrids])
        print('driftbar: %r' % [self.r[rid].name for rid in dbrids])
        isect = core.intersect1d([msrids, bsrids, mvrids, dbrids])
        if len(isect) != 0:
            raise RuntimeError(
                "some rids were classified into more than one type: %r" %
                isect)

        scs, sis, c = [], [], []
        for rid in rids:
            r = self.r[rid]
            print('%s: %s' % (r.absname, r.name))
            spikecorr = r.sc(width=width, tres=tres)
            """
            TODO: not sure if this is the right way to do this. A different set of neurons for
            each recording are chosen, then mean sc(t) across all pairs for each recording is
            found, and pooled across recordings. This pooling is maybe a bit dodgy. Is it
            valid to pool sc(t) values across recordings when the included neurons are
            different for each recording? The alternative is to deal only with neurons which
            exceed MINTHRESH track-wide, but the problem with that is that for much of the
            time, such neurons are completely silent, and therefore don't deserve to be
            included in sc calculations for those durations.
            """
            sc, si = spikecorr.si(method=method, plot=False)  # calls sc.sct() and sc.si()
            sc = sc[0]  # pull out the spike correlation values that span all laminae
            if timeaverage:
                # average across all time values of sc and si to get a single coordinate
                # per recording
                sc = sc.mean()
                si = si.mean()
            scs.append(sc)
            sis.append(si)
            if rid in bsrids: color = 'e'
            elif rid in msrids: color = 'k'
            elif rid in mvrids: color = 'r'
            elif rid in dbrids: color = 'b'
            else: raise ValueError("unclassified recording: %r" % r.name)
            c.append(np.tile(color, np.size(sc)))  # np.size handles scalar sc when timeaverage
        scs = np.hstack(scs)
        sis = np.hstack(sis)
        c = np.hstack(c)

        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if plottime:  # underplot lines connecting points adjacent in time
            a.plot(scs, sis, 'e--')
        a.scatter(scs, sis, c=c, edgecolors='none', s=s)
        a.set_ylim(0, 1)
        a.set_xlabel('%s spike correlations' % method)
        a.set_ylabel('synchrony index')
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        # make proxy line artists for legend:
        bs = mpl.lines.Line2D([1], [1],
                              color='white',
                              marker='o',
                              mfc='e',
                              mec='e')
        ms = mpl.lines.Line2D([1], [1],
                              color='white',
                              marker='o',
                              mfc='k',
                              mec='k')
        mv = mpl.lines.Line2D([1], [1],
                              color='white',
                              marker='o',
                              mfc='r',
                              mec='r')
        db = mpl.lines.Line2D([1], [1],
                              color='white',
                              marker='o',
                              mfc='b',
                              mec='b')
        # add legend:
        a.legend([bs, ms, mv, db],
                 ['blank screen', 'mseq', 'movie', 'drift bar'],
                 numpoints=1,
                 loc='lower right',
                 handlelength=1,
                 handletextpad=0.5,
                 labelspacing=0.1)
        f.tight_layout(pad=0.3)  # crop figure to contents
        return scs, sis, c
Example #32
 def plot(self, interp='nearest', normed=True, scale=2.0):
     win = RevCorr.plot(self, interp=interp, normed=normed, title=lastcmd(),
                        scale=scale)
     return win # necessary in IPython
Example #33
 def plot(self, normed=True, scale=2.0, MPL=False, margins=True):
     win = RevCorrs.plot(self, normed=normed, title=lastcmd(), scale=scale, MPL=MPL,
                         margins=margins)
     return win # necessary in IPython
Example #34
 def psd(self, t0=None, t1=None, f0=0.2, f1=110, p0=None, p1=None, chanis=-1,
         width=None, tres=None, xscale='log', figsize=(5, 5)):
     """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip
     power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0
     uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1,
     take mean of specified chanis. width and tres are in sec."""
     uns = get_ipython().user_ns
     self.get_data()
     ts = self.get_tssec() # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1] # full duration
     if t1 == None:
         t1 = t0 + 10 # 10 sec window
     if width == None:
         width = uns['LFPSPECGRAMWIDTH'] # sec
     if tres == None:
         tres = uns['LFPSPECGRAMTRES'] # sec
     assert tres <= width
     NFFT = intround(width * self.sampfreq)
     noverlap = intround(NFFT - tres * self.sampfreq)
     t0i, t1i = ts.searchsorted((t0, t1))
     #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
     data = self.data[:, t0i:t1i] # slice data
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if iterable(chanis):
         data = data[chanis].mean(axis=0) # take mean of data on chanis
     else:
         data = data[chanis] # get single row of data at chanis
     #data = filter.notch(data)[0] # remove 60 Hz mains noise
     # convert data from uV to mV. I think P is in mV^2?:
     P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
     # keep only freqs between f0 and f1:
     if f0 == None:
         f0 = freqs[0]
     if f1 == None:
         f1 = freqs[-1]
     lo, hi = freqs.searchsorted([f0, f1])
     P, freqs = P[lo:hi], freqs[lo:hi]
     # check for and replace zero power values (ostensibly due to gaps in recording)
     # before attempting to convert to dB:
     zis = np.where(P == 0.0) # row and column indices where P has zero power
     if len(zis[0]) > 0: # at least one hit
         P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
         minnzval = P.min() # get minimum nonzero value
         P[zis] = minnzval # replace with min nonzero values
     P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2?
     # for better visualization, clip power values to within (p0, p1) dB
     if p0 != None:
         P[P < p0] = p0
     if p1 != None:
         P[P > p1] = p1
     #self.P = P
     a.plot(freqs, P, 'k-')
     # add SI frequency band limits:
     LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND']
     a.axvline(x=LFPPRLOBAND[0], c='r', ls='--')
     a.axvline(x=LFPPRLOBAND[1], c='r', ls='--')
     a.axvline(x=LFPPRHIBAND[0], c='b', ls='--')
     a.axvline(x=LFPPRHIBAND[1], c='b', ls='--')
     a.axis('tight')
     a.set_xscale(xscale)
     a.set_xlabel("frequency (Hz)")
     a.set_ylabel("power (dB)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
     a.text(0.998, 0.99, '%s' % self.r.name, color='k', transform=a.transAxes,
            horizontalalignment='right', verticalalignment='top')
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     return P, freqs
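
The PSD pipeline above (mlab.psd, guard against log10(0), convert to dB, clip) can be run on synthetic data. A sketch assuming a 1 kHz sample rate, with arbitrary band limits standing in for LFPPRLOBAND and LFPPRHIBAND:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab

fs = 1000.0                                      # Hz, assumed sample rate
t = np.arange(0, 30, 1 / fs)                     # 30 s of data
x = np.sin(2 * np.pi * 4 * t) + 0.3 * np.random.randn(len(t))  # 4 Hz rhythm + noise, mV

NFFT = 2048
P, freqs = mlab.psd(x, NFFT=NFFT, Fs=fs, noverlap=NFFT // 2)
P[P == 0.0] = P[P > 0].min()                     # guard against log10(0)
P = 10. * np.log10(P)                            # convert power to dB

f, a = plt.subplots(figsize=(5, 5))
a.plot(freqs, P, 'k-')
a.axvline(x=0.5, c='r', ls='--'); a.axvline(x=7, c='r', ls='--')   # arbitrary low band
a.axvline(x=15, c='b', ls='--'); a.axvline(x=100, c='b', ls='--')  # arbitrary high band
a.set_xscale('log')
a.set_xlim(0.2, 110)
a.set_xlabel('frequency (Hz)')
a.set_ylabel('power (dB)')
f.tight_layout(pad=0.3)
plt.show()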
Example #35
    def specgram(self,
                 t0=None,
                 t1=None,
                 f0=0.1,
                 f1=100,
                 p0=-60,
                 p1=None,
                 chanis=-1,
                 width=None,
                 tres=None,
                 cm='jet',
                 colorbar=False,
                 showstates=False,
                 lw=4,
                 alpha=1,
                 relative2t0=False,
                 lim2stim=False,
                 title=True,
                 reclabel=True,
                 swapaxes=False,
                 figsize=None):
        """Plot a spectrogram from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values
        from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most
        superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of
        specified chanis. width and tres are in sec. As an alternative to cm.jet (the
        default), cm.gray, cm.hsv cm.terrain, and cm.cubehelix_r colormaps seem to bring out
        the most structure in the spectrogram. showstates controls whether to plot lines
        demarcating desynchronized and synchronized periods. relative2t0 controls whether to
        plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range
        only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din"""
        uns = get_ipython().user_ns
        self.get_data()
        ts = self.get_tssec()  # full set of timestamps, in sec
        if t0 == None:
            t0, t1 = ts[0], ts[-1]  # full duration
        if t1 == None:
            t1 = t0 + 10  # 10 sec window
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if width == None:
            width = uns['LFPSPECGRAMWIDTH']  # sec
        if tres == None:
            tres = uns['LFPSPECGRAMTRES']  # sec
        assert tres <= width
        NFFT = intround(width * self.sampfreq)
        noverlap = intround(NFFT - tres * self.sampfreq)
        t0i, t1i = ts.searchsorted((t0, t1))
        #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
        data = self.data[:, t0i:t1i]  # slice data
        if figsize == None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the specgram:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5  # inches
            figsize = figwidth, figheight
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if iterable(chanis):
            data = data[chanis].mean(axis=0)  # take mean of data on chanis
        else:
            data = data[chanis]  # get single row of data at chanis
        #data = filter.notch(data)[0] # remove 60 Hz mains noise
        # convert data from uV to mV, returned t is midpoints of time bins in sec from
        # start of data. I think P is in mV^2?:
        P, freqs, t = mpl.mlab.specgram(data / 1e3,
                                        NFFT=NFFT,
                                        Fs=self.sampfreq,
                                        noverlap=noverlap)
        if not relative2t0:
            t += t0  # convert t to time from start of ADC clock:
        # keep only freqs between f0 and f1:
        if f0 == None:
            f0 = freqs[0]
        if f1 == None:
            f1 = freqs[-1]
        df = f1 - f0
        lo, hi = freqs.searchsorted([f0, f1])
        P, freqs = P[lo:hi], freqs[lo:hi]
        # check for and replace zero power values (ostensibly due to gaps in recording)
        # before attempting to convert to dB:
        zis = np.where(P == 0.0)  # row and column indices where P has zero power
        if len(zis[0]) > 0:  # at least one hit
            P[zis] = np.finfo(np.float64).max  # temporarily replace zeros with max float
            minnzval = P.min()  # get minimum nonzero value
            P[zis] = minnzval  # replace with min nonzero values
        P = 10. * np.log10(P)  # convert power to dB wrt 1 mV^2?
        # for better visualization, clip power values to within (p0, p1) dB
        if p0 != None:
            P[P < p0] = p0
        if p1 != None:
            P[P > p1] = p1
        #self.P = P

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        statelinepos = f0 - df * 0.015  # plot horizontal bars just below x axis
        if showstates:
            if showstates in [True, 'auto']:
                print("TODO: there's an offset plotting bug for 'auto', compare with 'manual'")
                si, t = self.si(plot=False)
                stranges, states = self.si_split(si, t)  # sec
                STATECOLOURS = uns['LFPPRBINCOLOURS']
            elif showstates == 'manual':
                stranges, states = [], []
                for state in uns['MANUALSTATES']:
                    for strange in uns['REC2STATE2TRANGES'][self.r.absname][state]:
                        stranges.append(strange)
                        states.append(state)
                stranges = np.vstack(stranges)  # 2D array
                STATECOLOURS = uns['MANUALSTATECOLOURS']
            else:
                raise ValueError('invalid value showstates=%r' % showstates)
            # clip stranges to t0, t1:
            stranges[0, 0] = max(stranges[0, 0], t0)
            stranges[-1, 1] = min(stranges[-1, 1], t1)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            for strange, state in zip(stranges, states):
                clr = STATECOLOURS[state]
                lines(statelinepos,
                      strange[0],
                      strange[1],
                      colors=clr,
                      lw=lw,
                      alpha=alpha,
                      clip_on=False)

        # Label far left, right, top and bottom edges of imshow image. imshow interpolates
        # between these to place the axes ticks. Time limits are
        # set from midpoints of specgram time bins
        extent = t[0], t[-1], freqs[0], freqs[-1]
        #print('specgram extent: %r' % (extent,))
        # flip P vertically for compatibility with imshow:
        im = a.imshow(P[::-1], extent=extent, cmap=cm)
        a.autoscale(enable=True, tight=True)
        a.axis('tight')
        # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xmin=0, xmax=t[-1])
        a.set_ylim(ymin=freqs[0], ymax=freqs[-1])
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        a.set_xlabel("time (s)")
        a.set_ylabel("frequency (Hz)")
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if title:
            a.set_title(titlestr)
        if reclabel:
            a.text(0.994,
                   0.95,
                   '%s' % self.r.absname,
                   color='w',
                   transform=a.transAxes,
                   horizontalalignment='right',
                   verticalalignment='top')
        f.tight_layout(pad=0.3)  # crop figure to contents
        if colorbar:
            f.colorbar(im, pad=0)  # creates big whitespace to the right for some reason
        self.f = f
        return P, freqs, t
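
The imshow-with-extent trick above can be reproduced standalone. A sketch with a synthetic LFP at an assumed 1 kHz sample rate; width and tres below are arbitrary stand-ins for LFPSPECGRAMWIDTH and LFPSPECGRAMTRES:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab

fs = 1000.0                                      # Hz, assumed sample rate
t = np.arange(0, 60, 1 / fs)                     # 60 s of data
x = np.sin(2 * np.pi * 4 * t) * (t < 30) + 0.2 * np.random.randn(len(t))

width, tres = 2.0, 0.5                           # s, arbitrary window and step
NFFT = int(round(width * fs))
noverlap = int(round(NFFT - tres * fs))
P, freqs, Pt = mlab.specgram(x, NFFT=NFFT, Fs=fs, noverlap=noverlap)
P[P == 0.0] = P[P > 0].min()                     # guard against log10(0)
P = 10. * np.log10(P)                            # power in dB

lo, hi = freqs.searchsorted([0.1, 100])          # keep only 0.1-100 Hz
P, freqs = P[lo:hi], freqs[lo:hi]

f, a = plt.subplots(figsize=(8, 2.5))
extent = Pt[0], Pt[-1], freqs[0], freqs[-1]      # label image edges; ticks are interpolated
im = a.imshow(P[::-1], extent=extent, cmap='jet', aspect='auto')
a.set_xlabel('time (s)')
a.set_ylabel('frequency (Hz)')
f.colorbar(im, pad=0)
f.tight_layout(pad=0.3)
plt.show()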
Example #36
 def plot(self, interp='nearest', normed=True, scale=2.0):
     super(STCs, self).plot(interp=interp, normed=normed,
                            title=lastcmd(),
                            scale=scale)
Example #37
 def psd(self,
         t0=None,
         t1=None,
         f0=0.2,
         f1=110,
         p0=None,
         p1=None,
         chanis=-1,
         width=None,
         tres=None,
         xscale='log',
         figsize=(5, 5)):
     """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip
     power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0
     uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1,
     take mean of specified chanis. width and tres are in sec."""
     uns = get_ipython().user_ns
     self.get_data()
     ts = self.get_tssec()  # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1]  # full duration
     if t1 == None:
         t1 = t0 + 10  # 10 sec window
     if width == None:
         width = uns['LFPSPECGRAMWIDTH']  # sec
     if tres == None:
         tres = uns['LFPSPECGRAMTRES']  # sec
     assert tres <= width
     NFFT = intround(width * self.sampfreq)
     noverlap = intround(NFFT - tres * self.sampfreq)
     t0i, t1i = ts.searchsorted((t0, t1))
     #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
     data = self.data[:, t0i:t1i]  # slice data
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if iterable(chanis):
         data = data[chanis].mean(axis=0)  # take mean of data on chanis
     else:
         data = data[chanis]  # get single row of data at chanis
     #data = filter.notch(data)[0] # remove 60 Hz mains noise
     # convert data from uV to mV. I think P is in mV^2?:
     P, freqs = mpl.mlab.psd(data / 1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
     # keep only freqs between f0 and f1:
     if f0 == None:
         f0 = freqs[0]
     if f1 == None:
         f1 = freqs[-1]
     lo, hi = freqs.searchsorted([f0, f1])
     P, freqs = P[lo:hi], freqs[lo:hi]
     # check for and replace zero power values (ostensibly due to gaps in recording)
     # before attempting to convert to dB:
     zis = np.where(P == 0.0)  # row and column indices where P has zero power
     if len(zis[0]) > 0:  # at least one hit
         P[zis] = np.finfo(np.float64).max  # temporarily replace zeros with max float
         minnzval = P.min()  # get minimum nonzero value
         P[zis] = minnzval  # replace with min nonzero values
     P = 10. * np.log10(P)  # convert power to dB wrt 1 mV^2?
     # for better visualization, clip power values to within (p0, p1) dB
     if p0 != None:
         P[P < p0] = p0
     if p1 != None:
         P[P > p1] = p1
     #self.P = P
     a.plot(freqs, P, 'k-')
     # add SI frequency band limits:
     LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND']
     a.axvline(x=LFPPRLOBAND[0], c='r', ls='--')
     a.axvline(x=LFPPRLOBAND[1], c='r', ls='--')
     a.axvline(x=LFPPRHIBAND[0], c='b', ls='--')
     a.axvline(x=LFPPRHIBAND[1], c='b', ls='--')
     a.axis('tight')
     a.set_xscale(xscale)
     a.set_xlabel("frequency (Hz)")
     a.set_ylabel("power (dB)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
     a.text(0.998,
            0.99,
            '%s' % self.r.name,
            color='k',
            transform=a.transAxes,
            horizontalalignment='right',
            verticalalignment='top')
     f.tight_layout(pad=0.3)  # crop figure to contents
     self.f = f
     return P, freqs
Example #38
    def si(self, kind=None, chani=-1, width=None, tres=None,
           lfpwidth=None, lfptres=None, lowband=None, highband=None, plot=True,
           states=False, desynchsi=0.2, synchsi=0.2, lw=4, alpha=1, relative2t0=False,
           lim2stim=False, showxlabel=True, showylabel=True, showtitle=True, showtext=True,
           swapaxes=False, figsize=(20, 3.5)):
        """Calculate an LFP synchrony index, using potentially overlapping windows of width
        and tres, in sec, from the LFP spectrogram, itself composed of bins of lfpwidth and
        lfptres. relative2t0 controls whether to plot relative to t0, or relative to start of
        ADC clock. lim2stim limits the time range only to when a stimulus was presented, i.e.
        to the outermost times of non-NULL din.

        Note that for power ratio methods (kind: L/(L+H) or L/H),
        width and tres are not used, only lfpwidth and lfptres. Options for kind are:

        'L/(L+H)': fraction of power in low band vs total power (Saleem2010)

        'L/H': low to highband power ratio (Li, Poo, Dan 2009)

        'cv': coefficient of variation (std / mean) of all power

        'ncv': normalized CV: (std - mean) / (std + mean)

        'nstdmed': normalized stdmed: (std - med) / (std + med)

        'n2stdmean': normalized 2stdmean: (2*std - mean) / (2*std + mean)

        'n3stdmean': normalized 3stdmean: (3*std - mean) / (3*std + mean)

        """
        uns = get_ipython().user_ns
        if kind == None:
            kind = uns['LFPSIKIND']
        if kind.startswith('L/'):
            pratio = True
        else:
            pratio = False

        data = self.get_data()
        ts = self.get_tssec() # full set of timestamps, in sec
        t0, t1 = ts[0], ts[-1]
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        t0i, t1i = ts.searchsorted((t0, t1))
        x = data[chani, t0i:t1i] / 1e3 # slice data, convert from uV to mV
        x = filter.notch(x)[0] # remove 60 Hz mains noise
        try:
            rr = self.r.e0.I['REFRESHRATE']
        except AttributeError: # probably a recording with no experiment
            rr = 200 # assume 200 Hz refresh rate
        if rr <= 100: # CRT was at low vertical refresh rate
            print('filtering out %d Hz from LFP in %s' % (intround(rr), self.r.name))
            x = filter.notch(x, freq=rr)[0] # remove CRT interference

        if width == None:
            width = uns['LFPSIWIDTH'] # sec
        if tres == None:
            tres = uns['LFPSITRES'] # sec
        if lfpwidth == None:
            lfpwidth = uns['LFPWIDTH'] # sec
        if lfptres == None:
            lfptres = uns['LFPTRES'] # sec
        if lowband == None:
            lowband = uns['LFPSILOWBAND']
        f0, f1 = lowband
        if highband == None:
            highband = uns['LFPSIHIGHBAND']
        f2, f3 = highband

        assert lfptres <= lfpwidth
        NFFT = intround(lfpwidth * self.sampfreq)
        noverlap = intround(NFFT - lfptres * self.sampfreq)
        #print('len(x), NFFT, noverlap: %d, %d, %d' % (len(x), NFFT, noverlap))
        # t is midpoints of timebins in sec from start of data. P is in mV^2?:
        P, freqs, Pt = mpl.mlab.specgram(x, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
        # don't convert power to dB, just washes out the signal in the ratio:
        #P = 10. * np.log10(P)
        if not relative2t0:
            Pt += t0 # convert t to time from start of ADC clock:
        nfreqs = len(freqs)

        # keep only freqs between f0 and f1, and f2 and f3:
        f0i, f1i, f2i, f3i = freqs.searchsorted([f0, f1, f2, f3])
        lP = P[f0i:f1i] # nsubfreqs x nt
        hP = P[f2i:f3i] # nsubfreqs x nt
        lP = lP.sum(axis=0) # nt
        hP = hP.sum(axis=0) # nt

        if pratio:
            t = Pt
            ylim = 0, 1
            ylabel = 'SI (%s)' % kind
        else:
            # potentially overlapping bin time ranges:
            trange = Pt[0], Pt[-1]
            tranges = split_tranges([trange], width, tres) # in sec
            ntranges = len(tranges)
            tis = Pt.searchsorted(tranges) # ntranges x 2 array
            # number of timepoints to use for each trange, almost all will be the same width:
            binnt = intround((tis[:, 1] - tis[:, 0]).mean())
            binhP = np.zeros((ntranges, binnt)) # init appropriate array
            for trangei, t0i in enumerate(tis[:, 0]):
                binhP[trangei] = hP[t0i:t0i+binnt]
            # get midpoint of each trange:
            t = tranges.mean(axis=1)

        #old_settings = np.seterr(all='ignore') # suppress div by 0 errors
        # plot power signal to be analyzed
        #self.si_plot(Pt, hP, t0=0, t1=t[-1], ylim=None, ylabel='highband power',
        #             title=lastcmd()+' highband power', text=self.r.name)
        hlines = []
        if kind[0] == 'n':
            ylim = -1, 1
            hlines = [0]
        # calculate some metric of each column, ie each width:
        if kind == 'L/(L+H)':
            si = lP/(lP + hP)
        elif kind == 'L/H':
            si = lP/hP
        elif kind == 'nLH':
            t = Pt
            si = (lP - hP) / (lP + hP)
            ylabel = 'LFP (L - H) / (L + H)'
        elif kind == 'cv':
            si = binhP.std(axis=1) / binhP.mean(axis=1)
            ylim = 0, 2
            ylabel = 'LFP power CV'
        elif kind == 'ncv':
            s = binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s - mean) / (s + mean)
            ylabel = 'LFP power (std - mean) / (std + mean)'
            #pl.plot(t, s)
            #pl.plot(t, mean)
        elif kind == 'n2stdmean':
            s2 = 2 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s2 - mean) / (s2 + mean)
            ylabel = 'LFP power (2*std - mean) / (2*std + mean)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, mean)
        elif kind == 'n3stdmean':
            s3 = 3 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s3 - mean) / (s3 + mean)
            ylabel = 'LFP power (3*std - mean) / (3*std + mean)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, mean)
        elif kind == 'n4stdmean':
            s4 = 4 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s4 - mean) / (s4 + mean)
            ylabel = 'LFP power (4*std - mean) / (4*std + mean)'
            #pl.plot(t, s4)
            #pl.plot(t, mean)
        elif kind == 'nstdmed':
            s = binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s - med) / (s + med)
            ylabel = 'LFP power (std - med) / (std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s)
            #pl.plot(t, med)
        elif kind == 'n2stdmed':
            s2 = 2 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s2 - med) / (s2 + med)
            ylabel = 'LFP power (2*std - med) / (2*std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, med)
        elif kind == 'n3stdmed':
            s3 = 3 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s3 - med) / (s3 + med)
            ylabel = 'LFP power (3*std - med) / (3*std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, med)
        elif kind == 'nstdmin':
            s = binhP.std(axis=1)
            min = binhP.min(axis=1)
            si = (s - min) / (s + min)
            ylabel = 'LFP power (std - min) / (std + min)'
            #pl.plot(t, s)
            #pl.plot(t, min)
        elif kind == 'nmadmean':
            mean = binhP.mean(axis=1)
            mad = (np.abs(binhP - mean[:, None])).mean(axis=1)
            si = (mad - mean) / (mad + mean)
            ylabel = 'MUA (MAD - mean) / (MAD + mean)'
            #pl.plot(t, mad)
            #pl.plot(t, mean)
        elif kind == 'nmadmed':
            med = np.median(binhP, axis=1)
            mad = (np.abs(binhP - med[:, None])).mean(axis=1)
            si = (mad - med) / (mad + med)
            ylabel = 'MUA (MAD - median) / (MAD + median)'
            #pl.plot(t, mad)
            #pl.plot(t, med)
        elif kind == 'nvarmin':
            v = binhP.var(axis=1)
            min = binhP.min(axis=1)
            si = (v - min) / (v + min)
            ylabel = 'LFP power (var - min) / (var + min)'
            #pl.plot(t, v)
            #pl.plot(t, min)
        elif kind == 'nptpmean':
            ptp = binhP.ptp(axis=1)
            mean = binhP.mean(axis=1)
            si = (ptp - mean) / (ptp + mean)
            ylabel = 'MUA (ptp - mean) / (ptp + mean)'
            #pl.plot(t, ptp)
            #pl.plot(t, mean)
        elif kind == 'nptpmed':
            ptp = binhP.ptp(axis=1)
            med = np.median(binhP, axis=1)
            si = (ptp - med) / (ptp + med)
            ylabel = 'MUA (ptp - med) / (ptp + med)'
            #pl.plot(t, ptp)
            #pl.plot(t, med)
        elif kind == 'nptpmin':
            ptp = binhP.ptp(axis=1)
            min = binhP.min(axis=1)
            si = (ptp - min) / (ptp + min)
            ylabel = 'MUA (ptp - min) / (ptp + min)'
            #pl.plot(t, ptp)
            #pl.plot(t, min)
        elif kind == 'nmaxmin':
            max = binhP.max(axis=1)
            min = binhP.min(axis=1)
            si = (max - min) / (max + min)
            ylabel = 'MUA (max - min) / (max + min)'
            #pl.plot(t, max)
            #pl.plot(t, min)
        else:
            raise ValueError('unknown kind %r' % kind)
        if plot:
            # calculate xlim, always start from 0, add half a bin width to xmax:
            if pratio:
                xlim = (0, t[-1]+lfpwidth/2)
            else:
                xlim = (0, t[-1]+width/2)
            self.si_plot(t, si, t0=t0, t1=t1, xlim=xlim, ylim=ylim, ylabel=ylabel,
                         showxlabel=showxlabel, showylabel=showylabel, showtitle=showtitle,
                         title=lastcmd(), showtext=showtext, text=self.r.name, hlines=hlines,
                         states=states, desynchsi=desynchsi, synchsi=synchsi, lw=lw,
                         alpha=alpha, relative2t0=relative2t0, swapaxes=swapaxes,
                         figsize=figsize)
        #np.seterr(**old_settings) # restore old settings
        return si, t # t are midpoints of bins, offset depends on relative2t0
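
All of the normalized 'n*' kinds above share the same contrast form (a - b) / (a + b), which stays within [-1, 1] as long as both statistics are non-negative. A minimal standalone sketch of the 'nstdmed' branch, with a made-up binhP array (rows are time bins, columns are samples within each bin) standing in for the real binned LFP power:

import numpy as np

# hypothetical binned power matrix: 100 time bins x 256 samples per bin, non-negative
binhP = np.abs(np.random.randn(100, 256)) ** 2

s = binhP.std(axis=1)           # spread of power within each bin
med = np.median(binhP, axis=1)  # central tendency within each bin
si = (s - med) / (s + med)      # normalized contrast, bounded to [-1, 1]
assert ((-1 <= si) & (si <= 1)).all()
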
Example #39
0
 def plot(self, var='ori', fixed=None):
     """var: string name of variable you want to plot a tuning curve for
     fixed: dict with keys containing names of vars to keep fixed when building tuning
     curve, and values containing each var's value(s) to fix at
     
     Ex: r71.n[1].tune().plot('phase0', fixed={'ori':138, 'sfreqCycDeg':[0.4, 0.8]})
     """
     if not self.done:
         self.calc(tdelay=self.tdelay)
     if fixed != None:
         fixedsweepis = []
         for fixedvar, fixedvals in fixed.items():
             vals = self.experiment.sweeptable.data[fixedvar]
             if fixedvar == 'ori': # correct for orientation offset by adding
                 if (vals > 180).any():
                     maxori = 360
                 else:
                     maxori = 180
                 vals = vals.copy() # don't modify the sweeptable!
                 vals += self.experiment.s.orioff # static parameter
                 vals %= maxori
             sweepis = []
             for fixedval in toiter(fixedvals):
                 sweepis.append(np.where(vals == fixedval)[0])
             sweepis = np.concatenate(sweepis)
             fixedsweepis.append(sweepis)
         # intersect all fixedvar arrays in fixedsweepis:
         fixedsweepis = core.intersect1d(fixedsweepis)
         #print(fixedsweepis)
     # get values for var at all unique sweep indices:
     vals = self.experiment.sweeptable.data[var]
     if var == 'ori': # correct for orientation offset by adding
         if (vals > 180).any():
             maxori = 360
         else:
             maxori = 180
         vals = vals.copy() # don't modify the sweeptable!
         vals += self.experiment.s.orioff # static parameter
         vals %= maxori
     x = np.unique(vals) # x axis
     y = np.zeros(len(x), dtype=int) # spike counts for each variable value
     for vali, val in enumerate(x):
         sweepis = np.where(vals == val)[0]
         if fixed != None:
             sweepis = np.intersect1d(sweepis, fixedsweepis, assume_unique=True)
              print(sweepis)
         for sweepi in sweepis:
             y[vali] += self.counts[sweepi].sum()
     # create a new figure:
     f = pl.figure()
     a = f.add_subplot(111)
     a.plot(x, y, 'k.-')
     a.set_xlabel(var)
     a.set_ylabel('spike count')
     titlestr = lastcmd()
     titlestr += ' nid%d' % self.neuron.id
     a.set_title(titlestr)
     f.canvas.window().setWindowTitle(titlestr)
     a.text(0.99, 0.99, 'peak=(%s, %s)' % (x[y.argmax()], y.max()),
            transform=a.transAxes,
            horizontalalignment='right',
            verticalalignment='top')
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     self.x, self.y = x, y
     return self
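
The fixed-variable logic in Example #39 reduces to building one index array per constrained variable, intersecting them all, then intersecting again with the sweeps for each value of the plotted variable. A minimal sketch of that idea with a hypothetical two-column sweep table standing in for self.experiment.sweeptable.data; the project's core.intersect1d helper is replaced here by reducing with np.intersect1d:

import numpy as np
from functools import reduce

# hypothetical sweep table: one row per stimulus sweep
sweeps = np.array([(0.0, 0.4), (45.0, 0.4), (90.0, 0.8), (0.0, 0.8)],
                  dtype=[('ori', float), ('sfreqCycDeg', float)])

fixed = {'sfreqCycDeg': [0.4]}  # keep only sweeps at this spatial frequency
fixedsweepis = []
for fixedvar, fixedvals in fixed.items():
    matches = [np.where(sweeps[fixedvar] == v)[0] for v in np.atleast_1d(fixedvals)]
    fixedsweepis.append(np.concatenate(matches))
fixedsweepis = reduce(np.intersect1d, fixedsweepis)  # sweeps meeting every constraint

# restrict one value of the plotted variable to the fixed set:
oris = np.unique(sweeps['ori'])
sweepis = np.intersect1d(np.where(sweeps['ori'] == oris[0])[0], fixedsweepis)
print(sweepis)  # -> [0]
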
Example #40
0
 def plot(self, normed=True, scale=2.0, MPL=False):
     super(STCs, self).plot(normed=normed,
                            title=lastcmd(),
                            scale=scale,
                            MPL=MPL)
Example #41
0
def sc_si(source,
          method='mean',
          sisource='lfp',
          kind=None,
          chani=-1,
          sirange=None,
          layers=False,
          ms=1,
          figsize=(7.5, 6.5)):
    """Pool recording.sc().si() results across recordings specified by source,
    plot the result"""
    uns = get_ipython().user_ns
    if layers == False:
        layers = ['all']
    elif layers == True:
        layers = ['sup', 'deep']
    LAYER2I = {'all': 0, 'sup': 1, 'mid': 2, 'deep': 3, 'other': 4}
    layeris = [LAYER2I[layer] for layer in layers]

    recs, tracks = parse_source(source)

    if sisource not in ['lfp', 'mua']:
        raise ValueError('unknown sisource %r' % sisource)

    if kind == None:
        if sisource == 'lfp':
            kind = uns['LFPSIKIND']
        else:
            kind = uns['MUASIKIND']

    # calculate
    corrss, sis = [], []
    for rec in recs:
        print(rec.absname)
        corrs, si, ylabel = rec.sc().si(method=method,
                                        sisource=sisource,
                                        kind=kind,
                                        chani=chani,
                                        sirange=sirange,
                                        plot=False)
        corrss.append(corrs)
        sis.append(si)
    corrs = np.hstack(corrss)
    si = np.hstack(sis)

    # plot
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)

    #ylim = corrs[layeris].min(), corrs[layeris].max()
    #yrange = ylim[1] - ylim[0]
    #extra = yrange*0.03 # 3 %
    #ylim = ylim[0]-extra, ylim[1]+extra
    ylim = uns['SCLIMITS']

    # keep only those points whose synchrony index falls within sirange:
    if sirange == None:
        finitesi = si[np.isfinite(si)]
        sirange = finitesi.min(), finitesi.max()
    sirange = np.asarray(sirange)
    keepis = (sirange[0] <= si[0]) * (si[0] <= sirange[1])  # boolean index array
    si = si[:, keepis]
    corrs = corrs[:, keepis]
    # plot linear regressions of corrs vs si[0]:
    if 'all' in layers:
        m0, b0, r0, p0, stderr0 = linregress(si[0], corrs[0])
        a.plot(sirange, m0 * sirange + b0, 'e--')
    if 'sup' in layers:
        m1, b1, r1, p1, stderr1 = linregress(si[0], corrs[1])
        a.plot(sirange, m1 * sirange + b1, 'r--')
    if 'mid' in layers:
        m2, b2, r2, p2, stderr2 = linregress(si[0], corrs[2])
        a.plot(sirange, m2 * sirange + b2, 'g--')
    if 'deep' in layers:
        m3, b3, r3, p3, stderr3 = linregress(si[0], corrs[3])
        a.plot(sirange, m3 * sirange + b3, 'b--')
    if 'other' in layers:
        m4, b4, r4, p4, stderr4 = linregress(si[0], corrs[4])
        a.plot(sirange, m4 * sirange + b4, 'y--', zorder=0)

    # scatter plot corrs vs si, one colour per laminarity:
    if 'all' in layers:
        a.plot(si[0],
               corrs[0],
               'e.',
               ms=ms,
               label='all, m=%.3f, r=%.3f' % (m0, r0))
    if 'sup' in layers:
        a.plot(si[0],
               corrs[1],
               'r.',
               ms=ms,
               label='superficial, m=%.3f, r=%.3f' % (m1, r1))
    if 'mid' in layers:
        a.plot(si[0],
               corrs[2],
               'g.',
               ms=ms,
               label='middle, m=%.3f, r=%.3f' % (m2, r2))
    if 'deep' in layers:
        a.plot(si[0],
               corrs[3],
               'b.',
               ms=ms,
               label='deep, m=%.3f, r=%.3f' % (m3, r3))
    if 'other' in layers:
        a.plot(si[0],
               corrs[4],
               'y.',
               ms=ms,
               label='other, m=%.3f, r=%.3f' % (m4, r4),
               zorder=0)
    #a.set_xlim(sirange)
    if kind[0] == 'n':
        a.set_xlim(-1, 1)
    a.set_ylim(ylim)
    #a.autoscale(enable=True, axis='y', tight=True)
    a.set_xlabel('%s SI (%s)' % (sisource.upper(), kind))
    a.set_ylabel(ylabel)
    titlestr = lastcmd()
    gcfm().window.setWindowTitle(titlestr)
    a.set_title(titlestr)
    a.legend(loc='upper left',
             handlelength=1,
             handletextpad=0.5,
             labelspacing=0.1)
    f.tight_layout(pad=0.3)  # crop figure to contents
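
Each per-layer regression line in sc_si() follows the same recipe: mask the pooled points to sirange using row 0 of si, then fit corrs against si[0] with scipy.stats.linregress and overplot the fitted line. A minimal sketch with synthetic data (the two-row layout, slope and noise level are made up purely for illustration):

import numpy as np
from scipy.stats import linregress

# hypothetical pooled arrays: rows are layer groupings, columns are time bins
si = np.tile(np.random.uniform(-1, 1, 500), (2, 1))
corrs = 0.3 * si + 0.05 * np.random.randn(2, 500)

sirange = np.array([-0.8, 0.8])
keepis = (sirange[0] <= si[0]) & (si[0] <= sirange[1])  # boolean mask on the 'all' row
si, corrs = si[:, keepis], corrs[:, keepis]

m0, b0, r0, p0, stderr0 = linregress(si[0], corrs[0])
print('all: m=%.3f, r=%.3f' % (m0, r0))
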
Example #42
0
 def plot(self,
          t0=None,
          t1=None,
          chanis=None,
          gain=1,
          c='k',
          alpha=1.0,
          yunits='um',
          yticks=None,
          title=True,
          xlabel=True,
          relative2t0=False,
          lim2stim=False,
          scalebar=True,
          lw=4,
          figsize=(20, 6.5)):
     """Plot chanis of LFP data between t0 and t1 in sec. Unfortunatley, setting an alpha <
     1 doesn't seem to reveal detail when a line obscures itself, such as when plotting a
     very long time series. relative2t0 controls whether to plot relative to t0, or
     relative to start of ADC clock. lim2stim limits the time range only to when a stimulus
     was on screen, i.e. to the outermost times of non-NULL din. If only one chan is
     requested, it's plotted on a mV scale instead of a spatial scale."""
     self.get_data()
     ts = self.get_tssec()  # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1]
     if t1 == None:
         t1 = t0 + 10  # 10 sec window
     if chanis == None:
         chanis = range(len(self.chans))  # all chans
     if lim2stim:
         t0, t1 = self.apply_lim2stim(t0, t1)
     t0i, t1i = ts.searchsorted((t0, t1))
     ts = ts[t0i:t1i]  # constrained set of timestamps, in sec
     chanis = tolist(chanis)
     nchans = len(chanis)
     # grab desired channels and time range:
     data = self.data[chanis][:, t0i:t1i]
     if nchans > 1:  # convert uV to um:
         totalgain = self.UV2UM * gain
         data = data * totalgain
     else:  # convert uV to mV:
         data = data / 1000
         yunits = 'mV'
     nt = len(ts)
     assert nt == data.shape[1]
     if relative2t0:
         # convert ts to time from t0, otherwise plot time from start of ADC clock:
         ts -= t0
     x = np.tile(ts, nchans)
     x.shape = nchans, nt
     segments = np.zeros((nchans, nt, 2))  # x vals in col 0, yvals in col 1
     segments[:, :, 0] = x
     if nchans > 1:
          segments[:, :, 1] = -data  # set to -ve here because of invert_yaxis() below
     else:
         segments[:, :, 1] = data
     if nchans > 1:  # add y offsets:
         maxypos = 0
         for chanii, chani in enumerate(chanis):
             chan = self.chans[chani]
             ypos = self.chanpos[chan][1]  # in um
              segments[chanii, :, 1] += ypos  # vertical distance below top of probe
             maxypos = max(maxypos, ypos)
         if yunits == 'mm':  # convert from um to mm
             segments[:, :, 1] /= 1000
              maxypos = maxypos / 1000  # um to mm (becomes a float)
             totalgain = totalgain / 1000
     lc = LineCollection(segments,
                         linewidth=1,
                         linestyle='-',
                         colors=c,
                         alpha=alpha,
                         antialiased=True,
                         visible=True)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     a.add_collection(lc)  # add to axes' pool of LCs
     if scalebar:  # add vertical scale bar at end of last channel to represent 1 mV:
         if nchans > 1:
             ymin, ymax = maxypos - 500 * totalgain, maxypos + 500 * totalgain  # +/- 0.5 mV
         else:
             ymin, ymax = -0.5, 0.5  # mV
         a.vlines(ts.max() * 0.99, ymin, ymax, lw=lw, colors='e')
     a.autoscale(enable=True, tight=True)
     # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
     a.set_xlim(xmin=0)
     if nchans > 1:
         a.invert_yaxis()  # for spatial scale
     if yticks != None:
         a.set_yticks(yticks)
     # turn off annoying "+2.41e3" type offset on x axis:
     formatter = mpl.ticker.ScalarFormatter(useOffset=False)
     a.xaxis.set_major_formatter(formatter)
     if xlabel:
         a.set_xlabel("time (s)")
     if yunits == 'um':
         a.set_ylabel("depth ($\mu$m)")
     elif yunits == 'mm':
         a.set_ylabel("depth (mm)")
     elif yunits == 'mV':
         a.set_ylabel("LFP (mV)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     if title:
         a.set_title(titlestr)
         a.text(0.998,
                0.99,
                '%s' % self.r.name,
                transform=a.transAxes,
                horizontalalignment='right',
                verticalalignment='top')
     f.tight_layout(pad=0.3)  # crop figure to contents
     self.f = f
     return self
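
The core display trick in Example #42 is packing one (nt, 2) polyline per channel into a single LineCollection, offsetting each polyline vertically by its channel depth, and then inverting the y axis so depth increases downward. A minimal standalone sketch with made-up data and channel geometry (the 0.5 uV-to-um display gain is arbitrary):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

# hypothetical 4-channel LFP snippet: 1 s at 1 kHz, channels 200 um apart
ts = np.arange(0, 1, 0.001)              # time, in s
data = np.random.randn(4, ts.size) * 50  # "uV"
ypos = np.array([0, 200, 400, 600])      # channel depths, in um
uv2um = 0.5                              # display gain, uV -> um

segments = np.zeros((4, ts.size, 2))     # x vals in col 0, y vals in col 1
segments[:, :, 0] = ts
segments[:, :, 1] = -data * uv2um + ypos[:, None]  # negate, then offset by depth

f, a = plt.subplots(figsize=(10, 4))
a.add_collection(LineCollection(segments, colors='k', linewidth=1))
a.autoscale(enable=True, tight=True)
a.invert_yaxis()                         # so that depth increases downward
a.set_xlabel('time (s)')
a.set_ylabel('depth (um)')
plt.show()
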