Example #1
    def pospdf(self, neurons=None, dim='y', nbins=10, a=None, stats=False, figsize=(7.5, 6.5)):
        """Plot PDF of cell positions ('x' or 'y') along the polytrode
        to get an idea of how cells are distributed in space"""
        if neurons == 'all':
            neurons = self.alln.values()
        elif neurons == 'quiet':
            neurons = self.qn.values()
        else:
            neurons = self.n.values()
        dimi = {'x':0, 'y':1}[dim]
        p = [ n.pos[dimi] for n in neurons ] # all position values
        nbins = max(nbins, 2*intround(np.sqrt(self.nneurons)))
        n, edges = np.histogram(p, bins=nbins) # edges includes rightmost bin edge
        binwidth = edges[1] - edges[0] # all bins are of equal width

        if stats:
            # compute stats on the position values themselves, not the bin edges:
            mean = np.mean(p)
            median = np.median(p)
            argmode = n.argmax()
            mode = edges[argmode] + binwidth / 2 # middle of tallest bin
            stdev = np.std(p)

        if a is None:
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
        else: # add to existing axes
            f = a.figure

        # use CLUSTERCOLOURDICT for familiarity with len 10 1-based id to colour mapping
        #color = CLUSTERCOLOURDICT[int(self.id)]
        color = 'k'

        # exclude rightmost bin edge:
        a.bar(edges[:-1], n, width=binwidth, align='edge', color=color, ec=color)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        a.set_title(titlestr)
        a.set_xlabel('neuron %s position (um)' % dim)
        a.set_ylabel('neuron count')

        if stats:
            # add stuff to top right of plot:
            uns = get_ipython().user_ns
            a.text(0.99, 0.99, 'mean = %.3f\n'
                               'median = %.3f\n'
                               'mode = %.3f\n'
                               'stdev = %.3f\n'
                               'minrate = %.2f Hz\n'
                               'nneurons = %d\n'
                               'dt = %d min'
                               % (mean, median, mode, stdev,
                                  uns['MINRATE'], self.nneurons, intround(self.dtmin)),
                               transform = a.transAxes,
                               horizontalalignment='right',
                               verticalalignment='top')
        f.tight_layout(pad=0.3) # crop figure to contents
        f.canvas.draw() # this is needed if a != None when passed as arg
        return a
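Nearly every example on this page calls an intround() helper whose definition isn't shown. A minimal sketch of what it presumably does (round to the nearest integer and return an int rather than a float), inferred from how it's used above; the real helper in the source codebase may differ:

import numpy as np

def intround(n):
    # round to nearest integer; return an int scalar or int array depending on input:
    if np.iterable(n):
        return np.int64(np.round(n))
    return int(round(n))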
Example #2
 def __init__(self, neuron=None, experiment=None, trange=None, nt=10):
     self.neuron = neuron
     self.experiment = experiment
      if trange is None:
         self.trange = self.experiment.trange
     else:
         self.trange = trange
      assert isinstance(self.experiment.e, Movie)
     self.movie = self.experiment.e
     try:
         self.movie.frames # check if movie frames have been loaded from file
     except AttributeError:
          # Load as a 3D array instead of as a list of 2D arrays; more convenient for
          # analysis, although it will cause memory problems for really big (>1 GB) movies.
         # Don't flip the movie frames vertically for OpenGL's bottom left origin, since
         # we aren't using OpenGL for analysis:
         self.movie.load(asarray=True, flip=False)
     self.nt = nt # number of revcorr timepoints
     self.tis = range(0, nt, 1) # revcorr timepoint indices
     # revcorr timepoint values, stored in a list, not an array. Bad behaviour happens
      # during __eq__ below if attribs are numpy arrays, because comparing numpy arrays returns
     # an array of booleans, not just a simple boolean:
     self.ts = [ intround(ti * self.movie.dynamic.sweepSec * 1000) for ti in self.tis ]
     self.ndinperframe = intround(self.movie.dynamic.sweepSec * 1000000 /
                                  self.experiment.REFRESHTIME)
     #self.movie.frames = np.asarray(self.movie.frames)
     self.width = self.movie.frames.shape[-1] # (nframes, height, width)
     self.height = self.movie.frames.shape[-2]
     self.done = False # hasn't yet successfully completed its calc() method
Example #3
    def get_blockranges(self, bs, bx):
        """Generate time ranges for slightly overlapping blocks of contiguous data that
        span self.trange, given blocksize and blockexcess"""
        stream = self.sort.stream
        bs = abs(bs)
        bx = abs(bx)
        if not self.trange[0] <= self.trange[1]: # not a forward search
            raise RuntimeError('backward detection not allowed')

        tranges = stream.tranges
        # pick out all tranges that overlap with self.trange
        trangesi = (self.trange[0] < tranges[:, 1]) & (tranges[:, 0] < self.trange[1])
        tranges = tranges[trangesi]

        blockranges = []
        for trange in tranges: # iterate over contiguous time ranges
            br = [] # list of blockranges for this trange
            # constrain in case self.trange falls within just one trange
            t0 = intround(max(trange[0], self.trange[0]))
            t1 = intround(min(trange[1], self.trange[1]))
            es = range(t0, t1, bs) # left edges of data blocks
            for e in es:
                br.append([e-bx, e+bs+bx]) # time range to give to .searchblock()
            br = np.asarray(br)
            # limit br to trange
            br[0, 0], br[-1, 1] = trange[0], trange[1]
            blockranges.append(br)

        blockranges = np.concatenate(blockranges)
        # limit blockranges to self.trange
        blockranges[0, 0], blockranges[-1, 1] = self.trange[0], self.trange[1]
        return np.asarray(blockranges)
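The blocking arithmetic in get_blockranges() can be illustrated standalone. A hedged sketch for a single contiguous time range, using hypothetical values for blocksize bs and blockexcess bx: each block is extended by bx on both sides, and the outermost edges are then clipped back to the requested range:

import numpy as np

def blockranges(t0, t1, bs, bx):
    es = np.arange(t0, t1, bs) # left edges of data blocks
    br = np.stack([es - bx, es + bs + bx], axis=1) # extend each block by bx
    br[0, 0], br[-1, 1] = t0, t1 # clip outermost edges to the requested range
    return br

print(blockranges(0, 100, bs=30, bx=5))
# [[  0  35]
#  [ 25  65]
#  [ 55  95]
#  [ 85 100]]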
Example #4
 def __init__(self, neuron=None, experiment=None, trange=None, nt=10):
     self.neuron = neuron
     self.experiment = experiment
      if trange is None:
         self.trange = self.experiment.trange
     else:
         self.trange = trange
      assert isinstance(self.experiment.e, Movie)
     self.movie = self.experiment.e
     try:
         self.movie.frames  # check if movie frames have been loaded from file
     except AttributeError:
          # Load as a 3D array instead of as a list of 2D arrays; more convenient for
          # analysis, although it will cause memory problems for really big (>1 GB) movies.
         # Don't flip the movie frames vertically for OpenGL's bottom left origin, since
         # we aren't using OpenGL for analysis:
         self.movie.load(asarray=True, flip=False)
     self.nt = nt  # number of revcorr timepoints
     self.tis = range(0, nt, 1)  # revcorr timepoint indices
     # revcorr timepoint values, stored in a list, not an array. Bad behaviour happens
      # during __eq__ below if attribs are numpy arrays, because comparing numpy arrays returns
     # an array of booleans, not just a simple boolean:
     self.ts = [
         intround(ti * self.movie.dynamic.sweepSec * 1000)
         for ti in self.tis
     ]
     self.ndinperframe = intround(self.movie.dynamic.sweepSec * 1000000 /
                                  self.experiment.REFRESHTIME)
     #self.movie.frames = np.asarray(self.movie.frames)
     self.width = self.movie.frames.shape[-1]  # (nframes, height, width)
     self.height = self.movie.frames.shape[-2]
     self.done = False  # hasn't yet successfully completed its calc() method
Example #5
    def get_blockranges(self, bs, bx):
        """Generate time ranges for slightly overlapping blocks of contiguous data that
        span self.trange, given blocksize and blockexcess"""
        stream = self.sort.stream
        bs = abs(bs)
        bx = abs(bx)
        if not self.trange[0] <= self.trange[1]:  # not a forward search
            raise RuntimeError('backward detection not allowed')

        tranges = stream.tranges
        # pick out all tranges that overlap with self.trange
        trangesi = (self.trange[0] < tranges[:, 1]) & (tranges[:, 0] <
                                                       self.trange[1])
        tranges = tranges[trangesi]

        blockranges = []
        for trange in tranges:  # iterate over contiguous time ranges
            br = []  # list of blockranges for this trange
            # constrain in case self.trange falls within just one trange
            t0 = intround(max(trange[0], self.trange[0]))
            t1 = intround(min(trange[1], self.trange[1]))
            es = range(t0, t1, bs)  # left edges of data blocks
            for e in es:
                br.append([e - bx, e + bs + bx])  # time range to give to .searchblock()
            br = np.asarray(br)
            # limit br to trange
            br[0, 0], br[-1, 1] = trange[0], trange[1]
            blockranges.append(br)

        blockranges = np.concatenate(blockranges)
        # limit blockranges to self.trange
        blockranges[0, 0], blockranges[-1, 1] = self.trange[0], self.trange[1]
        return np.asarray(blockranges)
Example #6
    def __call__(self, start, stop, chans=None):
        """Called when Stream object is called using (). start and stop indicate start and end
        timepoints in us wrt t=0. Returns the corresponding WaveForm object with just the
        specified chans"""
        if chans is None:
            chans = self.chans
        if not set(chans).issubset(self.chans):
            raise ValueError("requested chans %r are not a subset of available enabled "
                             "chans %r in %s stream" % (chans, self.chans, self.kind))
        nchans = len(chans)
        chanis = self.ADchans.searchsorted(chans)
        rawtres = self.rawtres # float us
        resample = self.sampfreq != self.rawsampfreq or self.shcorrect is True
        if resample:
            # excess data in us at either end, to eliminate interpolation distortion at
            # key.start and key.stop
            xs = KERNELSIZE * rawtres # float us
        else:
            xs = 0.0
        # stream limits, in sample indices:
        t0i = intround(self.t0 / rawtres)
        t1i = intround(self.t1 / rawtres)
        # get a slightly greater range of raw data (with xs) than might be needed:
        t0xsi = intfloor((start - xs) / rawtres) # round down to nearest mult of rawtres
        t1xsi = intceil((stop + xs) / rawtres) # round up to nearest mult of rawtres
        # stay within stream limits, thereby avoiding interpolation edge effects:
        t0xsi = max(t0xsi, t0i)
        t1xsi = min(t1xsi, t1i)
        # convert back to nearest float us:
        t0xs = t0xsi * rawtres
        t1xs = t1xsi * rawtres
        # these are slice indices, so don't add 1:
        ntxs = t1xsi - t0xsi # int
        tsxs = np.linspace(t0xs, t0xs+(ntxs-1)*rawtres, ntxs)
        #print('ntxs: %d' % ntxs)

        # slice out excess data on requested channels, init as int32 so we have bitwidth
        # to rescale and zero, convert to int16 later:
        dataxs = np.int32(self.wavedata[chanis, t0xsi:t1xsi])

        # bitshift left by 4 to scale 12 bit values to use the full 16 bit dynamic range,
        # same as multiplying by 2**(16-12) == 16. This provides more fidelity for
        # interpolation, and reduces uV per AD to about 0.02:
        if self.bitshift:
            dataxs <<= self.bitshift # data is still int32 at this point

        # do any resampling if necessary:
        if resample:
            #tresample = time.time()
            dataxs, tsxs = self.resample(dataxs, tsxs, chans)
            #print('resample took %.3f sec' % (time.time()-tresample))

        # now trim down to just the requested time range:
        lo, hi = tsxs.searchsorted([start, stop])
        data = dataxs[:, lo:hi]
        ts = tsxs[lo:hi]

        # should be safe to convert back down to int16 now:
        data = np.int16(data)
        return WaveForm(data=data, ts=ts, chans=chans)
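The 12-to-16 bit rescaling used above is easy to check in isolation; a standalone illustration:

import numpy as np

# shifting 12 bit samples left by 4 scales them to the full 16 bit range,
# the same as multiplying by 2**(16-12) == 16:
x12 = np.int32([0, 1, 2047, 4095])
print(x12 << 4) # [    0    16 32752 65520]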
Example #7
 def load_stim_mat(self):
     stimd = loadmat(self.path, squeeze_me=True) # dict
     self.p = recarray2dict(stimd['p']) # stim parameters struct `p`, as a dict
     self.t0s = intround(stimd['stimONTimes'] * 1e6) # usec
     self.t1s = intround(stimd['stimOFFTimes'] * 1e6) # usec
     self.trange = self.t0s[0], self.t1s[-1]
     self.ttranges = np.vstack([self.t0s, self.t1s]).T
Example #8
 def load_stim_mat(self):
     stimd = loadmat(self.path, squeeze_me=True)  # dict
      self.p = recarray2dict(stimd['p'])  # stim parameters struct `p`, as a dict
     self.t0s = intround(stimd['stimONTimes'] * 1e6)  # usec
     self.t1s = intround(stimd['stimOFFTimes'] * 1e6)  # usec
     self.trange = self.t0s[0], self.t1s[-1]
     self.ttranges = np.vstack([self.t0s, self.t1s]).T
Example #9
 def load(self):
     with open(self.fname, 'rb') as f:
         d = np.load(f)
         stdnames = [
             'chanpos', 'chans', 'data', 't0', 't1', 'tres', 'uVperAD'
         ]
         optnames = ['chan0', 'probename']
         # bind standard array names in .lfp.zip file to self:
         for key in stdnames:
             assert key in d
             val = d[key]
             # pull some singleton vals out of their arrays:
             if key in ['t0', 't1', 'tres']:  # should all be us
                 val = int(val)
             elif key == 'uVperAD':
                 val = float(val)
             self.__setattr__(key, val)
         # bind optional array names in .lfp.zip file to self:
         for key in optnames:
             if key in d:
                 val = d[key]
                 # pull some singleton vals out of their arrays:
                 if key == 'chan0':
                     val = int(val)
                 elif key == 'probename':
                      val = val.tobytes().decode()  # convert bytes to py3 unicode str
                 self.__setattr__(key, val)
     try:
         self.chan0
      except AttributeError:  # try to figure out base of channel numbering
         nchans, nprobechans = len(self.chans), len(self.chanpos)
         if nchans < nprobechans:
             # it's probably from a .srf recording with only a subset of chans selected for
             # separate analog LFP recording
             self.chan0 = 0  # base of channel numbering, always 0-based for .srf recordings
             print(
                 "Found %d LFP channels, assuming 0-based channel numbering from .srf "
                 "recording" % nchans)
         elif nchans == nprobechans:  # all probe channels have LFP
             self.chan0 = min(self.chans)  # base of channel numbering
         else:  # nchans > nprobechans
             raise ValueError(
                 "don't know how to handle nchans=%d > nprobechans=%d" %
                 (nchans, nprobechans))
     assert self.chan0 in [0, 1]  # either 0- or 1-based
     # make sure chans are in vertical spatial order:
     ypos = self.chanpos[self.chans - self.chan0][:, 1]
     if not issorted(ypos):
         print("LFP chans in %s aren't sorted by depth, sorting them now" %
               self.fname)
         sortis = ypos.argsort()
         self.chans = self.chans[sortis]
         self.data = self.data[sortis]
         newypos = self.chanpos[self.chans - self.chan0][:, 1]
         assert issorted(newypos)
     self.sampfreq = intround(1e6 / self.tres)  # in Hz
     assert self.sampfreq == 1000  # should be 1000 Hz
     self.data = self.data * self.uVperAD  # convert to float uV
     self.UV2UM = 0.05  # transforms LFP voltage in uV to position in um
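The loader above treats the .lfp.zip file as a NumPy zip archive: np.load() on such an archive returns a dict-like NpzFile, which is why `key in d` and d[key] work. A hedged round-trip sketch with a hypothetical filename:

import numpy as np

np.savez('demo_lfp.npz', t0=np.int64(0), tres=np.int64(1000),
         data=np.zeros((2, 5), dtype=np.int16)) # 'demo_lfp.npz' is hypothetical
d = np.load('demo_lfp.npz')
print(int(d['t0']), int(d['tres']), d['data'].shape) # 0 1000 (2, 5)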
Example #10
 def export_dat(self, dt=None):
     """Export contiguous data packet to .dat file, in the original (ti, chani) order
     using same base file name in the same folder. dt is duration to export from start
     of recording, in sec"""
      if dt is None:
         nt = self.nt
         dtstr = ''
     else:
         nt = intround(dt * self.fileheader.sampfreq)
         dtstr = str(dt)
     assert self.is_open()
     nchanstotal = self.fileheader.nchanstotal
     nbytes = nt * nchanstotal * 2 # number of bytes requested, 2 bytes per datapoint
     offset = self.datapacket.dataoffset
     self.f.seek(offset)
     datbasefname = os.path.splitext(self.fname)[0]
     if dtstr == '':
         datfname = '%s.dat' % datbasefname
     else:
         datfname = '%s_%ss.dat' % (datbasefname, dtstr)
     fulldatfname = self.join(datfname)
     print('writing raw ephys data to %r' % fulldatfname)
     print('starting from dataoffset at %d bytes' % offset)
     with open(fulldatfname, 'wb') as datf:
         datf.write(self.f.read(nbytes))
     nbyteswritten = self.f.tell() - offset
     print('%d bytes written' % nbyteswritten)
     print('%d attempted, %d actual timepoints written' % (nt, nbyteswritten/nchanstotal/2))
     print('voltage gain: %g uV/AD' % self.fileheader.AD2uVx)
     print('sample rate: %d Hz' % self.fileheader.sampfreq)
     print('total number of chans: %d' % nchanstotal)
     print('total number of ephys chans: %d' % self.fileheader.nchans)
Example #11
 def plot(self, nbins=None, rate=False, figsize=(7.5, 6.5)):
     """style can be 'rate', but defaults to count"""
     if nbins == None:
         nbins = intround(np.sqrt(len(self.dts))) # good heuristic
     dts = self.dts / 1000 # in ms, converts to float64 array
     trange = self.trange / 1000 # in ms, converts to float64 array
     nbins = max(20, nbins) # enforce min nbins
     nbins = min(200, nbins) # enforce max nbins
     t = np.linspace(start=trange[0], stop=trange[1], num=nbins, endpoint=True)
     n = np.histogram(dts, bins=t, density=False)[0]
     binwidth = t[1] - t[0] # all should be equal width
     if rate: # normalize by binwidth and convert to float:
         n = n / float(binwidth)
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
      a.bar(t[:-1], n, width=binwidth, align='edge') # omit last right edge in t
     a.set_xlim(t[0], t[-1])
     a.set_xlabel('ISI (ms)')
     if rate:
         a.set_ylabel('spike rate (Hz)')
     else:
         a.set_ylabel('count')
     #a.set_title('n%d spikes relative to n%d spikes' % (self.n1.id, self.n0.id))
     title = lastcmd() + ', binwidth: %.2f ms' % binwidth
     a.set_title(title)
     gcfm().window.setWindowTitle(title)
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     return self
Example #12
    def export_dat(self, dt=None):
        """Export contiguous data packet to .dat file, in the original (ti, chani) order
        using same base file name in the same folder. Also export companion .json metadata
        file. dt is duration to export from start of recording, in sec"""
        if dt is None:
            nt = self.nt
            dtstr = ''
        else:
            nt = intround(dt * self.fileheader.sampfreq)
            dtstr = str(dt)
        assert self.is_open()
        fh = self.fileheader
        nbytes = nt * fh.nchanstotal * 2  # number of bytes requested, 2 bytes per datapoint
        offset = self.datapacket.dataoffset
        self.f.seek(offset)
        basefname = os.path.splitext(self.fname)[0]
        if dtstr:
            basefname = '%s_%ss' % (basefname, dtstr)
        datfname = basefname + '.dat'
        jsonfname = datfname + '.json'
        fulldatfname = self.join(datfname)
        fulljsonfname = self.join(jsonfname)

        # export .dat file:
        print('writing raw ephys data to %r' % fulldatfname)
        print('starting from dataoffset at %d bytes' % offset)
        with open(fulldatfname, 'wb') as datf:
            datf.write(self.f.read(nbytes))
        nbyteswritten = self.f.tell() - offset
        print('%d bytes written' % nbyteswritten)
        print('%d attempted, %d actual timepoints written' %
              (nt, nbyteswritten / fh.nchanstotal / 2))

        # export companion .json metadata file:
        od = odict()
        od['nchans'] = fh.nchanstotal
        od['sample_rate'] = fh.sampfreq
        od['dtype'] = 'int16'  # hard-coded, only dtype supported for now
        od['uV_per_AD'] = fh.AD2uVx
        od['chan_layout_name'] = self.hpstream.probe.name
        od['chans'] = list(fh.chans)
        od['aux_chans'] = list(fh.auxchans)
        od['nsamples_offset'] = self.t0i
        od['datetime'] = fh.datetime.isoformat()
        od['author'] = ''
        od['version'] = ''  # no way to extract Blackrock NSP version from .nsx?
        od['notes'] = fh.comment
        with open(fulljsonfname, 'w') as jsonf:
            ## TODO: make list fields not have a newline for each entry
            json.dump(od, jsonf, indent=0)  # write contents of odict to file
            jsonf.write('\n')  # end with a blank line
        print('wrote metadata file %r' % fulljsonfname)

        # print the important metadata:
        print('total chans: %d' % fh.nchanstotal)
        print('ephys chans: %d' % fh.nchans)
        print('sample rate: %d Hz' % fh.sampfreq)
        print('voltage gain: %g uV/AD' % fh.AD2uVx)
        print('chan layout: %s' % self.hpstream.probe.name)
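Reading the exported files back isn't shown in this excerpt. A hedged sketch of how the .dat and its .json companion could be consumed, with hypothetical filenames; the .dat is written in the original (ti, chani) order, so it reshapes to (nt, nchans) and transposes:

import json
import numpy as np

with open('rec.dat.json') as f: # hypothetical filename
    meta = json.load(f)
raw = np.memmap('rec.dat', dtype=meta['dtype'], mode='r')
data = raw.reshape(-1, meta['nchans']).T # (nchans, nt)
uV = data[:, :1000] * meta['uV_per_AD'] # first 1000 timepoints, scaled to uV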
Example #13
 def calc(self):
     """.t and .s attribs are commented out to save substantial memory"""
     self.t = []  # bin times
     #self.s = [] # relevant spike times, potentially shifted by self.shift
     self.c = []  # code values for each bin
      shift = intround(self.shift * 1000)  # convert self.shift in ms to int us
     for trange in self.tranges:
         # make the start of the timepoints be an even multiple of self.tres. Round down
         # to the nearest multiple. This way, timepoints will line up for different code
         # objects
         # left edge of first code bin:
         tstart = trange[0] - (trange[0] % self.tres)
         if self.phase:  # add phase offset relative to tstart
             tstart += self.phase / 360.0 * self.tres
         tstart = intround(tstart)  # keep it int
         tend = intround(trange[1])  # ditto
         # t sequence demarcates left bin edges, add extra tres to end to make t end
         # inclusive, keep 'em in us integers:
          t = np.arange(tstart, tend + self.tres, self.tres)  # should come out as int64
         # get relevant spike times s, cut over originally specified trange, not from
         # start to end of newly generated code bin timepoints:
         lo, hi = self.spikes.searchsorted(trange)
         s = self.spikes[lo:hi] + shift
         c = np.empty(len(t), dtype=np.int8)  # init binary code array
         c[:] = self.codevals[0]  # init code to low value
         # searchsorted returns indices where s fits into t. Sometimes more than one
         # spike will fit into the same time bin, which means searchsorted will return
          # multiple occurrences of the same index. You can set c at these indices to 1 a
         # multiple number of times, or more efficiently, do an np.unique on it to
         # only set each index to 1 once.
         # dec index by 1 so that you get indices that point to the most recent bin edge.
         # For each bin that has at least 1 spike in it, set its value to high:
         c[np.unique(t.searchsorted(s)) - 1] = self.codevals[1]
         self.t.append(t)
         #self.s.append(s)
         self.c.append(c)
     # horizontally concatenate results from each trange:
     self.t = np.hstack(self.t)
     #self.s = np.hstack(self.s)
     self.c = np.hstack(self.c)
     del self.spikes  # no need for spikes any more, save memory
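The searchsorted/unique trick in calc() is easy to demonstrate on its own. searchsorted returns, for each spike, the index of the first bin edge not less than the spike time, so decrementing by 1 points at the bin the spike fell into, and np.unique collapses repeats from bins holding multiple spikes:

import numpy as np

t = np.arange(0, 100, 20) # bin left edges: [ 0 20 40 60 80]
s = np.array([5, 7, 45, 99]) # spike times
c = np.zeros(len(t), dtype=np.int8)
c[np.unique(t.searchsorted(s)) - 1] = 1
print(c) # [1 0 1 0 1]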
Example #14
 def __init__(self, nids=None, experiment=None, trange=None, nt=10):
     self.experiment = experiment
     self.nids, self.neurons = self.parse_nids(nids)
      if trange is None:
         trange = self.experiment.trange
     self.trange = trange
     self.nt = nt # number of revcorr timepoints
     self.tis = range(0, nt, 1) # revcorr timepoint indices
     self.ts = [ intround(ti * self.experiment.e.dynamic.sweepSec * 1000)
                 for ti in self.tis ] # revcorr timepoint values, in ms
Example #15
 def load(self):
     with open(self.fname, 'rb') as f:
         d = np.load(f)
         stdnames = ['chanpos', 'chans', 'data', 't0', 't1', 'tres', 'uVperAD']
         optnames = ['chan0', 'probename']
         # bind standard array names in .lfp.zip file to self:
         for key in stdnames:
             assert key in d
             val = d[key]
             # pull some singleton vals out of their arrays:
             if key in ['t0', 't1', 'tres']: # should all be us
                 val = int(val)
             elif key == 'uVperAD':
                 val = float(val)
             self.__setattr__(key, val)
         # bind optional array names in .lfp.zip file to self:
         for key in optnames:
             if key in d:
                 val = d[key]
                 # pull some singleton vals out of their arrays:
                 if key == 'chan0':
                     val = int(val)
                 elif key == 'probename':
                  val = val.tobytes().decode() # convert from bytes to py3 unicode str
                 self.__setattr__(key, val)
     try:
         self.chan0
      except AttributeError: # try to figure out base of channel numbering
         nchans, nprobechans = len(self.chans), len(self.chanpos)
         if nchans < nprobechans:
             # it's probably from a .srf recording with only a subset of chans selected for
             # separate analog LFP recording
             self.chan0 = 0 # base of channel numbering, always 0-based for .srf recordings
             print("Found %d LFP channels, assuming 0-based channel numbering from .srf "
                   "recording" % nchans)
         elif nchans == nprobechans: # all probe channels have LFP
             self.chan0 = min(self.chans) # base of channel numbering
         else: # nchans > nprobechans
             raise ValueError("don't know how to handle nchans=%d > nprobechans=%d" %
                              (nchans, nprobechans))
     assert self.chan0 in [0, 1] # either 0- or 1-based
     # make sure chans are in vertical spatial order:
     ypos = self.chanpos[self.chans - self.chan0][:, 1]
     if not issorted(ypos):
         print("LFP chans in %s aren't sorted by depth, sorting them now" % self.fname)
         sortis = ypos.argsort()
         self.chans = self.chans[sortis]
         self.data = self.data[sortis]
         newypos = self.chanpos[self.chans - self.chan0][:, 1]
         assert issorted(newypos)
     self.sampfreq = intround(1e6 / self.tres) # in Hz
     assert self.sampfreq == 1000 # should be 1000 Hz
     self.data = self.data * self.uVperAD # convert to float uV
     self.UV2UM = 0.05 # transforms LFP voltage in uV to position in um
Example #16
 def calc(self):
     """.t and .s attribs are commented out to save substantial memory"""
     self.t = [] # bin times
     #self.s = [] # relevant spike times, potentially shifted by self.shift
     self.c = [] # code values for each bin
     shift = intround(self.shift * 1000) # convert self.shift in ms to int us
     for trange in self.tranges:
         # make the start of the timepoints be an even multiple of self.tres. Round down
         # to the nearest multiple. This way, timepoints will line up for different code
         # objects
         # left edge of first code bin:
         tstart = trange[0] - (trange[0] % self.tres)
         if self.phase: # add phase offset relative to tstart
             tstart += self.phase / 360.0 * self.tres
         tstart = intround(tstart) # keep it int
         tend = intround(trange[1]) # ditto
         # t sequence demarcates left bin edges, add extra tres to end to make t end
         # inclusive, keep 'em in us integers:
         t = np.arange(tstart, tend+self.tres, self.tres) # should come out as int64
         # get relevant spike times s, cut over originally specified trange, not from
         # start to end of newly generated code bin timepoints:
         lo, hi = self.spikes.searchsorted(trange)
         s = self.spikes[lo:hi] + shift
         c = np.empty(len(t), dtype=np.int8) # init binary code array
         c[:] = self.codevals[0] # init code to low value
         # searchsorted returns indices where s fits into t. Sometimes more than one
         # spike will fit into the same time bin, which means searchsorted will return
          # multiple occurrences of the same index. You can set c at these indices to 1 a
         # multiple number of times, or more efficiently, do an np.unique on it to
         # only set each index to 1 once.
         # dec index by 1 so that you get indices that point to the most recent bin edge.
         # For each bin that has at least 1 spike in it, set its value to high:
         c[np.unique(t.searchsorted(s)) - 1] = self.codevals[1]
         self.t.append(t)
         #self.s.append(s)
         self.c.append(c)
     # horizontally concatenate results from each trange:
     self.t = np.hstack(self.t)
     #self.s = np.hstack(self.s)
     self.c = np.hstack(self.c)
     del self.spikes # no need for spikes any more, save memory
Example #17
 def __init__(self, nids=None, experiment=None, trange=None, nt=10):
     self.experiment = experiment
     self.nids, self.neurons = self.parse_nids(nids)
      if trange is None:
         trange = self.experiment.trange
     self.trange = trange
     self.nt = nt  # number of revcorr timepoints
     self.tis = range(0, nt, 1)  # revcorr timepoint indices
     self.ts = [
         intround(ti * self.experiment.e.dynamic.sweepSec * 1000)
         for ti in self.tis
     ]  # revcorr timepoint values, in ms
Example #18
 def get_thresh(self):
     """Return array of thresholds in AD units, one per chan in self.chans,
     according to threshmethod and noisemethod"""
     self.fixedthresh = self.sort.converter.uV2AD(
         self.fixedthreshuV)  # convert to AD units
     if self.threshmethod == 'GlobalFixed':  # all chans have the same fixed threshold
         thresh = np.tile(self.fixedthresh, len(self.chans))
     elif self.threshmethod == 'ChanFixed':  # each chan has its own fixed threshold
         # randomly sample self.fixednoisewin's worth of data from self.trange in
         # blocks of self.blocksize, without replacement
         tload = time.time()
         print('loading data to calculate noise')
         if self.fixednoisewin >= abs(self.trange[1] - self.trange[0]):
             # sample width meets or exceeds search trange
              blockranges = [self.trange]  # use a single block of data, as defined by trange
         else:
             nblocks = intround(self.fixednoisewin / self.blocksize)
             blockranges = RandomBlockRanges(self.trange,
                                             bs=self.blocksize,
                                             bx=0,
                                             maxntranges=nblocks,
                                             replacement=False)
         # preallocating memory doesn't seem to help here, all the time is in loading
         # from stream:
         data = []
         for blockrange in blockranges:
             wave = self.sort.stream(blockrange[0], blockrange[1],
                                     self.chans)
             data.append(wave.data)
         data = np.concatenate(data, axis=1)  # int16 AD units
         info('loading data to calc noise took %.3f sec' %
              (time.time() - tload))
         tnoise = time.time()
         noise = self.get_noise(data)  # float AD units
         info('get_noise took %.3f sec' % (time.time() - tnoise))
         thresh = noise * self.noisemult  # float AD units
         thresh = np.int16(np.round(thresh))  # int16 AD units
         # clip so that all thresholds are at least fixedthresh
         thresh = thresh.clip(self.fixedthresh, thresh.max())
     elif self.threshmethod == 'Dynamic':
         # dynamic thresholds are calculated on the fly during the search, so leave
         # as zero for now
         thresh = np.zeros(len(self.chans), dtype=np.int16)
     else:
         raise ValueError
     return thresh
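get_noise() isn't included in this excerpt. A common robust estimator for this kind of per-channel noise measurement is the scaled median absolute value; this is an assumption about the technique, not necessarily what this codebase implements:

import numpy as np

def mad_noise(data):
    # data: (nchans, nt) array in AD units. median(|x|) / 0.6745 approximates
    # the standard deviation of zero-mean Gaussian noise on each channel:
    return np.median(np.abs(data), axis=1) / 0.6745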
Example #19
 def load(self):
     with open(self.fname, 'rb') as f:
         d = np.load(f)
         assert sorted(d.keys()) == ['chanpos', 'chans', 'data', 't0', 't1', 'tres',
                                     'uVperAD']
         # bind arrays in .lfp.zip file to self:
          for key, val in d.items():
             # pull some singleton vals out of their arrays:
             if key in ['t0', 't1', 'tres']: # should all be us
                 val = int(val)
             elif key == 'uVperAD':
                 val = float(val)
             self.__setattr__(key, val)
     # make sure chans are in vertical spatial order:
      assert issorted(self.chanpos[self.chans][:, 1])
     self.sampfreq = intround(1e6 / self.tres) # in Hz
     assert self.sampfreq == 1000 # should be 1000 Hz
     self.data = self.data * self.uVperAD # convert to float uV
     self.UV2UM = 0.05 # transforms LFP voltage in uV to position in um
Example #20
 def get_thresh(self):
     """Return array of thresholds in AD units, one per chan in self.chans,
     according to threshmethod and noisemethod"""
     self.fixedthresh = self.sort.converter.uV2AD(self.fixedthreshuV) # convert to AD units
     if self.threshmethod == 'GlobalFixed': # all chans have the same fixed thresh
         thresh = np.tile(self.fixedthresh, len(self.chans))
     elif self.threshmethod == 'ChanFixed': # each chan has its own fixed thresh
         # randomly sample self.fixednoisewin's worth of data from self.trange in
         # blocks of self.blocksize, without replacement
         tload = time.time()
         print('loading data to calculate noise')
         if self.fixednoisewin >= abs(self.trange[1] - self.trange[0]):
              # sample width meets or exceeds search trange
             blockranges = [self.trange] # use a single block of data, as defined by trange
         else:
             nblocks = intround(self.fixednoisewin / self.blocksize)
             blockranges = RandomBlockRanges(self.trange, bs=self.blocksize, bx=0,
                                             maxntranges=nblocks, replacement=False)
         # preallocating memory doesn't seem to help here, all the time is in loading
         # from stream:
         data = []
         for blockrange in blockranges:
             wave = self.sort.stream(blockrange[0], blockrange[1], self.chans)
             data.append(wave.data)
         data = np.concatenate(data, axis=1) # int16 AD units
         info('loading data to calc noise took %.3f sec' % (time.time()-tload))
         tnoise = time.time()
         noise = self.get_noise(data) # float AD units
         info('get_noise took %.3f sec' % (time.time()-tnoise))
         thresh = noise * self.noisemult # float AD units
         thresh = np.int16(np.round(thresh)) # int16 AD units
          # clip so that all thresholds are at least fixedthresh
         thresh = thresh.clip(self.fixedthresh, thresh.max())
     elif self.threshmethod == 'Dynamic':
          # dynamic thresholds are calculated on the fly during the search, so leave
         # as zero for now
         thresh = np.zeros(len(self.chans), dtype=np.int16)
     else:
         raise ValueError
     return thresh
Example #21
 def parse(self, f):
     # Record type 'L'
     self.UffType = f.read(1)
     # hack to skip next 7 bytes
     f.seek(7, 1)
     # Time stamp, 64 bit signed int
     self.TimeStamp, = unpack('q', f.read(8))
     # SURF major version number (2)
     self.SurfMajor, = unpack('B', f.read(1))
     # SURF minor version number (1)
     self.SurfMinor, = unpack('B', f.read(1))
     # hack to skip next 2 bytes
     f.seek(2, 1)
      # ADC/precision CT master clock frequency (1 MHz for DT3010)
     self.MasterClockFreq, = unpack('i', f.read(4))
     # undecimated base sample frequency per channel (25kHz)
     self.BaseSampleFreq, = unpack('i', f.read(4))
     # true (1) if Stimulus DIN acquired
     self.DINAcquired, = unpack('B', f.read(1))
     # hack to skip next byte
     f.seek(1, 1)
     # probe number
     self.Probe, = unpack('h', f.read(2))
     # =E,S,C for epochspike, spikestream, or continuoustype
     self.ProbeSubType = f.read(1)
     # hack to skip next byte
     f.seek(1, 1)
     # number of channels in the probe (54, 1)
     self.nchans, = unpack('h', f.read(2))
     # number of samples displayed per waveform per channel (25, 100)
     self.pts_per_chan, = unpack('h', f.read(2))
     # hack to skip next 2 bytes
     f.seek(2, 1)
     # {n/a to cat9} total number of samples per file buffer for this probe
     # (redundant with SS_REC.NumSamples) (135000, 100)
     self.pts_per_buffer, = unpack('i', f.read(4))
     # pts before trigger (7)
     self.trigpt, = unpack('h', f.read(2))
     # Lockout in pts (2)
     self.lockout, = unpack('h', f.read(2))
     # A/D board threshold for trigger (0-4096)
     self.threshold, = unpack('h', f.read(2))
     # A/D sampling decimation factor (1, 25)
     self.skippts, = unpack('h', f.read(2))
     # S:H delay offset for first channel of this probe (1)
     self.sh_delay_offset, = unpack('h', f.read(2))
     # hack to skip next 2 bytes
     f.seek(2, 1)
     # A/D sampling frequency specific to this probe (ie. after decimation,
     # if any) (25000, 1000)
     self.sampfreqperchan, = unpack('i', f.read(4))
     # us, store it here for convenience
     self.tres = intround(1 / float(self.sampfreqperchan) * 1e6)  # us
     # MOVE BACK TO AFTER SHOFFSET WHEN FINISHED WITH CAT 9!!! added May 21, 1999
     # only the first self.nchans are filled (5000), the rest are junk values that pad to 64 channels
     self.extgain = np.asarray(
         unpack('H' * self.SURF_MAX_CHANNELS,
                f.read(2 * self.SURF_MAX_CHANNELS)))
     # throw away the junk values
     self.extgain = self.extgain[:self.nchans]
     # A/D board internal gain (1,2,4,8) <--MOVE BELOW extgain after finished with CAT9!!!!!
     self.intgain, = unpack('h', f.read(2))
     # (0 to 53 for highpass, 54 to 63 for lowpass, + junk values that pad
      # to 64 channels) v1.0 had ADchanlist as an array of 32 ints. Now it
      # is an array of 64, so delete 32*4=128 bytes from the end
     self.ADchanlist = unpack('h' * self.SURF_MAX_CHANNELS,
                              f.read(2 * self.SURF_MAX_CHANNELS))
     # throw away the junk values
     self.ADchanlist = np.asarray(self.ADchanlist[:self.nchans])
     # hack to skip next byte
     f.seek(1, 1)
     # ShortString (uMap54_2a, 65um spacing)
     self.probe_descrip = f.read(255).rstrip(NULL)
     # hack to skip next byte
     f.seek(1, 1)
     # ShortString (uMap54_2a)
     self.electrode_name = f.read(255).rstrip(NULL)
     # hack to skip next 2 bytes
     f.seek(2, 1)
     # MOVE BELOW ADCHANLIST FOR CAT 9
      # v1.0 had ProbeWinLayout as 4*32*2=256 bytes, now only 4*4=16 bytes, so add 240 bytes of pad
     self.probewinlayout = ProbeWinLayout()
     self.probewinlayout.parse(f)
     # array[0..879 {remove for cat 9!!!-->}- 4{pts_per_buffer} - 2{SHOffset}] of BYTE;
     # {pad for future expansion/modification}, no need to save it, skip it instead
     f.seek(880 - 4 - 2, 1)
     # hack to skip next 6 bytes, or perhaps pad should be 4+2 longer
     f.seek(6, 1)
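The record parsing above reduces to sequential fixed-width struct reads from a binary stream; a standalone miniature (using an explicit little-endian '<q' for determinism, whereas the parser above relies on native byte order):

from io import BytesIO
from struct import unpack

buf = BytesIO(b'L' + bytes(7) + (1234567890).to_bytes(8, 'little', signed=True))
rectype = buf.read(1) # record type, like self.UffType
buf.seek(7, 1) # skip 7 pad bytes
timestamp, = unpack('<q', buf.read(8))
print(rectype, timestamp) # b'L' 1234567890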
Example #22
from psth_funcs import get_psth_peaks_simple, plot_psth

rec = ptc22.tr1.r08
strange = REC2STATETRANGES[rec.absname][1] # r08 synched, us
nids = [5, 23, 24] # 3 example inactive yet responsive nids in ptc22.tr1.r08

EPS = np.spacing(1) # epsilon, smallest representable non-zero number

BINW, TRES = 0.02, 0.0001 # PSTH time bins, sec
# 2.5 Hz thresh is 1 spike in the same 20 ms wide bin every 20 trials, assuming 0 baseline:
MINTHRESH = 3 # peak detection thresh, Hz
MEDIANX = 2 # PSTH median multiplier, Hz
FWFRACTION = 0.5 # full width fraction of max
WIDTHMAX = 200 # maximum width, ms
WIDTHMAXPOINTS = intround(WIDTHMAX / 1000 / TRES) # maximum width, number of PSTH timepoints

# plotting params:
PLOTPSTH = True
FIGSIZE = 3.14, 2
YMAX = 6 # Hz
YTICKS = 0, 3, 6
MS = 5

t, psths, spikets = rec.psth(nids=nids, natexps=False, strange=strange, plot=False,
                             binw=BINW, tres=TRES, norm='ntrials')

psthparams = {} # params returned for each PSTH by get_psth_peaks
for nid, psth in zip(nids, psths):
    psthparams[nid] = get_psth_peaks_simple(t, psth, nid, WIDTHMAXPOINTS, minthresh=MINTHRESH,
                                            medianx=MEDIANX, fwfraction=FWFRACTION)
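The script defines plotting parameters (PLOTPSTH, FIGSIZE, YMAX, YTICKS) but the excerpt ends before using them. A minimal hedged sketch of how the PSTHs could be plotted, using plain matplotlib via the pl alias assumed by the surrounding code, since plot_psth's exact signature isn't shown here:

if PLOTPSTH:
    f = pl.figure(figsize=FIGSIZE)
    a = f.add_subplot(111)
    for nid, psth in zip(nids, psths):
        a.plot(t, psth, label='n%d' % nid)
    a.set_ylim(0, YMAX)
    a.set_yticks(YTICKS)
    a.set_xlabel('time (s)')
    a.set_ylabel('firing rate (Hz)')
    a.legend(frameon=False)
    f.tight_layout(pad=0.3)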
Example #23
"""Calculate MUA coupling and its effect on natscene response reliability and precision. Run
from within neuropy using `run -i scripts/MUA_coupling.py`"""

from scipy.stats import mannwhitneyu, linregress

import core
from core import sparseness, intround, ceilsigfig, floorsigfig

from psth_funcs import get_nids_psths

BLANK = False  # consider blank periods between trials?
WEIGHT = False  # weight trials by spike count for reliability measure?
BINW, TRES = 0.02, 0.001  # PSTH and MUA time bins, sec
BINWMS = '%dms' % intround(BINW * 1000)
GAUSS = True  # calculate PSTH and single trial rates by convolving with Gaussian kernel?
if GAUSS:
    KERNEL = 'gauss'
else:
    KERNEL = 'square'
KIND = 'responsive'  # which type of neurons to use? 'responsive' or 'active'
MEDIANX = 2  # PSTH median multiplier, Hz
MINTHRESH = 3  # peak detection thresh, Hz

# plotting params:
LOGNULLREL = -3
NULLREL = 10**LOGNULLREL
FIGSIZE = 3, 3
COUPMIN, COUPMAX, COUPBINW = -0.4, 1, 0.1
coupbins = np.arange(COUPMIN, COUPMAX + COUPBINW, COUPBINW)  # left edges + rightmost edge
couprange = np.asarray([coupbins[0], coupbins[-1]])
Example #24
    def specgram(self, t0=None, t1=None, f0=0.1, f1=100, p0=-60, p1=None, chanis=-1,
                 width=None, tres=None, cm='jet', colorbar=False,
                 showstates=False, lw=4, alpha=1, relative2t0=False, lim2stim=False,
                 title=True, reclabel=True, swapaxes=False, figsize=None):
        """Plot a spectrogram from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values
        from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most
        superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of
        specified chanis. width and tres are in sec. As an alternative to cm.jet (the
        default), cm.gray, cm.hsv cm.terrain, and cm.cubehelix_r colormaps seem to bring out
        the most structure in the spectrogram. showstates controls whether to plot lines
        demarcating desynchronized and synchronized periods. relative2t0 controls whether to
        plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range
        only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din"""
        uns = get_ipython().user_ns
        self.get_data()
        ts = self.get_tssec() # full set of timestamps, in sec
        if t0 is None:
            t0, t1 = ts[0], ts[-1] # full duration
        if t1 is None:
            t1 = t0 + 10 # 10 sec window
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if width is None:
            width = uns['LFPSPECGRAMWIDTH'] # sec
        if tres is None:
            tres = uns['LFPSPECGRAMTRES'] # sec
        assert tres <= width
        NFFT = intround(width * self.sampfreq)
        noverlap = intround(NFFT - tres * self.sampfreq)
        t0i, t1i = ts.searchsorted((t0, t1))
        #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
        data = self.data[:, t0i:t1i] # slice data
        if figsize is None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the specgram:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5 # inches
            figsize = figwidth, figheight
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if iterable(chanis):
            data = data[chanis].mean(axis=0) # take mean of data on chanis
        else:
            data = data[chanis] # get single row of data at chanis
        #data = filter.notch(data)[0] # remove 60 Hz mains noise
        # convert data from uV to mV, returned t is midpoints of time bins in sec from
        # start of data. I think P is in mV^2?:
        P, freqs, t = mpl.mlab.specgram(data/1e3, NFFT=NFFT, Fs=self.sampfreq,
                                        noverlap=noverlap)
        if not relative2t0:
            t += t0 # convert t to time from start of ADC clock:
        # keep only freqs between f0 and f1:
        if f0 is None:
            f0 = freqs[0]
        if f1 is None:
            f1 = freqs[-1]
        df = f1 - f0
        lo, hi = freqs.searchsorted([f0, f1])
        P, freqs = P[lo:hi], freqs[lo:hi]
        # check for and replace zero power values (ostensibly due to gaps in recording)
        # before attempting to convert to dB:
        zis = np.where(P == 0.0) # row and column indices where P has zero power
        if len(zis[0]) > 0: # at least one hit
            P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
            minnzval = P.min() # get minimum nonzero value
            P[zis] = minnzval # replace with min nonzero values
        P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2?
        # for better visualization, clip power values to within (p0, p1) dB
        if p0 is not None:
            P[P < p0] = p0
        if p1 is not None:
            P[P > p1] = p1
        #self.P = P

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        statelinepos = f0 - df*0.015 # plot horizontal bars just below x axis
        if showstates:
            if showstates in [True, 'auto']:
                print("TODO: there's an offset plotting bug for 'auto', compare with 'manual'")
                si, t = self.si(plot=False)
                stranges, states = self.si_split(si, t) # sec
                STATECOLOURS = uns['LFPPRBINCOLOURS']
            elif showstates == 'manual':
                stranges, states = [], []
                for state in uns['MANUALSTATES']:
                    for strange in uns['REC2STATE2TRANGES'][self.r.absname][state]:
                        stranges.append(strange)
                        states.append(state)
                stranges = np.vstack(stranges) # 2D array
                STATECOLOURS = uns['MANUALSTATECOLOURS']
            else:
                raise ValueError('invalid value showstates=%r' % showstates)
            # clip stranges to t0, t1:
            stranges[0, 0] = max(stranges[0, 0], t0)
            stranges[-1, 1] = min(stranges[-1, 1], t1)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            for strange, state in zip(stranges, states):
                clr = STATECOLOURS[state]
                lines(statelinepos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha,
                      clip_on=False)

        # Label far left, right, top and bottom edges of imshow image. imshow interpolates
        # between these to place the axes ticks. Time limits are
        # set from midpoints of specgram time bins
        extent = t[0], t[-1], freqs[0], freqs[-1]
        #print('specgram extent: %r' % (extent,))
        # flip P vertically for compatibility with imshow:
        im = a.imshow(P[::-1], extent=extent, cmap=cm)
        a.autoscale(enable=True, tight=True)
        a.axis('tight')
        # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xmin=0, xmax=t[-1])
        a.set_ylim(ymin=freqs[0], ymax=freqs[-1])
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        a.set_xlabel("time (s)")
        a.set_ylabel("frequency (Hz)")
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if title:
            a.set_title(titlestr)
        if reclabel:
            a.text(0.994, 0.95, '%s' % self.r.absname, color='w', transform=a.transAxes,
                   horizontalalignment='right', verticalalignment='top')
        f.tight_layout(pad=0.3) # crop figure to contents
        if colorbar:
            f.colorbar(im, pad=0) # creates big whitespace to the right for some reason
        self.f = f
        return P, freqs, t
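The window and step parameters map onto matplotlib's spectrogram machinery the same way here and in Example #25; in isolation, with hypothetical values:

sampfreq = 1000 # Hz
width, tres = 2.0, 0.5 # sec
NFFT = intround(width * sampfreq) # samples per FFT window: 2000
noverlap = intround(NFFT - tres * sampfreq) # overlap so windows step by tres: 1500
print(NFFT, noverlap) # 2000 1500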
Example #25
F0, F1 = 0.2, 110 # Hz
P0, P1 = None, None
chanis = -1
width, tres = 10, 5 # sec
figsize = (3.5, 3.5)
XSCALE = 'linear'

if width is None: # window width
    width = LFPWIDTH # sec
if tres is None: # window tres
    tres = LFPTRES # sec
assert tres <= width

SAMPFREQ = 1000 # Hz, should be the same for all LFPs
NFFT = intround(width * SAMPFREQ)
NOVERLAP = intround(NFFT - tres * SAMPFREQ)

def plot_psd(data, titlestr):
    data = filter.notch(data)[0] # remove 60 Hz mains noise, as for SI calc
    # convert data from uV to mV. I think P is in mV^2?:
    P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=SAMPFREQ, noverlap=NOVERLAP)
    # keep only freqs between F0 and F1:
    f0, f1 = F0, F1 # local names, so the module-level F0 and F1 aren't rebound
    if f0 is None:
        f0 = freqs[0]
    if f1 is None:
        f1 = freqs[-1]
    lo, hi = freqs.searchsorted([f0, f1])
    P, freqs = P[lo:hi], freqs[lo:hi]
    # check for and replace zero power values (ostensibly due to gaps in recording)
Example #26
    def check_wave(self, wave, cutrange):
        """Check which threshold-exceeding peaks in wave data look like spikes
        and return only events that fall within cutrange. Search local spatiotemporal
        window around threshold-exceeding peak for biggest peak-to-peak sharpness.
        Finally, test that the sharpest peak and its neighbour exceed Vp and Vpp thresholds"""
        sort = self.sort
        AD2uV = sort.converter.AD2uV
        if self.extractparamsondetect:
            weights2f = sort.extractor.weights2spatial
            f = sort.extractor.f
        # holds time indices for each enabled chan until which each enabled chani is
        # locked out, updated on every found spike
        lockouts = np.zeros(self.nchans, dtype=np.int64)

        tsharp = time.time()
        sharp = util.sharpness2D(wave.data) # sharpness of all zero-crossing separated peaks
        info('%s: sharpness2D() took %.3f sec' % (ps().name, time.time()-tsharp))
        targthreshsharp = time.time()
        # threshold-exceeding peak indices (2D, columns are [tis, cis])
        peakis = util.argthreshsharp(wave.data, self.thresh, sharp)
        info('%s: argthreshsharp() took %.3f sec' % (ps().name, time.time()-targthreshsharp))

        maxti = len(wave.ts) - 1
        dti = self.dti
        twi = sort.twi
        sdti = dti // 2 # spatial dti: max dti allowed between maxchan and all other chans
        nspikes = 0
        npeaks = len(peakis)
        spikes = np.zeros(npeaks, self.SPIKEDTYPE) # nspikes will always be <= npeaks
        ## TODO: test whether np.empty or np.zeros is faster overall in this case
        wavedata = np.empty((npeaks, self.maxnchansperspike, self.maxnt), dtype=np.int16)
        # check each threshold-exceeding peak for validity:
        for peaki, (ti, chani) in enumerate(peakis):
            if DEBUG: self.log('*** trying thresh peak at t=%r chan=%d'
                               % (wave.ts[ti], self.chans[chani]))

            # is this threshold-exceeding peak locked out?
            tlockoutchani = lockouts[chani]
            if ti <= tlockoutchani:
                if DEBUG: self.log('peak is locked out')
                continue # skip to next peak

            # find all enabled chanis within inclnbh of chani, lockouts are checked later:
            chanis = self.inclnbhdi[chani]
            nchans = len(chanis)

            # get search window DT on either side of this peak, for checking sharpness
            t0i = max(ti-dti, 0) # check for lockouts a bit later
            t1i = ti+dti+1 # +1 makes it end inclusive, don't worry about slicing past end
            window = wave.data[chanis, t0i:t1i] # search window, might not be contig
            if DEBUG: self.log('searching window (%d, %d) on chans=%r'
                               % (wave.ts[t0i], wave.ts[t1i], list(self.chans[chanis])))

            # Collect peak-to-peak sharpness for all chans. Save max and adjacent sharpness
            # timepoints for each chan, and keep track of which of the two adjacent non locked
            # out peaks is the sharpest. Note that the localsharp array contains sharpness of
            # all local peaks, not just those that exceed threshold, as in peakis array.
            localsharp = sharp[chanis, t0i:t1i] # sliced the same way as window
            ppsharp = np.zeros(nchans, dtype=np.float32)
            maxsharpis = np.zeros(nchans, dtype=int)
            adjpeakis = np.zeros((nchans, 2), dtype=int)
            maxadjiis = np.zeros(nchans, dtype=int)
            continuepeaki = False # signal to skip to next peaki
            for cii in range(nchans):
                localpeakis, = np.where(localsharp[cii] != 0.0)
                # keep only non-locked out localpeakis on this channel:
                localpeakis = localpeakis[(t0i+localpeakis) > lockouts[chanis[cii]]]
                if len(localpeakis) == 0:
                    continue # localpeakis is empty
                lastpeakii = len(localpeakis) - 1
                maxsharpii = abs(localsharp[cii, localpeakis]).argmax()
                maxsharpi = localpeakis[maxsharpii]
                maxsharpis[cii] = maxsharpi
                # Get one adjacent peak to left and right each. Due to limits, either or
                # both may be identical to the max sharpness peak
                adjpeakis[cii] = localpeakis[[max(maxsharpii-1, 0), min(maxsharpii+1,
                                              lastpeakii)]]
                if localsharp[cii, maxsharpi] < 0:
                    maxadjii = localsharp[cii, adjpeakis[cii]].argmax() # look for +ve adj peak
                else:
                    maxadjii = localsharp[cii, adjpeakis[cii]].argmin() # look for -ve adj peak
                maxadjiis[cii] = maxadjii # save
                adjpi = adjpeakis[cii, maxadjii]
                if maxsharpi != adjpi:
                    ppsharp[cii] = localsharp[cii, maxsharpi] - localsharp[cii, adjpi]
                else: # monophasic spike, set ppsharp == sharpness of single peak:
                    ppsharp[cii] = localsharp[cii, maxsharpi]
                    if chanis[cii] == chani: # trigger chan is monophasic
                        # ensure ppsharp of monophasic spike >= Vppthresh**2/dt, ie ensure that
                        # its Vpp exceeds Vppthresh and has zero crossings on either side,
                        # with no more than dt between. Avoids excessively wide
                        # monophasic peaks from being considered as spikes:
                        if DEBUG: self.log("found monophasic spike")
                        if abs(ppsharp[cii]) < self.ppthresh[chani]**2 / dti:
                            continuepeaki = True
                            if DEBUG: self.log("peak wasn't sharp enough for a monophasic "
                                               "spike")
                            break # out of cii loop

            if continuepeaki:
                continue # skip to next peak

            # Choose chan with biggest ppsharp as maxchan and its sharpest peak as the primary
            # peak, check that these new chani and ti values are identical to the trigger
            # values in peakis, that the peak at [chani, ti] isn't locked out, that it falls
            # within cutrange, and that it meets both Vp and Vpp threshold criteria.

            oldchani, oldti = chani, ti # save
            maxcii = abs(ppsharp).argmax() # choose chan with sharpest peak as new maxchan
            chani = chanis[maxcii] # update maxchan
            maxsharpi = maxsharpis[maxcii] # choose sharpest peak of maxchan, absolute
            ti = t0i + maxsharpi # update ti

            # Search forward through peakis for a future (later) row that matches the
            # (potentially new) [chani, ti] calculated above based on sharpness of local
            # peaks. If that particular tuple is indeed coming up, it is therefore
            # thresh exceeding, and should be waited for. If not, don't wait for it. Something
            # that was thresh exceeding caused the trigger, but according to the sharpness
            # measure, this nearby [chani, ti] tuple is the best estimate of the
            # spatiotemporal origin of the trigger-causing event.
            newpeak_coming_up = (peakis[peaki+1:] == [ti, chani]).prod(axis=1).any()
            if chani != oldchani:
                if newpeak_coming_up:
                    if DEBUG:
                        self.log("triggered off peak on chan that isn't max ppsharpness for "
                                 "this event, pass on this peak and wait for the true "
                                 "sharpest peak to come later")
                    continue # skip to next peak
                else:
                    # update all variables that depend on chani that wouldn't otherwise be
                    # updated:
                    tlockoutchani = lockouts[chani]
                    chanis = self.inclnbhdi[chani]
                    nchans = len(chanis)

            if ti > oldti:
                if newpeak_coming_up:
                    if DEBUG:
                        self.log("triggered off early adjacent peak for this event, pass on "
                                 "this peak and wait for the true sharpest peak to come later")
                    continue # skip to next peak
                else:
                    # unlike chani, it seems there are no variables that depend on ti that
                    # wouldn't otherwise be updated:
                    pass

            if ti <= tlockoutchani: # sharpest peak is locked out
                if DEBUG: self.log('sharpest peak at t=%d chan=%d is locked out'
                                   % (wave.ts[ti], self.chans[chani]))
                continue # skip to next peak

            if not (cutrange[0] <= wave.ts[ti] <= cutrange[1]):
                # use %r since wave.ts[ti] is np.int64 and %d gives TypeError if > 2**31:
                if DEBUG: self.log("spike time %r falls outside cutrange for this searchblock "
                                   "call, discarding" % wave.ts[ti])
                continue # skip to next peak

            # check that Vp threshold is exceeded by at least one of the two sharpest peaks
            adjpi = adjpeakis[maxcii, maxadjiis[maxcii]]
            # relative to t0i, not necessarily in temporal order:
            maxchantis = np.array([maxsharpi, adjpi])
            # voltages of the two sharpest peaks, convert int16 to int64 to prevent overflow
            Vs = np.int64(window[maxcii, maxchantis])
            Vp = abs(Vs).max() # grab biggest peak
            if Vp < self.thresh[chani]:
                if DEBUG: self.log('peak at t=%d chan=%d and its adjacent peak are both '
                                   '< Vp=%f uV' % (wave.ts[ti], self.chans[chani], AD2uV(Vp)))
                continue # skip to next peak
            # check that the two sharpest peaks together exceed Vpp threshold:
            Vpp = abs(Vs[0] - Vs[1]) # Vs are of opposite sign, unless monophasic
            if Vpp == 0: # monophasic spike
                Vpp = Vp # use Vp as Vpp
            
            if Vpp < self.ppthresh[chani]:
            if DEBUG: self.log('peaks at t=%r chan=%d are < Vpp=%f uV'
                               % (wave.ts[[ti, t0i+adjpi]], self.chans[chani], AD2uV(Vpp)))
                continue # skip to next peak

            if DEBUG: self.log('found biggest thresh exceeding ppsharp at t=%d chan=%d'
                               % (wave.ts[ti], self.chans[chani]))

            # get new spatiotemporal neighbourhood, with full window,
            # align to -ve of the two sharpest peaks
            aligni = localsharp[maxcii, maxchantis].argmin()
            #oldti = ti # save
            ti = t0i + maxchantis[aligni] # new absolute time index to align to
            # cut new window
            oldt0i = t0i
            t0i = max(ti+twi[0], 0)
            t1i = min(ti+twi[1]+1, maxti) # end inclusive
            window = wave.data[chanis, t0i:t1i] # multichan data window, might not be contig
            maxcii, = np.where(chanis == chani)
            maxchantis += oldt0i - t0i # relative to new t0i
            tis = np.zeros((nchans, 2), dtype=int) # holds time indices for each lockchani
            tis[maxcii] = maxchantis

            # Pick corresponding peaks on other chans according to how close they are
            # to those on maxchan. Don't consider the sign of the peaks on each
            # chan, just their proximity in time. In other words, allow for spike
            # inversion across space:
            localsharp = sharp[chanis, t0i:t1i]
            peak0ti, peak1ti = maxchantis # primary and 2ndary peak tis of maxchan
            for cii in range(nchans):
                if cii == maxcii: # already set
                    continue
                localpeakis, = np.where(localsharp[cii] != 0.0)
                # keep only non-locked out localpeakis on this channel:
                localpeakis = localpeakis[(t0i+localpeakis) > lockouts[chanis[cii]]]
                if len(localpeakis) == 0: # localpeakis is empty
                    tis[cii] = maxchantis # use same tis as maxchan
                    continue
                lastpeakii = len(localpeakis) - 1
                # find peak on this chan that's temporally closest to primary peak on maxchan.
                # If two peaks are equally close, pick the sharpest one
                dt0is = abs(localpeakis-peak0ti)
                if (np.diff(dt0is) == 0).any(): # two peaks equally close, pick sharpest one
                    peak0ii = abs(localsharp[cii, localpeakis]).argmax()
                else:
                    peak0ii = dt0is.argmin()
                # save primary peak for this cii
                dt0i = dt0is[peak0ii]
                if dt0i > sdti: # too distant in time
                    tis[cii, 0] = peak0ti # use maxchan's primary peak ti
                else: # give it its own t0i
                    tis[cii, 0] = localpeakis[peak0ii]
                # save 2ndary peak for this cii
                if len(localpeakis) == 1: # monophasic, set 2ndary peak same as primary
                    tis[cii, 1] = tis[cii, 0]
                    continue
                if peak0ti <= peak1ti: # primary peak comes first (more common case)
                    peak1ii = min(peak0ii+1, lastpeakii) # 2ndary peak is 1 to the right
                else: # peak1ti < peak0ti, ie 2ndary peak comes first
                    peak1ii = max(peak0ii-1, 0) # 2ndary peak is 1 to the left
                dt1is = abs(localpeakis-peak1ti)
                dt1i = dt1is[peak1ii]
                if dt1i > sdti: # too distant in time
                    tis[cii, 1] = peak1ti # use maxchan's 2ndary peak ti
                else:
                    tis[cii, 1] = localpeakis[peak1ii]

            # based on maxchan (chani), find inclchanis, incltis, and inclwindow:
            inclchanis = self.inclnbhdi[chani]
            ninclchans = len(inclchanis)
            inclchans = self.chans[inclchanis]
            chan = self.chans[chani]
            inclchani = int(np.where(inclchans == chan)[0]) # != chani!
            inclciis = chanis.searchsorted(inclchanis)
            incltis = tis[inclciis]
            inclwindow = window[inclciis]

            if DEBUG: self.log("final window params: t0=%r, t1=%r, Vs=%r, peakts=\n%r"
                               % (wave.ts[t0i], wave.ts[t1i], list(AD2uV(Vs)),
                                  wave.ts[t0i+tis]))

            if self.extractparamsondetect:
                # Get Vpp at each inclchan's tis, use as spatial weights:
                # see core.rowtake() or util.rowtake_cy() for indexing explanation:
                w = np.float32(inclwindow[np.arange(ninclchans)[:, None], incltis])
                w = abs(w).sum(axis=1)
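                # np.arange(ninclchans)[:, None] broadcasts against incltis so that row i
                # of inclwindow is sampled at its own pair of time indices incltis[i],
                # yielding a (ninclchans, 2) array of peak voltages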
                x = self.siteloc[inclchanis, 0] # 1D array (row)
                y = self.siteloc[inclchanis, 1]
                params = weights2f(f, w, x, y, inclchani)
                if params is None: # presumably a non-localizable many-channel noise event
                    if DEBUG:
                        treject = intround(wave.ts[ti]) # nearest us
                        self.log("reject spike at t=%d based on fit params" % treject)
                    # no real need to lockout chans for a params-rejected spike
                    continue # skip to next peak

            # build up spike record:
            s = spikes[nspikes]
            # wave.ts might be floats, depending on sampfreq
            s['t'] = intround(wave.ts[ti]) # nearest us
            # leave each spike's chanis in sorted order, as they are in self.inclnbhdi,
            # important assumption used later on, like in sort.get_wave() and
            # Neuron.update_wave()
            ts = wave.ts[t0i:t1i] # potentially floats
            # use ts = np.arange(s['t0'], s['t1'], stream.tres) to reconstruct
            s['t0'], s['t1'] = intround(wave.ts[t0i]), intround(wave.ts[t1i]) # nearest us
            s['tis'][:ninclchans] = incltis # wrt t0i=0
            s['aligni'] = aligni # 0 or 1
            s['dt'] = intround(abs(ts[tis[maxcii, 0]] - ts[tis[maxcii, 1]])) # nearest us
            s['V0'], s['V1'] = AD2uV(Vs) # in uV
            s['Vpp'] = AD2uV(Vpp) # in uV
            s['chan'], s['chans'][:ninclchans], s['nchans'] = chan, inclchans, ninclchans
            s['chani'] = inclchani
            nt = inclwindow.shape[1] # isn't always full width if recording has gaps
            wavedata[nspikes, :ninclchans, :nt] = inclwindow

            if self.extractparamsondetect:
                # Save spatial fit params, and lockout only the channels within lockrx*sx
                # of the fit spatial location of the spike, up to a max of self.inclr.
                s['x0'], s['y0'], s['sx'], s['sy'] = params
                x0, y0 = s['x0'], s['y0']
                # lockout radius for this spike:
                lockr = min(self.lockrx*s['sx'], self.inclr) # in um
                # test y coords of inclchans in y array, ylockchaniis can be used to index
                # into x, y and inclchans:
                ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
                # test Euclid distance from x0, y0 for each ylockchanii, keeping only
                # those within lockr. Filter by value rather than deleting by position
                # with np.delete(), since deleting while iterating shifts the remaining
                # indices:
                lockchaniis = np.asarray([ i for i in ylockchaniis if
                                           dist((x[i], y[i]), (x0, y0)) <= lockr ],
                                         dtype=int)
                lockchans = inclchans[lockchaniis]
                lockchanis = inclchanis[lockchaniis]
                nlockchans = len(lockchans)
                s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
                # just for testing:
                #assert (lockchanis == self.chans.searchsorted(lockchans)).all()
                #assert (lockchaniis == chanis.searchsorted(lockchanis)).all()
            else: # in this case, the inclchans and lockchans fields are redundant
                s['lockchans'][:ninclchans], s['nlockchans'] = inclchans, ninclchans
                lockchanis = chanis
                lockchaniis = np.arange(ninclchans)

            # give each chan a distinct lockout, based on how each chan's
            # sharpest peaks line up with those of the maxchan. Respect existing lockouts:
            # on each of the relevant chans, keep whichever lockout ends last
            thislockout = t0i+tis.max(axis=1)[lockchaniis]
            lockouts[lockchanis] = np.max([lockouts[lockchanis], thislockout], axis=0)

            if DEBUG:
                self.log('lockouts=%r\nfor chans=%r' %
                        (list(wave.ts[lockouts[lockchanis]]),
                         list(self.chans[lockchanis])))
                self.log('*** found new spike %d: t=%d chan=%d (%d, %d)' %
                        (nspikes+self.nspikes, s['t'], chan, self.siteloc[chani, 0],
                         self.siteloc[chani, 1]))
            nspikes += 1

        # trim spikes and wavedata arrays down to size
        spikes.resize(nspikes, refcheck=False)
        wds = wavedata.shape
        wavedata.resize((nspikes, wds[1], wds[2]), refcheck=False)
        return spikes, wavedata
Exemple #27
0
    screenheight = e0.I['SCREENHEIGHTCM'] * e0.I['DEGPERCM']
    halfscreenwidth, halfscreenheight = screenwidth / 2, screenheight / 2
    moviewidth, movieheight = e0.s.widthDeg, e0.s.heightDeg  # deg
    halfmoviewidth, halfmovieheight = moviewidth / 2, movieheight / 2
    # manbar center relative to screen center:
    try:
        xorigDeg, yorigDeg = e0.s.xorigDeg, e0.s.yorigDeg
    except AttributeError:
        xorigDeg, yorigDeg = ORIGDEGS[name]
    # movie center position, wrt screen center, deg:
    xpos, ypos = e0.d.xposDeg + xorigDeg, e0.d.yposDeg + yorigDeg
    # x and y indices into frames, spanning range of movie pixels that were on-screen,
    # assumes (x,y) origin of each movie frame is at bottom left:
    mvicenter_wrt_leftscredge = halfscreenwidth + xpos
    leftscredge_wrt_leftmviedge = halfmoviewidth - mvicenter_wrt_leftscredge
    x0i = intround(leftscredge_wrt_leftmviedge / degpermoviepix)
    x1i = intround(
        (leftscredge_wrt_leftmviedge + screenwidth) / degpermoviepix)
    mvicenter_wrt_bottomscredge = halfscreenheight + ypos
    bottomscredge_wrt_bottommviedge = halfmovieheight - mvicenter_wrt_bottomscredge
    y0i = intround(bottomscredge_wrt_bottommviedge / degpermoviepix)
    y1i = intround(
        (bottomscredge_wrt_bottommviedge + screenheight) / degpermoviepix)
    frames = frames[:, y0i:y1i, x0i:x1i]
    print('xis: %d:%d, yis: %d:%d' % (x0i, x1i, y0i, y1i))
    print('movie shape:', frames.shape)

    # optic flow magnitudes, in deg/sec, one per frame interval:
    mot[name] = np.zeros(nframeintervals)
    con[name] = np.zeros(nframeintervals)
    dcon[name] = np.zeros(nframeintervals)
Exemple #28
0
F0, F1 = 0.2, 110  # Hz
P0, P1 = None, None
chanis = -1
width, tres = 10, 5  # sec
figsize = (3, 3)
XSCALE = 'log'
YLIMS = -64, -21

if width is None:  # window width
    width = LFPSPECGRAMWIDTH  # sec
if tres is None:  # window tres
    tres = LFPSPECGRAMTRES  # sec
assert tres <= width

SAMPFREQ = 1000  # Hz, should be the same for all LFPs
NFFT = intround(width * SAMPFREQ)
NOVERLAP = intround(NFFT - tres * SAMPFREQ)
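# e.g. with width=10 s, tres=5 s and SAMPFREQ=1000 Hz: NFFT = 10000 samples per window,
# NOVERLAP = 10000 - 5000 = 5000 samples of overlap between consecutive windows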


def plot_psd(datas, cs=None, ylims=None, titlestr=''):
    f = figure(figsize=figsize)
    if cs is None:
        cs = ['k'] * len(datas)  # assumed default: plot all traces in black
    for data, c in zip(datas, cs):
        data = filter.notch(data)[0]  # remove 60 Hz mains noise, as for SI calc
        # convert data from uV to mV; with default scale_by_freq, P is in mV^2/Hz:
        P, freqs = mpl.mlab.psd(data / 1e3,
                                NFFT=NFFT,
                                Fs=SAMPFREQ,
                                noverlap=NOVERLAP)
        # keep only freqs between F0 and F1:
        f0, f1 = F0, F1  # need to set different local names, since they're not read-only
Exemple #29
0
    def _buildLowpassMultiChanRecords(self):
        """Rearrange single channel lowpass records into multichannel lowpass records

        Here's the most correct way to do this, that doesn't assume records fall in any
        order whatsoever, whether channel (probe) order, or temporal order:
        1. Sort all records by time (should probably be a stable sort, so if they're already
        sorted, the order of records with identical timestamps won't change)
        2. find all the unique timestamp values for all the records
        3. For each unique timestamp, find all records that have it. Combine them into a
        single lpmc record. Then, make sure they're sorted by channel (probe number)

        The same should probably be done for highpass records too. The vast majority of the
        time, all the records will be in temporal and probe order, which would make things
        very quick. Do a test to see if this is indeed the case and can be taken advantage
        of. Assume that it's the case, and arrange records accordingly into lpmc records.
        Then check for temporal order across lpmc records, and for each lpmc record, check
        that its chans are in probe order (chan order).

        If either assumption is wrong (sorted by time, with records of identical timestamps
        sorted by Probe), should first sort by Probe, then (stable sort) by time, and then
        the assumptions will be correct.
        """

        try:  # check if any lowpass records exist
            self.lowpassrecords
        except AttributeError:
            return

        # get array of lowpass record timestamps
        rts = self.lowpassrecords['TimeStamp']

        re_sort = False
        if not (rts == np.sort(rts)).all():
            print("Lowpass records aren't in temporal order ")
            re_sort = True
        probes = self.lowpassrecords['Probe']
        uprobes = np.unique(probes)
        nchans = len(uprobes)
        if len(probes) % nchans != 0:
            raise RuntimeError("Lowpass probes have unequal record numbers")
        nrepeats = intround(len(probes) / nchans)
        if not (probes == np.tile(uprobes, nrepeats)).all():
            print(
                "Lowpass records aren't stored in regular alternating order of probes"
            )
            re_sort = True

        if re_sort:  # resort lowpass records by timestamp and Probe
            self.lowpassrecords.sort(order=['TimeStamp', 'Probe'])  # in-place
            # don't need to reassign new sorted array to rts or probes
            assert (rts == np.sort(rts)).all(), 'detected problems in file'
            assert (probes == np.tile(
                uprobes, nrepeats)).all(), 'detected problems in file'

        NumSamples = np.unique(self.lowpassrecords['NumSamples'])
        if len(NumSamples) > 1:
            raise RuntimeError(
                "Don't know how to deal with lowpass records that are of "
                "different lengths. NumSamples = %r" % NumSamples)

        self.nlowpassmultichanrecords = nrepeats
        self.lowpassmultichanrecords = np.empty(self.nlowpassmultichanrecords,
                                                dtype=LPMCRECORDDTYPE)

        probe = len(self.layoutrecords)
        lpmclayout = self.get_LowPassMultiChanLayout(uprobes, probe)
        self.layoutrecords.append(lpmclayout)

        self.lowpassmultichanrecords['TimeStamp'] = np.unique(rts)
        self.lowpassmultichanrecords['Probe'] = probe
        self.lowpassmultichanrecords[
            'NumSamples'] = NumSamples * lpmclayout.nchans
        self.lowpassmultichanrecords['lpreci'] = np.arange(
            0, self.nlowpassrecords, nchans)
Exemple #30
0
    def templates(self, chans='max', cindex='nidi'):
        """Plot cell templates in their polytrode layout. chans can be 'max', 'nneigh', 'all'.
        cindex can be 'nidi' or 'nid', but best to colour cells by nidi to maximize
        alternation."""
        from colour import CCBLACKDICT0, CCBLACKDICT1
        from matplotlib.collections import LineCollection

        HUMPERINCH = 80 # for setting figure size in inches
        VUMPERINCH = 160 # for setting figure size in inches
        USPERUM = 15
        UVPERUM = 3
        HBORDERUS = 50 # us, horizontal border around chans
        VBORDERUV = 150 # uV, vertical border around plots
        HBORDER = HBORDERUS / USPERUM
        VBORDER = VBORDERUV / UVPERUM
        BG = 'black'
        SCALE = 500, 100 # scalebar size in (us, uV)
        SCALE = SCALE[0]/USPERUM, SCALE[1]/UVPERUM # um
        SCALEXOFFSET = 2 # um
        SCALEYOFFSET = 4 # um

        if chans not in ['max', 'nneigh', 'all',]:
            raise ValueError('unknown chans arg %r' % chans)
        if cindex == 'nidi':
            ccdict = CCBLACKDICT0 # use nidi to maximize colour alternation
        elif cindex == 'nid':
            ccdict = CCBLACKDICT1 # use nid to have colours that correspond to those in spyke
        else:
            raise ValueError('unknown cindex arg %r' % cindex)

        # for mpl, convert probe chanpos to center bottom origin instead of center top,
        # i.e. invert the y values:
        chanpos = self.sort.chanpos.copy()
        maxy = chanpos[:, 1].max()
        for chan, (x, y) in enumerate(chanpos):
            chanpos[chan, 1] = maxy - y

        if chans == 'nneigh': # generate dict of nearest neighbours indexed by maxchan
            dm = core.eucd(chanpos) # distance matrix
            minspace = dm[dm!=0].min()
            rincl = minspace * 1.1 # inclusion radius
            nneighs = {}
            for maxchan, pos in enumerate(chanpos):
                d = dm[maxchan]
                nnchans = np.where(d < rincl)[0]
                nneighs[maxchan] = nnchans

        colxs = np.unique(chanpos[:, 0]) # unique column x positions, sorted
        rowys = np.unique(chanpos[:, 1]) # unique row y positions, sorted
        ncols = len(colxs)
        nrows = len(rowys)
        hspace = (colxs[-1]-colxs[0]) / (ncols-1)
        vspace = (rowys[-1]-rowys[0]) / (nrows-1)

        # setting figure size actually sets window size, including toolbar and statusbar
        figwidth = (ncols*hspace + 2*HBORDER) / HUMPERINCH # inches
        figheight = (nrows*vspace + 2*VBORDER) / VUMPERINCH # inches
        dpi = mpl.rcParams['figure.dpi']
        #figwidth = (ncols*hspace) / HUMPERINCH # inches
        #figheight = (nrows*vspace) / VUMPERINCH # inches
        figwidth = intround(figwidth * dpi) / dpi # inches, rounded to nearest pixel
        figheight = intround(figheight * dpi) / dpi # inches, rounded to nearest pixel
        figsize = figwidth, figheight
        f = pl.figure(figsize=figsize, facecolor=BG, edgecolor=BG)
        a = f.add_subplot(111)

        # plot chan lines? maybe just the vertical lines?
        #for pos in chanpos:

        tres = self.sort.tres # time resolution, in us
        nts = np.unique([ neuron.nt for neuron in self.alln.values() ])
        if len(nts) != 1:
            raise RuntimeError("Not all neuron templates have the same number of timepoints. "
                               "That's probably bad.")
        nt = nts[0]
        ts = np.arange(0, nt*tres, tres) # time values in us

        nids = sorted(self.alln)
        for nidi, nid in enumerate(nids):
            colour = ccdict[eval(cindex)]
            neuron = self.alln[nid]
            # ncs (neuron channels) should be 0-based channel IDs:
            if chans == 'max':
                ncs = [neuron.maxchan]
            elif chans == 'nneigh':
                ncs = nneighs[neuron.maxchan]
            elif chans == 'all':
                ncs = neuron.chans
            # exclude channels of data within neigh that are missing from wavedata
            ncs = [ nc for nc in ncs if nc in neuron.chans ]
            # indices into neuron.chans, use to index into wavedata:
            ncis = np.hstack([ np.where(neuron.chans == nc)[0] for nc in ncs ])
            #import pdb; pdb.set_trace()
            wavedata = neuron.wavedata[ncis]
            # much less efficient, but much simpler than spyke code:
            for c, wd in zip(ncs, wavedata):
                x = chanpos[c, 0] + ts / USPERUM # um
                y = chanpos[c, 1] + wd / UVPERUM # um
                a.plot(x, y, ls='-', marker=None, lw=1, c=colour)

        a.set_axis_bgcolor(BG)
        a.set_xlabel('')
        a.set_ylabel('')
        a.xaxis.set_ticks([])
        a.yaxis.set_ticks([]) # if displayed, y ticks would be distance from bottom chan

        a.set_xlim(colxs[0]-HBORDER, colxs[-1]+nt*tres/USPERUM+HBORDER) # um
        a.set_ylim(rowys[0]-VBORDER, rowys[-1]+VBORDER) # um

        # add scale bars:
        r, b = a.get_xlim()[1]-SCALEXOFFSET, a.get_ylim()[0]+SCALEYOFFSET # um
        hbar = (r-SCALE[0], b), (r, b) # um
        vbar = (r, b), (r, b+SCALE[1]) # um
        scale = LineCollection([hbar, vbar], lw=1, colors='white', zorder=-1,
                               antialiased=True, visible=True)
        a.add_collection(scale) # add to axes' pool of LCs

        f.tight_layout(pad=0)
        #f.canvas.toolbar.hide()
        #f.canvas.window().statusBar().hide()
        f.canvas.set_window_title(lastcmd())
Exemple #31
0
    def __call__(self, start, stop, chans=None):
        """Called when Stream object is called using (). start and stop indicate start and end
        timepoints in us wrt t=0. Returns the corresponding WaveForm object with just the
        specified chans"""
        if chans is None:
            chans = self.chans
        if not set(chans).issubset(self.chans):
            raise ValueError("requested chans %r are not a subset of available enabled "
                             "chans %r in %s stream" % (chans, self.chans, self.kind))
        nchans = len(chans)
        rawtres = self.rawtres # float us
        resample = self.sampfreq != self.rawsampfreq or self.shcorrect
        if resample:
            # excess data in us at either end, to eliminate interpolation distortion at
            # key.start and key.stop
            xs = KERNELSIZE * rawtres # float us
        else:
            xs = 0.0
        # stream limits, in sample indices:
        t0i = intround(self.t0 / rawtres)
        t1i = intround(self.t1 / rawtres)
        # get a slightly greater range of raw data (with xs) than might be needed:
        t0xsi = intfloor((start - xs) / rawtres) # round down to nearest mult of rawtres
        t1xsi = intceil((stop + xs) / rawtres) # round up to nearest mult of rawtres
        # stay within stream limits, thereby avoiding interpolation edge effects:
        t0xsi = max(t0xsi, t0i)
        t1xsi = min(t1xsi, t1i)
        # convert back to nearest float us:
        t0xs = t0xsi * rawtres
        t1xs = t1xsi * rawtres
        # these are slice indices, so don't add 1:
        ntxs = t1xsi - t0xsi # int
        tsxs = np.linspace(t0xs, t0xs+(ntxs-1)*rawtres, ntxs)
        #print('ntxs: %d' % ntxs)

        # init data as int32 so we have bitwidth to rescale and zero, convert to int16 later
        dataxs = np.zeros((nchans, ntxs), dtype=np.int32) # any gaps will have zeros

        # Find all contiguous tranges that t0xs and t1xs span, if any. Note that this
        # can now deal with case where len(trangeis) > 1. Test by asking for a slice
        # longer than any one trange or gap between tranges, like by calling:
        # >>> self.hpstream(201900000, 336700000)
        # on file ptc15.74.
        trangeis, = np.where((self.tranges[:, 0] <= t1xs) & (t0xs < self.tranges[:, 1]))
        tranges = []
        if len(trangeis) > 0:
            tranges = self.tranges[trangeis]
        #print('tranges:'); print(tranges)
        # collect relevant records from spanned tranges, if any:
        records = []
        for trange in tranges:
            trrec0i, trrec1i = self.records['TimeStamp'].searchsorted(trange)
            trrecis = np.arange(trrec0i, trrec1i)
            trrts = self.records['TimeStamp'][trrecis]
            trrecs = self.records[trrecis]
            rec0i, rec1i = trrts.searchsorted([t0xs, t1xs])
            rec0i = max(rec0i-1, 0)
            recis = np.arange(rec0i, rec1i)
            records.append(trrecs[recis])
        if len(records) > 0:
            records = np.concatenate(records)

        # load up data+excess, from all relevant records
        # TODO: fix code duplication
        #tload = time.time()
        if self.kind == 'highpass': # straightforward
            chanis = self.layout.ADchanlist.searchsorted(chans)
            for record in records: # iterating over highpass records
                d = self.f.loadContinuousRecord(record)[chanis] # record's data on chans
                nt = d.shape[1]
                t0i = intround(record['TimeStamp'] / rawtres)
                t1i = t0i + nt
                # source indices
                st0i = max(t0xsi - t0i, 0)
                st1i = min(t1xsi - t0i, nt)
                # destination indices
                dt0i = max(t0i - t0xsi, 0)
                dt1i = min(t1i - t0xsi, ntxs)
                dataxs[:, dt0i:dt1i] = d[:, st0i:st1i]
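                # e.g. if a record starts before t0xsi (t0i < t0xsi), st0i > 0 skips the
                # record's leading samples and dt0i == 0 writes from the start of dataxs;
                # gaps between records are simply left as zeros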
        else: # kind == 'lowpass', need to load chans from subsequent records
            chanis = [ int(np.where(chan == self.layout.chans)[0]) for chan in chans ]
            """NOTE: if the above raises an error it may be because this particular
            combination of LFP chans was incorrectly parsed due to a bug in the .srf file,
            and a manual remapping needs to be added to Surf.File.fixLFPlabels()"""
            # assume all lpmc records are same length:
            nt = intround(records[0]['NumSamples'] / self.nADchans)
            d = np.zeros((nchans, nt), dtype=np.int32)
            for record in records: # iterating over lowpassmultichan records
                for i, chani in enumerate(chanis):
                    lprec = self.f.lowpassrecords[record['lpreci']+chani]
                    d[i] = self.f.loadContinuousRecord(lprec)
                t0i = intround(record['TimeStamp'] / rawtres)
                t1i = t0i + nt
                # source indices
                st0i = max(t0xsi - t0i, 0)
                st1i = min(t1xsi - t0i, nt)
                # destination indices
                dt0i = max(t0i - t0xsi, 0)
                dt1i = min(t1i - t0xsi, ntxs)
                dataxs[:, dt0i:dt1i] = d[:, st0i:st1i]
        #print('record.load() took %.3f sec' % (time.time()-tload))

        # bitshift left to scale 12 bit values to use the full 16 bit dynamic range, same
        # as multiplying by 2**(16-12) == 16. This provides more fidelity for
        # interpolation, and reduces uV per AD unit to about 0.02:
        dataxs <<= 4 # data is still int32 at this point

        # do any resampling if necessary:
        if resample:
            #tresample = time.time()
            dataxs, tsxs = self.resample(dataxs, tsxs, chans)
            #print('resample took %.3f sec' % (time.time()-tresample))

        # now trim down to just the requested time range:
        lo, hi = tsxs.searchsorted([start, stop])
        data = dataxs[:, lo:hi]
        ts = tsxs[lo:hi]

        # should be safe to convert back down to int16 now:
        data = np.int16(data)
        return WaveForm(data=data, ts=ts, chans=chans)
Exemple #32
0
    def __call__(self, start, stop, chans=None):
        """Called when Stream object is called using (). start and stop indicate start and end
        timepoints in us wrt t=0. Returns the corresponding WaveForm object with just the
        specified chans"""
        if chans is None:
            chans = self.chans
        if not set(chans).issubset(self.chans):
            raise ValueError("requested chans %r are not a subset of available enabled "
                             "chans %r in %s stream" % (chans, self.chans, self.kind))
        nchans = len(chans)
        chanis = self.f.fileheader.chans.searchsorted(chans)

        rawtres = self.rawtres
        resample = self.sampfreq != self.rawsampfreq or self.shcorrect
        # excess data in us at either end, to eliminate filtering and interpolation
        # edge effects:
        #print('NSXXSPOINTS: %d' % NSXXSPOINTS)
        xs = intround(NSXXSPOINTS * rawtres)
        #print('xs: %d, rawtres: %g' % (xs, rawtres))

        # stream limits, in us and in sample indices, wrt t=0 and sample=0:
        t0, t1, nt = self.t0, self.t1, self.f.nt
        t0i, t1i = self.f.t0i, self.f.t1i
        # get a slightly greater range of raw data (with xs) than might be needed:
        t0xsi = intfloor((start - xs) / rawtres) # round down to nearest mult of rawtres
        t1xsi = intceil((stop + xs) / rawtres) # round up to nearest mult of rawtres
        # stay within stream limits, thereby avoiding interpolation edge effects:
        t0xsi = max(t0xsi, t0i)
        t1xsi = min(t1xsi, t1i)
        # convert back to nearest float us:
        t0xs = t0xsi * rawtres
        t1xs = t1xsi * rawtres
        # these are slice indices, so don't add 1:
        ntxs = t1xsi - t0xsi # int
        tsxs = np.linspace(t0xs, t0xs+(ntxs-1)*rawtres, ntxs)
        #print('ntxs: %d' % ntxs)

        # init data as int32 so we have bitwidth to rescale and zero, then convert to int16
        dataxs = np.zeros((nchans, ntxs), dtype=np.int32) # any gaps will have zeros

        #tload = time.time()
        # load up data+excess, same data for high and low pass, difference will only be in the
        # filtering. It would be convenient to immediately subsample to get lowpass, but that's
        # not a valid thing to do: you can only subsample after filtering.
        # source indices:
        st0i = max(t0xsi - t0i, 0)
        st1i = min(t1xsi - t0i, nt)
        assert st1i-st0i == ntxs
        # destination indices:
        dt0i = max(t0i - t0xsi, 0)
        dt1i = min(t1i - t0xsi, ntxs)
        dataxs[:, dt0i:dt1i] = self.f.data[chanis, st0i:st1i]
        #print('data load took %.3f sec' % (time.time()-tload))

        #print('filtmeth: %s' % self.filtmeth)
        if self.filtmeth is None:
            pass
        elif self.filtmeth == 'BW':
            # high-pass filter using butterworth filter:
            dataxs, b, a = filterord(dataxs, sampfreq=self.rawsampfreq, f0=BWF0, f1=None,
                                     order=BWORDER, rp=None, rs=None,
                                     btype='highpass', ftype='butter')
        elif self.filtmeth == 'WMLDR':
            # high-pass filter using wavelet multi-level decomposition and reconstruction:
            ## TODO: fix weird slow wobbling of amplitude as a function of exactly what
            ## the WMLDR filtering time range happens to be. Setting a much bigger xs
            ## helps, but only until you move xs amount of time away from the start of
            ## the recording
            dataxs = WMLDR(dataxs)
        else:
            raise ValueError('unknown filter method %s' % self.filtmeth)

        # do any resampling if necessary:
        if resample:
            #tresample = time.time()
            dataxs, tsxs = self.resample(dataxs, tsxs, chans)
            #print('resample took %.3f sec' % (time.time()-tresample))

        #nresampletxs = len(tsxs)
        #print('ntxs, nresampletxs: %d, %d' % (ntxs, nresampletxs))
        #assert ntxs == len(tsxs)

        # now trim down to just the requested time range:
        lo, hi = tsxs.searchsorted([start, stop])
        data = dataxs[:, lo:hi]
        ts = tsxs[lo:hi]

        #print(0, lo, hi, nresampletxs)

        # should be safe to convert back down to int16 now:
        data = np.int16(data)
        return WaveForm(data=data, ts=ts, chans=chans)
Exemple #33
0
    def resample(self, rawdata, rawts, chans):
        """Return potentially sample-and-hold corrected and Nyquist interpolated
        data and timepoints. See Blanche & Swindale, 2006"""
        #print('sampfreq, rawsampfreq, shcorrect = (%r, %r, %r)' %
        #      (self.sampfreq, self.rawsampfreq, self.shcorrect))
        rawtres = self.rawtres # float us
        tres = self.tres # float us
        if self.sampfreq % self.rawsampfreq != 0:
            raise ValueError('only integer multiples of rawsampfreq allowed for interpolated '
                             'sampfreq')
        # resample factor: n output resampled points per input raw point:
        resamplex = intround(self.sampfreq / self.rawsampfreq)
        assert resamplex >= 1, 'no decimation allowed'
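        # e.g. rawsampfreq=25 kHz and sampfreq=50 kHz gives resamplex=2: one interpolated
        # point in between each pair of raw points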
        N = KERNELSIZE
        #print('N = %d' % N)

        # generate kernels if necessary:
        try:
            self.kernels
        except AttributeError:
            ADchans = None
            if self.shcorrect:
                ADchans = self.layout.ADchanlist
            self.kernels = self.get_kernels(resamplex, N, chans, ADchans=ADchans)

        # convolve the data with each kernel
        nrawts = len(rawts)
        nchans = len(chans)
        # all the interpolated points have to fit in between the existing raw
        # points, so there are nrawts - 1 interpolated points:
        #nt = nrawts + (resamplex - 1) * (nrawts - 1)
        # the above can be simplified to:
        nt = nrawts*resamplex - resamplex + 1
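        # e.g. nrawts=4 raw points with resamplex=4 gives nt = 4*4 - 4 + 1 = 13 points:
        # the 4 raw points plus 3 interpolated points in each of the 3 intervals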
        tstart = rawts[0]
        # generate interpolated timepoints, use intfloor in case tres is a float, otherwise
        # arange might give one too many timepoints:
        #ts = np.arange(tstart, intfloor(tstart+tres*nt), tres)
        # safer to use linspace in case of float tres: deals with endpoints better and gives
        # slightly more accurate output float timestamps:
        ts = np.linspace(tstart, tstart+(nt-1)*tres, nt)
        #print('len(ts) is %r' % len(ts))
        assert len(ts) == nt
        # resampled data, leave as int32 for convolution, then convert to int16:
        data = np.empty((nchans, nt), dtype=np.int32)
        #print('data.shape = %r' % (data.shape,))
        #tconvolve = time.time()
        #tconvolvesum = 0
        # Only the chans that are actually needed are resampled and returned.
        # Assume that chans index into ADchans. Normally they should map 1 to 1, ie chan 0
        # taps off of ADchan 0, but for probes like pt16a_HS27 and pt16b_HS27, it seems
        # ADchans start at 4.
        for chani, chan in enumerate(chans):
            for point, kernel in enumerate(self.kernels[chan]):
                """np.convolve(a, v, mode)
                for mode='same', only the K middle values are returned starting at n = (M-1)/2
                where K = len(a)-1 and M = len(v) - 1 and K >= M
                for mode='valid', you get the middle len(a) - len(v) + 1 number of values"""
                #tconvolveonce = time.time()
                row = np.convolve(rawdata[chani], kernel, mode='same')
                #tconvolvesum += (time.time()-tconvolveonce)
                #print('len(rawdata[chani]) = %r' % len(rawdata[chani]))
                #print('len(kernel) = %r' % len(kernel))
                #print('len(row): %r' % len(row))
                # interleave by assigning from point to end in steps of resamplex
                # index to start filling data from for this kernel's points:
                ti0 = (resamplex - point) % resamplex
                # index of first data point to use from convolution result 'row':
                rowti0 = int(point > 0)
                # discard the first data point from interpolant's convolutions, but not for
                # raw data's convolutions, since interpolated values have to be bounded on both
                # sides by raw values?
                data[chani, ti0::resamplex] = row[rowti0:]
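                # e.g. resamplex=2: the raw kernel (point=0) gives ti0=0 and fills even
                # output slots using all of row; the interpolant (point=1) gives ti0=1,
                # drops row[0], and fills the odd slots in between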
        #print('convolve loop took %.3f sec' % (time.time()-tconvolve))
        #print('convolve calls took %.3f sec total' % (tconvolvesum))
        #tundoscaling = time.time()
        data >>= 16 # undo kernel scaling, shift 16 bits right in place, same as //= 2**16
        #print('undo kernel scaling took %.3f sec total' % (time.time()-tundoscaling))
        return data, ts
Exemple #34
0
    def pospdf(self,
               neurons='all',
               dim='y',
               edges=None,
               nbins=10,
               stats=False,
               labels=True,
               a=None,
               figsize=(7.5, 6.5)):
        """Plot PDF of cell positions ('x' or 'y') along the polytrode
        to get an idea of how cells are distributed in space"""
        if neurons == 'all':
            neurons = list(self.alln.values())
        elif neurons == 'quiet':
            neurons = list(self.qn.values())
        elif neurons == 'active':
            neurons = list(self.n.values())
        dimi = {'x': 0, 'y': 1}[dim]
        p = [n.pos[dimi] for n in neurons]  # all position values
        if edges is not None:
            nbins = len(edges) - 1
            bins = edges  # assume it includes rightmost bin edge
        else:
            nbins = max(nbins, 2 * intround(np.sqrt(self.nneurons)))
            bins = nbins
        n, p = np.histogram(p, bins=bins)  # p includes rightmost bin edge
        binwidth = p[1] - p[0]  # take width of first bin in p

        if stats:
            mean = np.mean(p)
            median = np.median(p)
            argmode = n.argmax()
            mode = p[argmode] + binwidth / 2  # middle of tallest bin
            stdev = np.std(p)

        if a is None:
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
        else:  # add to existing axes
            a.hold(True)
            f = pl.gcf()

        # use CCWHITEDICT1 for familiarity with len 10 1-based id to colour mapping
        #color = CCWHITEDICT1[int(self.id)]
        color = 'k'

        # exclude rightmost bin edge in p
        a.bar(left=p[:-1],
              height=n,
              width=binwidth,
              bottom=0,
              color=color,
              ec=color)
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if labels:
            a.set_title(titlestr)
            a.set_xlabel('neuron %s position (um)' % dim)
            a.set_ylabel('neuron count')

        if stats:
            # add stuff to top right of plot:
            uns = get_ipython().user_ns
            a.text(0.99,
                   0.99,
                   'mean = %.3f\n'
                   'median = %.3f\n'
                   'mode = %.3f\n'
                   'stdev = %.3f\n'
                   'minrate = %.2f Hz\n'
                   'nneurons = %d\n'
                   'dt = %d min' % (mean, median, mode, stdev, uns['MINRATE'],
                                    self.nneurons, intround(self.dtmin)),
                   transform=a.transAxes,
                   horizontalalignment='right',
                   verticalalignment='top')
        f.tight_layout(pad=0.3)  # crop figure to contents
        f.canvas.draw()  # this is needed if a != None when passed as arg
        return a
Exemple #35
0
    def load(self):
        f = open(self.path, 'rb')
        self.din = np.fromfile(f, dtype=np.int64).reshape(-1, 2) # reshape to nrows x 2 cols
        f.close()
        try:
            txthdrpath = rstrip(self.path, '.din') + '.textheader'
            f = open(txthdrpath, 'rU') # use universal newline support
            self.textheader = f.read() # read it all in
            f.close()
        except IOError:
            warn("couldn't load text header associated with '%s'" % self.name)
            self.textheader = '' # set to empty

        treestr = self.level*TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)

        if self.textheader != '':
            # comment out all lines starting with "from dimstim"
            self.textheader = self.textheader.replace('from dimstim', '#from dimstim')
            names1 = locals().copy() # namespace before execing the textheader
            exec(self.textheader)
            names2 = locals().copy() # namespace after
            # names that were added to the namespace, excluding the 'names1' name itself:
            newnames = [ n2 for n2 in names2 if n2 not in names1 and n2 != 'names1' ]
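            # NOTE: relying on exec() updating locals() is Python 2 behaviour; under
            # Python 3, a safer variant (sketch, same textheader contents assumed) is to
            # exec into an explicit namespace dict instead:
            #   ns = {}
            #   exec(self.textheader, ns)
            #   newnames = [ n for n in ns if not n.startswith('__') ]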
            try:
                # dimstim up to Cat 15 didn't have a version, neither did NVS display
                self.__version__ = eval('__version__')
            except NameError:
                self.__version__ = 0.0
            if self.__version__ >= 0.16: # after major refactoring of dimstim
                for newname in newnames:
                    # bind each variable in the textheader as an attrib of self
                    self.__setattr__(newname, eval(newname))
                self.sweeptable = SweepTable(experiment=self.e) # build the sweep table
                self.st = self.sweeptable.data # synonym, used a lot by experiment subclasses
                # this doesn't work for textheaders generated by dimstim 0.16, since
                # xorigDeg and yorigDeg were accidentally omitted from all the experiment
                # scripts and hence the textheaders too:
                '''
                self.e.xorig = deg2pix(self.e.static.xorigDeg, self.I) + self.I.SCREENWIDTH / 2
                self.e.yorig = deg2pix(self.e.static.yorigDeg, self.I) + self.I.SCREENHEIGHT / 2
                '''
                self.REFRESHTIME = intround(1 / float(self.I.REFRESHRATE) * 1000000) # us
                # prevent replication of movie frame data in memory
                if type(self.e) == Movie:
                    fname = os.path.split(self.e.static.fname)[-1] # pathless fname
                    if fname not in _MOVIES:
                        # add movie experiment, indexed according to movie data file name,
                        # to prevent from ever loading its frames more than once
                        _MOVIES[fname] = self.e
            else:
                self.oldparams = dictattr()
                for newname in newnames:
                    # bind each variable in the textheader to oldparams
                    self.oldparams[newname] = eval(newname)
                self.loadCat15exp()
        else:
            # use the time difference between the first two din instead
            self.REFRESHTIME = self.din[1, 0] - self.din[0, 0]

        # add an extra refresh time after last din, that's when screen actually turns off
        self.trange = (self.din[0, 0], self.din[-1, 0] + self.REFRESHTIME)
Exemple #36
0
    def cch(self,
            nid0,
            nid1=None,
            trange=50,
            binw=None,
            shift=None,
            nshifts=10,
            rate=False,
            norm=False,
            c='k',
            title=True,
            figsize=(7.5, 6.5)):
        """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
        If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
        (in ms) is set, calculate the average of +/- nshifts CCHs shifted by shift, and
        then subtract that from the unshifted CCH to get the shift-corrected CCH"""
        if nid1 is None:
            nid1 = nid0
        autocorr = nid0 == nid1
        n0 = self.alln[nid0]
        n1 = self.alln[nid1]
        calctrange = trange * 1000  # calculation trange, in us
        if shift:
            assert nshifts > 0
            shift *= 1000  # convert to us
            maxshift = nshifts * shift
            calctrange += maxshift  # expand calculated trange (us) to encompass shifts
        calctrange = np.array([-calctrange,
                               calctrange])  # convert to a +/- array, in us
        dts = util.xcorr(n0.spikes, n1.spikes, calctrange)  # in us
        if autocorr:
            dts = dts[dts != 0]  # remove 0s for autocorr
        if shift:  # calculate dts for shift corrector
            shiftis = list(range(-nshifts, nshifts + 1))
            # don't shift by 0, that's the original which we'll subtract from:
            shiftis.remove(0)
            shifts = np.asarray(shiftis) * shift
            shiftdts = np.hstack([dts + s for s in shifts])  # in us
            print('shifts =', shifts / 1000)

        if not binw:
            nbins = intround(np.sqrt(len(dts)))  # good heuristic
            nbins = max(20, nbins)  # enforce min nbins
            nbins = min(200, nbins)  # enforce max nbins
        else:
            nbins = intround(2 * trange / binw)

        dts = dts / 1000  # in ms, converts to float64 array
        t = np.linspace(start=-trange,
                        stop=trange,
                        num=nbins + 1,
                        endpoint=True)  # ms
        binw = t[1] - t[0]  # all should be equal width, ms
        n = np.histogram(dts, bins=t, density=False)[0]
        if shift:  # subtract shift corrector
            shiftdts = shiftdts / 1000  # in ms, converts to float64 array
            shiftn = np.histogram(shiftdts, bins=t,
                                  density=False)[0] / (nshifts * 2)
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
            a.bar(left=t[:-1], height=shiftn,
                  width=binw)  # omit last right edge in t
            a.set_xlim(t[0], t[-1])
            a.set_xlabel('spike interval (ms)')
            n = n - shiftn  # subtracting the float corrector converts n to float
        if norm:  # normalize and convert to float:
            n = n / n.max()
        elif rate:  # normalize by binw and convert to float:
            n = n / binw
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(left=t[:-1], height=n, width=binw, color=c,
              ec=c)  # omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        if norm:
            a.set_ylabel('coincidence rate (AU)')
            a.set_yticks([0, 1])
        elif rate:
            a.set_ylabel('coincidence rate (Hz)')
        else:
            a.set_ylabel('count')
        if title:
            a.set_title('spike times of n%d wrt n%d' % (nid1, nid0))
        wtitlestr = lastcmd()  # + ', binw=%.1f ms' % binw
        gcfm().window.setWindowTitle(wtitlestr)
        f.tight_layout(pad=0.3)  # crop figure to contents
Exemple #37
0
    def loadCat15exp(self):
        ## TODO: - fake a .e dimstim.Experiment object, to replace what used to be the
        ## .stims object for movie experiments
        '''
           - need to convert sweeptimeMsec to sweepSec
           - assert len(self.experiment.stims) == 1
           - self.movie = self.experiment.stims[0]
           - self.movie.load() # ensure the movie's data is loaded

            if self.movie.oname == 'mseq32':
                frameis = frameis[frameis != 65535] # remove all occurrences of 65535
            elif self.movie.oname == 'mseq16':
                frameis = frameis[frameis != 16383] # remove all occurrences of 16383
        '''
        # Add .static and .dynamic params to fake dimstim experiment
        self.e = dictattr()
        self.I = dictattr() # fake InternalParams object
        self.e.static = dictattr() # fake StaticParams object
        self.e.dynamic = dictattr() # fake DynamicParams object
        # maps Cat 15 param names to dimstim 0.16 param types and names, wherever possible
        ## TODO: fill in params for experiment types other than Movie??
        _15to16 = {'EYE': ('I', 'EYE'),
                   'PIXPERCM': ('I', 'PIXPERCM'),
                   'REFRESHRATE': ('I', 'REFRESHRATE'),
                   'SCREENDISTANCECM': ('I', 'SCREENDISTANCECM'),
                   'SCREENHEIGHT': ('I', 'SCREENHEIGHT'),
                   'SCREENHEIGHTCM': ('I', 'SCREENHEIGHTCM'),
                   'SCREENWIDTH': ('I', 'SCREENWIDTH'),
                   'SCREENWIDTHCM': ('I', 'SCREENWIDTHCM'),

                   'fname': ('static', 'fname'),
                   'preexpSec': ('static', 'preexpSec'),
                   'postexpSec': ('static', 'postexpSec'),
                   'orioff': ('static', 'orioff'),
                   'regionwidthDeg': ('static', 'widthDeg'),
                   'regionheightDeg': ('static', 'heightDeg'),
                   'mask': ('static', 'mask'),
                   'diameterDeg': ('static', 'diameterDeg'),
                   'GAMMA': ('static', 'gamma'),

                   'framei': ('dynamic', 'framei'),
                   'ori': ('dynamic', 'ori'),
                   'polarity': ('dynamic', 'invert'),
                   'bgbrightness': ('dynamic', 'bgbrightness'),
                   'sweeptimeMsec': ('dynamic', 'sweepSec'),
                   'postsweepMsec': ('dynamic', 'postsweepSec'),
                   }

        # collect any Cat 15 movie attribs and add them to self.oldparams
        try:
            # can't really handle more than 1 movie, since dimstim 0.16 doesn't
            assert len(np.unique(self.oldparams.playlist)) == 1
            # bind it, movie was the only possible stim object anyway in Cat 15
            self.movie = self.oldparams.playlist[0]
            # returns dict of name:val pair attribs excluding __ and methods:
            movieparams = self.oldparams[self.movie.oname].__dict__
            self.oldparams.update(movieparams)
        except AttributeError:
            # no playlist, no movies, and therefore no movie attribs to deal with
            pass

        # convert Cat 15 params to dimstim 0.16
        for oldname, val in self.oldparams.items():
            if 'msec' in oldname.lower():
                val = val / 1000. # convert to sec
            elif oldname == 'polarity':
                val = bool(val) # convert from 0/1 to boolean
            if oldname == 'origDeg': # split old origDeg into new separate xposDeg and yposDeg
                self.e.dynamic.xposDeg = val[0]
                self.e.dynamic.yposDeg = val[1]
            else:
                try:
                    paramtype, newname = _15to16[oldname]
                    if paramtype == 'I':
                        # bind InternalParams directly to self, not to self.e:
                        self.I[newname] = val
                    self.e[paramtype][newname] = val
                except KeyError: # oldname doesn't have a newname equivalent
                    pass

        try:
            m = self.movie
        except AttributeError:
            m = None

        if m:
            # make fake dimstim experiment a Cat15Movie object, bind all of the attribs of
            # the existing fake dimstim experiment
            old_e = self.e
            self.e = m
            for name, val in old_e.__dict__.items():
                # bind each variable in the textheader as an attrib of self
                self.e.__setattr__(name, val)
            # deal with movie filename:
            # didn't have a chance to pass this exp as the parent in the movie init,
            # so just set the attribute manually:
            m.e = self
            # if fname refers to a movie whose local name is different, rename it to match
            # the local movie name
            _old2new = {'mseq16.m': MSEQ16, 'mseq32.m': MSEQ32}
            try:
                m.fname = _old2new[m.fname]
            except KeyError:
                pass # old name not in _old2new, leave it be
            self.e.static.fname = m.fname # update fake dimstim experiment's fname too
            # extensionless fname, fname should've been defined in the textheader
            m.name = os.path.splitext(m.fname)[0]
            if m.name not in _MOVIES:
                # and it very well may not be, cuz the textheader inits movies with no args,
                # leaving fname==None at first, which prevents it from being added to
                # _MOVIES
                _MOVIES[m.name] = m # add m to _MOVIES dictattr
            # Search self.e.moviepath string (from textheader) for 'Movies' word. Everything
            # after that is the relative path to your base movies folder. Eg, if
            # self.e.moviepath = 'C:\\Desktop\\Movies\\reliability\\e\\single\\', then set
            # self.e.relpath = '\\reliability\\e\\single\\'
            spath = self.oldparams.moviepath.split('\\') # Cat 15 uses Windows separators
            matchi = spath.index('Movies')
            relpath = joinpath(spath[matchi+1:])
            MOVIEPATH = get_ipython().user_ns['MOVIEPATH']
            path = os.path.join(MOVIEPATH, relpath)
            m.fname = os.path.join(path, m.fname)
            self.e.static.fname = m.fname # update

            # Generate the sweeptable:
            # dict of lists, ie sweeptable={'ori':[0,45,90], 'sfreq':[1,1,1]}, so you index
            # into it with self.sweeptable['var'][sweepi]
            #self.sweeptable = {[]}
            #vars = self.sweeptable.keys()
            # need to check if varlist exists, if so use it (we're dealing with Cat 15),
            # if not, use revamped dimstim.SweepTable class
            varvals = {} # init a dictionary that will contain variable values
            for var in m.varlist:
                # generate dict with var:val entries, to pass to buildSweepTable
                varvals[var] = getattr(m, var)
            # pass varlist by reference, dim indices end up being modified:
            m.sweepTable = self.buildCat15SweepTable(m.varlist, varvals, m.nruns,
                                                     m.shuffleRuns, m.blankSweep,
                                                     m.shuffleBlankSweeps,
                                                     makeSweepTableText=0)[0]
        else: # this is a simple stim (not object oriented movie)
            varvals = {} # init a dictionary that will contain variable values
            for var in self.oldparams.varlist:
                # generate dict with var:val entries, to pass to buildSweepTable
                varvals[var] = getattr(self.oldparams, var)
            # pass varlist by reference, dim indices end up being modified:
            self.sweepTable = self.buildCat15SweepTable(self.oldparams.varlist, varvals,
                                                        self.oldparams.nruns,
                                                        self.oldparams.shuffleRuns,
                                                        self.oldparams.blankSweep,
                                                        self.oldparams.shuffleBlankSweeps,
                                                        makeSweepTableText=0)[0]
        try:
            self.REFRESHTIME = intround(1 / float(self.oldparams.REFRESHRATE) * 1000000) # us
        except AttributeError:
            pass
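
The comments above describe the sweeptable as a dict of equal-length lists, indexed as sweeptable[var][sweepi]. Below is a minimal sketch of that layout, using a hypothetical helper rather than the real buildCat15SweepTable (whose shuffle and blank-sweep handling is not reproduced here):

import itertools

def build_toy_sweeptable(varvals):
    """Cross all variable values into a flat dict-of-lists sweep table."""
    names = list(varvals)
    combos = list(itertools.product(*[varvals[name] for name in names]))
    return {name: [combo[i] for combo in combos] for i, name in enumerate(names)}

sweeptable = build_toy_sweeptable({'ori': [0, 45, 90], 'sfreq': [1]})
assert sweeptable == {'ori': [0, 45, 90], 'sfreq': [1, 1, 1]}
assert sweeptable['ori'][2] == 90 # index as sweeptable[var][sweepi]
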
from psth_funcs import get_psth_peaks_simple, plot_psth

rec = ptc22.tr1.r08
strange = REC2STATETRANGES[rec.absname][1]  # r08 synched, us
nids = [5, 23, 24]  # 3 example inactive yet responsive nids in ptc22.tr1.r08

EPS = np.spacing(1)  # epsilon, smallest representable non-zero number

BINW, TRES = 0.02, 0.0001  # PSTH time bins, sec
# 2.5 Hz thresh is 1 spike in the same 20 ms wide bin every 20 trials, assuming 0 baseline:
MINTHRESH = 3  # peak detection thresh, Hz
MEDIANX = 2  # PSTH median multiplier, Hz
FWFRACTION = 0.5  # full width fraction of max
WIDTHMAX = 200  # maximum width, ms
WIDTHMAXPOINTS = intround(WIDTHMAX / 1000 /
                          TRES)  # maximum width, number of PSTH timepoints

# plotting params:
PLOTPSTH = True
FIGSIZE = 3.14, 2
YMAX = 6  # Hz
YTICKS = 0, 3, 6
MS = 5

t, psths, spikets = rec.psth(nids=nids,
                             natexps=False,
                             strange=strange,
                             plot=False,
                             binw=BINW,
                             tres=TRES,
                             norm='ntrials')
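
The constants above configure PSTH peak detection. The sketch below is a hedged illustration of how they might combine in a simple peak test; the actual logic lives in get_psth_peaks_simple (imported above) and may differ in detail:

import numpy as np

def toy_psth_peak(psth):
    """Return (accepted, width in timepoints) for the tallest peak in a PSTH (Hz)."""
    baseline = MEDIANX * np.median(psth) # median-based baseline, Hz
    peaki = psth.argmax()
    if psth[peaki] < max(MINTHRESH, baseline):
        return False, 0 # nothing exceeds threshold
    level = FWFRACTION * psth[peaki] # height at which to measure full width
    above = psth >= level
    li = ri = peaki
    # expand to the edges of the contiguous suprathreshold region around the peak:
    while li > 0 and above[li-1]:
        li -= 1
    while ri < len(psth) - 1 and above[ri+1]:
        ri += 1
    width = ri - li + 1
    return width <= WIDTHMAXPOINTS, width # reject peaks wider than WIDTHMAX
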
Exemple #39
0
    def check_wave(self, wave, cutrange):
        """Check which threshold-exceeding peaks in wave data look like spikes
        and return only events that fall within cutrange. Search local spatiotemporal
        window around threshold-exceeding peak for biggest peak-to-peak sharpness.
        Finally, test that the sharpest peak and its neighbour exceed Vp and Vpp thresholds"""
        sort = self.sort
        AD2uV = sort.converter.AD2uV
        if self.extractparamsondetect:
            weights2f = sort.extractor.weights2spatial
            f = sort.extractor.f
        # holds, for each enabled chan, the time index up to which that chan is locked
        # out; updated on every found spike
        lockouts = np.zeros(self.nchans, dtype=np.int64)

        tsharp = time.time()
        sharp = util.sharpness2D(wave.data) # sharpness of all zero-crossing separated peaks
        info('%s: sharpness2D() took %.3f sec' %
             (ps().name, time.time() - tsharp))
        targthreshsharp = time.time()
        # threshold-exceeding peak indices (2D, columns are [tis, cis])
        peakis = util.argthreshsharp(wave.data, self.thresh, sharp)
        info('%s: argthreshsharp() took %.3f sec' %
             (ps().name, time.time() - targthreshsharp))

        maxti = len(wave.ts) - 1
        dti = self.dti
        twi = sort.twi
        sdti = dti // 2  # spatial dti: max dti allowed between maxchan and all other chans
        nspikes = 0
        npeaks = len(peakis)
        spikes = np.zeros(npeaks, self.SPIKEDTYPE) # nspikes will always be <= npeaks
        ## TODO: test whether np.empty or np.zeros is faster overall in this case
        wavedata = np.empty((npeaks, self.maxnchansperspike, self.maxnt),
                            dtype=np.int16)
        # check each threshold-exceeding peak for validity:
        for peaki, (ti, chani) in enumerate(peakis):
            if DEBUG:
                self.log('*** trying thresh peak at t=%r chan=%d' %
                         (wave.ts[ti], self.chans[chani]))

            # is this threshold-exceeding peak locked out?
            tlockoutchani = lockouts[chani]
            if ti <= tlockoutchani:
                if DEBUG: self.log('peak is locked out')
                continue  # skip to next peak

            # find all enabled chanis within inclnbh of chani, lockouts are checked later:
            chanis = self.inclnbhdi[chani]
            nchans = len(chanis)

            # get search window DT on either side of this peak, for checking sharpness
            t0i = max(ti - dti, 0)  # check for lockouts a bit later
            t1i = ti + dti + 1  # +1 makes it end inclusive, don't worry about slicing past end
            window = wave.data[chanis, t0i:t1i] # search window, might not be contig
            if DEBUG:
                self.log(
                    'searching window (%d, %d) on chans=%r' %
                    (wave.ts[t0i], wave.ts[t1i], list(self.chans[chanis])))

            # Collect peak-to-peak sharpness for all chans. Save max and adjacent sharpness
            # timepoints for each chan, and keep track of which of the two adjacent
            # non-locked-out peaks is the sharpest. Note that the localsharp array contains
            # the sharpness of all local peaks, not just the threshold-exceeding ones in the
            # peakis array.
            localsharp = sharp[chanis, t0i:t1i] # sliced the same way as window
            ppsharp = np.zeros(nchans, dtype=np.float32)
            maxsharpis = np.zeros(nchans, dtype=int)
            adjpeakis = np.zeros((nchans, 2), dtype=int)
            maxadjiis = np.zeros(nchans, dtype=int)
            continuepeaki = False  # signal to skip to next peaki
            for cii in range(nchans):
                localpeakis, = np.where(localsharp[cii] != 0.0)
                # keep only non-locked out localpeakis on this channel:
                localpeakis = localpeakis[(t0i + localpeakis) > lockouts[chanis[cii]]]
                if len(localpeakis) == 0:
                    continue  # localpeakis is empty
                lastpeakii = len(localpeakis) - 1
                maxsharpii = abs(localsharp[cii, localpeakis]).argmax()
                maxsharpi = localpeakis[maxsharpii]
                maxsharpis[cii] = maxsharpi
                # Get one adjacent peak to left and right each. Due to limits, either or
                # both may be identical to the max sharpness peak
                adjpeakis[cii] = localpeakis[[max(maxsharpii - 1, 0),
                                              min(maxsharpii + 1, lastpeakii)]]
                if localsharp[cii, maxsharpi] < 0:
                    maxadjii = localsharp[cii, adjpeakis[cii]].argmax() # look for +ve adj peak
                else:
                    maxadjii = localsharp[cii, adjpeakis[cii]].argmin() # look for -ve adj peak
                maxadjiis[cii] = maxadjii  # save
                adjpi = adjpeakis[cii, maxadjii]
                if maxsharpi != adjpi:
                    ppsharp[cii] = localsharp[cii, maxsharpi] - localsharp[cii, adjpi]
                else:  # monophasic spike, set ppsharp == sharpness of single peak:
                    ppsharp[cii] = localsharp[cii, maxsharpi]
                    if chanis[cii] == chani:  # trigger chan is monophasic
                        # ensure ppsharp of monophasic spike >= Vppthresh**2/dt, ie ensure
                        # that its Vpp exceeds Vppthresh and has zero crossings on either
                        # side with no more than dt between. This prevents excessively
                        # wide monophasic peaks from being considered spikes:
                        if DEBUG: self.log("found monophasic spike")
                        if abs(ppsharp[cii]) < self.ppthresh[chani]**2 / dti:
                            continuepeaki = True
                            if DEBUG:
                                self.log(
                                    "peak wasn't sharp enough for a monophasic "
                                    "spike")
                            break  # out of cii loop

            if continuepeaki:
                continue  # skip to next peak

            # Choose chan with biggest ppsharp as maxchan and its sharpest peak as the primary
            # peak, check that these new chani and ti values are identical to the trigger
            # values in peakis, that the peak at [chani, ti] isn't locked out, that it falls
            # within cutrange, and that it meets both Vp and Vpp threshold criteria.

            oldchani, oldti = chani, ti  # save
            maxcii = abs(ppsharp).argmax() # choose chan with sharpest peak as new maxchan
            chani = chanis[maxcii] # update maxchan
            maxsharpi = maxsharpis[maxcii] # choose sharpest peak of maxchan, absolute
            ti = t0i + maxsharpi # update ti

            # Search forward through peakis for a future (later) row that matches the
            # (potentially new) [chani, ti] calculated above based on sharpness of local
            # peaks. If that particular tuple is indeed coming up, it is therefore
            # thresh exceeding, and should be waited for. If not, don't wait for it. Something
            # that was thresh exceeding caused the trigger, but this nearby [chani, ti]
            # tuple is, according to the sharpness measure, the best estimate of the
            # spatiotemporal origin of the trigger-causing event.
            newpeak_coming_up = (peakis[peaki+1:] == [ti, chani]).prod(axis=1).any()
            if chani != oldchani:
                if newpeak_coming_up:
                    if DEBUG:
                        self.log(
                            "triggered off peak on chan that isn't max ppsharpness for "
                            "this event, pass on this peak and wait for the true "
                            "sharpest peak to come later")
                    continue  # skip to next peak
                else:
                    # update all variables that depend on chani that wouldn't otherwise be
                    # updated:
                    tlockoutchani = lockouts[chani]
                    chanis = self.inclnbhdi[chani]
                    nchans = len(chanis)

            if ti > oldti:
                if newpeak_coming_up:
                    if DEBUG:
                        self.log(
                            "triggered off early adjacent peak for this event, pass on "
                            "this peak and wait for the true sharpest peak to come later"
                        )
                    continue  # skip to next peak
                else:
                    # unlike chani, it seems there are no variables that depend on ti that
                    # wouldn't otherwise be updated:
                    pass

            if ti <= tlockoutchani:  # sharpest peak is locked out
                if DEBUG:
                    self.log('sharpest peak at t=%d chan=%d is locked out' %
                             (wave.ts[ti], self.chans[chani]))
                continue  # skip to next peak

            if not (cutrange[0] <= wave.ts[ti] <= cutrange[1]):
                # use %r since wave.ts[ti] is np.int64 and %d gives TypeError if > 2**31:
                if DEBUG:
                    self.log(
                        "spike time %r falls outside cutrange for this searchblock "
                        "call, discarding" % wave.ts[ti])
                continue  # skip to next peak

            # check that Vp threshold is exceeded by at least one of the two sharpest peaks
            adjpi = adjpeakis[maxcii, maxadjiis[maxcii]]
            # relative to t0i, not necessarily in temporal order:
            maxchantis = np.array([maxsharpi, adjpi])
            # voltages of the two sharpest peaks, convert int16 to int64 to prevent overflow
            Vs = np.int64(window[maxcii, maxchantis])
            Vp = abs(Vs).max()  # grab biggest peak
            if Vp < self.thresh[chani]:
                if DEBUG:
                    self.log(
                        'peak at t=%d chan=%d and its adjacent peak are both '
                        '< Vp=%f uV' %
                        (wave.ts[ti], self.chans[chani], AD2uV(Vp)))
                continue  # skip to next peak
            # check that the two sharpest peaks together exceed Vpp threshold:
            Vpp = abs(Vs[0] - Vs[1]) # Vs are of opposite sign, unless monophasic
            if Vpp == 0:  # monophasic spike
                Vpp = Vp  # use Vp as Vpp

            if Vpp < self.ppthresh[chani]:
                if DEBUG:
                    self.log('peaks at t=%r chan=%d are < Vpp = %f' %
                             (wave.ts[[ti, t0i + adjpi
                                       ]], self.chans[chani], AD2uV(Vpp)))
                continue  # skip to next peak

            if DEBUG:
                self.log(
                    'found biggest thresh exceeding ppsharp at t=%d chan=%d' %
                    (wave.ts[ti], self.chans[chani]))

            # get new spatiotemporal neighbourhood, with full window,
            # align to -ve of the two sharpest peaks
            aligni = localsharp[maxcii, maxchantis].argmin()
            #oldti = ti # save
            ti = t0i + maxchantis[aligni] # new absolute time index to align to
            # cut new window
            oldt0i = t0i
            t0i = max(ti + twi[0], 0)
            t1i = min(ti + twi[1] + 1, maxti)  # end inclusive
            window = wave.data[chanis, t0i:t1i] # multichan data window, might not be contig
            maxcii, = np.where(chanis == chani)
            maxchantis += oldt0i - t0i  # relative to new t0i
            tis = np.zeros((nchans, 2), dtype=int) # holds time indices for each lockchani
            tis[maxcii] = maxchantis

            # Pick corresponding peaks on other chans according to how close they are to
            # those on maxchan. Don't consider the sign of the peaks on each chan, just
            # their proximity in time. In other words, allow for spike inversion across
            # space.
            localsharp = sharp[chanis, t0i:t1i]
            peak0ti, peak1ti = maxchantis  # primary and 2ndary peak tis of maxchan
            for cii in range(nchans):
                if cii == maxcii:  # already set
                    continue
                localpeakis, = np.where(localsharp[cii] != 0.0)
                # keep only non-locked out localpeakis on this channel:
                localpeakis = localpeakis[(t0i + localpeakis) > lockouts[chanis[cii]]]
                if len(localpeakis) == 0:  # localpeakis is empty
                    tis[cii] = maxchantis  # use same tis as maxchan
                    continue
                lastpeakii = len(localpeakis) - 1
                # find peak on this chan that's temporally closest to primary peak on maxchan.
                # If two peaks are equally close, pick the sharpest one
                dt0is = abs(localpeakis - peak0ti)
                if (np.diff(dt0is) == 0).any(): # two peaks equally close, pick sharpest one
                    peak0ii = abs(localsharp[cii, localpeakis]).argmax()
                else:
                    peak0ii = dt0is.argmin()
                # save primary peak for this cii
                dt0i = dt0is[peak0ii]
                if dt0i > sdti:  # too distant in time
                    tis[cii, 0] = peak0ti  # use same t0i as maxchan
                else:  # give it its own t0i
                    tis[cii, 0] = localpeakis[peak0ii]
                # save 2ndary peak for this cii
                if len(localpeakis) == 1: # monophasic, set 2ndary peak same as primary
                    tis[cii, 1] = tis[cii, 0]
                    continue
                if peak0ti <= peak1ti: # primary peak comes first (more common case)
                    peak1ii = min(peak0ii + 1, lastpeakii) # 2ndary peak is 1 to the right
                else: # peak1ti < peak0ti, ie 2ndary peak comes first
                    peak1ii = max(peak0ii - 1, 0) # 2ndary peak is 1 to the left
                dt1is = abs(localpeakis - peak1ti)
                dt1i = dt1is[peak1ii]
                if dt1i > sdti:  # too distant in time
                    tis[cii, 1] = peak1ti  # use same t1i as maxchan
                else:
                    tis[cii, 1] = localpeakis[peak1ii]

            # based on maxchan (chani), find inclchanis, incltis, and inclwindow:
            inclchanis = self.inclnbhdi[chani]
            ninclchans = len(inclchanis)
            inclchans = self.chans[inclchanis]
            chan = self.chans[chani]
            inclchani = int(np.where(inclchans == chan)[0])  # != chani!
            inclciis = chanis.searchsorted(inclchanis)
            incltis = tis[inclciis]
            inclwindow = window[inclciis]

            if DEBUG:
                self.log(
                    "final window params: t0=%r, t1=%r, Vs=%r, peakts=\n%r" %
                    (wave.ts[t0i], wave.ts[t1i], list(
                        AD2uV(Vs)), wave.ts[t0i + tis]))

            if self.extractparamsondetect:
                # Get Vpp at each inclchan's tis, use as spatial weights:
                # see core.rowtake() or util.rowtake_cy() for indexing explanation:
                w = np.float32(inclwindow[np.arange(ninclchans)[:, None], incltis])
                w = abs(w).sum(axis=1)
                x = self.siteloc[inclchanis, 0]  # 1D array (row)
                y = self.siteloc[inclchanis, 1]
                params = weights2f(f, w, x, y, inclchani)
                if params is None: # presumably a non-localizable many-channel noise event
                    if DEBUG:
                        treject = intround(wave.ts[ti])  # nearest us
                        self.log("reject spike at t=%d based on fit params" %
                                 treject)
                    # no real need to lockout chans for a params-rejected spike
                    continue  # skip to next peak

            # build up spike record:
            s = spikes[nspikes]
            # wave.ts might be floats, depending on sampfreq
            s['t'] = intround(wave.ts[ti])  # nearest us
            # leave each spike's chanis in sorted order, as they are in self.inclnbhdi,
            # important assumption used later on, like in sort.get_wave() and
            # Neuron.update_wave()
            ts = wave.ts[t0i:t1i]  # potentially floats
            # use ts = np.arange(s['t0'], s['t1'], stream.tres) to reconstruct
            s['t0'], s['t1'] = intround(wave.ts[t0i]), intround(wave.ts[t1i]) # nearest us
            s['tis'][:ninclchans] = incltis  # wrt t0i=0
            s['aligni'] = aligni  # 0 or 1
            s['dt'] = intround(abs(ts[tis[maxcii, 0]] - ts[tis[maxcii, 1]])) # nearest us
            s['V0'], s['V1'] = AD2uV(Vs)  # in uV
            s['Vpp'] = AD2uV(Vpp)  # in uV
            s['chan'], s['chans'][:ninclchans], s['nchans'] = chan, inclchans, ninclchans
            s['chani'] = inclchani
            nt = inclwindow.shape[1] # isn't always full width if recording has gaps
            wavedata[nspikes, :ninclchans, :nt] = inclwindow

            if self.extractparamsondetect:
                # Save spatial fit params, and lockout only the channels within lockrx*sx
                # of the fit spatial location of the spike, up to a max of self.inclr.
                s['x0'], s['y0'], s['sx'], s['sy'] = params
                x0, y0 = s['x0'], s['y0']
                # lockout radius for this spike:
                lockr = min(self.lockrx * s['sx'], self.inclr)  # in um
                # test y coords of inclchans in y array, ylockchaniis can be used to index
                # into x, y and inclchans:
                ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
                # test Euclidean distance from (x0, y0) for each ylockchanii; keep only
                # chans within lockr. Filtering with a mask avoids shifting indices by
                # deleting entries from lockchaniis while iterating over it:
                lockchaniis = np.array([ ii for ii in ylockchaniis
                                         if dist((x[ii], y[ii]), (x0, y0)) <= lockr ],
                                       dtype=int)
                lockchans = inclchans[lockchaniis]
                lockchanis = inclchanis[lockchaniis]
                nlockchans = len(lockchans)
                s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
                # just for testing:
                #assert (lockchanis == self.chans.searchsorted(lockchans)).all()
                #assert (lockchaniis == chanis.searchsorted(lockchanis)).all()
            else:  # in this case, the inclchans and lockchans fields are redundant
                s['lockchans'][:ninclchans], s['nlockchans'] = inclchans, ninclchans
                lockchanis = chanis
                lockchaniis = np.arange(ninclchans)

            # give each chan a distinct lockout, based on how each chan's
            # sharpest peaks line up with those of the maxchan. Respect existing lockouts:
            # on each of the relevant chans, keep whichever lockout ends last
            thislockout = t0i + tis.max(axis=1)[lockchaniis]
            lockouts[lockchanis] = np.max([lockouts[lockchanis], thislockout],
                                          axis=0)

            if DEBUG:
                self.log('lockouts=%r\nfor chans=%r' %
                         (list(wave.ts[lockouts[lockchanis]]),
                          list(self.chans[lockchanis])))
                self.log('*** found new spike %d: t=%d chan=%d (%d, %d)' %
                         (nspikes + self.nspikes, s['t'], chan,
                          self.siteloc[chani, 0], self.siteloc[chani, 1]))
            nspikes += 1

        # trim spikes and wavedata arrays down to size
        spikes.resize(nspikes, refcheck=False)
        wds = wavedata.shape
        wavedata.resize((nspikes, wds[1], wds[2]), refcheck=False)
        return spikes, wavedata
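
check_wave() leans on util.sharpness2D(), whose implementation (Cython, operating on nchans x nt data) isn't shown here. The toy 1D analogue below is built only from what the code above implies: each zero-crossing-separated peak gets a signed sharpness on the order of Vp**2/width, which is why the monophasic test ppsharp >= Vppthresh**2/dt compares like with like. Treat the details as assumptions, not the real algorithm:

import numpy as np

def sharpness1D(x):
    """Return an array like x holding the signed sharpness of each
    zero-crossing-separated peak at that peak's extremum, 0 elsewhere."""
    x = np.asarray(x, dtype=float)
    sharp = np.zeros(len(x))
    sign = np.sign(x)
    boundaries = np.where(np.diff(sign) != 0)[0] + 1 # segment edge at each sign change
    for seg in np.split(np.arange(len(x)), boundaries):
        if len(seg) == 0 or sign[seg[0]] == 0:
            continue # skip all-zero stretches
        peaki = seg[np.abs(x[seg]).argmax()] # extremum of this segment
        sharp[peaki] = x[peaki] * abs(x[peaki]) / len(seg) # ~ signed Vp**2 / width
    return sharp
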
Exemple #40
0
    def load(self):
        f = open(self.path, 'rb')
        self.din = np.fromfile(f, dtype=np.int64).reshape(-1, 2) # reshape to 2 cols
        f.close()
        try:
            txthdrpath = rstrip(self.path, '.din') + '.textheader'
            f = open(txthdrpath, 'rU') # use universal newline support
            self.textheader = f.read() # read it all in
            f.close()
        except IOError:
            print("WARNING: couldn't load text header associated with '%s'" % self.name)
            self.textheader = '' # set to empty

        treestr = self.level*TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)

        if self.textheader != '':
            # comment out all lines starting with "from dimstim"
            self.textheader = self.textheader.replace('from dimstim', '#from dimstim')
            # execute any remaining 'import' lines first, so that any modules imported
            # aren't artefactually detected as having been added to the namespace:
            for line in self.textheader.splitlines():
                if line.startswith('import'):
                    exec(line)
            thns = {} # textheader namespace
            # compiling to code and then executing that is supposed to be faster than directly
            # executing a string, according to
            # http://lucumr.pocoo.org/2011/2/1/exec-in-python/, but doesn't seem to make
            # a difference here:
            code = compile(self.textheader, "<string>", "exec")
            # don't exec in current namespace, load name:val pairs into thns instead:
            exec(code, None, thns)
            try:
                # dimstim up to ptc15 didn't have a version, neither did NVS display
                self.__version__ = thns['__version__']
            except KeyError:
                self.__version__ = 0.0
            if self.__version__ >= 0.16: # after major refactoring of dimstim
                for name, val in thns.items():
                    # bind each variable in the textheader as an attrib of self
                    self.__setattr__(name, val)
                # this doesn't work for textheaders generated by dimstim 0.16, since
                # xorigDeg and yorigDeg were accidentally omitted from all the experiment
                # scripts and hence the textheaders too:
                '''
                self.e.xorig = deg2pix(self.e.static.xorigDeg, self.I) + self.I.SCREENWIDTH / 2
                self.e.yorig = deg2pix(self.e.static.yorigDeg, self.I) + self.I.SCREENHEIGHT / 2
                '''
                self.REFRESHTIME = intround(1 / float(self.I.REFRESHRATE) * 1000000) # us
                # prevent replication of movie frame data in memory
                if type(self.e) == Movie:
                    fname = os.path.split(self.e.static.fname)[-1] # pathless fname
                    uns = get_ipython().user_ns
                    if fname not in uns['MOVIES']:
                        # add movie experiment, indexed according to movie data file name,
                        # to prevent from ever loading its frames more than once
                        uns['MOVIES'][fname] = self.e
            else:
                self.oldparams = dictattr()
                for name, val in thns.items():
                    # bind each variable in the textheader to oldparams
                    self.oldparams[name] = val
                self.loadptc15exp()
        else:
            # use the time difference between the first two din instead
            self.REFRESHTIME = self.din[1, 0] - self.din[0, 0]

        # add an extra refresh time after last din, that's when screen actually turns off
        self.trange = (self.din[0, 0], self.din[-1, 0] + self.REFRESHTIME)

        # these are static, no need for properties:
        self.dt = self.trange[1] - self.trange[0] # duration (us)
        self.dtsec = self.dt / 1e6
        self.dtmin = self.dtsec / 60
        self.dthour = self.dtmin / 60
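
load() assumes the .din file is a flat stream of int64 pairs of (timestamp in us, display-frame value). A self-contained sketch of that layout and of the textheader-less REFRESHTIME fallback, with made-up values and a hypothetical /tmp path:

import numpy as np

din = np.array([[1000000, 0],
                [1005000, 1],
                [1010000, 2]], dtype=np.int64) # 200 Hz refresh: 5000 us per din
din.tofile('/tmp/toy.din') # written as a flat stream of int64s

loaded = np.fromfile('/tmp/toy.din', dtype=np.int64).reshape(-1, 2) # as in load()
REFRESHTIME = loaded[1, 0] - loaded[0, 0] # fallback when there's no textheader
trange = loaded[0, 0], loaded[-1, 0] + REFRESHTIME # one extra refresh after last din
assert REFRESHTIME == 5000
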
Exemple #41
0
class Sort(object):
    """A sort is a single spike extraction. Generally, there is one sort per recording,
    and sorts of the same name within the same track were extracted in the same spike
    sorting session"""
    def __init__(self, path, id=None, recording=None):
        self.level = 4 # level in the hierarchy
        self.treebuf = StringIO() # create a string buffer to print tree hierarchy to
        self.path = path
        self.id = id
        self.r = recording
        self.alln = {} # dict to store all Neurons

    def get_n(self):
        """Return dict of neurons that meet MINRATE"""
        n = {}
        MINRATE = get_ipython().user_ns['MINRATE']
        for neuron in self.alln.values():
            if neuron.meanrate >= MINRATE:
                n[neuron.id] = neuron
        return n

    n = property(get_n)

    def get_qn(self):
        """Return dict of quiet neurons, ie those that fail to meet MINRATE"""
        qn = {}
        MINRATE = get_ipython().user_ns['MINRATE']
        for neuron in self.alln.values():
            if neuron.meanrate < MINRATE:
                qn[neuron.id] = neuron
        return qn

    qn = property(get_qn)

    name = property(lambda self: os.path.split(self.path)[-1])
    nneurons = property(lambda self: len(self.n))
    nqneurons = property(lambda self: len(self.qn))
    nallneurons = property(lambda self: len(self.alln))
    nspikes = property(lambda self: self.header.nspikes)
    # .ptcs specific properties:
    # datetime object, calculated from header.datetime days since EPOCH:
    datetime = property(lambda self: EPOCH + datetime.timedelta(days=self.header.datetime))
    pttype = property(lambda self: self.header.pttype)
    chanpos = property(lambda self: self.header.chanpos)
    samplerate = property(lambda self: self.header.samplerate)
    tres = property(lambda self: intround(1 / self.samplerate * 1e6)) # us

    def tree(self):
        """Print tree hierarchy"""
        print(self.treebuf.getvalue(), end='')

    def writetree(self, string):
        """Write to self's tree buffer and to parent's too"""
        self.treebuf.write(string)
        self.r.writetree(string)
    
    def load(self):
        treestr = self.level*TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)
        
        if os.path.isfile(self.path):
            if self.path.endswith('.ptcs'): # it's a single .ptcs file
                self.loadptcs()
            elif self.path.endswith('spikes.mat'): # it's a single .mat file
                self.loadmat()
            else:
                raise ValueError('unknown sort type %r' % self.path)
        elif os.path.isdir(self.path):
            # it's a directory of .spk files
            self.loadspk()
        else:
            raise RuntimeError("sort path %r is neither a file nor a directory" % self.path)

    def loadptcs(self):
        """Load neurons from a single .ptcs file"""
        self.header = PTCSHeader()
        with open(self.path, 'rb') as f:
            self.header.read(f)
            for i in range(self.header.nneurons):
                neuron = Neuron(self.path, sort=self)
                neuron.loadptcs(f, self.header)
                self.alln[neuron.id] = neuron # save it
            assert eof(f), 'File %s has unexpected length' % self.path

    def loadmat(self):
        """Load neurons from a single .mat file"""
        self.header = MATHeader()
        nrecs = self.header.read(self.path)
        for nrec in nrecs:
            neuron = Neuron(self.path, sort=self)
            neuron.loadmat(nrec)
            self.alln[neuron.id] = neuron # save it

    def loadspk(self):
        """Load neurons from multiple .spk files"""
        self.header = SPKHeader(self.path)
        for spkfname in self.header.spkfnames:
            path = os.path.join(self.path, spkfname)
            neuron = Neuron(path, sort=self)
            self.header.read(neuron)
            self.alln[neuron.id] = neuron # save it
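
The n and qn properties partition alln by the user-namespace MINRATE, so nallneurons == nneurons + nqneurons always holds. A toy sketch of that partition, with stand-in rates instead of real Neuron objects:

MINRATE = 0.05 # Hz; in neuropy this comes from get_ipython().user_ns['MINRATE']
alln = {1: 0.01, 2: 2.3, 3: 0.0, 4: 0.7} # toy {neuron id: mean firing rate, Hz}
n = {nid: rate for nid, rate in alln.items() if rate >= MINRATE} # like Sort.n
qn = {nid: rate for nid, rate in alln.items() if rate < MINRATE} # like Sort.qn
assert len(alln) == len(n) + len(qn)
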
Exemple #42
0
    def si(self, kind=None, chani=-1, width=None, tres=None,
           lfpwidth=None, lfptres=None, loband=None, hiband=None, plot=True,
           showstates='auto', statelinepos=[0.2], lw=4, alpha=1, relative2t0=False,
           lim2stim=False, showxlabel=True, showylabel=True, showtitle=True, title=None,
           reclabel=True, swapaxes=False, figsize=None):
        """Calculate an LFP synchrony index, using potentially overlapping windows of width
        and tres, in sec, from the LFP spectrogram, itself composed of bins of lfpwidth and
        lfptres. relative2t0 controls whether to plot relative to t0, or relative to start of
        ADC clock. lim2stim limits the time range only to when a stimulus was presented, i.e.
        to the outermost times of non-NULL din.

        Note that for power ratio methods (kind: L/(L+H) or L/H),
        width and tres are not used, only lfpwidth and lfptres. Options for kind are:

        'L/(L+H)': fraction of power in low band vs total power (Saleem2010)

        'L/H': low to highband power ratio (Li, Poo, Dan 2009)

        'cv': coefficient of variation (std / mean) of all power

        'ncv': normalized CV: (std - mean) / (std + mean)

        'nstdmed': normalized stdmed: (std - med) / (std + med)

        'n2stdmean': normalized 2stdmean: (2*std - mean) / (2*std + mean)

        'n3stdmean': normalized 3stdmean: (3*std - mean) / (3*std + mean)

        """
        uns = get_ipython().user_ns
        if kind == None:
            kind = uns['LFPSIKIND']
        if kind in ['L/(L+H)', 'L/H', 'nLH']: # it's a power ratio measure
            pr = True
        else:
            pr = False

        data = self.get_data()
        ts = self.get_tssec() # full set of timestamps, in sec
        t0, t1 = ts[0], ts[-1]
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if figsize == None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the SI plot:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5 # inches
            figsize = figwidth, figheight

        t0i, t1i = ts.searchsorted((t0, t1))
        x = data[chani, t0i:t1i] / 1e3 # slice data, convert from uV to mV
        x = filter.notch(x)[0] # remove 60 Hz mains noise
        try:
            rr = self.r.e0.I['REFRESHRATE']
        except AttributeError: # probably a recording with no experiment
            rr = 200 # assume 200 Hz refresh rate
        if rr <= 100: # CRT was at low vertical refresh rate
            print('filtering out %d Hz from LFP in %s' % (intround(rr), self.r.name))
            x = filter.notch(x, freq=rr)[0] # remove CRT interference

        if width == None:
            width = uns['LFPSIWIDTH'] # sec
        if tres == None:
            tres = uns['LFPSITRES'] # sec
        if lfpwidth == None:
            lfpwidth = uns['LFPPRWIDTH'] if pr else uns['LFPSPECGRAMWIDTH'] # sec
        if lfptres == None:
            lfptres = uns['LFPPRTRES'] if pr else uns['LFPSPECGRAMTRES'] # sec
        if loband == None:
            loband = uns['LFPPRLOBAND']
        f0, f1 = loband
        if hiband == None:
            hiband = uns['LFPPRHIBAND']
        f2, f3 = hiband

        assert lfptres <= lfpwidth
        NFFT = intround(lfpwidth * self.sampfreq)
        noverlap = intround(NFFT - lfptres * self.sampfreq)
        #print('len(x), NFFT, noverlap: %d, %d, %d' % (len(x), NFFT, noverlap))
        # t is midpoints of timebins in sec from start of data. P is in mV^2?:
        P, freqs, Pt = mpl.mlab.specgram(x, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
        # don't convert power to dB, just washes out the signal in the ratio:
        #P = 10. * np.log10(P)
        if not relative2t0:
            Pt += t0 # convert t to time from start of ADC clock:
        nfreqs = len(freqs)

        # keep only freqs between f0 and f1, and f2 and f3:
        f0i, f1i, f2i, f3i = freqs.searchsorted([f0, f1, f2, f3])
        lP = P[f0i:f1i] # nsubfreqs x nt
        hP = P[f2i:f3i] # nsubfreqs x nt
        lP = lP.sum(axis=0) # nt
        hP = hP.sum(axis=0) # nt

        if pr:
            t = Pt
            ylabel = 'SI (%s)' % kind
        else:
            # potentially overlapping bin time ranges:
            trange = Pt[0], Pt[-1]
            tranges = split_tranges([trange], width, tres) # in sec
            ntranges = len(tranges)
            tis = Pt.searchsorted(tranges) # ntranges x 2 array
            # number of timepoints to use for each trange, almost all will be the same width:
            binnt = intround((tis[:, 1] - tis[:, 0]).mean())
            binhP = np.zeros((ntranges, binnt)) # init appropriate array
            for trangei, t0i in enumerate(tis[:, 0]):
                binhP[trangei] = hP[t0i:t0i+binnt]
            # get midpoint of each trange:
            t = tranges.mean(axis=1)

        #old_settings = np.seterr(all='ignore') # suppress div by 0 errors
        # plot power signal to be analyzed
        #self.si_plot(hP, Pt, t0=0, t1=t[-1], ylim=None, ylabel='highband power',
        #             title=lastcmd()+' highband power', text=self.r.name)

        # set some plotting defaults:
        hlines = []
        if pr:
            ylim = 0, 1
            yticks = 0, 0.2, 0.4, 0.6, 0.8, 1
        else:
            ylim = -1, 1
            yticks = -1, 0, 1
            hlines = [0]

        # calculate some metric of each column, i.e. each bin:
        if kind == 'L/(L+H)':
            si = lP/(lP + hP)
        elif kind == 'L/H':
            si = lP/hP
        elif kind == 'nLH':
            t = Pt
            si = (lP - hP) / (lP + hP)
            ylabel = 'LFP (L - H) / (L + H)'
        elif kind == 'cv':
            si = binhP.std(axis=1) / binhP.mean(axis=1)
            ylim = 0, 2
            yticks = 0, 1, 2
            ylabel = 'LFP power CV'
        elif kind == 'ncv':
            s = binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s - mean) / (s + mean)
            ylabel = 'LFP power (std - mean) / (std + mean)'
            #pl.plot(t, s)
            #pl.plot(t, mean)
        elif kind == 'n2stdmean':
            s2 = 2 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s2 - mean) / (s2 + mean)
            ylabel = 'LFP power (2*std - mean) / (2*std + mean)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, mean)
        elif kind == 'n3stdmean':
            s3 = 3 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s3 - mean) / (s3 + mean)
            ylabel = 'LFP power (3*std - mean) / (3*std + mean)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, mean)
        elif kind == 'n4stdmean':
            s4 = 4 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s4 - mean) / (s4 + mean)
            ylabel = 'LFP power (4*std - mean) / (4*std + mean)'
            #pl.plot(t, s4)
            #pl.plot(t, mean)
        elif kind == 'nstdmed':
            s = binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s - med) / (s + med)
            ylabel = 'LFP power (std - med) / (std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s)
            #pl.plot(t, med)
        elif kind == 'n2stdmed':
            s2 = 2 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s2 - med) / (s2 + med)
            ylabel = 'LFP power (2*std - med) / (2*std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, med)
        elif kind == 'n3stdmed':
            s3 = 3 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s3 - med) / (s3 + med)
            ylabel = 'LFP power (3*std - med) / (3*std + med)'
            hlines = [-0.1, 0, 0.1] # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, med)
        elif kind == 'nstdmin':
            s = binhP.std(axis=1)
            min = binhP.min(axis=1)
            si = (s - min) / (s + min)
            ylabel = 'LFP power (std - min) / (std + min)'
            #pl.plot(t, s)
            #pl.plot(t, min)
        elif kind == 'nmadmean':
            mean = binhP.mean(axis=1)
            mad = (np.abs(binhP - mean[:, None])).mean(axis=1)
            si = (mad - mean) / (mad + mean)
            ylabel = 'MUA (MAD - mean) / (MAD + mean)'
            #pl.plot(t, mad)
            #pl.plot(t, mean)
        elif kind == 'nmadmed':
            med = np.median(binhP, axis=1)
            mad = (np.abs(binhP - med[:, None])).mean(axis=1)
            si = (mad - med) / (mad + med)
            ylabel = 'MUA (MAD - median) / (MAD + median)'
            #pl.plot(t, mad)
            #pl.plot(t, med)
        elif kind == 'nvarmin':
            v = binhP.var(axis=1)
            min = binhP.min(axis=1)
            si = (v - min) / (v + min)
            ylabel = 'LFP power (var - min) / (var + min)'
            #pl.plot(t, v)
            #pl.plot(t, min)
        elif kind == 'nptpmean':
            ptp = binhP.ptp(axis=1)
            mean = binhP.mean(axis=1)
            si = (ptp - mean) / (ptp + mean)
            ylabel = 'MUA (ptp - mean) / (ptp + mean)'
            #pl.plot(t, ptp)
            #pl.plot(t, mean)
        elif kind == 'nptpmed':
            ptp = binhP.ptp(axis=1)
            med = np.median(binhP, axis=1)
            si = (ptp - med) / (ptp + med)
            ylabel = 'MUA (ptp - med) / (ptp + med)'
            #pl.plot(t, ptp)
            #pl.plot(t, med)
        elif kind == 'nptpmin':
            ptp = binhP.ptp(axis=1)
            min = binhP.min(axis=1)
            si = (ptp - min) / (ptp + min)
            ylabel = 'MUA (ptp - min) / (ptp + min)'
            #pl.plot(t, ptp)
            #pl.plot(t, min)
        elif kind == 'nmaxmin':
            max = binhP.max(axis=1)
            min = binhP.min(axis=1)
            si = (max - min) / (max + min)
            ylabel = 'MUA (max - min) / (max + min)'
            #pl.plot(t, max)
            #pl.plot(t, min)
        else:
            raise ValueError('unknown kind %r' % kind)
        if plot:
            # calculate xlim, always start from 0, add half a bin width to xmax:
            if pr:
                xlim = (0, t[-1]+lfpwidth/2)
            else:
                xlim = (0, t[-1]+width/2)
            self.si_plot(si, t, t0=t0, t1=t1, xlim=xlim, ylim=ylim, yticks=yticks,
                         ylabel=ylabel, showxlabel=showxlabel, showylabel=showylabel,
                         showtitle=showtitle, title=title,
                         reclabel=reclabel, hlines=hlines,
                         showstates=showstates, statelinepos=statelinepos, lw=lw,
                         alpha=alpha, swapaxes=swapaxes, figsize=figsize)
        #np.seterr(**old_settings) # restore old settings
        return si, t # t are midpoints of bins, offset depends on relative2t0
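
A self-contained sketch of the 'L/(L+H)' computation above, substituting scipy.signal.spectrogram for mpl.mlab.specgram and made-up band edges for the LFPPRLOBAND/LFPPRHIBAND defaults:

import numpy as np
from scipy.signal import spectrogram

fs = 1000.0 # Hz, stand-in for self.sampfreq
t = np.arange(0, 60, 1 / fs) # 60 s of toy LFP
x = np.sin(2*np.pi*4*t) + 0.2*np.sin(2*np.pi*40*t) # low + high frequency components
lfpwidth, lfptres = 30.0, 5.0 # sec, playing the roles of lfpwidth and lfptres
nperseg = int(round(lfpwidth * fs))
noverlap = int(round(nperseg - lfptres * fs))
freqs, Pt, P = spectrogram(x, fs=fs, nperseg=nperseg, noverlap=noverlap)
f0, f1, f2, f3 = 0.5, 7.0, 15.0, 100.0 # assumed low and high band edges, Hz
f0i, f1i, f2i, f3i = freqs.searchsorted([f0, f1, f2, f3])
lP = P[f0i:f1i].sum(axis=0) # low band power in each time bin
hP = P[f2i:f3i].sum(axis=0) # high band power in each time bin
si = lP / (lP + hP) # 'L/(L+H)': near 1 when low frequencies dominate
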
Exemple #43
0
 def psd(self, t0=None, t1=None, f0=0.2, f1=110, p0=None, p1=None, chanis=-1,
         width=None, tres=None, xscale='log', figsize=(5, 5)):
     """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip
     power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0
     uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1,
     take mean of specified chanis. width and tres are in sec."""
     uns = get_ipython().user_ns
     self.get_data()
     ts = self.get_tssec() # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1] # full duration
     if t1 == None:
         t1 = t0 + 10 # 10 sec window
     if width == None:
         width = uns['LFPSPECGRAMWIDTH'] # sec
     if tres == None:
         tres = uns['LFPSPECGRAMTRES'] # sec
     assert tres <= width
     NFFT = intround(width * self.sampfreq)
     noverlap = intround(NFFT - tres * self.sampfreq)
     t0i, t1i = ts.searchsorted((t0, t1))
     #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
     data = self.data[:, t0i:t1i] # slice data
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if iterable(chanis):
         data = data[chanis].mean(axis=0) # take mean of data on chanis
     else:
         data = data[chanis] # get single row of data at chanis
     #data = filter.notch(data)[0] # remove 60 Hz mains noise
     # convert data from uV to mV. I think P is in mV^2?:
     P, freqs = mpl.mlab.psd(data/1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
     # keep only freqs between f0 and f1:
     if f0 == None:
         f0 = freqs[0]
     if f1 == None:
         f1 = freqs[-1]
     lo, hi = freqs.searchsorted([f0, f1])
     P, freqs = P[lo:hi], freqs[lo:hi]
     # check for and replace zero power values (ostensibly due to gaps in recording)
     # before attempting to convert to dB:
     zis = np.where(P == 0.0) # row and column indices where P has zero power
     if len(zis[0]) > 0: # at least one hit
         P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
         minnzval = P.min() # get minimum nonzero value
         P[zis] = minnzval # replace with min nonzero values
     P = 10. * np.log10(P) # convert power to dB wrt 1 mV^2?
     # for better visualization, clip power values to within (p0, p1) dB
     if p0 != None:
         P[P < p0] = p0
     if p1 != None:
         P[P > p1] = p1
     #self.P = P
     a.plot(freqs, P, 'k-')
     # add SI frequency band limits:
     LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND']
     a.axvline(x=LFPPRLOBAND[0], c='r', ls='--')
     a.axvline(x=LFPPRLOBAND[1], c='r', ls='--')
     a.axvline(x=LFPPRHIBAND[0], c='b', ls='--')
     a.axvline(x=LFPPRHIBAND[1], c='b', ls='--')
     a.axis('tight')
     a.set_xscale(xscale)
     a.set_xlabel("frequency (Hz)")
     a.set_ylabel("power (dB)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
     a.text(0.998, 0.99, '%s' % self.r.name, color='k', transform=a.transAxes,
            horizontalalignment='right', verticalalignment='top')
     f.tight_layout(pad=0.3) # crop figure to contents
     self.f = f
     return P, freqs
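
The zero-power guard in psd() is worth seeing in isolation: exact zeros (e.g. from gaps in the recording) are replaced with the smallest nonzero power before the dB conversion, so log10 never produces -inf. A minimal standalone version:

import numpy as np

P = np.array([0.0, 1e-9, 2e-8, 0.0]) # toy power values with gap-induced zeros
zis = np.where(P == 0.0) # indices of zero power
if len(zis[0]) > 0:
    P[zis] = np.finfo(np.float64).max # mask zeros so min() finds the true nonzero min
    P[zis] = P.min() # then overwrite them with that minimum
P_dB = 10. * np.log10(P) # now safe: no -inf values
assert np.isfinite(P_dB).all()
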
Exemple #44
0
    def si(self,
           kind=None,
           chani=-1,
           width=None,
           tres=None,
           lfpwidth=None,
           lfptres=None,
           loband=None,
           hiband=None,
           plot=True,
           showstates='auto',
           statelinepos=[0.2],
           lw=4,
           alpha=1,
           relative2t0=False,
           lim2stim=False,
           showxlabel=True,
           showylabel=True,
           showtitle=True,
           title=None,
           reclabel=True,
           swapaxes=False,
           figsize=None):
        """Calculate an LFP synchrony index, using potentially overlapping windows of width
        and tres, in sec, from the LFP spectrogram, itself composed of bins of lfpwidth and
        lfptres. relative2t0 controls whether to plot relative to t0, or relative to start of
        ADC clock. lim2stim limits the time range only to when a stimulus was presented, i.e.
        to the outermost times of non-NULL din.

        Note that for power ratio methods (kind: L/(L+H) or L/H),
        width and tres are not used, only lfpwidth and lfptres. Options for kind are:

        'L/(L+H)': fraction of power in low band vs total power (Saleem2010)

        'L/H': low to highband power ratio (Li, Poo, Dan 2009)

        'cv': coefficient of variation (std / mean) of all power

        'ncv': normalized CV: (std - mean) / (std + mean)

        'nstdmed': normalized stdmed: (std - med) / (std + med)

        'n2stdmean': normalized 2stdmean: (2*std - mean) / (2*std + mean)

        'n3stdmean': normalized 3stdmean: (3*std - mean) / (3*std + mean)

        """
        uns = get_ipython().user_ns
        if kind == None:
            kind = uns['LFPSIKIND']
        if kind in ['L/(L+H)', 'L/H', 'nLH']:  # it's a power ratio measure
            pr = True
        else:
            pr = False

        data = self.get_data()
        ts = self.get_tssec()  # full set of timestamps, in sec
        t0, t1 = ts[0], ts[-1]
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if figsize == None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the SI plot:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5  # inches
            figsize = figwidth, figheight

        t0i, t1i = ts.searchsorted((t0, t1))
        x = data[chani, t0i:t1i] / 1e3  # slice data, convert from uV to mV
        x = filter.notch(x)[0]  # remove 60 Hz mains noise
        try:
            rr = self.r.e0.I['REFRESHRATE']
        except AttributeError:  # probably a recording with no experiment
            rr = 200  # assume 200 Hz refresh rate
        if rr <= 100:  # CRT was at low vertical refresh rate
            print('filtering out %d Hz from LFP in %s' %
                  (intround(rr), self.r.name))
            x = filter.notch(x, freq=rr)[0]  # remove CRT interference

        if width == None:
            width = uns['LFPSIWIDTH']  # sec
        if tres == None:
            tres = uns['LFPSITRES']  # sec
        if lfpwidth == None:
            lfpwidth = uns['LFPPRWIDTH'] if pr else uns[
                'LFPSPECGRAMWIDTH']  # sec
        if lfptres == None:
            lfptres = uns['LFPPRTRES'] if pr else uns['LFPSPECGRAMTRES']  # sec
        if loband == None:
            loband = uns['LFPPRLOBAND']
        f0, f1 = loband
        if hiband == None:
            hiband = uns['LFPPRHIBAND']
        f2, f3 = hiband

        assert lfptres <= lfpwidth
        NFFT = intround(lfpwidth * self.sampfreq)
        noverlap = intround(NFFT - lfptres * self.sampfreq)
        #print('len(x), NFFT, noverlap: %d, %d, %d' % (len(x), NFFT, noverlap))
        # t is midpoints of timebins in sec from start of data. P is in mV^2?:
        P, freqs, Pt = mpl.mlab.specgram(x,
                                         NFFT=NFFT,
                                         Fs=self.sampfreq,
                                         noverlap=noverlap)
        # don't convert power to dB, just washes out the signal in the ratio:
        #P = 10. * np.log10(P)
        if not relative2t0:
            Pt += t0  # convert t to time from start of ADC clock:
        nfreqs = len(freqs)

        # keep only freqs between f0 and f1, and f2 and f3:
        f0i, f1i, f2i, f3i = freqs.searchsorted([f0, f1, f2, f3])
        lP = P[f0i:f1i]  # nsubfreqs x nt
        hP = P[f2i:f3i]  # nsubfreqs x nt
        lP = lP.sum(axis=0)  # nt
        hP = hP.sum(axis=0)  # nt

        if pr:
            t = Pt
            ylabel = 'SI (%s)' % kind
        else:
            # potentially overlapping bin time ranges:
            trange = Pt[0], Pt[-1]
            tranges = split_tranges([trange], width, tres)  # in sec
            ntranges = len(tranges)
            tis = Pt.searchsorted(tranges)  # ntranges x 2 array
            # number of timepoints to use for each trange, almost all will be the same width:
            binnt = intround((tis[:, 1] - tis[:, 0]).mean())
            binhP = np.zeros((ntranges, binnt))  # init appropriate array
            for trangei, t0i in enumerate(tis[:, 0]):
                binhP[trangei] = hP[t0i:t0i + binnt]
            # get midpoint of each trange:
            t = tranges.mean(axis=1)

        #old_settings = np.seterr(all='ignore') # suppress div by 0 errors
        # plot power signal to be analyzed
        #self.si_plot(hP, Pt, t0=0, t1=t[-1], ylim=None, ylabel='highband power',
        #             title=lastcmd()+' highband power', text=self.r.name)

        # set some plotting defaults:
        hlines = []
        if pr:
            ylim = 0, 1
            yticks = 0, 0.2, 0.4, 0.6, 0.8, 1
        else:
            ylim = -1, 1
            yticks = -1, 0, 1
            hlines = [0]

        # calculate some metric of each column, i.e. each bin:
        if kind == 'L/(L+H)':
            si = lP / (lP + hP)
        elif kind == 'L/H':
            si = lP / hP
        elif kind == 'nLH':
            t = Pt
            si = (lP - hP) / (lP + hP)
            ylabel = 'LFP (L - H) / (L + H)'
        elif kind == 'cv':
            si = binhP.std(axis=1) / binhP.mean(axis=1)
            ylim = 0, 2
            ytiks = 0, 1, 2
            ylabel = 'LFP power CV'
        elif kind == 'ncv':
            s = binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s - mean) / (s + mean)
            ylabel = 'LFP power (std - mean) / (std + mean)'
            #pl.plot(t, s)
            #pl.plot(t, mean)
        elif kind == 'n2stdmean':
            s2 = 2 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s2 - mean) / (s2 + mean)
            ylabel = 'LFP power (2*std - mean) / (2*std + mean)'
            hlines = [-0.1, 0,
                      0.1]  # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, mean)
        elif kind == 'n3stdmean':
            s3 = 3 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s3 - mean) / (s3 + mean)
            ylabel = 'LFP power (3*std - mean) / (3*std + mean)'
            hlines = [-0.1, 0,
                      0.1]  # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, mean)
        elif kind == 'n4stdmean':
            s4 = 4 * binhP.std(axis=1)
            mean = binhP.mean(axis=1)
            si = (s4 - mean) / (s4 + mean)
            ylabel = 'LFP power (4*std - mean) / (4*std + mean)'
            #pl.plot(t, s4)
            #pl.plot(t, mean)
        elif kind == 'nstdmed':
            s = binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s - med) / (s + med)
            ylabel = 'LFP power (std - med) / (std + med)'
            hlines = [-0.1, 0,
                      0.1]  # demarcate desynched and synched thresholds
            #pl.plot(t, s)
            #pl.plot(t, med)
        elif kind == 'n2stdmed':
            s2 = 2 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s2 - med) / (s2 + med)
            ylabel = 'LFP power (2*std - med) / (2*std + med)'
            hlines = [-0.1, 0,
                      0.1]  # demarcate desynched and synched thresholds
            #pl.plot(t, s2)
            #pl.plot(t, med)
        elif kind == 'n3stdmed':
            s3 = 3 * binhP.std(axis=1)
            med = np.median(binhP, axis=1)
            si = (s3 - med) / (s3 + med)
            ylabel = 'LFP power (3*std - med) / (3*std + med)'
            hlines = [-0.1, 0,
                      0.1]  # demarcate desynched and synched thresholds
            #pl.plot(t, s3)
            #pl.plot(t, med)
        elif kind == 'nstdmin':
            s = binhP.std(axis=1)
            min = binhP.min(axis=1)
            si = (s - min) / (s + min)
            ylabel = 'LFP power (std - min) / (std + min)'
            #pl.plot(t, s)
            #pl.plot(t, min)
        elif kind == 'nmadmean':
            mean = binhP.mean(axis=1)
            mad = (np.abs(binhP - mean[:, None])).mean(axis=1)
            si = (mad - mean) / (mad + mean)
            ylabel = 'MUA (MAD - mean) / (MAD + mean)'
            #pl.plot(t, mad)
            #pl.plot(t, mean)
        elif kind == 'nmadmed':
            med = np.median(binhP, axis=1)
            mad = (np.abs(binhP - med[:, None])).mean(axis=1)
            si = (mad - med) / (mad + med)
            ylabel = 'MUA (MAD - median) / (MAD + median)'
            #pl.plot(t, mad)
            #pl.plot(t, med)
        elif kind == 'nvarmin':
            v = binhP.var(axis=1)
            min = binhP.min(axis=1)
            si = (v - min) / (v + min)
            ylabel = 'LFP power (std - min) / (std + min)'
            #pl.plot(t, v)
            #pl.plot(t, min)
        elif kind == 'nptpmean':
            ptp = binhP.ptp(axis=1)
            mean = binhP.mean(axis=1)
            si = (ptp - mean) / (ptp + mean)
            ylabel = 'MUA (ptp - mean) / (ptp + mean)'
            #pl.plot(t, ptp)
            #pl.plot(t, mean)
        elif kind == 'nptpmed':
            ptp = binhP.ptp(axis=1)
            med = np.median(binhP, axis=1)
            si = (ptp - med) / (ptp + med)
            ylabel = 'MUA (ptp - med) / (ptp + med)'
            #pl.plot(t, ptp)
            #pl.plot(t, med)
        elif kind == 'nptpmin':
            ptp = binhP.ptp(axis=1)
            min = binhP.min(axis=1)
            si = (ptp - min) / (ptp + min)
            ylabel = 'MUA (ptp - min) / (ptp + min)'
            #pl.plot(t, ptp)
            #pl.plot(t, min)
        elif kind == 'nmaxmin':
            max = binhP.max(axis=1)
            min = binhP.min(axis=1)
            si = (max - min) / (max + min)
            ylabel = 'MUA (max - min) / (max + min)'
            #pl.plot(t, max)
            #pl.plot(t, min)
        else:
            raise ValueError('unknown kind %r' % kind)
        if plot:
            # calculate xlim, always start from 0, add half a bin width to xmax:
            if pr:
                xlim = (0, t[-1] + lfpwidth / 2)
            else:
                xlim = (0, t[-1] + width / 2)
            self.si_plot(si,
                         t,
                         t0=t0,
                         t1=t1,
                         xlim=xlim,
                         ylim=ylim,
                         yticks=yticks,
                         ylabel=ylabel,
                         showxlabel=showxlabel,
                         showylabel=showylabel,
                         showtitle=showtitle,
                         title=title,
                         reclabel=reclabel,
                         hlines=hlines,
                         showstates=showstates,
                         statelinepos=statelinepos,
                         lw=lw,
                         alpha=alpha,
                         swapaxes=swapaxes,
                         figsize=figsize)
        #np.seterr(**old_settings) # restore old settings
        return si, t  # t are midpoints of bins, offset depends on relative2t0
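Every 'kind' branch above reduces to the same bounded index (a - b) / (a + b), computed per time bin. A minimal standalone sketch of that pattern, with binhP replaced by a made-up array of binned, nonnegative LFP power (one row per time bin):

import numpy as np

rng = np.random.default_rng(0)
binhP = rng.lognormal(mean=0.0, sigma=1.0, size=(100, 256)) # toy binned power

def norm_diff(a, b):
    """Normalized difference index; bounded to [-1, 1] when a, b >= 0."""
    return (a - b) / (a + b)

si = norm_diff(2 * binhP.std(axis=1), np.median(binhP, axis=1)) # the 'n2stdmed' kind
assert ((-1 <= si) & (si <= 1)).all()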
Exemple #46
0
natural scene 5s movie clip recordings with cortical state changes in them. Also plot PSTH
correlation distributions, and correlations as a function of cell pair separation. Run from
within neuropy using `run -i scripts/psthcorr_natstate.py`"""

import pylab as pl
import numpy as np
from scipy.stats import mannwhitneyu, chisquare, linregress # ttest_1samp, ttest_ind, ks_2samp

import core
from core import ceilsigfig, floorsigfig, scatterbin, intround

from psth_funcs import get_nids_psths, get_psth_peaks_gac, get_seps

BLANK = False # consider blank periods between trials?
BINW, TRES = 0.02, 0.001 # PSTH time bins, sec
BINWMS = '%dms' % intround(BINW * 1000)
GAUSS = True # calculate PSTH and single trial rates by convolving with Gaussian kernel?
if GAUSS:
    KERNEL = 'gauss'
else:
    KERNEL = 'square'
KIND = 'responsive' # which type of neurons to use? 'responsive' or 'active'
MEDIANX = 2 # PSTH median multiplier, Hz
MINTHRESH = 3 # peak detection thresh, Hz

# plotting params:
FIGSIZE = (3, 3)
PLOTRHOMATRICES = False
SHOWCOLORBAR = False # show colorbar for rho matrices?
SEPBINW = 200 # separation bin width, um
SEPMAX = 1200 # max pairwise separation, um
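A minimal sketch of the KERNEL = 'gauss' idea above: bin spikes at TRES and convolve with a unit-area Gaussian. The function name, the +/- 3 sigma kernel truncation, and the default sigma are illustrative assumptions, not the actual psth_funcs implementation:

import numpy as np

def gauss_rate(spiketimes, t0, t1, tres=0.001, sigma=0.02):
    """Firing rate (Hz) on a tres (sec) grid, smoothed with a Gaussian of width
    sigma (sec)."""
    edges = np.arange(t0, t1 + tres, tres)
    counts = np.histogram(spiketimes, bins=edges)[0]
    kt = np.arange(-3 * sigma, 3 * sigma + tres, tres)
    kernel = np.exp(-0.5 * (kt / sigma) ** 2)
    kernel /= kernel.sum() # unit area preserves rate units
    return edges[:-1] + tres / 2, np.convolve(counts / tres, kernel, mode='same')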
Exemple #47
0
            if npeaks > 0:
                rnids.append(nid)
        print() # newline
    rnids = np.unique(rnids)
    return rnids

# sort state change recordings by their absname:
urecs = [ eval(recname) for recname in sorted(REC2STATETRANGES) ] # unique, no reps, sorted
urecnames = ' '.join([rec.absname for rec in urecs])

sis, rels, spars = [], [], []
for rec in urecs:
    print(rec.absname)
    si, sit = rec.lfp.si(kind='L/(L+H)', lfpwidth=LFPWIDTH, lfptres=LFPTRES, states=False,
                         relative2t0=False, lim2stim=False, plot=False)
    sit = intround(sit * 1e6) # convert from s to us for later comparison with strange
    #nids = sorted(rec.alln)
    nids = get_responsive_nids(rec)
    print('nids:', nids)
    nn = len(nids)

    # get reliability and sparseness of PSTHs calculated from sliding window of TRIALWINWIDTH
    # trials at a time, at tres of TRIALWINTRES trials. Represent SI for each range of trials
    # by the mean over those trials:
    ttranges = rec.trialtranges()[0]
    ntrials = len(ttranges)
    # build up trial indices of sliding window of trials:
    trialiranges = core.split_tranges([(0, ntrials-1)], TRIALWINWIDTH-1, TRIALWINTRES)
    for (trial0i, trial1i) in trialiranges: # iterate over all trial windows
        # get spike time range to consider for this window, from start of first trial to end
        # of last trial in this trial range:
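A hedged sketch of what the core.split_tranges call above presumably yields for trial indices: inclusive (start, stop) windows TRIALWINWIDTH trials wide, stepped by TRIALWINTRES trials. The helper below is a hypothetical stand-in, not the library function:

def sliding_trial_windows(ntrials, winwidth, winstep):
    # assumed stand-in for core.split_tranges([(0, ntrials-1)], winwidth-1, winstep)
    return [(i, i + winwidth - 1) for i in range(0, ntrials - winwidth + 1, winstep)]

print(sliding_trial_windows(10, 4, 2)) # [(0, 3), (2, 5), (4, 7), (6, 9)]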
Exemple #48
0
plot(modelx, smodel, 'r--', lw=1, alpha=0.5)
plot(modelx, dmodel, 'b--', lw=1, alpha=0.5)
# display geometric means and percentages of response events with <= 1 spike/trial:
text(0.02, 0.984, '$\mu$ = %.1f' % smean, # synched
                  horizontalalignment='left', verticalalignment='top',
                  transform=gca().transAxes, color='r')
text(0.02, 0.904, '$\mu$ = %.1f' % dmean, # desynched
                  horizontalalignment='left', verticalalignment='top',
                  transform=gca().transAxes, color='b')
'''
text(0.98, 0.82, 'p < %.1g' % ceilsigfig(p, 1),
                 horizontalalignment='right', verticalalignment='top',
                 transform=gca().transAxes, color='k')
'''
# get percentage of response events with <= 1 spike/trial, separately for each state
spercentlte1 = intround((peaknspikes[1] <= 1).sum() / len(peaknspikes[1]) * 100)
dpercentlte1 = intround((peaknspikes[0] <= 1).sum() / len(peaknspikes[0]) * 100)
text(0.98, 0.98, '%d%% $\leq$ 1' % spercentlte1,
                 horizontalalignment='right', verticalalignment='top',
                 transform=gca().transAxes, color='r')
text(0.98, 0.90, '%d%% $\leq$ 1' % dpercentlte1,
                 horizontalalignment='right', verticalalignment='top',
                 transform=gca().transAxes, color='b')
annotate('', xy=(smean, (6/7)*ymax), xycoords='data', # synched
             xytext=(smean, ymax), textcoords='data',
             arrowprops=dict(fc='r', ec='none', width=1.3, headwidth=7, frac=0.5))
annotate('', xy=(dmean, (6/7)*ymax), xycoords='data', # desynched
             xytext=(dmean, ymax), textcoords='data',
             arrowprops=dict(fc='b', ec='none', width=1.3, headwidth=7, frac=0.5))
titlestr = 'peak nspikes log %s' % urecnames
gcfm().window.setWindowTitle(titlestr)
Exemple #49
0
    def cch(self, nid0, nid1=None, trange=50, binw=None, shift=None, nshifts=10,
            rate=False, norm=False, c='k', title=True, figsize=(7.5, 6.5)):
        """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
        If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
        (in ms) is set, calculate the average of +/- nshift CCHs shifted by shift, and then
        subtract that from the unshifted CCH to get the shift corrected CCH"""
        if nid1 == None:
            nid1 = nid0
        autocorr = nid0 == nid1
        n0 = self.alln[nid0]
        n1 = self.alln[nid1]
        calctrange = trange * 1000 # calculation trange, in us
        if shift:
            assert nshifts > 0
            shift *= 1000 # convert to us
            maxshift = nshifts * shift
            calctrange += maxshift # expand calculated trange (us) to encompass shifts
        calctrange = np.array([-calctrange, calctrange]) # convert to a +/- array, in us
        dts = util.xcorr(n0.spikes, n1.spikes, calctrange) # in us
        if autocorr:
            dts = dts[dts != 0] # remove 0s for autocorr
        if shift: # calculate dts for shift corrector
            shiftis = list(range(-nshifts, nshifts+1)) # list, so we can remove an entry
            shiftis.remove(0) # don't shift by 0, that's the original which we'll subtract from
            shifts = np.asarray(shiftis) * shift
            shiftdts = np.hstack([ dts+s for s in shifts ]) # in us
            print('shifts =', shifts / 1000)

        if not binw:
            nbins = intround(np.sqrt(len(dts))) # good heuristic
            nbins = max(20, nbins) # enforce min nbins
            nbins = min(200, nbins) # enforce max nbins
        else:
            nbins = intround(2 * trange / binw)

        dts = dts / 1000 # in ms, converts to float64 array
        t = np.linspace(start=-trange, stop=trange, num=nbins+1, endpoint=True) # ms
        binw = t[1] - t[0] # all should be equal width, ms
        n = np.histogram(dts, bins=t, density=False)[0]
        if shift: # subtract shift corrector
            shiftdts = shiftdts / 1000 # in ms, converts to float64 array
            shiftn = np.histogram(shiftdts, bins=t, density=False)[0] / (nshifts*2)
            f = pl.figure(figsize=figsize)
            a = f.add_subplot(111)
            a.bar(left=t[:-1], height=shiftn, width=binw) # omit last right edge in t
            a.set_xlim(t[0], t[-1])
            a.set_xlabel('spike interval (ms)')
            n -= shiftn
        if norm: # normalize and convert to float:
            n = n / n.max()
        elif rate: # normalize by binw and convert to float:
            n = n / binw
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(left=t[:-1], height=n, width=binw, color=c, ec=c) # omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        if norm:
            a.set_ylabel('coincidence rate (AU)')
            a.set_yticks([0, 1])
        elif rate:
            a.set_ylabel('coincidence rate (Hz)')
        else:
            a.set_ylabel('count')
        if title:
            a.set_title('spike times of n%d wrt n%d' % (n1.id, n0.id))
        wtitlestr = lastcmd() # + ', binw=%.1f ms' % binw
        gcfm().window.setWindowTitle(wtitlestr)
        f.tight_layout(pad=0.3) # crop figure to contents
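The shift correction above can be exercised end to end on synthetic spike trains. The pairwise-difference helper below is an assumed stand-in for util.xcorr, which this sketch does not call:

import numpy as np

def xcorr_dts(s0, s1, trange):
    """All pairwise s1 - s0 spike time differences within +/- trange (ms here)."""
    dts = (s1[None, :] - s0[:, None]).ravel()
    return dts[np.abs(dts) <= trange]

rng = np.random.default_rng(1)
s0 = np.sort(rng.uniform(0, 10000, 1000)) # ms
s1 = np.sort(s0 + rng.normal(5, 1, s0.shape)) # s1 lags s0 by ~5 ms

trange, shift, nshifts = 50.0, 100.0, 10 # ms
edges = np.linspace(-trange, trange, 101) # 100 bins of 1 ms
n = np.histogram(xcorr_dts(s0, s1, trange), bins=edges)[0].astype(float)
# shift corrector: mean CCH over +/- nshifts shifted copies of s1:
shifts = [i * shift for i in range(-nshifts, nshifts + 1) if i != 0]
shiftn = np.mean([np.histogram(xcorr_dts(s0, s1 + s, trange), bins=edges)[0]
                  for s in shifts], axis=0)
n -= shiftn # the ~+5 ms peak survives correction; chance coincidences cancel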
Exemple #50
0
    def specgram(self, t0=None, t1=None, f0=0.1, f1=100, p0=-60, p1=None, chanis=-1,
                 width=None, tres=None, cm='jet', colorbar=False, showstates=False, lw=4,
                 alpha=1, relative2t0=False, lim2stim=False, title=True, reclabel=True,
                 swapaxes=False, figsize=None):
        """Plot a spectrogram from t0 to t1 in sec, from f0 to f1 in Hz, and clip power values
        from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0 uses most
        superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1, take mean of
        specified chanis. width and tres are in sec. As an alternative to cm.jet (the
        default), cm.gray, cm.hsv, cm.terrain, and cm.cubehelix_r colormaps seem to bring out
        the most structure in the spectrogram. showstates controls whether to plot lines
        demarcating desynchronized and synchronized periods. relative2t0 controls whether to
        plot relative to t0, or relative to start of ADC clock. lim2stim limits the time range
        only to when a stimulus was on screen, i.e. to the outermost times of non-NULL din"""
        uns = get_ipython().user_ns
        self.get_data()
        ts = self.get_tssec()  # full set of timestamps, in sec
        if t0 == None:
            t0, t1 = ts[0], ts[-1]  # full duration
        if t1 == None:
            t1 = t0 + 10  # 10 sec window
        if lim2stim:
            t0, t1 = self.apply_lim2stim(t0, t1)
        dt = t1 - t0
        if width == None:
            width = uns['LFPSPECGRAMWIDTH']  # sec
        if tres == None:
            tres = uns['LFPSPECGRAMTRES']  # sec
        assert tres <= width
        NFFT = intround(width * self.sampfreq)
        noverlap = intround(NFFT - tres * self.sampfreq)
        t0i, t1i = ts.searchsorted((t0, t1))
        #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
        data = self.data[:, t0i:t1i]  # slice data
        if figsize == None:
            # convert from recording duration time to width in inches, 0.87 accommodates
            # padding around the specgram:
            figwidth = (dt / 1000) * 5 + 0.87
            figheight = 2.5  # inches
            figsize = figwidth, figheight
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        if iterable(chanis):
            data = data[chanis].mean(axis=0)  # take mean of data on chanis
        else:
            data = data[chanis]  # get single row of data at chanis
        #data = filter.notch(data)[0] # remove 60 Hz mains noise
        # convert data from uV to mV, returned t is midpoints of time bins in sec from
        # start of data. I think P is in mV^2?:
        P, freqs, t = mpl.mlab.specgram(data / 1e3, NFFT=NFFT, Fs=self.sampfreq,
                                        noverlap=noverlap)
        if not relative2t0:
            t += t0  # convert t to time from start of ADC clock:
        # keep only freqs between f0 and f1:
        if f0 == None:
            f0 = freqs[0]
        if f1 == None:
            f1 = freqs[-1]
        df = f1 - f0
        lo, hi = freqs.searchsorted([f0, f1])
        P, freqs = P[lo:hi], freqs[lo:hi]
        # check for and replace zero power values (ostensibly due to gaps in recording)
        # before attempting to convert to dB:
        zis = np.where(P == 0.0) # row and column indices where P has zero power
        if len(zis[0]) > 0: # at least one hit
            P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
            minnzval = P.min() # get minimum nonzero value
            P[zis] = minnzval # replace with min nonzero values
        P = 10. * np.log10(P)  # convert power to dB wrt 1 mV^2?
        # for better visualization, clip power values to within (p0, p1) dB
        if p0 != None:
            P[P < p0] = p0
        if p1 != None:
            P[P > p1] = p1
        #self.P = P

        # plot horizontal bars over time demarcating different ranges of SI values,
        # or manually defined desynched and synched periods:
        statelinepos = f0 - df * 0.015  # plot horizontal bars just below x axis
        if showstates:
            if showstates in [True, 'auto']:
                print("TODO: there's an offset plotting bug for 'auto', compare with 'manual'")
                si, t = self.si(plot=False)
                stranges, states = self.si_split(si, t)  # sec
                STATECOLOURS = uns['LFPPRBINCOLOURS']
            elif showstates == 'manual':
                stranges, states = [], []
                for state in uns['MANUALSTATES']:
                    for strange in uns['REC2STATE2TRANGES'][self.r.absname][state]:
                        stranges.append(strange)
                        states.append(state)
                stranges = np.vstack(stranges)  # 2D array
                STATECOLOURS = uns['MANUALSTATECOLOURS']
            else:
                raise ValueError('invalid value showstates=%r' % showstates)
            # clip stranges to t0, t1:
            stranges[0, 0] = max(stranges[0, 0], t0)
            stranges[-1, 1] = min(stranges[-1, 1], t1)
            if swapaxes:
                lines = a.vlines
            else:
                lines = a.hlines
            for strange, state in zip(stranges, states):
                clr = STATECOLOURS[state]
                lines(statelinepos, strange[0], strange[1], colors=clr, lw=lw, alpha=alpha,
                      clip_on=False)

        # Label far left, right, top and bottom edges of imshow image. imshow interpolates
        # between these to place the axes ticks. Time limits are
        # set from midpoints of specgram time bins
        extent = t[0], t[-1], freqs[0], freqs[-1]
        #print('specgram extent: %r' % (extent,))
        # flip P vertically for compatibility with imshow:
        im = a.imshow(P[::-1], extent=extent, cmap=cm)
        a.autoscale(enable=True, tight=True)
        a.axis('tight')
        # depending on relative2t0 above, x=0 represents either t0 or time ADC clock started:
        a.set_xlim(xmin=0, xmax=t[-1])
        a.set_ylim(ymin=freqs[0], ymax=freqs[-1])
        # turn off annoying "+2.41e3" type offset on x axis:
        formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        a.xaxis.set_major_formatter(formatter)
        a.set_xlabel("time (s)")
        a.set_ylabel("frequency (Hz)")
        titlestr = lastcmd()
        gcfm().window.setWindowTitle(titlestr)
        if title:
            a.set_title(titlestr)
        if reclabel:
            a.text(0.994, 0.95, '%s' % self.r.absname, color='w', transform=a.transAxes,
                   horizontalalignment='right', verticalalignment='top')
        f.tight_layout(pad=0.3)  # crop figure to contents
        if colorbar:
            f.colorbar(im, pad=0) # creates big whitespace to the right for some reason
        self.f = f
        return P, freqs, t
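The width and tres arguments map onto the FFT parameters as sketched below, assuming a 1 kHz LFP sample rate; a pure test tone confirms the frequency axis:

import numpy as np
from matplotlib import mlab

sampfreq = 1000 # Hz, assumed LFP sample rate
width, tres = 2.0, 0.5 # sec: FFT window length and time step between windows
NFFT = int(round(width * sampfreq)) # samples per FFT window
noverlap = int(round(NFFT - tres * sampfreq)) # overlap leaves a hop of tres

x = np.sin(2 * np.pi * 7 * np.arange(0, 10, 1 / sampfreq)) # 10 s of a 7 Hz tone
P, freqs, t = mlab.specgram(x, NFFT=NFFT, Fs=sampfreq, noverlap=noverlap)
print(freqs[P.mean(axis=1).argmax()]) # 7.0 Hz; freq resolution is 1/width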
Exemple #51
0
    def loadptc15exp(self):
        ## TODO: - fake a .e dimstim.Experiment object, to replace what used to be the
        ## .stims object for movie experiments
        '''           - self.movie = self.experiment.stims[0]
                - need to convert sweeptimeMsec to sweepSec
                   - assert len(self.experiment.stims) == 1
                   - self.movie = self.experiment.stims[0]
                   - self.movie.load() # ensure the movie's data is loaded

            if self.movie.oname == 'mseq32':
                frameis = frameis[frameis != 65535] # remove all occurrences of 65535
            elif self.movie.oname == 'mseq16':
                frameis = frameis[frameis != 16383] # remove all occurrences of 16383
        '''
        # Add .static and .dynamic params to fake dimstim experiment
        self.e = dictattr()
        self.I = dictattr()  # fake InternalParams object
        self.e.static = dictattr()  # fake StaticParams object
        self.e.dynamic = dictattr()  # fake DynamicParams object
        # maps ptc15 param names to dimstim 0.16 param types and names, wherever possible
        ## TODO: fill in params for experiment types other than Movie??
        _15to16 = {
            'EYE': ('I', 'EYE'),
            'PIXPERCM': ('I', 'PIXPERCM'),
            'REFRESHRATE': ('I', 'REFRESHRATE'),
            'SCREENDISTANCECM': ('I', 'SCREENDISTANCECM'),
            'SCREENHEIGHT': ('I', 'SCREENHEIGHT'),
            'SCREENHEIGHTCM': ('I', 'SCREENHEIGHTCM'),
            'SCREENWIDTH': ('I', 'SCREENWIDTH'),
            'SCREENWIDTHCM': ('I', 'SCREENWIDTHCM'),
            'fname': ('static', 'fname'),
            'preexpSec': ('static', 'preexpSec'),
            'postexpSec': ('static', 'postexpSec'),
            'orioff': ('static', 'orioff'),
            'regionwidthDeg': ('static', 'widthDeg'),
            'regionheightDeg': ('static', 'heightDeg'),
            'mask': ('static', 'mask'),
            'diameterDeg': ('static', 'diameterDeg'),
            'GAMMA': ('static', 'gamma'),
            'framei': ('dynamic', 'framei'),
            'ori': ('dynamic', 'ori'),
            'polarity': ('dynamic', 'invert'),
            'bgbrightness': ('dynamic', 'bgbrightness'),
            'sweeptimeMsec': ('dynamic', 'sweepSec'),
            'postsweepMsec': ('dynamic', 'postsweepSec'),
        }

        # collect any ptc15 movie attribs and add them to self.oldparams
        try:
            # can't really handle more than 1 movie, since dimstim 0.16 doesn't
            assert len(np.unique(self.oldparams.playlist)) == 1
            # bind it, movie was the only possible stim object anyway in ptc15
            self.movie = self.oldparams.playlist[0]
            # returns dict of name:val pair attribs excluding __ and methods:
            movieparams = self.oldparams[self.movie.oname].__dict__
            self.oldparams.update(movieparams)
        except AttributeError:
            # no playlist, no movies, and therefore no movie attribs to deal with
            pass

        # convert ptc15 params to dimstim 0.16
        for oldname, val in self.oldparams.items():
            if 'msec' in oldname.lower():
                val = val / 1000.  # convert to sec
            elif oldname == 'polarity':
                val = bool(val)  # convert from 0/1 to boolean
            if oldname == 'origDeg':  # split old origDeg into new separate xposDeg and yposDeg
                self.e.dynamic.xposDeg = val[0]
                self.e.dynamic.yposDeg = val[1]
            else:
                try:
                    paramtype, newname = _15to16[oldname]
                except KeyError: # oldname doesn't have a newname equivalent
                    continue
                if paramtype == 'I':
                    # bind InternalParams directly to self, not to self.e:
                    self.I[newname] = val
                else:
                    self.e[paramtype][newname] = val

        try:
            m = self.movie
        except AttributeError:
            m = None

        if m:
            # make fake dimstim experiment a ptc15 Movie object, bind all of the attribs of
            # the existing fake dimstim experiment
            old_e = self.e
            self.e = m
            for name, val in old_e.__dict__.items():
                # bind each variable in the textheader as an attrib of self
                self.e.__setattr__(name, val)
            # deal with movie filename:
            # didn't have a chance to pass this exp as the parent in the movie init,
            # so just set the attribute manually:
            m.e = self
            # if fname refers to a movie whose local name is different, rename it to match
            # the local movie name
            old2new = {'mseq16.m': 'MSEQ16', 'mseq32.m': 'MSEQ32'}
            try:
                m.fname = old2new[m.fname]
            except KeyError:
                pass  # old name not in old2new, leave it be
            self.e.static.fname = m.fname  # update fake dimstim experiment's fname too
            # extensionless fname, fname should've been defined in the textheader
            m.name = os.path.splitext(m.fname)[0]
            uns = get_ipython().user_ns
            if m.name not in uns['MOVIES']:
                # and it very well may not be, cuz the textheader inits movies with no args,
                # leaving fname==None at first, which prevents it from being added to
                # MOVIES
                uns['MOVIES'][m.name] = m  # add m to MOVIES dictattr
            # Search self.e.moviepath string (from textheader) for 'Movies' word. Everything
            # after that is the relative path to your base movies folder. Eg, if
            # self.e.moviepath = 'C:\\Desktop\\Movies\\reliability\\e\\single\\', then set
            # self.e.relpath = '\\reliability\\e\\single\\'
            spath = self.oldparams.moviepath.split('\\') # ptc15 has purely MS path separators
            matchi = spath.index('Movies')
            relpath = joinpath(spath[matchi + 1:])
            MOVIEPATH = get_ipython().user_ns['MOVIEPATH']
            path = os.path.join(MOVIEPATH, relpath)
            m.fname = os.path.join(path, m.fname)
            self.e.static.fname = m.fname  # update
        try:
            self.REFRESHTIME = intround(1 / float(self.oldparams.REFRESHRATE) * 1000000) # us
        except AttributeError:
            pass
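The _15to16 table drives a generic rename-and-rescale pass. A minimal sketch of that pass, with hypothetical old parameter names and a flat dict in place of the dictattr containers:

oldparams = {'sweeptimeMsec': 200, 'GAMMA': 1.0, 'obsolete': 42} # made-up values
_old2new = {'sweeptimeMsec': ('dynamic', 'sweepSec'), 'GAMMA': ('static', 'gamma')}

new = {'static': {}, 'dynamic': {}}
for oldname, val in oldparams.items():
    if 'msec' in oldname.lower():
        val = val / 1000 # convert ms to sec before renaming
    try:
        group, newname = _old2new[oldname]
    except KeyError:
        continue # no modern equivalent, drop it
    new[group][newname] = val

print(new) # {'static': {'gamma': 1.0}, 'dynamic': {'sweepSec': 0.2}}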
Exemple #52
0
    def psd(self, t0=None, t1=None, f0=0.2, f1=110, p0=None, p1=None, chanis=-1,
            width=None, tres=None, xscale='log', figsize=(5, 5)):
     """Plot power spectral density from t0 to t1 in sec, from f0 to f1 in Hz, and clip
     power values from p0 to p1 in dB, based on channel index chani of LFP data. chanis=0
     uses most superficial channel, chanis=-1 uses deepest channel. If len(chanis) > 1,
     take mean of specified chanis. width and tres are in sec."""
     uns = get_ipython().user_ns
     self.get_data()
     ts = self.get_tssec()  # full set of timestamps, in sec
     if t0 == None:
         t0, t1 = ts[0], ts[-1]  # full duration
     if t1 == None:
         t1 = t0 + 10  # 10 sec window
     if width == None:
         width = uns['LFPSPECGRAMWIDTH']  # sec
     if tres == None:
         tres = uns['LFPSPECGRAMTRES']  # sec
     assert tres <= width
     NFFT = intround(width * self.sampfreq)
     noverlap = intround(NFFT - tres * self.sampfreq)
     t0i, t1i = ts.searchsorted((t0, t1))
     #ts = ts[t0i:t1i] # constrained set of timestamps, in sec
     data = self.data[:, t0i:t1i]  # slice data
     f = pl.figure(figsize=figsize)
     a = f.add_subplot(111)
     if iterable(chanis):
         data = data[chanis].mean(axis=0)  # take mean of data on chanis
     else:
         data = data[chanis]  # get single row of data at chanis
     #data = filter.notch(data)[0] # remove 60 Hz mains noise
     # convert data from uV to mV. I think P is in mV^2?:
      P, freqs = mpl.mlab.psd(data / 1e3, NFFT=NFFT, Fs=self.sampfreq, noverlap=noverlap)
     # keep only freqs between f0 and f1:
     if f0 == None:
         f0 = freqs[0]
     if f1 == None:
         f1 = freqs[-1]
     lo, hi = freqs.searchsorted([f0, f1])
     P, freqs = P[lo:hi], freqs[lo:hi]
     # check for and replace zero power values (ostensibly due to gaps in recording)
     # before attempting to convert to dB:
      zis = np.where(P == 0.0) # row and column indices where P has zero power
      if len(zis[0]) > 0: # at least one hit
          P[zis] = np.finfo(np.float64).max # temporarily replace zeros with max float
          minnzval = P.min() # get minimum nonzero value
          P[zis] = minnzval # replace with min nonzero values
     P = 10. * np.log10(P)  # convert power to dB wrt 1 mV^2?
     # for better visualization, clip power values to within (p0, p1) dB
     if p0 != None:
         P[P < p0] = p0
     if p1 != None:
         P[P > p1] = p1
     #self.P = P
     a.plot(freqs, P, 'k-')
     # add SI frequency band limits:
     LFPPRLOBAND, LFPPRHIBAND = uns['LFPPRLOBAND'], uns['LFPPRHIBAND']
     a.axvline(x=LFPPRLOBAND[0], c='r', ls='--')
     a.axvline(x=LFPPRLOBAND[1], c='r', ls='--')
     a.axvline(x=LFPPRHIBAND[0], c='b', ls='--')
     a.axvline(x=LFPPRHIBAND[1], c='b', ls='--')
     a.axis('tight')
     a.set_xscale(xscale)
     a.set_xlabel("frequency (Hz)")
     a.set_ylabel("power (dB)")
     titlestr = lastcmd()
     gcfm().window.setWindowTitle(titlestr)
     a.set_title(titlestr)
      a.text(0.998, 0.99, '%s' % self.r.name, color='k', transform=a.transAxes,
             horizontalalignment='right', verticalalignment='top')
     f.tight_layout(pad=0.3)  # crop figure to contents
     self.f = f
     return P, freqs
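Both psd() and specgram() guard the dB conversion the same way: exact zeros (e.g. from gaps in the recording) are replaced with the minimum nonzero power so log10 stays finite. A self-contained sketch of that guard:

import numpy as np

def to_dB(P):
    """Convert power to dB, first replacing exact zeros with the minimum
    nonzero value so np.log10 stays finite."""
    P = np.asarray(P, dtype=np.float64).copy()
    zis = P == 0.0
    if zis.any():
        P[zis] = P[~zis].min() # assumes at least one nonzero value
    return 10 * np.log10(P)

print(to_dB([0.0, 1e-3, 1.0])) # [-30. -30.   0.]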
Exemple #53
0
    def load_din(self):
        self.din = np.fromfile(self.path, dtype=np.int64).reshape(-1, 2) # reshape to 2 cols
        # look for a .textheader:
        try:
            txthdrpath = rstrip(self.path, '.din') + '.textheader'
            f = open(txthdrpath, 'r') # universal newlines are the default in Python 3
            self.textheader = f.read()  # read it all in
            f.close()
        except IOError:
            print("WARNING: couldn't load text header associated with '%s'" %
                  self.name)
            self.textheader = ''  # set to empty

        if self.textheader != '':
            # comment out all lines starting with "from dimstim"
            self.textheader = self.textheader.replace('from dimstim', '#from dimstim')
            # execute any remaining 'import' lines first, so that any modules imported
            # aren't artefactually detected as having been added to the namespace:
            for line in self.textheader.splitlines():
                if line.startswith('import'):
                    exec(line)
            thns = {}  # textheader namespace
            # compiling to code and then executing it is supposed to be faster than directly
            # executing a string, according to
            # http://lucumr.pocoo.org/2011/2/1/exec-in-python/, but doesn't seem to make
            # a difference here:
            code = compile(self.textheader, "<string>", "exec")
            # don't exec in current namespace, load name:val pairs into thns instead:
            exec(code, None, thns)
            try:
                # dimstim up to ptc15 didn't have a version, neither did NVS display
                self.__version__ = thns['__version__']  # a string
            except KeyError:
                self.__version__ = '0.0'
            if float(self.__version__) >= 0.16: # after major refactoring of dimstim
                for name, val in thns.items():
                    # bind each variable in the textheader as an attrib of self
                    self.__setattr__(name, val)
                # this doesn't work for textheaders generated by dimstim 0.16, since
                # xorigDeg and yorigDeg were accidentally omitted from all the experiment
                # scripts and hence the textheaders too:
                '''
                self.e.xorig = deg2pix(self.e.static.xorigDeg, self.I) + self.I.SCREENWIDTH / 2
                self.e.yorig = deg2pix(self.e.static.yorigDeg, self.I) + self.I.SCREENHEIGHT / 2
                '''
                self.REFRESHTIME = intround(1 / float(self.I.REFRESHRATE) * 1000000) # us
                # prevent replication of movie frame data in memory
                if type(self.e) == Movie:
                    fname = os.path.split(self.e.static.fname)[-1] # pathless fname
                    uns = get_ipython().user_ns
                    if fname not in uns['MOVIES']:
                        # add movie experiment, indexed according to movie data file name,
                        # to prevent from ever loading its frames more than once
                        uns['MOVIES'][fname] = self.e
            else:
                self.oldparams = dictattr()
                for name, val in thns.items():
                    # bind each variable in the textheader to oldparams
                    self.oldparams[name] = val
                self.loadptc15exp()
        else:
            # use the time difference between the first two din instead
            self.REFRESHTIME = self.din[1, 0] - self.din[0, 0]

        # add an extra refresh time after last din, that's when screen actually turns off
        self.trange = (self.din[0, 0], self.din[-1, 0] + self.REFRESHTIME)
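A minimal sketch of the textheader mechanism above: compile a string of name = value lines and exec it into a scratch namespace dict, then read parameters out of it. The header contents here are made up:

textheader = "__version__ = '0.16'\npreexpSec = 1.0\norioff = 90"
thns = {} # textheader namespace
exec(compile(textheader, '<string>', 'exec'), None, thns)
print(thns['__version__'], thns['preexpSec'], thns['orioff']) # 0.16 1.0 90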
    screenheight = e0.I['SCREENHEIGHTCM'] * e0.I['DEGPERCM']
    halfscreenwidth, halfscreenheight = screenwidth/2, screenheight/2
    moviewidth, movieheight = e0.s.widthDeg, e0.s.heightDeg # deg
    halfmoviewidth, halfmovieheight = moviewidth/2, movieheight/2
    # manbar center relative to screen center:
    try:
        xorigDeg, yorigDeg = e0.s.xorigDeg, e0.s.yorigDeg
    except AttributeError:
        xorigDeg, yorigDeg = ORIGDEGS[name]
    # movie center position, wrt screen center, deg:
    xpos, ypos = e0.d.xposDeg+xorigDeg, e0.d.yposDeg+yorigDeg
    # x and y indices into frames, spanning range of movie pixels that were on-screen,
    # assumes (x,y) origin of each movie frame is at bottom left:
    mvicenter_wrt_leftscredge = halfscreenwidth + xpos
    leftscredge_wrt_leftmviedge = halfmoviewidth - mvicenter_wrt_leftscredge
    x0i = intround(leftscredge_wrt_leftmviedge / degpermoviepix)
    x1i = intround((leftscredge_wrt_leftmviedge + screenwidth) / degpermoviepix)
    mvicenter_wrt_bottomscredge = halfscreenheight + ypos
    bottomscredge_wrt_bottommviedge = halfmovieheight - mvicenter_wrt_bottomscredge
    y0i = intround(bottomscredge_wrt_bottommviedge / degpermoviepix)
    y1i = intround((bottomscredge_wrt_bottommviedge + screenheight) / degpermoviepix)
    frames = frames[:, y0i:y1i, x0i:x1i]
    print('xis: %d:%d, yis: %d:%d' % (x0i, x1i, y0i, y1i))
    print('movie shape:', frames.shape)

    # optic flow magnitudes, in deg/sec, one per frame interval:
    mot[name] = np.zeros(nframeintervals)
    con[name] = np.zeros(nframeintervals)
    dcon[name] = np.zeros(nframeintervals)
    lum[name] = np.zeros(nframeintervals)
    dlum[name] = np.zeros(nframeintervals)
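The cropping arithmetic above converts screen geometry in degrees into movie-pixel index bounds. A worked sketch of the x-axis case with made-up geometry (0.1 deg per movie pixel, a 640-pixel-wide movie, a 40 deg wide screen):

degpermoviepix = 0.1 # deg per movie pixel (assumed)
screenwidth = 40.0 # deg (assumed)
halfmoviewidth = 32.0 # deg; a 640-pixel-wide movie
xpos = 2.0 # movie center wrt screen center, deg

mvicenter_wrt_leftscredge = screenwidth / 2 + xpos
leftscredge_wrt_leftmviedge = halfmoviewidth - mvicenter_wrt_leftscredge
x0i = round(leftscredge_wrt_leftmviedge / degpermoviepix) # 100
x1i = round((leftscredge_wrt_leftmviedge + screenwidth) / degpermoviepix) # 500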
text(
    0.02,
    0.984,
    '$\mu$ = %.1f' % smean,  # synched
    horizontalalignment='left',
    verticalalignment='top',
    transform=gca().transAxes,
    color='r')
text(
    0.02,
    0.904,
    '$\mu$ = %.1f' % dmean,  # desynched
    horizontalalignment='left',
    verticalalignment='top',
    transform=gca().transAxes,
    color='b')
'''
text(0.98, 0.82, 'p < %.1g' % ceilsigfig(p, 1),
                 horizontalalignment='right', verticalalignment='top',
                 transform=gca().transAxes, color='k')
'''
# get percentage of response events with <= 1 spike/trial, separately for each state
spercentlte1 = intround(
    (peaknspikes[1] <= 1).sum() / len(peaknspikes[1]) * 100)
dpercentlte1 = intround(
    (peaknspikes[0] <= 1).sum() / len(peaknspikes[0]) * 100)
text(0.98,
     0.98,
     '%d%% $\leq$ 1' % spercentlte1,
     horizontalalignment='right',
     verticalalignment='top',
     transform=gca().transAxes,
     color='r')
text(0.98,
     0.90,
     '%d%% $\leq$ 1' % dpercentlte1,
     horizontalalignment='right',
     verticalalignment='top',
     transform=gca().transAxes,