Code Example #1
File: lfp.py Project: neuropy/neuropy
 def load(self):
     with open(self.fname, 'rb') as f:
         d = np.load(f)
         stdnames = ['chanpos', 'chans', 'data', 't0', 't1', 'tres', 'uVperAD']
         optnames = ['chan0', 'probename']
         # bind standard array names in .lfp.zip file to self:
         for key in stdnames:
             assert key in d
             val = d[key]
             # pull some singleton vals out of their arrays:
             if key in ['t0', 't1', 'tres']: # should all be in us (microseconds)
                 val = int(val)
             elif key == 'uVperAD':
                 val = float(val)
             self.__setattr__(key, val)
         # bind optional array names in .lfp.zip file to self:
         for key in optnames:
             if key in d:
                 val = d[key]
                 # pull some singleton vals out of their arrays:
                 if key == 'chan0':
                     val = int(val)
                 elif key == 'probename':
                     val = val.tobytes().decode() # bytes to py3 str; tostring() is deprecated
                 self.__setattr__(key, val)
     try:
         self.chan0
     except AttributeError: # try to figure out the base of channel numbering
         nchans, nprobechans = len(self.chans), len(self.chanpos)
         if nchans < nprobechans:
             # it's probably from a .srf recording with only a subset of chans selected for
             # separate analog LFP recording
             self.chan0 = 0 # base of channel numbering, always 0-based for .srf recordings
             print("Found %d LFP channels, assuming 0-based channel numbering from .srf "
                   "recording" % nchans)
         elif nchans == nprobechans: # all probe channels have LFP
             self.chan0 = min(self.chans) # base of channel numbering
         else: # nchans > nprobechans
             raise ValueError("don't know how to handle nchans=%d > nprobechans=%d" %
                              (nchans, nprobechans))
     assert self.chan0 in [0, 1] # either 0- or 1-based
     # make sure chans are in vertical spatial order:
     ypos = self.chanpos[self.chans - self.chan0][:, 1]
     if not issorted(ypos):
         print("LFP chans in %s aren't sorted by depth, sorting them now" % self.fname)
         sortis = ypos.argsort()
         self.chans = self.chans[sortis]
         self.data = self.data[sortis]
         newypos = self.chanpos[self.chans - self.chan0][:, 1]
         assert issorted(newypos)
     self.sampfreq = intround(1e6 / self.tres) # in Hz
     assert self.sampfreq == 1000 # should be 1000 Hz
     self.data = self.data * self.uVperAD # convert to float uV
     self.UV2UM = 0.05 # transforms LFP voltage in uV to position in um
Code Example #2
File: surf.py Project: nhazar/spyke
 def _verifyParsing(self):
     """Make sure timestamps of all records are in causal (increasing)
     order. If not, sort them"""
     for attrname, attr in self.__dict__.items():
         if attrname.endswith('records') and iterable(attr):
             ts = get_record_timestamps(attr)
             if not issorted(ts):
                 print('sorting %s' % attrname)
                 if isinstance(attr, list):
                     attr = list(np.asarray(attr)[ts.argsort()])
                 else:
                     attr = attr[ts.argsort()]
                 ts = get_record_timestamps(attr)
                 assert issorted(ts)
                 self.__dict__[attrname] = attr  # update
Code Example #3
 def load(self):
     with open(self.fname, 'rb') as f:
         d = np.load(f)
         assert sorted(d.keys()) == ['chanpos', 'chans', 'data', 't0', 't1', 'tres',
                                     'uVperAD']
         # bind arrays in .lfp.zip file to self:
         for key, val in d.items(): # iteritems() is Python 2 only
             # pull some singleton vals out of their arrays:
             if key in ['t0', 't1', 'tres']: # should all be in us (microseconds)
                 val = int(val)
             elif key == 'uVperAD':
                 val = float(val)
             self.__setattr__(key, val)
     # make sure chans are in vertical spatial order:
     # index column 1 (the y coord) of all selected chans, not row 1:
     assert issorted(self.chanpos[self.chans][:, 1])
     self.sampfreq = intround(1e6 / self.tres) # in Hz
     assert self.sampfreq == 1000 # should be 1000 Hz
     self.data = self.data * self.uVperAD # convert to float uV
     self.UV2UM = 0.05 # transforms LFP voltage in uV to position in um
Code Example #4
    def detect(self):
        """Search for spikes. Divides large searches into more manageable
        blocks of (slightly overlapping) multichannel waveform data, and
        then combines the results"""
        self.calc_chans()
        sort = self.sort
        self.mpmethod = MPMETHOD
        spikewidth = (sort.tw[1] - sort.tw[0]) / 1000000 # sec
        # num timepoints to allocate per spike:
        self.maxnt = int(sort.stream.sampfreq * spikewidth)

        print('Detection trange: %r' % (self.trange,))

        t0 = time.time()
        # convert from numpy.int64 to normal int for inline C:
        self.dti = int(self.dt // sort.stream.tres)
        self.thresh = self.get_thresh() # abs, in AD units, one per chan in self.chans
        self.ppthresh = np.int16(np.round(self.thresh * self.ppthreshmult)) # abs, in AD units
        AD2uV = sort.converter.AD2uV
        info('thresh calcs took %.3f sec' % (time.time()-t0))
        info('thresh   = %s' % AD2uV(self.thresh))
        info('ppthresh = %s' % AD2uV(self.ppthresh))

        bs = self.blocksize
        bx = self.blockexcess
        blockranges = self.get_blockranges(bs, bx)
        nblocks = len(blockranges)

        self.nchans = len(self.chans) # number of enabled chans
        # total num spikes found across all chans so far by this Detector,
        # reset at start of every search:
        self.nspikes = 0

        # want an nchan*2 array of [chani, x/ycoord]
        xycoords = [ self.enabledSiteLoc[chan] for chan in self.chans ] # (x, y) in chan order
        xcoords = np.asarray([ xycoord[0] for xycoord in xycoords ])
        ycoords = np.asarray([ xycoord[1] for xycoord in xycoords ])
        self.siteloc = np.asarray([xcoords, ycoords]).T # index into with chani to get (x, y)

        # prevent out of memory errors due to copying of large stream.wavedata array
        # when spawning multiple processes
        if type(self.sort.stream) == core.TSFStream:
            self.mpmethod = 'singleprocess'

        ncores = mp.cpu_count()
        t0 = time.time()

        # mp.Pool is slightly faster than my own DetectionProcess
        if not DEBUG and self.mpmethod == 'pool': # use a pool of processes
            nprocesses = min(ncores, nblocks)
            # send pickled copy of self to each process
            pool = mp.Pool(nprocesses, initializer, (self,))
            results = pool.map(callsearchblock, blockranges, chunksize=1)
            pool.close()
            # results is a list of (spikes, wavedata) tuples, and needs to be unzipped
            spikes, wavedata = zip(*results)
        elif not DEBUG and self.mpmethod == 'detectionprocess':
            nprocesses = min(ncores, nblocks)
            dps = []
            q = mp.Queue()
            spikes = [None] * nblocks
            wavedata = [None] * nblocks
            for dpi in range(nprocesses):
                dp = DetectionProcess()
                # not exactly sure why, but deepcopy is crucial to prevent artefactual spikes!
                dp.detector = deepcopy(self)
                dp.detector.sort.stream.open()
                dp.blockis = range(dpi, nblocks, nprocesses)
                dp.blockranges = blockranges[dp.blockis]
                dp.q = q
                dp.start()
                dps.append(dp)
            for i in range(nblocks):
                # all DetectionProcesses share the same queue q, so read results
                # from it directly, retrying on EINTR:
                #blocki, blockspikes, blockwavedata = q.get() # defaults to block=True
                blocki, blockspikes, blockwavedata = _eintr_retry_call(q.get)
                #print('got block %d results' % blocki)
                spikes[blocki] = blockspikes
                wavedata[blocki] = blockwavedata
            for dp in dps:
                dp.join()
                #_eintr_retry_call(dp.join) # eintr isn't raised anymore it seems
        else: # use a single process, useful for debugging or for .tsf files
            spikes = []
            wavedata = []
            for blockrange in blockranges:
                blockspikes, blockwavedata = self.searchblock(blockrange)
                spikes.append(blockspikes)
                wavedata.append(blockwavedata)

        spikes = concatenate_destroy(spikes)
        wavedata = concatenate_destroy(wavedata) # along sid axis, other dims are identical
        self.nspikes = len(spikes)
        assert len(wavedata) == self.nspikes
        # default -1 indicates no nid is set as of yet, reserve 0 for actual ids:
        spikes['nid'] = -1
        info('\nfound %d spikes in total' % self.nspikes)
        info('inside .detect() took %.3f sec' % (time.time()-t0))
        if not issorted(spikes['t']):
            raise RuntimeError("spikes aren't sorted for some reason")
        spikes['id'] = np.arange(self.nspikes) # assign ids (should be in temporal order)
        self.datetime = datetime.datetime.now()
        return spikes, wavedata