Code example #1
    def __init__(self, fs, trackfname, kind='highpass', filtmeth=None,
                 sampfreq=None, shcorrect=None):
        # to prevent pickling problems, don't bind fs
        self.fname = trackfname
        self.kind = kind
        streams = []
        self.streams = streams # bind right away so setting sampfreq and shcorrect will work
        # collect appropriate streams from fs
        if kind == 'highpass':
            for f in fs:
                streams.append(f.hpstream)
        elif kind == 'lowpass':
            for f in fs:
                streams.append(f.lpstream)
        else: raise ValueError('Unknown stream kind %r' % kind)

        datetimes = [stream.datetime for stream in streams]
        if not (np.diff(datetimes) >= timedelta(0)).all():
            raise RuntimeError("files aren't in temporal order")

        """Generate tranges, an array of all the contiguous data ranges in all the
        streams in self. These are relative to the start of acquisition (t=0) in the first
        stream. Also generate streamtranges, an array of each stream's t0 and t1"""
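        # e.g. if stream 1 starts 10 s after stream 0 and spans a local trange of
        # (0, 5000000) us, its entry in tranges becomes (10000000, 15000000) us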
        tranges = []
        streamtranges = []
        for stream in streams:
            td = stream.datetime - datetimes[0] # time delta between stream i and stream 0
            for trange in stream.tranges:
                t0 = td2usec(td + timedelta(microseconds=int(trange[0])))
                t1 = td2usec(td + timedelta(microseconds=int(trange[1])))
                tranges.append([t0, t1])
            streamt0 = td2usec(td + timedelta(microseconds=int(stream.t0)))
            streamt1 = td2usec(td + timedelta(microseconds=int(stream.t1)))
            streamtranges.append([streamt0, streamt1])
        self.tranges = np.int64(tranges)
        self.streamtranges = np.int64(streamtranges)
        self.t0 = self.streamtranges[0, 0]
        self.t1 = self.streamtranges[-1, 1]

        try: self.layout = streams[0].layout # assume they're identical
        except AttributeError: pass
        try:
            gains = np.asarray([ stream.converter.intgain for stream in streams ])
        except AttributeError:
            gains = np.asarray([ stream.converter.AD2uVx for stream in streams ])
        if max(gains) != min(gains):
            import pdb; pdb.set_trace() # investigate which are the deviant files
            raise NotImplementedError("not all files have the same gain")
            # TODO: find recording with biggest intgain, call that value maxintgain. For each
            # recording, scale its AD values by its intgain/maxintgain when returning a slice
            # from its stream. Note that this ratio should always be a factor of 2, so all you
            # have to do is bitshift, I think. Then, have a single converter for the
            # MultiStream whose intgain value is set to maxintgain
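            # Hypothetical sketch of that TODO (assumed names, not original code):
            #   maxintgain = gains.max()
            #   nshifts = np.int64(np.log2(maxintgain / gains)) # assumed power-of-2 ratios
            #   data >>= nshifts[streami] # right-shift stream i's AD values when slicing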
        self.converter = streams[0].converter # they're identical
        self.fnames = [f.fname for f in fs]
        self.rawsampfreq = streams[0].rawsampfreq # assume they're identical
        self.rawtres = streams[0].rawtres # float us, assume they're identical
        contiguous = np.asarray([stream.contiguous for stream in streams])
        if not contiguous.all() and kind == 'highpass':
            # don't bother reporting again for lowpass
            fnames = [ s.fname for s, c in zip(streams, contiguous) if not c ]
            print("some files are non contiguous:")
            for fname in fnames:
                print(fname)
        probe = streams[0].probe
        if not np.all([type(probe) == type(stream.probe) for stream in streams]):
            raise RuntimeError("some files have different probe types")
        self.probe = probe # they're identical

        # set sampfreq, shcorrect and filtmeth for all streams
        streamtype = type(streams[0])
        if streamtype == SurfStream:
            if kind == 'highpass':
                self.sampfreq = sampfreq or self.rawsampfreq * DEFHPRESAMPLEX
                self.shcorrect = shcorrect or DEFHPSRFSHCORRECT
            else: # kind == 'lowpass'
                self.sampfreq = sampfreq or self.rawsampfreq # don't resample by default
                self.shcorrect = shcorrect or False # don't s+h correct by default
            self.filtmeth = None
        elif streamtype == NSXStream:
            if kind == 'highpass':
                self.sampfreq = sampfreq or self.rawsampfreq * DEFHPRESAMPLEX
                self.shcorrect = shcorrect or DEFHPNSXSHCORRECT
                self.filtmeth = filtmeth or DEFNSXFILTMETH
            else: # kind == 'lowpass'
                return
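
Both examples call td2usec(), which isn't shown in this excerpt. A minimal
sketch of a compatible helper, assuming it simply converts a datetime.timedelta
to integer microseconds:

    from datetime import timedelta

    def td2usec(td):
        """Convert a timedelta to integer microseconds (hypothetical sketch)."""
        return int(round(td.total_seconds() * 1e6))

    # e.g. td2usec(timedelta(seconds=1.5)) -> 1500000
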
Code example #2
    def load(self):
        """Load TrackNeurons by concatenating spikes from neurons from all recordings"""
        tr = self.tr
        rids = sorted(tr.r) # all recording ids in tr
        recs = [ tr.r[rid] for rid in rids ]
        # copy some attribs from first sort, should be the same for all of them:
        sort = recs[0].sort
        datetime0 = sort.datetime # start of acquisition (t=0) of first recording
        self.datetime = datetime0
        self.pttype = sort.pttype
        self.chanpos = sort.chanpos
        self.samplerate = sort.samplerate
        self.tres = sort.tres
        # get the union of all nids in recs:
        nids = tr.get_allnids()
        spikes = {}
        for nid in nids:
            spikes[nid] = [] # init each value to empty list
        alln = {} # dict of first Neurons encountered across recordings
        for rec in recs:
            # store time delta between start of track and start of rec:
            rec.td = td2usec(rec.sort.datetime - datetime0) # (us)
            rec.tdsec = rec.td / 1e6
            rec.tdmin = rec.tdsec / 60
            rec.tdhour = rec.tdmin / 60
            # for each neuron in this recording append appropriately offset spikes
            # array to entry in spikes dict:
            for n in rec.alln.values():
                spikes[n.id].append(n.spikes + rec.td)
                # for each nid, store the first neuron encountered when iterating over
                # recordings:
                if n.id not in alln:
                    alln[n.id] = n

        nspikes = 0 # add them up
        for nid in nids:
            spikes[nid] = np.hstack(spikes[nid]) # concatenate each nid's spike arrays
            assert (np.sort(spikes[nid]) == spikes[nid]).all() # should come out sorted
            # replace Neuron with TrackNeuron:
            n = alln[nid]
            tn = TrackNeuron(self)
            # point TrackNeuron attribs to relevant Neuron attribs, probably not copies:
            tn.id = nid
            tn.descr = n.descr
            tn.pos = n.pos
            # ptcs version 3 added sigma:
            try: tn.sigma = n.sigma
            except AttributeError: pass
            tn.nchans = n.nchans
            tn.chans = n.chans
            tn.maxchan = n.maxchan
            tn.nt = n.nt
            tn.wavedata = n.wavedata
            tn.wavestd = n.wavestd
            # assign spikes and calc static attribs:
            tn.spikes = spikes[nid]
            tn.nspikes = len(tn.spikes)
            tn.trange = tn.spikes[0], tn.spikes[-1]
            tn.dt = tn.trange[1] - tn.trange[0]
            tn.dtsec = tn.dt / 1e6
            tn.dtmin = tn.dtsec / 60
            tn.dthour = tn.dtmin / 60
            
            alln[nid] = tn # replace
            nspikes += tn.nspikes

        self.nspikes = nspikes
        self.alln = alln # save it
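
Once load() has run, each entry in alln is a TrackNeuron whose spikes span the
whole track. A hypothetical usage sketch (assuming a loaded TrackNeurons
instance named tneurons) that computes each neuron's mean firing rate:

    tneurons.load()
    for nid in sorted(tneurons.alln):
        tn = tneurons.alln[nid]
        rate = tn.nspikes / tn.dtsec # mean rate in Hz over the neuron's full trange
        print('n%d: %d spikes over %.2f h, %.2f Hz' % (nid, tn.nspikes, tn.dthour, rate))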