Example #1
    def read_protocol(self):
        """
        Read the protocol waveform of the file, if present;
        this is possible only for ABF2 files. For ABF1 files the
        protocol would have to be reconstructed from the header,
        which is not implemented here.

        Returns: list of segments (one for every episode)
                 with a list of analog signals (one for every DAC).
        """
        header = self.read_header()

        if header['fFileVersionNumber'] < 2.:
            raise IOError("Protocol section is only present in ABF2 files.")

        nADC = header['sections']['ADCSection']['llNumEntries']  # n ADC chans
        nDAC = header['sections']['DACSection']['llNumEntries']  # n DAC chans
        nSam = int(header['protocol']['lNumSamplesPerEpisode'] / nADC)  # samples/episode
        nEpi = header['lActualEpisodes']
        sampling_rate = 1.e6/header['protocol']['fADCSequenceInterval'] * pq.Hz

        # Make a list of segments with analog signals with just holding levels
        # List of segments relates to number of episodes, as for recorded data
        segments = []
        for epiNum in range(nEpi):
            seg = Segment(index=epiNum)
            # One analog signal for each DAC in segment (episode)
            for DACNum in range(nDAC):
                t_start = 0 * pq.s  # TODO: Possibly check with episode array
                name = header['listDACInfo'][DACNum]['DACChNames']
                unit = header['listDACInfo'][DACNum]['DACChUnits'].\
                    replace(b'\xb5', b'u')  # \xb5 is µ
                signal = np.ones(nSam) *\
                    header['listDACInfo'][DACNum]['fDACHoldingLevel'] *\
                    pq.Quantity(1, unit)
                anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                      t_start=t_start, name=str(name),
                                      channel_index=DACNum)
                # If there are epoch infos for this DAC
                if DACNum in header['dictEpochInfoPerDAC']:
                    # Save last sample index
                    i_last = int(nSam * 15625 / 10**6)
                    # TODO guess for first holding
                    # Go over EpochInfoPerDAC and change the analog signal
                    # according to the epochs
                    epochInfo = header['dictEpochInfoPerDAC'][DACNum]
                    for epochNum, epoch in iteritems(epochInfo):
                        i_begin = i_last
                        i_end = i_last + epoch['lEpochInitDuration'] +\
                            epoch['lEpochDurationInc'] * epiNum
                        dif = i_end - i_begin
                        anaSig[i_begin:i_end] = np.ones(dif) * \
                            pq.Quantity(1, unit) * (epoch['fEpochInitLevel'] +
                                                    epoch['fEpochLevelInc'] *
                                                    epiNum)
                        i_last += epoch['lEpochInitDuration']
                seg.analogsignals.append(anaSig)
            segments.append(seg)

        return segments
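
A minimal usage sketch for the method above, assuming it is exposed by neo's AxonIO reader; the .abf path is hypothetical and would have to point to an episodic ABF2 file.

from neo.io import AxonIO

reader = AxonIO(filename='example.abf')      # hypothetical file path
protocol_segments = reader.read_protocol()   # one Segment per episode
for seg in protocol_segments:
    for sig in seg.analogsignals:            # one AnalogSignal per DAC
        print(sig.name, sig.units.dimensionality, len(sig))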
Example #2
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : whether to load spike waveforms (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5, 64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2.**32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                # spike
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if nb_samples[chan] == 0:
                    # first block for this channel: keep its start time in seconds
                    t_starts[chan] = time / globalHeader['ADFrequency']
                if n2 > 0:
                    nb_samples[chan] += n2

        ## Step 3 : allocate memory and a second loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

            # allocating mem for events
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0
                
            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
                time = dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2.**32 + \
                    dataBlockHeader['TimeStamp']
                time /= globalHeader['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.fromstring(
                            fid.read(n1 * n2 * 2), dtype='i2').reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1

                elif dataBlockHeader['Type'] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4 : create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader['Version'] == 100 or globalHeader['Version'] == 101:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
                elif globalHeader['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] *
                                    slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV'] / \
                        (.5 * (2**globalHeader['BitsPerSpikeSample']) *
                         slowChannelHeaders[chan]['Gain'] *
                         slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(signal * pq.V,
                                  sampling_rate=float(slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
                                  t_start=t_starts[chan] * pq.s,
                                  channel_index=slowChannelHeaders[chan]['Channel'],
                                  channel_name=slowChannelHeaders[chan]['Name'])
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] < 103:
                        gain = 3000. / (2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
                    elif globalHeader['Version'] >= 103 and globalHeader['Version'] < 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / \
                            (.5 * 2.**globalHeader['BitsPerSpikeSample'] * 1000.)
                    elif globalHeader['Version'] >= 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / \
                            (.5 * 2.**globalHeader['BitsPerSpikeSample'] *
                             globalHeader['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s',
                t_stop=t_stop * pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
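
A usage sketch for read_segment, assuming the method belongs to neo's PlexonIO reader; the .plx path is hypothetical.

from neo.io import PlexonIO

reader = PlexonIO(filename='example.plx')    # hypothetical file path
seg = reader.read_segment(load_spike_waveform=False)
print(seg.rec_datetime)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.eventarrays))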
Example #3
def generate_one_simple_segment(seg_name='segment 0',
                                supported_objects=[],
                                nb_analogsignal=4,
                                t_start=0. * pq.s,
                                sampling_rate=10 * pq.kHz,
                                duration=6. * pq.s,

                                nb_spiketrain=6,
                                spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],

                                event_array_types={
                                    'stim': ['a', 'b', 'c', 'd'],
                                    'enter_zone': ['one', 'two'],
                                    'color': ['black', 'yellow', 'green'],
                                },
                                event_array_size_range=[5, 20],

                                epoch_array_types={
                                    'animal state': ['Sleep', 'Freeze', 'Escape'],
                                    'light': ['dark', 'lighted'],
                                },
                                epoch_array_duration_range=[.5, 3.],
                                ):
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int(sampling_rate * duration)),
                                  sampling_rate=sampling_rate, t_start=t_start,
                                  units=pq.mV, channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range) + spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata, t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration, t_start=t_start, t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if EventArray in supported_objects:
        for name, labels in iteritems(event_array_types):
            ea_size = int(rand() * np.diff(event_array_size_range) +
                          event_array_size_range[0])
            ea = EventArray(times=rand(ea_size) * duration,
                            labels=np.array(labels, dtype='S')[
                                (rand(ea_size) * len(labels)).astype('i')])
            seg.eventarrays.append(ea)

    if EpochArray in supported_objects:
        for name, labels in iteritems(epoch_array_types):
            t = 0
            times, durations = [], []
            while t < duration:
                times.append(t)
                dur = (rand() * np.diff(epoch_array_duration_range) +
                       epoch_array_duration_range[0])
                durations.append(dur)
                t = t + dur
            epa = EpochArray(
                times=pq.Quantity(times, units=pq.s),
                durations=pq.Quantity([x[0] for x in durations], units=pq.s),
                labels=np.array(labels, dtype='S')[
                    (rand(len(times)) * len(labels)).astype('i')],
            )
            seg.epocharrays.append(epa)

    # TODO : Spike, Event, Epoch

    return seg
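
A usage sketch for the generator above: supported_objects controls which child objects get created, so an empty list yields a bare Segment. The import location assumes the standard neo class names.

from neo.core import AnalogSignal, SpikeTrain

seg = generate_one_simple_segment(seg_name='demo',
                                  supported_objects=[AnalogSignal, SpikeTrain],
                                  nb_analogsignal=2, nb_spiketrain=3)
print(seg.name, len(seg.analogsignals), len(seg.spiketrains))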
Example #4
def generate_one_simple_segment(
    seg_name='segment 0',
    supported_objects=[],
    nb_analogsignal=4,
    t_start=0. * pq.s,
    sampling_rate=10 * pq.kHz,
    duration=6. * pq.s,
    nb_spiketrain=6,
    spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
    event_types={
        'stim': ['a', 'b', 'c', 'd'],
        'enter_zone': ['one', 'two'],
        'color': ['black', 'yellow', 'green'],
    },
    event_size_range=[5, 20],
    epoch_types={
        'animal state': ['Sleep', 'Freeze', 'Escape'],
        'light': ['dark', 'lighted']
    },
    epoch_duration_range=[.5, 3.],
):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int(sampling_rate * duration)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  units=pq.mV,
                                  channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration,
                              t_start=t_start,
                              t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in iteritems(event_types):
            evt_size = rand() * np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size) * len(labels)).astype('i')]
            evt = Event(times=rand(evt_size) * duration, labels=labels)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in iteritems(epoch_types):
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand() * np.diff(epoch_duration_range)
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            epc = Epoch(
                times=pq.Quantity(times, units=pq.s),
                durations=pq.Quantity([x[0] for x in durations], units=pq.s),
                labels=labels,
            )
            seg.epochs.append(epc)

    # TODO : Spike

    seg.create_many_to_one_relationship()
    return seg
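
A short sketch of the guard this version adds: asking for child objects without Segment in supported_objects raises immediately, while including Segment works as before. Class imports assume the standard neo layout.

from neo.core import Segment, Event

try:
    generate_one_simple_segment(supported_objects=[Event])
except ValueError as err:
    print(err)   # Segment must be in supported_objects

seg = generate_one_simple_segment(supported_objects=[Segment, Event])
print(len(seg.events))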
Example #5
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade:
            return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk':
                continue
            subdir = os.path.join(self.dirname, blockname)
            if not os.path.isdir(subdir):
                continue

            seg = Segment(name=blockname)
            bl.segments.append(seg)

            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname + '_' + blockname + '.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = {}
            allspiketr = {}
            allevent = {}
            while True:
                h = hr.read_f()
                if h is None:
                    break

                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK':
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER':
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS
                    if code not in allevent:
                        allevent[code] = {}
                    if channel not in allevent[code]:
                        ea = EventArray(name=code, channel_index=channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0
                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
                    # the strobe value is packed into the eventoffset field
                    strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe) >= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                elif Types[evtype] == 'EVTYPE_SNIP':
                    if code not in allspiketr:
                        allspiketr[code] = {}
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = {}
                    if h['sortcode'] not in allspiketr[code][channel]:
                        sptr = SpikeTrain([], units='s',
                                          name=str(h['sortcode']),
                                          t_start=0. * pq.s,
                                          t_stop=0. * pq.s,  # temporary
                                          left_sweep=(h['size'] - 10.) / 2. / h['frequency'] * pq.s,
                                          sampling_rate=h['frequency'] * pq.Hz)
                        sptr.annotate(channel_index=channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size'] - 10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = {}
                    if channel not in allsig[code]:
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0
                        # for counting:
                        anaSig.lazy_shape = 0
                        allsig[code][channel] = anaSig
                    # h['size'] is in 4-byte words and includes the 40-byte header
                    allsig[code][channel].lazy_shape += \
                        (h['size'] * 4 - 40) // allsig[code][channel].lazy_dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(
                            np.zeros(anaSig.lazy_shape, dtype=anaSig.lazy_dtype) * pq.V)
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty(ea.lazy_shape) * pq.s
                        ea.labels = np.empty(ea.lazy_shape, dtype='S' + str(ea.maxlabelsize))
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros(sptr.lazy_shape, dtype='f8') * pq.s,
                                             name=sptr.name,
                                             t_start=sptr.t_start,
                                             t_stop=sptr.t_stop,
                                             left_sweep=sptr.left_sweep,
                                             sampling_rate=sptr.sampling_rate,
                                             waveforms=np.ones((sptr.lazy_shape, 1,
                                                                sptr.waveformsize),
                                                               dtype='f') * pq.mV)
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname + '_' + blockname + '.tev')):
                    tev = open(os.path.join(subdir, tankname + '_' + blockname + '.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir,
                                                tankname + '_' + blockname + '_' + signame +
                                                '_ch' + str(anaSig.channel_index) + '.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size'] * 4 - 40) / dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[a.pos:a.pos + s] = np.fromstring(a.fid.read(s * dt.itemsize),
                                                           dtype=a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                            Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop = (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring(
                            sptr.fid.read(sptr.waveformsize * 4), dtype='f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populate the segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append(anaSig)

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append(ea)

            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append(sptr)

        create_many_to_one_relationship(bl)
        return bl
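
A usage sketch for read_block, assuming the method belongs to neo's TdtIO reader; the tank directory path is hypothetical.

from neo.io import TdtIO

reader = TdtIO(dirname='path/to/my_tank')    # hypothetical tank directory
bl = reader.read_block()
for seg in bl.segments:                      # one Segment per block subdirectory
    print(seg.name, len(seg.analogsignals), len(seg.spiketrains))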
Example #6
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade:
            return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk':
                continue
            subdir = os.path.join(self.dirname, blockname)
            if not os.path.isdir(subdir):
                continue

            seg = Segment(name=blockname)
            bl.segments.append(seg)

            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname + '_' + blockname + '.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = {}
            allspiketr = {}
            allevent = {}
            while True:
                h = hr.read_f()
                if h is None:
                    break

                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK':
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER':
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS
                    if code not in allevent:
                        allevent[code] = {}
                    if channel not in allevent[code]:
                        ea = EventArray(name=code, channel_index=channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0
                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
                    # the strobe value is packed into the eventoffset field
                    strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe) >= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                elif Types[evtype] == 'EVTYPE_SNIP':
                    if code not in allspiketr:
                        allspiketr[code] = {}
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = {}
                    if h['sortcode'] not in allspiketr[code][channel]:
                        sptr = SpikeTrain([], units='s',
                                          name=str(h['sortcode']),
                                          t_start=0. * pq.s,
                                          t_stop=0. * pq.s,  # temporary
                                          left_sweep=(h['size'] - 10.) / 2. / h['frequency'] * pq.s,
                                          sampling_rate=h['frequency'] * pq.Hz)
                        sptr.annotate(channel_index=channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size'] - 10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = {}
                    if channel not in allsig[code]:
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0
                        # for counting:
                        anaSig.lazy_shape = 0
                        allsig[code][channel] = anaSig
                    # h['size'] is in 4-byte words and includes the 40-byte header
                    allsig[code][channel].lazy_shape += \
                        (h['size'] * 4 - 40) // allsig[code][channel].lazy_dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(
                            np.zeros(anaSig.lazy_shape, dtype=anaSig.lazy_dtype) * pq.V)
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty(ea.lazy_shape) * pq.s
                        ea.labels = np.empty(ea.lazy_shape, dtype='S' + str(ea.maxlabelsize))
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros(sptr.lazy_shape, dtype='f8') * pq.s,
                                             name=sptr.name,
                                             t_start=sptr.t_start,
                                             t_stop=sptr.t_stop,
                                             left_sweep=sptr.left_sweep,
                                             sampling_rate=sptr.sampling_rate,
                                             waveforms=np.ones((sptr.lazy_shape, 1,
                                                                sptr.waveformsize),
                                                               dtype='f') * pq.mV)
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname + '_' + blockname + '.tev')):
                    tev = open(os.path.join(subdir, tankname + '_' + blockname + '.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir,
                                                tankname + '_' + blockname + '_' + signame +
                                                '_ch' + str(anaSig.channel_index) + '.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size'] * 4 - 40) / dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[a.pos:a.pos + s] = np.fromstring(a.fid.read(s * dt.itemsize),
                                                           dtype=a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                            Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop = (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring(
                            sptr.fid.read(sptr.waveformsize * 4), dtype='f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populate the segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append(anaSig)

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append(ea)

            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append(sptr)

        bl.create_many_to_one_relationship()
        return bl
Example #7
def generate_one_simple_segment(seg_name='segment 0',
                                supported_objects=[],
                                nb_analogsignal=4,
                                t_start=0.*pq.s,
                                sampling_rate=10*pq.kHz,
                                duration=6.*pq.s,

                                nb_spiketrain=6,
                                spikerate_range=[.5*pq.Hz, 12*pq.Hz],

                                event_types={'stim': ['a', 'b',
                                                      'c', 'd'],
                                             'enter_zone': ['one',
                                                            'two'],
                                             'color': ['black',
                                                       'yellow',
                                                       'green'],
                                             },
                                event_size_range=[5, 20],

                                epoch_types={'animal state': ['Sleep',
                                                              'Freeze',
                                                              'Escape'],
                                             'light': ['dark',
                                                       'lighted']
                                             },
                                epoch_duration_range=[.5, 3.],

                                ):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int(sampling_rate * duration)),
                                       sampling_rate=sampling_rate, t_start=t_start,
                                       units=pq.mV, channel_index=a,
                                       name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand()*np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate*duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes*duration,
                              t_start=t_start, t_stop=t_start+duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in iteritems(event_types):
            evt_size = rand()*np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size)*len(labels)).astype('i')]
            evt = Event(times=rand(evt_size)*duration, labels=labels)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in iteritems(epoch_types):
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand()*np.diff(epoch_duration_range)
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t+dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times))*len(labels)).astype('i')]
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity([x[0] for x in durations],
                                              units=pq.s),
                        labels=labels,
                        )
            seg.epochs.append(epc)

    # TODO : Spike

    seg.create_many_to_one_relationship()
    return seg
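
The Epoch loop above tiles random-length intervals end to end until the segment duration is covered. The same logic in isolation, with plain floats standing in for quantities (the two range values match the defaults above):

import numpy as np
from numpy.random import rand

duration = 6.
epoch_duration_range = [.5, 3.]
t, times, durations = 0., [], []
while t < duration:
    times.append(t)
    dur = float(rand() * np.diff(epoch_duration_range)[0] + epoch_duration_range[0])
    durations.append(dur)
    t += dur
# each epoch starts where the previous one ends
print(list(zip(times, durations)))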
Example #8
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders), dtype="i")
        sample_positions = np.zeros(len(slowChannelHeaders), dtype="i")
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
            pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype="f")
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader["Channel"]
                n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
                time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
                time /= globalHeader["ADFrequency"]

                if n2 < 0:
                    break
                if dataBlockHeader["Type"] == 1:
                    # spike
                    unit = dataBlockHeader["Unit"]
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = (
                            np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                        )
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader["Type"] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan] += 1

                elif dataBlockHeader["Type"] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size

        ## Step 4: create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
            else:
                times = evarrays[chan]
            ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] == 102:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
                elif globalHeader["Version"] >= 103:
                    gain = globalHeader["SlowMaxMagnitudeMV"] / (
                        0.5
                        * (2 ** globalHeader["BitsPerSpikeSample"])
                        * slowChannelHeaders[chan]["Gain"]
                        * slowChannelHeaders[chan]["PreampGain"]
                    )
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]["Channel"],
                channel_name=slowChannelHeaders[chan]["Name"],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader["Version"] < 103:
                        gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                    elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                        )
                    elif globalHeader["Version"] > 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                        )
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
            sptr.annotate(channel_index=chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
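
Both passes above reconstruct a 40-bit timestamp from the data block header: the upper byte extends the 32-bit counter, and dividing by the global ADFrequency converts ticks to seconds. A worked sketch with made-up values:

upper_byte = 1         # UpperByteOf5ByteTimestamp (hypothetical)
lower_word = 123456    # TimeStamp, the low 32 bits (hypothetical)
ad_frequency = 40000.  # globalHeader['ADFrequency'] (hypothetical)

ticks = upper_byte * 2. ** 32 + lower_word   # full 40-bit tick count
seconds = ticks / ad_frequency               # 4295090752 ticks -> ~107377.3 s
print(ticks, seconds)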