def create_event_array_trig_or_analog(selection, name, labelmode=None):
    if lazy:
        times = []
        labels = np.array([], dtype='S')
    else:
        times = data_trigger['samplepos'][selection].astype(float) / sr
        if labelmode == 'digital_port':
            labels = data_trigger['digital_port'][selection].astype('S2')
        elif labelmode is None:
            labels = None  # was `label = None`, which left `labels` undefined
    ev = EventArray(times=times * pq.s, labels=labels, name=name)
    if lazy:
        ev.lazy_shape = np.sum(selection)  # was np.sum(is_digital): count the selected events
    seg.eventarrays.append(ev)
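# Illustration (not from the original reader): the helper above follows the
# lazy-loading convention used throughout these IO classes -- when `lazy` is
# set, an empty container is returned and the on-disk item count is recorded
# on a `lazy_shape` attribute. A minimal, self-contained sketch of the same
# pattern; `LazyEvents` is a hypothetical stand-in for neo's EventArray.
import numpy as np

class LazyEvents(object):
    def __init__(self, times, labels):
        self.times = times
        self.labels = labels

def make_events(samplepos, sr, lazy=False):
    if lazy:
        ev = LazyEvents(times=np.array([]), labels=np.array([], dtype='S'))
        ev.lazy_shape = samplepos.size  # remember how many events exist on disk
    else:
        ev = LazyEvents(times=samplepos.astype(float) / sr,
                        labels=samplepos.astype('S8'))
    return ev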
def read_segment(self, lazy=False, cascade=True):
    ## Read header file
    f = open(self.filename + '.ent', 'rU')

    # version
    version = f.readline()
    if version[:2] != 'V2' and version[:2] != 'V3':
        raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' % version[:2])

    # info
    info1 = f.readline()[:-1]
    info2 = f.readline()[:-1]

    # the datetime is strangely spread over two lines
    # line 1
    l = f.readline()
    r1 = re.findall('(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
    r2 = re.findall('(\d+):(\d+):(\d+)', l)
    r3 = re.findall('(\d+)-(\d+)-(\d+)', l)
    YY, MM, DD, hh, mm, ss = (None, ) * 6
    if len(r1) != 0:
        DD, MM, YY, hh, mm, ss = r1[0]
    elif len(r2) != 0:
        hh, mm, ss = r2[0]
    elif len(r3) != 0:
        DD, MM, YY = r3[0]

    # line 2
    l = f.readline()
    r1 = re.findall('(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
    r2 = re.findall('(\d+):(\d+):(\d+)', l)
    r3 = re.findall('(\d+)-(\d+)-(\d+)', l)
    if len(r1) != 0:
        DD, MM, YY, hh, mm, ss = r1[0]
    elif len(r2) != 0:
        hh, mm, ss = r2[0]
    elif len(r3) != 0:
        DD, MM, YY = r3[0]
    try:
        fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                         int(hh), int(mm), int(ss))
    except Exception:
        fulldatetime = None

    seg = Segment(
        file_origin=os.path.basename(self.filename),
        elan_version=version,
        info1=info1,
        info2=info2,
        rec_datetime=fulldatetime,
    )
    if not cascade:
        return seg

    l = f.readline()
    l = f.readline()
    l = f.readline()

    # sampling interval -> rate
    l = f.readline()
    sampling_rate = 1. / float(l) * pq.Hz

    # number of channels
    l = f.readline()
    nbchannel = int(l) - 2

    # channel labels
    labels = []
    for c in range(nbchannel + 2):
        labels.append(f.readline()[:-1])
    # channel types
    types = []
    for c in range(nbchannel + 2):
        types.append(f.readline()[:-1])
    # channel units
    units = []
    for c in range(nbchannel + 2):
        units.append(f.readline()[:-1])

    # ranges
    min_physic = []
    for c in range(nbchannel + 2):
        min_physic.append(float(f.readline()))
    max_physic = []
    for c in range(nbchannel + 2):
        max_physic.append(float(f.readline()))
    min_logic = []
    for c in range(nbchannel + 2):
        min_logic.append(float(f.readline()))
    max_logic = []
    for c in range(nbchannel + 2):
        max_logic.append(float(f.readline()))

    # filter info
    info_filter = []
    for c in range(nbchannel + 2):
        info_filter.append(f.readline()[:-1])
    f.close()

    # raw data
    n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
    data = np.fromfile(self.filename, dtype='i' + str(n))
    data = data.byteswap().reshape(
        (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
    for c in range(nbchannel):
        if lazy:
            sig = []
        else:
            sig = (data[:, c] - min_logic[c]) / (max_logic[c] - min_logic[c]) * \
                  (max_physic[c] - min_physic[c]) + min_physic[c]
        try:
            unit = pq.Quantity(1, units[c])
        except Exception:
            unit = pq.Quantity(1, '')
        anaSig = AnalogSignal(sig * unit,
                              sampling_rate=sampling_rate,
                              t_start=0. * pq.s,
                              name=labels[c],
                              channel_index=c)
        if lazy:
            anaSig.lazy_shape = data.shape[0]
        anaSig.annotate(channel_name=labels[c])
        seg.analogsignals.append(anaSig)

    # triggers
    f = open(self.filename + '.pos')
    times = []
    labels = []
    reject_codes = []
    for l in f.readlines():
        r = re.findall(' *(\d+) *(\d+) *(\d+) *', l)
        times.append(float(r[0][0]) / sampling_rate.magnitude)
        labels.append(str(r[0][1]))
        reject_codes.append(str(r[0][2]))
    if lazy:
        times = [] * pq.s  # was pq.S (siemens), a typo for seconds
        labels = np.array([], dtype='S')
        reject_codes = []
    else:
        times = np.array(times) * pq.s
        labels = np.array(labels)
        reject_codes = np.array(reject_codes)
    ea = EventArray(times=times, labels=labels, reject_codes=reject_codes)
    if lazy:
        ea.lazy_shape = len(times)
    seg.eventarrays.append(ea)
    f.close()

    seg.create_many_to_one_relationship()
    return seg
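# Illustration (assumption, not from the source): this read_segment appears to
# belong to neo's ElanIO class (neo 0.3-era API). A typical call would look
# like the sketch below; the filename is hypothetical.
#
#     from neo.io import ElanIO
#     reader = ElanIO(filename='mydata.eeg')  # expects .eeg, .eeg.ent, .eeg.pos
#     seg = reader.read_segment(lazy=False, cascade=True)
#     for anasig in seg.analogsignals:
#         print(anasig.name, anasig.sampling_rate)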
def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms=False):
    # basic header
    dt = [
        ('header_id', 'S8'),
        ('ver_major', 'uint8'),
        ('ver_minor', 'uint8'),
        ('additionnal_flag', 'uint16'),  # flags, currently basically unused
        ('header_size', 'uint32'),  # i.e. index of the first data packet
        ('packet_size', 'uint32'),  # number of bytes per packet, i.e. per sample
        ('sampling_rate', 'uint32'),  # time resolution in Hz of the time stamps
        ('waveform_sampling_rate', 'uint32'),  # sampling frequency of waveforms in Hz
        ('window_datetime', 'S16'),
        ('application', 'S32'),
        ('comments', 'S256'),
        ('num_ext_header', 'uint32'),  # number of extended headers
    ]
    nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
    version = '{}.{}'.format(h['ver_major'], h['ver_minor'])
    assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', \
        'Unsupported version {}'.format(version)
    seg.annotate(blackrock_version=version)
    seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
    sr = float(h['sampling_rate'])
    wsr = float(h['waveform_sampling_rate'])

    if not cascade:
        return

    # extended headers
    # these consist of N blocks of 8 code bytes + 24 data bytes;
    # the data bytes depend on the code and need to be converted case by case
    raw_ext_header = np.memmap(filename_nev, offset=np.dtype(dt).itemsize,
                               dtype=[('code', 'S8'), ('data', 'S24')],
                               shape=h['num_ext_header'])
    ext_header = {}
    for code, dt_ext in ext_nev_header_codes.items():
        sel = raw_ext_header['code'] == code
        ext_header[code] = raw_ext_header[sel].view(dt_ext)

    # channel labels
    neuelbl_header = ext_header['NEUEVLBL']
    channel_labels = dict(zip(neuelbl_header['channel_id'],
                              neuelbl_header['channel_label']))
    # TODO ext_header['DIGLABEL']: is there only one label, because there is no id in that case?
    # TODO ECOMMENT + CCOMMENT for annotations
    # TODO NEUEVFLT for annotations

    # read data packets and markers
    dt0 = [
        ('samplepos', 'uint32'),
        ('id', 'uint16'),
        ('value', 'S{}'.format(h['packet_size'] - 6)),
    ]
    data = np.memmap(filename_nev, offset=h['header_size'], dtype=dt0)
    all_ids = np.unique(data['id'])

    t_start = 0 * pq.s
    t_stop = data['samplepos'][-1] / sr * pq.s

    # read events (digital + analog + comments)
    def create_event_array_trig_or_analog(selection, name, labelmode=None):
        if lazy:
            times = []
            labels = np.array([], dtype='S')
        else:
            times = data_trigger['samplepos'][selection].astype(float) / sr
            if labelmode == 'digital_port':
                labels = data_trigger['digital_port'][selection].astype('S2')
            elif labelmode is None:
                labels = None  # was `label = None`, which left `labels` undefined
        ev = EventArray(times=times * pq.s, labels=labels, name=name)
        if lazy:
            ev.lazy_shape = np.sum(selection)  # was np.sum(is_digital)
        seg.eventarrays.append(ev)

    mask = (data['id'] == 0)
    dt_trig = [
        ('samplepos', 'uint32'),
        ('id', 'uint16'),
        ('reason', 'uint8'),
        ('reserved0', 'uint8'),
        ('digital_port', 'uint16'),
        ('reserved1', 'S{}'.format(h['packet_size'] - 10)),
    ]
    data_trigger = data.view(dt_trig)[mask]

    # digital triggers (packet id 0)
    is_digital = (data_trigger['reason'] & 1) > 0
    create_event_array_trig_or_analog(is_digital, 'Digital trigger',
                                      labelmode='digital_port')

    # analog triggers (packet id 0)
    if version in ['2.1', '2.2']:
        for i in range(5):
            is_analog = (data_trigger['reason'] & (2 ** (i + 1))) > 0
            create_event_array_trig_or_analog(is_analog,
                                              'Analog trigger {}'.format(i),
                                              labelmode=None)

    # comments
    mask = (data['id'] == 0xFFF)
    dt_comments = [
        ('samplepos', 'uint32'),
        ('id', 'uint16'),
        ('charset', 'uint8'),
        ('reserved0', 'uint8'),
        ('color', 'uint32'),
        ('comment', 'S{}'.format(h['packet_size'] - 12)),
    ]
    data_comments = data.view(dt_comments)[mask]
    if data_comments.size > 0:
        if lazy:
            times = []
            labels = []
        else:
            times = data_comments['samplepos'].astype(float) / sr
            labels = data_comments['comment'].astype('S')
        ev = EventArray(times=times * pq.s, labels=labels, name='Comments')
        if lazy:
            ev.lazy_shape = data_comments.size  # was np.sum(is_digital), a copy-paste slip
        seg.eventarrays.append(ev)

    # read spike channels
    channel_ids = all_ids[(all_ids > 0) & (all_ids <= 2048)]

    # get the dtype of the waveforms (this is needlessly complicated)
    if nev_header['additionnal_flag'] & 0x1:
        dtype_waveforms = dict((k, 'int16') for k in channel_ids)
    else:
        # there is one code per electrode giving the appropriate dtype
        neuewav_header = ext_header['NEUEVWAV']
        dtype_waveform = dict(zip(neuewav_header['channel_id'],
                                  neuewav_header['num_bytes_per_waveform']))
        dtypes_conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
        dtype_waveforms = dict((k, dtypes_conv[v])
                               for k, v in dtype_waveform.items())

    dt2 = [
        ('samplepos', 'uint32'),
        ('id', 'uint16'),
        ('cluster', 'uint8'),
        ('reserved0', 'uint8'),
        ('waveform', 'uint8', (h['packet_size'] - 8, )),
    ]
    data_spike = data.view(dt2)

    for channel_id in channel_ids:
        data_spike_chan = data_spike[data['id'] == channel_id]
        cluster_ids = np.unique(data_spike_chan['cluster'])
        for cluster_id in cluster_ids:
            if cluster_id == 0:
                name = 'unclassified'
            elif cluster_id == 255:
                name = 'noise'
            else:
                name = 'Cluster {}'.format(cluster_id)
            name = 'Channel {} '.format(channel_id) + name

            data_spike_chan_clus = data_spike_chan[
                data_spike_chan['cluster'] == cluster_id]
            n_spike = data_spike_chan_clus.size
            waveforms, w_sampling_rate, left_sweep = None, None, None
            if lazy:
                times = []
            else:
                times = data_spike_chan_clus['samplepos'].astype(float) / sr
                if load_waveforms:
                    dtype_waveform = dtype_waveforms[channel_id]
                    waveform_size = (h['packet_size'] - 8) // np.dtype(dtype_waveform).itemsize
                    waveforms = data_spike_chan_clus['waveform'].flatten().view(dtype_waveform)
                    waveforms = waveforms.reshape(n_spike, 1, waveform_size)
                    waveforms = waveforms * pq.uV
                    w_sampling_rate = wsr * pq.Hz
                    left_sweep = waveform_size // 2 / sr * pq.s
            st = SpikeTrain(times=times * pq.s, name=name,
                            t_start=t_start, t_stop=t_stop,
                            waveforms=waveforms,
                            sampling_rate=w_sampling_rate,
                            left_sweep=left_sweep)
            st.annotate(channel_index=int(channel_id))
            if lazy:
                st.lazy_shape = n_spike
            seg.spiketrains.append(st)
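# Illustration (not from the original reader): the trigger classification in
# read_nev relies on single-bit masks in the packet's `reason` byte -- bit 0
# flags a digital event, bits 1..5 the analog triggers. The same bit
# arithmetic on made-up values:
import numpy as np

reason = np.array([0b000001, 0b000010, 0b000101], dtype='uint8')
is_digital = (reason & 1) > 0                  # -> [ True False  True]
for i in range(5):
    is_analog = (reason & (2 ** (i + 1))) > 0  # analog trigger number i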
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """Read the file and return a Segment (spikes, signals, events)."""
    fid = open(self.filename, "rb")
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

    # metadata
    seg = Segment()
    seg.rec_datetime = datetime.datetime(
        globalHeader["Year"],
        globalHeader["Month"],
        globalHeader["Day"],
        globalHeader["Hour"],
        globalHeader["Minute"],
        globalHeader["Second"],
    )
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(plexon_version=globalHeader["Version"])
    if not cascade:
        return seg

    ## Step 1 : read headers
    # dsp channel headers = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader["NumDSPChannels"]):
        # channel is 1-based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
        channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
        dspChannelHeaders[channelHeader["Channel"]] = channelHeader
        maxunit = max(channelHeader["NUnits"], maxunit)
        maxchan = max(channelHeader["Channel"], maxchan)

    # event channel headers
    eventHeaders = {}
    for _ in range(globalHeader["NumEventChannels"]):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader["Channel"]] = eventHeader

    # slow channel headers = signals
    slowChannelHeaders = {}
    for _ in range(globalHeader["NumSlowChannels"]):
        slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

    ## Step 2 : a first loop for counting sizes
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders), dtype="i")  # integer write cursors
    t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

    # spike times and waveforms
    nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
    wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

    # eventarrays
    nb_events = {}
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0

    start = fid.tell()
    while fid.tell() != -1:
        # read block header
        dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
        if dataBlockHeader is None:
            break
        chan = dataBlockHeader["Channel"]
        unit = dataBlockHeader["Unit"]
        n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
        time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

        if dataBlockHeader["Type"] == 1:
            # spike
            nb_spikes[chan, unit] += 1
            wf_sizes[chan, unit, :] = [n1, n2]
            fid.seek(n1 * n2 * 2, 1)
        elif dataBlockHeader["Type"] == 4:
            # event
            nb_events[chan] += 1
        elif dataBlockHeader["Type"] == 5:
            # continuous signal
            fid.seek(n2 * 2, 1)
            if n2 > 0:
                if nb_samples[chan] == 0:
                    t_starts[chan] = time  # the first chunk gives the channel t_start
                nb_samples[chan] += n2

    ## Step 3 : allocate memory and a second loop for reading (if not lazy)
    if not lazy:
        # allocate memory for signals
        sigarrays = {}
        for chan, h in iteritems(slowChannelHeaders):
            sigarrays[chan] = np.zeros(nb_samples[chan])

        # allocate memory for SpikeTrains
        stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
        swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
        for (chan, unit), _ in np.ndenumerate(nb_spikes):
            stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
            if load_spike_waveform:
                n1, n2 = wf_sizes[chan, unit, :]
                swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
        pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

        # allocate memory for events
        eventpositions = {}
        evarrays = {}
        for chan, nb in iteritems(nb_events):
            evarrays[chan] = np.zeros(nb, dtype="f")
            eventpositions[chan] = 0

        fid.seek(start)
        while fid.tell() != -1:
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
            time /= globalHeader["ADFrequency"]
            if n2 < 0:
                break

            if dataBlockHeader["Type"] == 1:
                # spike
                unit = dataBlockHeader["Unit"]
                pos = pos_spikes[chan, unit]
                stimearrays[chan, unit][pos] = time
                if load_spike_waveform and n1 * n2 != 0:
                    swfarrays[chan, unit][pos, :, :] = (
                        np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                    )
                else:
                    fid.seek(n1 * n2 * 2, 1)
                pos_spikes[chan, unit] += 1
            elif dataBlockHeader["Type"] == 4:
                # event
                pos = eventpositions[chan]
                evarrays[chan][pos] = time
                eventpositions[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # signal
                data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                sigarrays[chan][sample_positions[chan]: sample_positions[chan] + data.size] = data
                sample_positions[chan] += data.size

    ## Step 4 : create neo objects
    for chan, h in iteritems(eventHeaders):
        if lazy:
            times = []
        else:
            times = evarrays[chan]
        ea = EventArray(times * pq.s,
                        channel_name=eventHeaders[chan]["Name"],
                        channel_index=chan)
        if lazy:
            ea.lazy_shape = nb_events[chan]
        seg.eventarrays.append(ea)

    for chan, h in iteritems(slowChannelHeaders):
        if lazy:
            signal = []
        else:
            if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
            elif globalHeader["Version"] == 102:
                gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"]
                                 * slowChannelHeaders[chan]["PreampGain"])
            elif globalHeader["Version"] >= 103:
                gain = globalHeader["SlowMaxMagnitudeMV"] / (
                    0.5 * (2 ** globalHeader["BitsPerSpikeSample"])
                    * slowChannelHeaders[chan]["Gain"]
                    * slowChannelHeaders[chan]["PreampGain"])
            signal = sigarrays[chan] * gain
        anasig = AnalogSignal(
            signal * pq.V,
            sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
            t_start=t_starts[chan] * pq.s,
            channel_index=slowChannelHeaders[chan]["Channel"],
            channel_name=slowChannelHeaders[chan]["Name"],
        )
        if lazy:
            anasig.lazy_shape = nb_samples[chan]
        seg.analogsignals.append(anasig)

    for (chan, unit), value in np.ndenumerate(nb_spikes):
        if nb_spikes[chan, unit] == 0:
            continue
        if lazy:
            times = []
            waveforms = None
            t_stop = 0
        else:
            times = stimearrays[chan, unit]
            t_stop = times.max()
            if load_spike_waveform:
                if globalHeader["Version"] < 103:
                    gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                    gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                        0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0)
                elif globalHeader["Version"] > 105:
                    gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                        0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"])
                        * globalHeader["SpikePreAmpGain"])
                waveforms = swfarrays[chan, unit] * gain * pq.V
            else:
                waveforms = None
        sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
        sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
        sptr.annotate(channel_index=chan)
        if lazy:
            sptr.lazy_shape = nb_spikes[chan, unit]
        seg.spiketrains.append(sptr)

    seg.create_many_to_one_relationship()
    return seg
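# Illustration (not from the original reader): read_segment above is built on
# a two-pass pattern -- a first scan over the data blocks only counts items
# per (channel, unit) so arrays can be pre-allocated, and a second scan fills
# them through per-key write cursors. The same pattern in miniature, with a
# made-up `blocks` list standing in for the file:
import numpy as np

blocks = [(1, 0, 0.1), (1, 0, 0.2), (2, 1, 0.15)]  # (channel, unit, time)

counts = np.zeros((3, 2), dtype='i')               # pass 1: count
for chan, unit, _ in blocks:
    counts[chan, unit] += 1

times = dict((idx, np.zeros(n)) for idx, n in np.ndenumerate(counts) if n > 0)
pos = np.zeros_like(counts)                        # pass 2: fill via cursors
for chan, unit, t in blocks:
    times[chan, unit][pos[chan, unit]] = t
    pos[chan, unit] += 1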
def readOneChannelEventOrSpike(self, fid, channel_num, header, lazy=True):
    # returns a list of SpikeTrains or EventArrays
    channelHeader = header.channelHeaders[channel_num]
    if channelHeader.firstblock < 0:
        return
    if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
        return

    ## Step 1 : type of blocks
    if channelHeader.kind in [2, 3, 4]:
        # Event data
        fmt = [('tick', 'i4')]
    elif channelHeader.kind in [5]:
        # Marker data
        fmt = [('tick', 'i4'), ('marker', 'i4')]
    elif channelHeader.kind in [6]:
        # AdcMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('adc', 'S%d' % channelHeader.n_extra)]
    elif channelHeader.kind in [7]:
        # RealMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('real', 'S%d' % channelHeader.n_extra)]
    elif channelHeader.kind in [8]:
        # TextMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('label', 'S%d' % channelHeader.n_extra)]
    dt = np.dtype(fmt)

    ## Step 2 : a first pass for allocating memory
    fid.seek(channelHeader.firstblock)
    totalitems = 0
    for _ in range(channelHeader.blocks):
        blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
        totalitems += blockHeader.items
        if blockHeader.succ_block > 0:
            fid.seek(blockHeader.succ_block)

    if lazy:
        if channelHeader.kind in [2, 3, 4, 5, 8]:
            ea = EventArray()
            ea.annotate(channel_index=channel_num)
            ea.lazy_shape = totalitems
            return [ea]
        elif channelHeader.kind in [6, 7]:
            # correct value for t_stop to be filled in later
            sptr = SpikeTrain([] * pq.s, t_stop=1e99)
            sptr.annotate(channel_index=channel_num, ced_unit=0)
            sptr.lazy_shape = totalitems
            return [sptr]
    else:
        alltrigs = np.zeros(totalitems, dtype=dt)

        ## Step 3 : read
        fid.seek(channelHeader.firstblock)
        pos = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            # read all events in the block
            trigs = np.fromstring(fid.read(blockHeader.items * dt.itemsize), dtype=dt)
            alltrigs[pos:pos + trigs.size] = trigs
            pos += trigs.size
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)

        ## Step 4 : convert to standard neo classes (EventArray or SpikeTrain)
        alltimes = alltrigs['tick'].astype('f') * header.us_per_time * header.dtime_base * pq.s

        if channelHeader.kind in [2, 3, 4, 5, 8]:
            # events
            ea = EventArray()
            ea.annotate(channel_index=channel_num)
            ea.times = alltimes
            if channelHeader.kind >= 5:
                # Spike2 markers are the closest match to neo's notion of labels
                ea.labels = alltrigs['marker'].astype('S32')
            if channelHeader.kind == 8:
                ea.annotate(extra_labels=alltrigs['label'])
            return [ea]
        elif channelHeader.kind in [6, 7]:
            # spiketrains
            # waveforms
            if channelHeader.kind == 6:
                waveforms = np.fromstring(alltrigs['adc'].tostring(), dtype='i2')
                waveforms = waveforms.astype('f4') * channelHeader.scale / 6553.6 + channelHeader.offset
            elif channelHeader.kind == 7:
                waveforms = np.fromstring(alltrigs['real'].tostring(), dtype='f4')

            if header.system_id >= 6 and channelHeader.interleave > 1:
                waveforms = waveforms.reshape((alltimes.size, -1, channelHeader.interleave))
                waveforms = waveforms.swapaxes(1, 2)
            else:
                waveforms = waveforms.reshape((alltimes.size, 1, -1))

            if header.system_id in [1, 2, 3, 4, 5]:
                sample_interval = (channelHeader.divide * header.us_per_time
                                   * header.time_per_adc) * 1e-6
            else:
                sample_interval = (channelHeader.l_chan_dvd * header.us_per_time
                                   * header.dtime_base)

            if channelHeader.unit in unit_convert:
                unit = pq.Quantity(1, unit_convert[channelHeader.unit])
            else:
                try:
                    unit = pq.Quantity(1, channelHeader.unit)
                except Exception:
                    unit = pq.Quantity(1, '')

            if len(alltimes) > 0:
                # could a better value be obtained from an associated AnalogSignal?
                t_stop = alltimes.max()
            else:
                t_stop = 0.0

            if not self.ced_units:
                sptr = SpikeTrain(alltimes,
                                  waveforms=waveforms * unit,
                                  sampling_rate=(1. / sample_interval) * pq.Hz,
                                  t_stop=t_stop)
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                return [sptr]

            sptrs = []
            for i in set(alltrigs['marker'] & 255):
                sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                  waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                  sampling_rate=(1. / sample_interval) * pq.Hz,
                                  t_stop=t_stop)
                sptr.annotate(channel_index=channel_num, ced_unit=i)
                sptrs.append(sptr)
            return sptrs
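# Illustration (not from the original reader): each Spike2 channel kind maps
# to a packed record layout, expressed above as a numpy structured dtype so a
# whole block can be read in one call. Reading one toy "Marker" block from an
# in-memory buffer:
import io
import numpy as np

dt = np.dtype([('tick', 'i4'), ('marker', 'i4')])  # kind 5: Marker data
buf = io.BytesIO(np.array([(10, 1), (20, 2)], dtype=dt).tobytes())
trigs = np.frombuffer(buf.read(2 * dt.itemsize), dtype=dt)
times = trigs['tick'].astype('f') * 1e-6           # ticks -> seconds, toy time base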
def read_segment(self, cascade=True, lazy=False):
    """
    Arguments:
    """
    f = struct_file(self.filename, 'rb')

    # name
    f.seek(64, 0)
    surname = f.read(22)
    while surname[-1] == ' ':
        if len(surname) == 0:
            break
        surname = surname[:-1]
    firstname = f.read(20)
    while firstname[-1] == ' ':
        if len(firstname) == 0:
            break
        firstname = firstname[:-1]

    # date
    f.seek(128, 0)
    day, month, year, hour, minute, sec = f.read_f('bbbbbb')
    rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute, sec)

    f.seek(138, 0)
    Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f('IHHHH')

    # header version
    f.seek(175, 0)
    header_version, = f.read_f('b')
    assert header_version == 4

    seg = Segment(
        name=firstname + ' ' + surname,
        file_origin=os.path.basename(self.filename),
    )
    seg.annotate(surname=surname)
    seg.annotate(firstname=firstname)
    seg.annotate(rec_datetime=rec_datetime)

    if not cascade:
        return seg

    # areas
    f.seek(176, 0)
    zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA',
                  'IMPED_B', 'IMPED_E', 'MONTAGE', 'COMPRESS', 'AVERAGE',
                  'HISTORY', 'DVIDEO', 'EVENT A', 'EVENT B', 'TRIGGER']
    zones = {}
    for zname in zone_names:
        zname2, pos, length = f.read_f('8sII')
        zones[zname] = zname2, pos, length

    # read raw data
    if not lazy:
        f.seek(Data_Start_Offset, 0)
        rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
        rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

    # read code info
    zname2, pos, length = zones['ORDER']
    f.seek(pos, 0)
    code = np.fromfile(f, dtype='u2', count=Num_Chan)

    units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1,
             100: pq.percent, 101: pq.dimensionless, 102: pq.dimensionless}

    for c in range(Num_Chan):
        zname2, pos, length = zones['LABCOD']
        f.seek(pos + code[c] * 128 + 2, 0)

        label = f.read(6).strip("\x00")
        ground = f.read(6).strip("\x00")
        logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f('iiiii')
        k, = f.read_f('h')
        if k in units.keys():
            unit = units[k]
        else:
            unit = pq.uV

        f.seek(8, 1)
        sampling_rate, = f.read_f('H') * pq.Hz
        sampling_rate *= Rate_Min

        if lazy:
            signal = [] * unit
        else:
            factor = float(physical_max - physical_min) / float(logical_max - logical_min + 1)
            signal = (rawdata[:, c].astype('f') - logical_ground) * factor * unit

        anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                              name=label, channel_index=c)
        if lazy:
            anaSig.lazy_shape = None
        anaSig.annotate(ground=ground)
        seg.analogsignals.append(anaSig)

    sampling_rate = np.mean([anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

    # read triggers and notes
    for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
        zname2, pos, length = zones[zname]
        f.seek(pos, 0)
        triggers = np.fromstring(f.read(length),
                                 dtype=[('pos', 'u4'), ('label', label_dtype)])
        ea = EventArray(name=zname[0] + zname[1:].lower())
        if not lazy:
            keep = ((triggers['pos'] >= triggers['pos'][0])
                    & (triggers['pos'] < rawdata.shape[0])
                    & (triggers['pos'] != 0))
            triggers = triggers[keep]
            ea.labels = triggers['label'].astype('S')
            ea.times = (triggers['pos'] / sampling_rate).rescale('s')
        else:
            ea.lazy_shape = triggers.size
        seg.eventarrays.append(ea)

    # read Event A and Event B
    # not so well tested
    for zname in ['EVENT A', 'EVENT B']:
        zname2, pos, length = zones[zname]
        f.seek(pos, 0)
        epochs = np.fromstring(f.read(length),
                               dtype=[('label', 'u4'), ('start', 'u4'), ('stop', 'u4')])
        ep = EpochArray(name=zname[0] + zname[1:].lower())
        if not lazy:
            keep = ((epochs['start'] > 0)
                    & (epochs['start'] < rawdata.shape[0])
                    & (epochs['stop'] < rawdata.shape[0]))
            epochs = epochs[keep]
            ep.labels = epochs['label'].astype('S')
            ep.times = (epochs['start'] / sampling_rate).rescale('s')
            ep.durations = ((epochs['stop'] - epochs['start']) / sampling_rate).rescale('s')
        else:
            ep.lazy_shape = epochs.size  # was triggers.size, a copy-paste slip
        seg.epocharrays.append(ep)

    seg.create_many_to_one_relationship()
    return seg
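# Illustration (not from the original reader): the analog conversion above is
# the usual linear logical-to-physical mapping -- subtract the logical ground,
# then scale by the ratio of the physical range to the logical range. Worked
# through with made-up values:
logical_min, logical_max, logical_ground = 0, 65535, 32768
physical_min, physical_max = -3200.0, 3200.0       # e.g. in uV

factor = (physical_max - physical_min) / (logical_max - logical_min + 1)
value = (40000 - logical_ground) * factor          # ~706.25 uV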
def read_segment(self, lazy=False, cascade=True):
    ## Read header file (vhdr)
    header = readBrainSoup(self.filename)

    assert header['Common Infos']['DataFormat'] == 'BINARY', NotImplementedError
    assert header['Common Infos']['DataOrientation'] == 'MULTIPLEXED', NotImplementedError
    nb_channel = int(header['Common Infos']['NumberOfChannels'])
    sampling_rate = 1.e6 / float(header['Common Infos']['SamplingInterval']) * pq.Hz

    fmt = header['Binary Infos']['BinaryFormat']
    fmts = {'INT_16': np.int16, 'IEEE_FLOAT_32': np.float32}
    assert fmt in fmts, NotImplementedError
    dt = fmts[fmt]

    seg = Segment(file_origin=os.path.basename(self.filename))
    if not cascade:
        return seg

    # read binary
    if not lazy:
        binary_file = os.path.splitext(self.filename)[0] + '.eeg'
        sigs = np.memmap(binary_file, dt, 'r').astype('f')
        n = int(sigs.size / nb_channel)
        sigs = sigs[:n * nb_channel]
        sigs = sigs.reshape(n, nb_channel)

    for c in range(nb_channel):
        name, ref, res, units = header['Channel Infos']['Ch%d' % (c + 1,)].split(',')
        units = pq.Quantity(1, units.replace('µ', 'u'))
        if lazy:
            signal = [] * units
        else:
            signal = sigs[:, c] * units
        anasig = AnalogSignal(signal=signal,
                              channel_index=c,
                              name=name,
                              sampling_rate=sampling_rate)
        if lazy:
            anasig.lazy_shape = -1
        seg.analogsignals.append(anasig)

    # read markers
    marker_file = os.path.splitext(self.filename)[0] + '.vmrk'
    all_info = readBrainSoup(marker_file)['Marker Infos']
    all_types = []
    times = []
    labels = []
    for i in range(len(all_info)):
        type_, label, pos, size, channel = all_info['Mk%d' % (i + 1,)].split(',')[:5]
        all_types.append(type_)
        times.append(float(pos) / sampling_rate.magnitude)
        labels.append(label)
    all_types = np.array(all_types)
    times = np.array(times) * pq.s
    labels = np.array(labels, dtype='S')
    for type_ in np.unique(all_types):
        ind = type_ == all_types
        if lazy:
            ea = EventArray(name=str(type_))
            ea.lazy_shape = -1
        else:
            ea = EventArray(times=times[ind],
                            labels=labels[ind],
                            name=str(type_))
        seg.eventarrays.append(ea)

    seg.create_many_to_one_relationship()
    return seg
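# Illustration (not from the original reader): with MULTIPLEXED orientation
# the samples are interleaved channel by channel on disk, so the reader above
# memmaps the flat array and reshapes it to (n_samples, n_channels). The
# reshape step in isolation:
import numpy as np

nb_channel = 4
sigs = np.arange(12, dtype='f')        # pretend memmap: s0c0 s0c1 s0c2 s0c3 s1c0 ...
n = int(sigs.size / nb_channel)
sigs = sigs[:n * nb_channel].reshape(n, nb_channel)
channel0 = sigs[:, 0]                  # all samples of channel 0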
def read_block(self, lazy=False, cascade=True):
    bl = Block()
    tankname = os.path.basename(self.dirname)
    bl.file_origin = tankname
    if not cascade:
        return bl

    for blockname in os.listdir(self.dirname):
        if blockname == 'TempBlk':
            continue
        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir):
            continue

        seg = Segment(name=blockname)
        bl.segments.append(seg)

        global_t_start = None

        # Step 1 : first loop for counting - tsq file
        tsq = open(os.path.join(subdir, tankname + '_' + blockname + '.tsq'), 'rb')
        hr = HeaderReader(tsq, TsqDescription)
        allsig = {}
        allspiketr = {}
        allevent = {}
        while 1:
            h = hr.read_f()
            if h is None:
                break
            channel, code, evtype = h['channel'], h['code'], h['evtype']

            if Types[evtype] == 'EVTYPE_UNKNOWN':
                pass

            elif Types[evtype] == 'EVTYPE_MARK':
                if global_t_start is None:
                    global_t_start = h['timestamp']

            elif Types[evtype] == 'EVTYPE_SCALER':
                # TODO
                pass

            elif Types[evtype] == 'EVTYPE_STRON' or Types[evtype] == 'EVTYPE_STROFF':
                # events
                if code not in allevent:
                    allevent[code] = {}
                if channel not in allevent[code]:
                    ea = EventArray(name=code, channel_index=channel)
                    # for counting:
                    ea.lazy_shape = 0
                    ea.maxlabelsize = 0
                    allevent[code][channel] = ea
                allevent[code][channel].lazy_shape += 1
                # the strobe value is masked into the eventoffset field
                strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                strobe = str(strobe)
                if len(strobe) >= allevent[code][channel].maxlabelsize:
                    allevent[code][channel].maxlabelsize = len(strobe)

            elif Types[evtype] == 'EVTYPE_SNIP':
                if code not in allspiketr:
                    allspiketr[code] = {}
                if channel not in allspiketr[code]:
                    allspiketr[code][channel] = {}
                if h['sortcode'] not in allspiketr[code][channel]:
                    sptr = SpikeTrain(
                        [], units='s',
                        name=str(h['sortcode']),
                        t_start=0. * pq.s,
                        t_stop=0. * pq.s,  # temporary
                        left_sweep=(h['size'] - 10.) / 2. / h['frequency'] * pq.s,
                        sampling_rate=h['frequency'] * pq.Hz,
                    )
                    sptr.annotate(channel_index=channel)
                    # for counting:
                    sptr.lazy_shape = 0
                    sptr.pos = 0
                    sptr.waveformsize = h['size'] - 10
                    allspiketr[code][channel][h['sortcode']] = sptr
                allspiketr[code][channel][h['sortcode']].lazy_shape += 1

            elif Types[evtype] == 'EVTYPE_STREAM':
                if code not in allsig:
                    allsig[code] = {}
                if channel not in allsig[code]:
                    anaSig = AnalogSignal(
                        [] * pq.V,
                        name=code,
                        sampling_rate=h['frequency'] * pq.Hz,
                        t_start=(h['timestamp'] - global_t_start) * pq.s,
                        channel_index=channel)
                    anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                    anaSig.pos = 0
                    # for counting:
                    anaSig.lazy_shape = 0
                    allsig[code][channel] = anaSig
                allsig[code][channel].lazy_shape += (h['size'] * 4 - 40) // anaSig.dtype.itemsize

        if not lazy:
            # Step 2 : allocate memory
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    v[channel] = anaSig.duplicate_with_new_array(
                        np.zeros((anaSig.lazy_shape), dtype=anaSig.lazy_dtype) * pq.V)
                    v[channel].pos = 0

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    ea.times = np.empty((ea.lazy_shape)) * pq.s
                    ea.labels = np.empty((ea.lazy_shape), dtype='S' + str(ea.maxlabelsize))
                    ea.pos = 0

            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        new = SpikeTrain(
                            np.zeros((sptr.lazy_shape), dtype='f8') * pq.s,
                            name=sptr.name,
                            t_start=sptr.t_start,
                            t_stop=sptr.t_stop,
                            left_sweep=sptr.left_sweep,
                            sampling_rate=sptr.sampling_rate,
                            waveforms=np.ones((sptr.lazy_shape, 1, sptr.waveformsize),
                                              dtype='f') * pq.mV,
                        )
                        new.annotations.update(sptr.annotations)
                        new.pos = 0
                        new.waveformsize = sptr.waveformsize
                        allsorted[sortcode] = new

            # Step 3 : search for sev (individual data files) or tev (common data file)
            # sev is for version > 70
            if os.path.exists(os.path.join(subdir, tankname + '_' + blockname + '.tev')):
                tev = open(os.path.join(subdir, tankname + '_' + blockname + '.tev'), 'rb')
            else:
                tev = None
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    if PY3K:
                        signame = anaSig.name.decode('ascii')
                    else:
                        signame = anaSig.name
                    filename = os.path.join(
                        subdir,
                        tankname + '_' + blockname + '_' + signame
                        + '_ch' + str(anaSig.channel_index) + '.sev')
                    if os.path.exists(filename):
                        anaSig.fid = open(filename, 'rb')
                    else:
                        anaSig.fid = tev
            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        sptr.fid = tev

            # Step 4 : second loop for copying chunks of data
            tsq.seek(0)
            while 1:
                h = hr.read_f()
                if h is None:
                    break
                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_STREAM':
                    a = allsig[code][channel]
                    dt = a.dtype
                    s = int((h['size'] * 4 - 40) / dt.itemsize)
                    a.fid.seek(h['eventoffset'])
                    a[a.pos:a.pos + s] = np.fromstring(a.fid.read(s * dt.itemsize), dtype=a.dtype)
                    a.pos += s

                elif Types[evtype] == 'EVTYPE_STRON' or Types[evtype] == 'EVTYPE_STROFF':
                    ea = allevent[code][channel]
                    ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                    strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                    ea.labels[ea.pos] = str(strobe)
                    ea.pos += 1

                elif Types[evtype] == 'EVTYPE_SNIP':
                    sptr = allspiketr[code][channel][h['sortcode']]
                    sptr.t_stop = (h['timestamp'] - global_t_start) * pq.s
                    sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                    sptr.waveforms[sptr.pos, 0, :] = np.fromstring(
                        sptr.fid.read(sptr.waveformsize * 4), dtype='f4') * pq.V
                    sptr.pos += 1

        # Step 5 : populate the segment
        for code, v in iteritems(allsig):
            for channel, anaSig in iteritems(v):
                seg.analogsignals.append(anaSig)
        for code, v in iteritems(allevent):
            for channel, ea in iteritems(v):
                seg.eventarrays.append(ea)
        for code, v in iteritems(allspiketr):
            for channel, allsorted in iteritems(v):
                for sortcode, sptr in iteritems(allsorted):
                    seg.spiketrains.append(sptr)

    bl.create_many_to_one_relationship()
    return bl
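# Illustration (not from the original reader): the strobe value of a
# STRON/STROFF event is stored in the 8-byte integer `eventoffset` field but
# is really a float64 bit pattern, hence the pack/unpack round trip above.
# The same trick on its own:
import struct

eventoffset = 4621819117588971520      # int64 bit pattern as stored in the tsq record
strobe, = struct.unpack('d', struct.pack('q', eventoffset))
# strobe == 10.0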
def read_block(self, lazy=False, cascade=True):
    header = self.read_header()
    version = header['fFileVersionNumber']

    bl = Block()
    bl.file_origin = os.path.basename(self.filename)
    bl.annotate(abf_version=version)

    # date and time
    if version < 2.:
        YY = 1900
        MM = 1
        DD = 1
        hh = int(header['lFileStartTime'] / 3600.)
        mm = int((header['lFileStartTime'] - hh * 3600) / 60)
        ss = header['lFileStartTime'] - hh * 3600 - mm * 60
        ms = int(np.mod(ss, 1) * 1e6)
        ss = int(ss)
    elif version >= 2.:
        YY = int(header['uFileStartDate'] / 10000)
        MM = int((header['uFileStartDate'] - YY * 10000) / 100)
        DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
        hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
        mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
        ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
        ms = int(np.mod(ss, 1) * 1e6)
        ss = int(ss)
    bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

    if not cascade:
        return bl

    # file format
    if header['nDataFormat'] == 0:
        dt = np.dtype('i2')
    elif header['nDataFormat'] == 1:
        dt = np.dtype('f4')

    if version < 2.:
        nbchannel = header['nADCNumChannels']
        headOffset = header['lDataSectionPtr'] * BLOCKSIZE + header['nNumPointsIgnored'] * dt.itemsize
        totalsize = header['lActualAcqLength']
    elif version >= 2.:
        nbchannel = header['sections']['ADCSection']['llNumEntries']
        headOffset = header['sections']['DataSection']['uBlockIndex'] * BLOCKSIZE
        totalsize = header['sections']['DataSection']['llNumEntries']

    data = np.memmap(self.filename, dt, 'r', shape=(totalsize,), offset=headOffset)

    # 3 possible modes
    if version < 2.:
        mode = header['nOperationMode']
    elif version >= 2.:
        mode = header['protocol']['nOperationMode']

    if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
        # event-driven variable-length mode (mode 1)
        # event-driven fixed-length mode (mode 2 or 5)
        # gap-free mode (mode 3) can hold several episodes (strange but possible)

        # read sweep positions
        if version < 2.:
            nbepisod = header['lSynchArraySize']
            offsetEpisod = header['lSynchArrayPtr'] * BLOCKSIZE
        elif version >= 2.:
            nbepisod = header['sections']['SynchArraySection']['llNumEntries']
            offsetEpisod = header['sections']['SynchArraySection']['uBlockIndex'] * BLOCKSIZE
        if nbepisod > 0:
            episodArray = np.memmap(self.filename,
                                    [('offset', 'i4'), ('len', 'i4')], 'r',
                                    shape=(nbepisod),
                                    offset=offsetEpisod)
        else:
            episodArray = np.empty((1), [('offset', 'i4'), ('len', 'i4')])
            episodArray[0]['len'] = data.size
            episodArray[0]['offset'] = 0

        # sampling_rate
        if version < 2.:
            sampling_rate = 1. / (header['fADCSampleInterval'] * nbchannel * 1.e-6) * pq.Hz
        elif version >= 2.:
            sampling_rate = 1.e6 / header['protocol']['fADCSequenceInterval'] * pq.Hz

        # construct the block: one sweep = one segment
        pos = 0
        for j in range(episodArray.size):
            seg = Segment(index=j)

            length = episodArray[j]['len']
            if version < 2.:
                fSynchTimeUnit = header['fSynchTimeUnit']
            elif version >= 2.:
                fSynchTimeUnit = header['protocol']['fSynchTimeUnit']
            if (fSynchTimeUnit != 0) and (mode == 1):
                length /= fSynchTimeUnit
            subdata = data[pos:pos + length]
            pos += length
            subdata = subdata.reshape((subdata.size // nbchannel, nbchannel)).astype('f')
            if dt == np.dtype('i2'):
                if version < 2.:
                    reformat_integer_V1(subdata, nbchannel, header)
                elif version >= 2.:
                    reformat_integer_V2(subdata, nbchannel, header)

            for i in range(nbchannel):
                if version < 2.:
                    name = header['sADCChannelName'][i].replace('\x00', '')
                    unit = header['sADCUnits'][i].replace('\xb5', 'u').replace('\x00', '')  # \xb5 is µ
                    num = header['nADCPtoLChannelMap'][i]
                elif version >= 2.:
                    name = header['listADCInfo'][i]['ADCChNames'].replace('\x00', '')
                    unit = header['listADCInfo'][i]['ADCChUnits'].replace('\xb5', 'u').replace('\x00', '')  # \xb5 is µ
                    num = header['listADCInfo'][i]['nADCNum']
                t_start = float(episodArray[j]['offset']) / sampling_rate
                t_start = t_start.rescale('s')
                try:
                    pq.Quantity(1, unit)
                except Exception:
                    unit = ''
                if lazy:
                    signal = [] * pq.Quantity(1, unit)
                else:
                    signal = subdata[:, i] * pq.Quantity(1, unit)
                anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                      t_start=t_start,
                                      name=str(name),
                                      channel_index=int(num))
                if lazy:
                    anaSig.lazy_shape = subdata.shape[0]
                seg.analogsignals.append(anaSig)
            bl.segments.append(seg)

        if mode in [3, 5]:  # TODO check if tags exist in other modes
            # tags are an EventArray that should be attached to the Block;
            # they are attached to the first Segment instead
            times = []
            labels = []
            comments = []
            for i, tag in enumerate(header['listTag']):
                times.append(tag['lTagTime'] / sampling_rate)
                labels.append(str(tag['nTagType']))
                comments.append(clean_string(tag['sComment']))
            times = np.array(times)
            labels = np.array(labels, dtype='S')
            comments = np.array(comments, dtype='S')
            # attach all tags to the first segment
            seg = bl.segments[0]
            if lazy:
                ea = EventArray(times=[] * pq.s, labels=np.array([], dtype='S'))
                ea.lazy_shape = len(times)
            else:
                ea = EventArray(times=times * pq.s, labels=labels, comments=comments)
            seg.eventarrays.append(ea)

    bl.create_many_to_one_relationship()
    return bl
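# Illustration (assumption, not from the source): this read_block appears to
# belong to neo's AxonIO class (the ABF reader in neo 0.3-era releases); a
# typical call, with a hypothetical filename:
#
#     from neo.io import AxonIO
#     bl = AxonIO(filename='cell01.abf').read_block(lazy=False, cascade=True)
#     for seg in bl.segments:  # one sweep (episode) per segment
#         print(seg.index, [sig.name for sig in seg.analogsignals])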
def read_segment(self, lazy=False, cascade=True):
    ## Read header file (vhdr)
    header = readBrainSoup(self.filename)

    assert header['Common Infos']['DataFormat'] == 'BINARY', NotImplementedError
    assert header['Common Infos']['DataOrientation'] == 'MULTIPLEXED', NotImplementedError
    nb_channel = int(header['Common Infos']['NumberOfChannels'])
    sampling_rate = 1.e6 / float(
        header['Common Infos']['SamplingInterval']) * pq.Hz

    fmt = header['Binary Infos']['BinaryFormat']
    fmts = {'INT_16': np.int16, 'IEEE_FLOAT_32': np.float32, }
    assert fmt in fmts, NotImplementedError
    dt = fmts[fmt]

    seg = Segment(file_origin=os.path.basename(self.filename), )
    if not cascade:
        return seg

    # read binary
    if not lazy:
        binary_file = os.path.splitext(self.filename)[0] + '.eeg'
        sigs = np.memmap(binary_file, dt, 'r', ).astype('f')

        n = int(sigs.size / nb_channel)
        sigs = sigs[:n * nb_channel]
        sigs = sigs.reshape(n, nb_channel)

    for c in range(nb_channel):
        name, ref, res, units = header['Channel Infos'][
            'Ch%d' % (c + 1, )].split(',')
        units = pq.Quantity(1, units.replace('µ', 'u'))
        if lazy:
            signal = [] * units
        else:
            signal = sigs[:, c] * units
        anasig = AnalogSignal(signal=signal,
                              channel_index=c,
                              name=name,
                              sampling_rate=sampling_rate,
                              )
        if lazy:
            anasig.lazy_shape = -1
        seg.analogsignals.append(anasig)

    # read markers
    marker_file = os.path.splitext(self.filename)[0] + '.vmrk'
    all_info = readBrainSoup(marker_file)['Marker Infos']
    all_types = []
    times = []
    labels = []
    for i in range(len(all_info)):
        type_, label, pos, size, channel = all_info[
            'Mk%d' % (i + 1, )].split(',')[:5]
        all_types.append(type_)
        times.append(float(pos) / sampling_rate.magnitude)
        labels.append(label)
    all_types = np.array(all_types)
    times = np.array(times) * pq.s
    labels = np.array(labels, dtype='S')
    for type_ in np.unique(all_types):
        ind = type_ == all_types
        if lazy:
            ea = EventArray(name=str(type_))
            ea.lazy_shape = -1
        else:
            ea = EventArray(times=times[ind],
                            labels=labels[ind],
                            name=str(type_))
        seg.eventarrays.append(ea)

    seg.create_many_to_one_relationship()
    return seg
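# --- Illustration (not part of the reader) ----------------------------------
# BrainVision stores MULTIPLEXED binary data: samples are interleaved channel
# by channel, so the flat memmap above is reshaped to (n_samples, n_channels)
# and each column becomes one AnalogSignal. A self-contained sketch with a
# synthetic temporary file:
import os
import tempfile
import numpy as np

nb_channel = 3
raw = np.arange(12, dtype=np.int16)  # s0c0, s0c1, s0c2, s1c0, ...
path = os.path.join(tempfile.mkdtemp(), 'demo.eeg')
raw.tofile(path)

sigs = np.memmap(path, np.int16, 'r').astype('f')
n = int(sigs.size / nb_channel)
sigs = sigs[:n * nb_channel].reshape(n, nb_channel)
assert sigs[:, 0].tolist() == [0., 3., 6., 9.]  # one column per channel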
def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms=False):
    # basic header
    dt = [('header_id', 'S8'),
          ('ver_major', 'uint8'),
          ('ver_minor', 'uint8'),
          ('additionnal_flag', 'uint16'),  # flags, currently basically unused
          ('header_size', 'uint32'),  # i.e. index of first data
          ('packet_size', 'uint32'),  # number of bytes per packet, i.e. bytes per sample
          ('sampling_rate', 'uint32'),  # time resolution in Hz of time stamps, i.e. data packets
          ('waveform_sampling_rate', 'uint32'),  # sampling frequency of waveforms in Hz
          ('window_datetime', 'S16'),
          ('application', 'S32'),
          ('comments', 'S256'),  # comments
          ('num_ext_header', 'uint32'),  # number of extended headers
          ]
    nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
    version = '{0}.{1}'.format(h['ver_major'], h['ver_minor'])
    assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', \
        'Unsupported version {0}'.format(version)
    seg.annotate(blackrock_version=version)
    seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
    sr = float(h['sampling_rate'])
    wsr = float(h['waveform_sampling_rate'])

    if not cascade:
        return

    # extended header
    # this consists of N blocks with an 8-byte code + 24 data bytes;
    # the data bytes depend on the code and need to be converted case by case
    raw_ext_header = np.memmap(filename_nev, offset=np.dtype(dt).itemsize,
                               dtype=[('code', 'S8'), ('data', 'S24')],
                               shape=h['num_ext_header'])
    # this is for debugging
    ext_header = {}
    for code, dt_ext in ext_nev_header_codes.items():
        sel = raw_ext_header['code'] == code
        ext_header[code] = raw_ext_header[sel].view(dt_ext)

    # channel labels
    neuelbl_header = ext_header['NEUEVLBL']
    # Sometimes when making the channel labels we have only one channel
    # and so must address it differently.
    try:
        channel_labels = dict(zip(neuelbl_header['channel_id'],
                                  neuelbl_header['channel_label']))
    except TypeError:
        channel_labels = dict([(neuelbl_header['channel_id'],
                                neuelbl_header['channel_label'])])

    # TODO ext_header['DIGLABEL']: is there only one label,
    #      because there is no id in that case?
    # TODO ECOMMENT + CCOMMENT for annotations
    # TODO NEUEVFLT for annotations

    # read data packets and markers
    dt0 = [('samplepos', 'uint32'),
           ('id', 'uint16'),
           ('value', 'S{0}'.format(h['packet_size'] - 6)),
           ]
    data = np.memmap(filename_nev, offset=h['header_size'], dtype=dt0)
    all_ids = np.unique(data['id'])

    t_start = 0 * pq.s
    t_stop = data['samplepos'][-1] / sr * pq.s

    # read events (digital + analog + comments)
    def create_event_array_trig_or_analog(selection, name, labelmode=None):
        if lazy:
            times = []
            labels = np.array([], dtype='S')
        else:
            times = data_trigger['samplepos'][selection].astype(float) / sr
            if labelmode == 'digital_port':
                labels = data_trigger['digital_port'][selection].astype('S2')
            elif labelmode is None:
                labels = None
        ev = EventArray(times=times * pq.s, labels=labels, name=name)
        if lazy:
            ev.lazy_shape = np.sum(selection)
        seg.eventarrays.append(ev)

    mask = (data['id'] == 0)
    dt_trig = [('samplepos', 'uint32'),
               ('id', 'uint16'),
               ('reason', 'uint8'),
               ('reserved0', 'uint8'),
               ('digital_port', 'uint16'),
               ('reserved1', 'S{0}'.format(h['packet_size'] - 10)),
               ]
    data_trigger = data.view(dt_trig)[mask]
    # Digital Triggers (Packet ID 0)
    is_digital = (data_trigger['reason'] & 1) > 0
    create_event_array_trig_or_analog(is_digital, 'Digital trigger',
                                      labelmode='digital_port')

    # Analog Triggers (Packet ID 0)
    if version in ['2.1', '2.2']:
        for i in range(5):
            is_analog = (data_trigger['reason'] & (2 ** (i + 1))) > 0
            create_event_array_trig_or_analog(
                is_analog, 'Analog trigger {0}'.format(i), labelmode=None)

    # Comments
    mask = (data['id'] == 0xFFFF)
    dt_comments = [('samplepos', 'uint32'),
                   ('id', 'uint16'),
                   ('charset', 'uint8'),
                   ('reserved0', 'uint8'),
                   ('color', 'uint32'),
                   ('comment', 'S{0}'.format(h['packet_size'] - 12)),
                   ]
    data_comments = data.view(dt_comments)[mask]
    if data_comments.size > 0:
        if lazy:
            times = []
            labels = []
        else:
            times = data_comments['samplepos'].astype(float) / sr
            labels = data_comments['comment'].astype('S')
        ev = EventArray(times=times * pq.s, labels=labels, name='Comments')
        if lazy:
            ev.lazy_shape = data_comments.size
        seg.eventarrays.append(ev)

    # read spike channels
    channel_ids = all_ids[(all_ids > 0) & (all_ids <= 2048)]

    # get the dtype of the waveforms (this is stupidly complicated)
    if nev_header['additionnal_flag'] & 0x1:
        #dtype_waveforms = { k:'int16' for k in channel_ids }
        dtype_waveforms = dict((k, 'int16') for k in channel_ids)
    else:
        # there is a code electrode by electrode giving the appropriate dtype
        neuewav_header = ext_header['NEUEVWAV']
        dtype_waveform = dict(zip(neuewav_header['channel_id'],
                                  neuewav_header['num_bytes_per_waveform']))
        dtypes_conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
        #dtype_waveforms = { k:dtypes_conv[v] for k,v in dtype_waveform.items() }
        dtype_waveforms = dict((k, dtypes_conv[v])
                               for k, v in dtype_waveform.items())

    dt2 = [('samplepos', 'uint32'),
           ('id', 'uint16'),
           ('cluster', 'uint8'),
           ('reserved0', 'uint8'),
           ('waveform', 'uint8', (h['packet_size'] - 8, )),
           ]
    data_spike = data.view(dt2)
    for channel_id in channel_ids:
        data_spike_chan = data_spike[data['id'] == channel_id]
        cluster_ids = np.unique(data_spike_chan['cluster'])
        for cluster_id in cluster_ids:
            if cluster_id == 0:
                name = 'unclassified'
            elif cluster_id == 255:
                name = 'noise'
            else:
                name = 'Cluster {0}'.format(cluster_id)
            name = 'Channel {0} '.format(channel_id) + name

            data_spike_chan_clus = data_spike_chan[
                data_spike_chan['cluster'] == cluster_id]
            n_spike = data_spike_chan_clus.size
            waveforms, w_sampling_rate, left_sweep = None, None, None
            if lazy:
                times = []
            else:
                times = data_spike_chan_clus['samplepos'].astype(float) / sr
                if load_waveforms:
                    dtype_waveform = dtype_waveforms[channel_id]
                    waveform_size = (h['packet_size'] - 8) // \
                        np.dtype(dtype_waveform).itemsize
                    waveforms = data_spike_chan_clus['waveform'].flatten()\
                        .view(dtype_waveform)
                    waveforms = waveforms.reshape(n_spike, 1, waveform_size)
                    waveforms = waveforms * pq.uV
                    w_sampling_rate = wsr * pq.Hz
                    left_sweep = waveform_size // 2 / sr * pq.s
            st = SpikeTrain(times=times * pq.s, name=name,
                            t_start=t_start, t_stop=t_stop,
                            waveforms=waveforms,
                            sampling_rate=w_sampling_rate,
                            left_sweep=left_sweep)
            st.annotate(channel_index=int(channel_id))
            if lazy:
                st.lazy_shape = n_spike
            seg.spiketrains.append(st)
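# --- Illustration (not part of the reader) ----------------------------------
# read_nev parses the whole packet stream by memmapping it once with a generic
# structured dtype, then re-viewing the same bytes with packet-specific dtypes
# and selecting rows with boolean masks on the 'id' field. The same idiom on a
# small in-memory array (field names mirror the reader; values are synthetic):
import numpy as np

dt0 = [('samplepos', 'uint32'), ('id', 'uint16'), ('value', 'S2')]
data = np.array([(10, 0, b'ab'), (20, 7, b'cd'), (30, 0, b'ef')], dtype=dt0)

# reinterpret the 'value' payload of id==0 packets as a uint16 field;
# both dtypes have the same itemsize, so .view() just relabels the bytes
dt_trig = [('samplepos', 'uint32'), ('id', 'uint16'), ('digital_port', 'uint16')]
data_trigger = data.view(dt_trig)[data['id'] == 0]
assert data_trigger['samplepos'].tolist() == [10, 30]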
def read_block(self, lazy=False, cascade=True):
    header = self.read_header()
    version = header['fFileVersionNumber']

    bl = Block()
    bl.file_origin = os.path.basename(self.filename)
    bl.annotate(abf_version=version)

    # date and time
    if version < 2.:
        YY = 1900
        MM = 1
        DD = 1
        hh = int(header['lFileStartTime'] / 3600.)
        mm = int((header['lFileStartTime'] - hh * 3600) / 60)
        ss = header['lFileStartTime'] - hh * 3600 - mm * 60
        ms = int(np.mod(ss, 1) * 1e6)
        ss = int(ss)
    elif version >= 2.:
        YY = int(header['uFileStartDate'] / 10000)
        MM = int((header['uFileStartDate'] - YY * 10000) / 100)
        DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
        hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
        mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
        ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
        ms = int(np.mod(ss, 1) * 1e6)
        ss = int(ss)
    bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

    if not cascade:
        return bl

    # file format
    if header['nDataFormat'] == 0:
        dt = np.dtype('i2')
    elif header['nDataFormat'] == 1:
        dt = np.dtype('f4')

    if version < 2.:
        nbchannel = header['nADCNumChannels']
        headOffset = header['lDataSectionPtr'] * BLOCKSIZE + header[
            'nNumPointsIgnored'] * dt.itemsize
        totalsize = header['lActualAcqLength']
    elif version >= 2.:
        nbchannel = header['sections']['ADCSection']['llNumEntries']
        headOffset = header['sections']['DataSection'][
            'uBlockIndex'] * BLOCKSIZE
        totalsize = header['sections']['DataSection']['llNumEntries']

    data = np.memmap(self.filename, dt, 'r',
                     shape=(totalsize, ), offset=headOffset)

    # 3 possible modes
    if version < 2.:
        mode = header['nOperationMode']
    elif version >= 2.:
        mode = header['protocol']['nOperationMode']
    #~ print 'mode' , mode
    if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
        # event-driven variable-length mode (mode 1)
        # event-driven fixed-length mode (mode 2 or 5)
        # gap-free mode (mode 3) can be in several episodes
        # (strange but possible)

        # read sweep positions
        if version < 2.:
            nbepisod = header['lSynchArraySize']
            offsetEpisod = header['lSynchArrayPtr'] * BLOCKSIZE
        elif version >= 2.:
            nbepisod = header['sections']['SynchArraySection'][
                'llNumEntries']
            offsetEpisod = header['sections']['SynchArraySection'][
                'uBlockIndex'] * BLOCKSIZE
        if nbepisod > 0:
            episodArray = np.memmap(self.filename,
                                    [('offset', 'i4'), ('len', 'i4')], 'r',
                                    shape=(nbepisod),
                                    offset=offsetEpisod)
        else:
            episodArray = np.empty((1), [('offset', 'i4'), ('len', 'i4')], )
            episodArray[0]['len'] = data.size
            episodArray[0]['offset'] = 0

        # sampling_rate
        if version < 2.:
            sampling_rate = 1. / (header['fADCSampleInterval'] *
                                  nbchannel * 1.e-6) * pq.Hz
        elif version >= 2.:
            sampling_rate = 1.e6 / header['protocol'][
                'fADCSequenceInterval'] * pq.Hz

        # construct block
        # one sweep = one segment in a block
        pos = 0
        for j in range(episodArray.size):
            seg = Segment(index=j)

            length = episodArray[j]['len']

            if version < 2.:
                fSynchTimeUnit = header['fSynchTimeUnit']
            elif version >= 2.:
                fSynchTimeUnit = header['protocol']['fSynchTimeUnit']

            if (fSynchTimeUnit != 0) and (mode == 1):
                length /= fSynchTimeUnit

            subdata = data[pos:pos + length]
            pos += length
            subdata = subdata.reshape(
                (int(subdata.size / nbchannel), nbchannel)).astype('f')

            if dt == np.dtype('i2'):
                if version < 2.:
                    reformat_integer_V1(subdata, nbchannel, header)
                elif version >= 2.:
                    reformat_integer_V2(subdata, nbchannel, header)

            for i in range(nbchannel):
                if version < 2.:
                    name = header['sADCChannelName'][i].replace('\x00', '')
                    unit = header['sADCUnits'][i].replace(
                        '\xb5', 'u').replace('\x00', '')  # \xb5 is µ
                    num = header['nADCPtoLChannelMap'][i]
                elif version >= 2.:
                    name = header['listADCInfo'][i]['ADCChNames'].replace(
                        '\x00', '')
                    unit = header['listADCInfo'][i]['ADCChUnits'].replace(
                        '\xb5', 'u').replace('\x00', '')  # \xb5 is µ
                    num = header['listADCInfo'][i]['nADCNum']
                t_start = float(episodArray[j]['offset']) / sampling_rate
                t_start = t_start.rescale('s')
                try:
                    pq.Quantity(1, unit)
                except:
                    #~ print 'bug units', i, unit
                    unit = ''
                if lazy:
                    signal = [] * pq.Quantity(1, unit)
                else:
                    signal = subdata[:, i] * pq.Quantity(1, unit)
                anaSig = AnalogSignal(signal,
                                      sampling_rate=sampling_rate,
                                      t_start=t_start,
                                      name=str(name),
                                      channel_index=int(num))
                if lazy:
                    anaSig.lazy_shape = subdata.shape[0]
                seg.analogsignals.append(anaSig)
            bl.segments.append(seg)

        if mode in [3, 5]:  # TODO check if tags exist in other modes
            # tags are an EventArray that should be attached to the Block;
            # here they are attached to the first Segment instead
            times = []
            labels = []
            comments = []
            for i, tag in enumerate(header['listTag']):
                times.append(tag['lTagTime'] / sampling_rate)
                labels.append(str(tag['nTagType']))
                comments.append(clean_string(tag['sComment']))
            times = np.array(times)
            labels = np.array(labels, dtype='S')
            comments = np.array(comments, dtype='S')
            # attach all tags to the first segment
            seg = bl.segments[0]
            if lazy:
                ea = EventArray(times=[] * pq.s,
                                labels=np.array([], dtype='S'))
                ea.lazy_shape = len(times)
            else:
                ea = EventArray(times=times * pq.s,
                                labels=labels, comments=comments)
            seg.eventarrays.append(ea)

    bl.create_many_to_one_relationship()
    return bl
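# --- Illustration (not part of the reader) ----------------------------------
# ABF2 packs the recording date as YYYYMMDD in uFileStartDate and the start
# time as milliseconds since midnight in uFileStartTimeMS; the arithmetic
# above unpacks both. The same computation on an assumed sample value:
import datetime
import numpy as np

uFileStartDate, uFileStartTimeMS = 20140312, 47730500  # assumed sample values
YY = int(uFileStartDate / 10000)
MM = int((uFileStartDate - YY * 10000) / 100)
DD = int(uFileStartDate - YY * 10000 - MM * 100)
hh = int(uFileStartTimeMS / 1000. / 3600.)
mm = int((uFileStartTimeMS / 1000. - hh * 3600) / 60)
ss = uFileStartTimeMS / 1000. - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
assert datetime.datetime(YY, MM, DD, hh, mm, int(ss), ms) == \
    datetime.datetime(2014, 3, 12, 13, 15, 30, 500000)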
def read_segment(self, import_neuroshare_segment=True,
                 lazy=False, cascade=True):
    """
    Arguments:
        import_neuroshare_segment: import a neuroshare segment as a
            SpikeTrain with associated waveforms, or do not import it at all.
    """
    seg = Segment(file_origin=os.path.basename(self.filename), )

    if sys.platform.startswith('win'):
        neuroshare = ctypes.windll.LoadLibrary(self.dllname)
    elif sys.platform.startswith('linux'):
        neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
    neuroshare = DllWithError(neuroshare)
    #elif sys.platform.startswith('darwin'):

    # API version
    info = ns_LIBRARYINFO()
    neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
    seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.'
                 + str(info.dwAPIVersionMin))

    if not cascade:
        return seg

    # open file
    hFile = ctypes.c_uint32(0)
    neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                           ctypes.byref(hFile))
    fileinfo = ns_FILEINFO()
    neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                              ctypes.sizeof(fileinfo))

    # read all entities
    for dwEntityID in range(fileinfo.dwEntityCount):
        entityInfo = ns_ENTITYINFO()
        neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                    ctypes.byref(entityInfo),
                                    ctypes.sizeof(entityInfo))

        # EVENT
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
            pEventInfo = ns_EVENTINFO()
            neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                       ctypes.byref(pEventInfo),
                                       ctypes.sizeof(pEventInfo))

            if pEventInfo.dwEventType == 0:  # TEXT
                pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 1:  # CSV
                pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 2:  # 8 bit
                pData = ctypes.c_byte(0)
            elif pEventInfo.dwEventType == 3:  # 16 bit
                pData = ctypes.c_int16(0)
            elif pEventInfo.dwEventType == 4:  # 32 bit
                pData = ctypes.c_int32(0)
            pdTimeStamp = ctypes.c_double(0.)
            pdwDataRetSize = ctypes.c_uint32(0)

            ea = EventArray(name=str(entityInfo.szEntityLabel), )
            if not lazy:
                times = []
                labels = []
                for dwIndex in range(entityInfo.dwItemCount):
                    neuroshare.ns_GetEventData(
                        hFile, dwEntityID, dwIndex,
                        ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                        ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                    times.append(pdTimeStamp.value)
                    labels.append(str(pData.value))
                ea.times = times * pq.s
                ea.labels = np.array(labels, dtype='S')
            else:
                ea.lazy_shape = entityInfo.dwItemCount
            seg.eventarrays.append(ea)

        # analog
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
            pAnalogInfo = ns_ANALOGINFO()
            neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                        ctypes.byref(pAnalogInfo),
                                        ctypes.sizeof(pAnalogInfo))
            dwIndexCount = entityInfo.dwItemCount

            if lazy:
                signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
            else:
                pdwContCount = ctypes.c_uint32(0)
                pData = np.zeros((entityInfo.dwItemCount, ), dtype='float64')
                total_read = 0
                while total_read < entityInfo.dwItemCount:
                    dwStartIndex = ctypes.c_uint32(total_read)
                    dwStopIndex = ctypes.c_uint32(
                        entityInfo.dwItemCount - total_read)
                    neuroshare.ns_GetAnalogData(
                        hFile, dwEntityID, dwStartIndex, dwStopIndex,
                        ctypes.byref(pdwContCount),
                        pData[total_read:].ctypes.data_as(
                            ctypes.POINTER(ctypes.c_double)))
                    total_read += pdwContCount.value

                signal = pq.Quantity(pData, units=pAnalogInfo.szUnits,
                                     copy=False)

            # t_start
            dwIndex = 0
            pdTime = ctypes.c_double(0)
            neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                         ctypes.byref(pdTime))

            anaSig = AnalogSignal(
                signal,
                sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                t_start=pdTime.value * pq.s,
                name=str(entityInfo.szEntityLabel),
            )
            anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
            if lazy:
                anaSig.lazy_shape = entityInfo.dwItemCount
            seg.analogsignals.append(anaSig)

        # segment
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' \
                and import_neuroshare_segment:

            pdwSegmentInfo = ns_SEGMENTINFO()
            if not str(entityInfo.szEntityLabel).startswith('spks'):
                continue

            neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                         ctypes.byref(pdwSegmentInfo),
                                         ctypes.sizeof(pdwSegmentInfo))
            nsource = pdwSegmentInfo.dwSourceCount

            pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
            neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

            for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                pSourceInfo = ns_SEGSOURCEINFO()
                neuroshare.ns_GetSegmentSourceInfo(
                    hFile, dwEntityID, dwSourceID,
                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

            if lazy:
                sptr = SpikeTrain([] * pq.s, t_stop=0. * pq.s,
                                  name=str(entityInfo.szEntityLabel))
                sptr.lazy_shape = entityInfo.dwItemCount
            else:
                pdTimeStamp = ctypes.c_double(0.)
                dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * \
                    pdwSegmentInfo.dwSourceCount
                pData = np.zeros((dwDataBufferSize), dtype='float64')
                pdwSampleCount = ctypes.c_uint32(0)
                pdwUnitID = ctypes.c_uint32(0)

                nsample = int(dwDataBufferSize)
                times = np.empty((entityInfo.dwItemCount), dtype='f')
                waveforms = np.empty((entityInfo.dwItemCount, nsource,
                                      nsample), dtype='f')
                for dwIndex in range(entityInfo.dwItemCount):
                    neuroshare.ns_GetSegmentData(
                        hFile, dwEntityID, dwIndex,
                        ctypes.byref(pdTimeStamp),
                        pData.ctypes.data_as(
                            ctypes.POINTER(ctypes.c_double)),
                        dwDataBufferSize * 8,
                        ctypes.byref(pdwSampleCount),
                        ctypes.byref(pdwUnitID))

                    times[dwIndex] = pdTimeStamp.value
                    waveforms[dwIndex, :, :] = \
                        pData[:nsample * nsource].reshape(
                            nsample, nsource).transpose()

                sptr = SpikeTrain(
                    times=pq.Quantity(times, units='s', copy=False),
                    t_stop=times.max(),
                    waveforms=pq.Quantity(waveforms,
                                          units=str(pdwSegmentInfo.szUnits),
                                          copy=False),
                    left_sweep=nsample / 2. /
                    float(pdwSegmentInfo.dSampleRate) * pq.s,
                    sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                    name=str(entityInfo.szEntityLabel),
                )
            seg.spiketrains.append(sptr)

        # neuralevent
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':
            pNeuralInfo = ns_NEURALINFO()
            neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                        ctypes.byref(pNeuralInfo),
                                        ctypes.sizeof(pNeuralInfo))

            if lazy:
                times = [] * pq.s
                t_stop = 0 * pq.s
            else:
                pData = np.zeros((entityInfo.dwItemCount, ), dtype='float64')
                dwStartIndex = 0
                dwIndexCount = entityInfo.dwItemCount
                neuroshare.ns_GetNeuralData(
                    hFile, dwEntityID, dwStartIndex, dwIndexCount,
                    pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                times = pData * pq.s
                t_stop = times.max()

            sptr = SpikeTrain(times, t_stop=t_stop,
                              name=str(entityInfo.szEntityLabel), )
            if lazy:
                sptr.lazy_shape = entityInfo.dwItemCount
            seg.spiketrains.append(sptr)

    # close
    neuroshare.ns_CloseFile(hFile)

    seg.create_many_to_one_relationship()
    return seg
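# --- Illustration (not part of the reader) ----------------------------------
# The Neuroshare calls above hand NumPy-owned memory to the DLL through
# pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)). A dependency-free
# sketch of that pattern, with ctypes.memmove standing in for the foreign
# function that fills the buffer:
import ctypes
import numpy as np

src = np.array([1.5, 2.5, 3.5], dtype='float64')
dst = np.zeros(3, dtype='float64')
ctypes.memmove(dst.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
               src.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
               dst.nbytes)
assert dst.tolist() == [1.5, 2.5, 3.5]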
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """
    Read in a segment.

    Arguments:
        load_spike_waveform : load or not waveforms of spikes (default True)
    """
    fid = open(self.filename, 'rb')
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

    # metadata
    seg = Segment()
    seg.rec_datetime = datetime.datetime(globalHeader.pop('Year'),
                                         globalHeader.pop('Month'),
                                         globalHeader.pop('Day'),
                                         globalHeader.pop('Hour'),
                                         globalHeader.pop('Minute'),
                                         globalHeader.pop('Second'))
    seg.file_origin = os.path.basename(self.filename)
    for key, val in iteritems(globalHeader):
        seg.annotate(**{key: val})

    if not cascade:
        return seg

    ## Step 1 : read headers
    # dsp channel headers = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader['NumDSPChannels']):
        # channel is 1 based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader['Template'] = np.array(
            channelHeader['Template']).reshape((5, 64))
        channelHeader['Boxes'] = np.array(
            channelHeader['Boxes']).reshape((5, 2, 4))
        dspChannelHeaders[channelHeader['Channel']] = channelHeader
        maxunit = max(channelHeader['NUnits'], maxunit)
        maxchan = max(channelHeader['Channel'], maxchan)

    # event channel headers
    eventHeaders = {}
    for _ in range(globalHeader['NumEventChannels']):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader['Channel']] = eventHeader

    # slow channel headers = signal
    slowChannelHeaders = {}
    for _ in range(globalHeader['NumSlowChannels']):
        slowChannelHeader = HeaderReader(
            fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

    ## Step 2 : a first loop for counting sizes
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders), dtype='i')
    t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

    # spike times and waveforms
    nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
    wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

    # eventarrays
    nb_events = {}
    #maxstrsizeperchannel = { }
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0
        #maxstrsizeperchannel[chan] = 0

    start = fid.tell()
    while fid.tell() != -1:
        # read block header
        dataBlockHeader = HeaderReader(fid,
                                       DataBlockHeader).read_f(offset=None)
        if dataBlockHeader is None:
            break
        chan = dataBlockHeader['Channel']
        unit = dataBlockHeader['Unit']
        n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                  dataBlockHeader['NumberOfWordsInWaveform'])
        time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                dataBlockHeader['TimeStamp'])

        if dataBlockHeader['Type'] == 1:
            nb_spikes[chan, unit] += 1
            wf_sizes[chan, unit, :] = [n1, n2]
            fid.seek(n1 * n2 * 2, 1)
        elif dataBlockHeader['Type'] == 4:
            # event
            nb_events[chan] += 1
        elif dataBlockHeader['Type'] == 5:
            # continuous signal
            fid.seek(n2 * 2, 1)
            if n2 > 0:
                if nb_samples[chan] == 0:
                    t_starts[chan] = time
                nb_samples[chan] += n2

    ## Step 3 : allocate memory, then loop a second time for reading
    ## (if not lazy)
    if not lazy:
        # allocate memory for signals
        sigarrays = {}
        for chan, h in iteritems(slowChannelHeaders):
            sigarrays[chan] = np.zeros(nb_samples[chan])

        # allocate memory for SpikeTrains
        stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
        swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
        for (chan, unit), _ in np.ndenumerate(nb_spikes):
            stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit],
                                               dtype='f')
            if load_spike_waveform:
                n1, n2 = wf_sizes[chan, unit, :]
                swfarrays[chan, unit] = np.zeros(
                    (nb_spikes[chan, unit], n1, n2), dtype='f4')
        pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

        # allocate memory for events
        eventpositions = {}
        evarrays = {}
        for chan, nb in iteritems(nb_events):
            evarrays[chan] = {'times': np.zeros(nb, dtype='f'),
                              'labels': np.zeros(nb, dtype='S4')}
            eventpositions[chan] = 0

        fid.seek(start)
        while fid.tell() != -1:
            dataBlockHeader = HeaderReader(
                fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                      dataBlockHeader['NumberOfWordsInWaveform'])
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                    dataBlockHeader['TimeStamp'])
            time /= globalHeader['ADFrequency']
            if n2 < 0:
                break
            if dataBlockHeader['Type'] == 1:
                # spike
                unit = dataBlockHeader['Unit']
                pos = pos_spikes[chan, unit]
                stimearrays[chan, unit][pos] = time
                if load_spike_waveform and n1 * n2 != 0:
                    swfarrays[chan, unit][pos, :, :] = np.fromstring(
                        fid.read(n1 * n2 * 2),
                        dtype='i2').reshape(n1, n2).astype('f4')
                else:
                    fid.seek(n1 * n2 * 2, 1)
                pos_spikes[chan, unit] += 1
            elif dataBlockHeader['Type'] == 4:
                # event
                pos = eventpositions[chan]
                evarrays[chan]['times'][pos] = time
                evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                eventpositions[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # signal
                data = np.fromstring(fid.read(n2 * 2),
                                     dtype='i2').astype('f4')
                sigarrays[chan][sample_positions[chan]:
                                sample_positions[chan] + data.size] = data
                sample_positions[chan] += data.size

    ## Step 4 : create neo objects
    for chan, h in iteritems(eventHeaders):
        if lazy:
            times = []
            labels = None
        else:
            times = evarrays[chan]['times']
            labels = evarrays[chan]['labels']
        ea = EventArray(times * pq.s,
                        labels=labels,
                        channel_name=eventHeaders[chan]['Name'],
                        channel_index=chan)
        if lazy:
            ea.lazy_shape = nb_events[chan]
        seg.eventarrays.append(ea)

    for chan, h in iteritems(slowChannelHeaders):
        if lazy:
            signal = []
        else:
            if globalHeader['Version'] == 100 or \
                    globalHeader['Version'] == 101:
                gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain']
                                * 1000.)
            elif globalHeader['Version'] == 102:
                gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain']
                                * slowChannelHeaders[chan]['PreampGain'])
            elif globalHeader['Version'] >= 103:
                gain = globalHeader['SlowMaxMagnitudeMV'] / (
                    .5 * (2 ** globalHeader['BitsPerSpikeSample']) *
                    slowChannelHeaders[chan]['Gain'] *
                    slowChannelHeaders[chan]['PreampGain'])
            signal = sigarrays[chan] * gain
        anasig = AnalogSignal(
            signal * pq.V,
            sampling_rate=float(slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
            t_start=t_starts[chan] * pq.s,
            channel_index=slowChannelHeaders[chan]['Channel'],
            channel_name=slowChannelHeaders[chan]['Name'],
        )
        if lazy:
            anasig.lazy_shape = nb_samples[chan]
        seg.analogsignals.append(anasig)

    for (chan, unit), value in np.ndenumerate(nb_spikes):
        if nb_spikes[chan, unit] == 0:
            continue
        if lazy:
            times = []
            waveforms = None
            t_stop = 0
        else:
            times = stimearrays[chan, unit]
            t_stop = times.max()
            if load_spike_waveform:
                if globalHeader['Version'] < 103:
                    gain = 3000. / (2048 * dspChannelHeaders[chan]['Gain']
                                    * 1000.)
                elif 103 <= globalHeader['Version'] < 105:
                    gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                        .5 * 2. ** globalHeader['BitsPerSpikeSample']
                        * 1000.)
                elif globalHeader['Version'] >= 105:
                    gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                        .5 * 2. ** globalHeader['BitsPerSpikeSample']
                        * globalHeader['SpikePreAmpGain'])
                waveforms = swfarrays[chan, unit] * gain * pq.V
            else:
                waveforms = None
        sptr = SpikeTrain(times, units='s', t_stop=t_stop * pq.s,
                          waveforms=waveforms)
        sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
        sptr.annotate(channel_index=chan)
        for key, val in iteritems(dspChannelHeaders[chan]):
            sptr.annotate(**{key: val})

        if lazy:
            sptr.lazy_shape = nb_spikes[chan, unit]
        seg.spiketrains.append(sptr)

    seg.create_many_to_one_relationship()
    return seg
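# --- Illustration (not part of the reader) ----------------------------------
# read_segment makes two passes over the data blocks: pass 1 only counts
# spikes per (channel, unit) so that exact-size arrays can be allocated once,
# and pass 2 re-reads the file to fill them with per-slot cursors. A compact
# sketch of that scheme on synthetic records:
import numpy as np

blocks = [(0, 1, 0.5), (1, 0, 0.7), (0, 1, 0.9)]  # (chan, unit, time)

nb_spikes = np.zeros((2, 2), dtype='i')
for chan, unit, _ in blocks:                        # pass 1: count
    nb_spikes[chan, unit] += 1

stimearrays = np.zeros((2, 2), dtype=object)
for (chan, unit), n in np.ndenumerate(nb_spikes):   # allocate exact sizes
    stimearrays[chan, unit] = np.zeros(n, dtype='f')

pos = np.zeros((2, 2), dtype='i')
for chan, unit, time in blocks:                     # pass 2: fill
    stimearrays[chan, unit][pos[chan, unit]] = time
    pos[chan, unit] += 1

assert stimearrays[0, 1].shape == (2,) and pos.sum() == 3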
def read_segment(self, lazy=False, cascade=True):
    fid = open(self.filename, 'rb')
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    #~ print globalHeader
    #~ print 'version' , globalHeader['version']
    seg = Segment()
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(neuroexplorer_version=globalHeader['version'])
    seg.annotate(comment=globalHeader['comment'])

    if not cascade:
        return seg

    offset = 544
    for i in range(globalHeader['nvar']):
        entityHeader = HeaderReader(
            fid, EntityHeader).read_f(offset=offset + i * 208)
        entityHeader['name'] = entityHeader['name'].decode().replace(
            '\x00', '')

        #print 'i',i, entityHeader['type']

        if entityHeader['type'] == 0:
            # neuron
            if lazy:
                spike_times = [] * pq.s
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                spike_times = spike_times.astype(
                    'f8') / globalHeader['freq'] * pq.s
            sptr = SpikeTrain(
                times=spike_times,
                t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                name=entityHeader['name'])
            if lazy:
                sptr.lazy_shape = entityHeader['n']
            sptr.annotate(channel_index=entityHeader['WireNumber'])
            seg.spiketrains.append(sptr)

        if entityHeader['type'] == 1:
            # event
            if lazy:
                event_times = [] * pq.s
            else:
                event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                event_times = event_times.astype(
                    'f8') / globalHeader['freq'] * pq.s
            labels = np.array([''] * event_times.size, dtype='S')
            evar = EventArray(times=event_times, labels=labels,
                              channel_name=entityHeader['name'])
            if lazy:
                evar.lazy_shape = entityHeader['n']
            seg.eventarrays.append(evar)

        if entityHeader['type'] == 2:
            # interval
            if lazy:
                start_times = [] * pq.s
                stop_times = [] * pq.s
            else:
                start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                start_times = start_times.astype(
                    'f8') / globalHeader['freq'] * pq.s
                stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'] +
                                       entityHeader['n'] * 4)
                stop_times = stop_times.astype(
                    'f') / globalHeader['freq'] * pq.s
            epar = EpochArray(times=start_times,
                              durations=stop_times - start_times,
                              labels=np.array([''] * start_times.size,
                                              dtype='S'),
                              channel_name=entityHeader['name'])
            if lazy:
                epar.lazy_shape = entityHeader['n']
            seg.epocharrays.append(epar)

        if entityHeader['type'] == 3:
            # spiketrain and waveforms
            if lazy:
                spike_times = [] * pq.s
                waveforms = None
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                spike_times = spike_times.astype(
                    'f8') / globalHeader['freq'] * pq.s
                waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                      shape=(entityHeader['n'], 1,
                                             entityHeader['NPointsWave']),
                                      offset=entityHeader['offset'] +
                                      entityHeader['n'] * 4)
                waveforms = (waveforms.astype('f') *
                             entityHeader['ADtoMV'] +
                             entityHeader['MVOffset']) * pq.mV
            t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
            if spike_times.size > 0:
                t_stop = max(t_stop, max(spike_times))
            sptr = SpikeTrain(
                times=spike_times,
                t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                t_stop=t_stop,
                name=entityHeader['name'],
                waveforms=waveforms,
                sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                left_sweep=0 * pq.ms)
            if lazy:
                sptr.lazy_shape = entityHeader['n']
            sptr.annotate(channel_index=entityHeader['WireNumber'])
            seg.spiketrains.append(sptr)

        if entityHeader['type'] == 4:
            # popvectors
            pass

        if entityHeader['type'] == 5:
            # analog
            timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                   shape=(entityHeader['n']),
                                   offset=entityHeader['offset'])
            timestamps = timestamps.astype('f8') / globalHeader['freq']
            fragmentStarts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'])
            fragmentStarts = fragmentStarts.astype(
                'f8') / globalHeader['freq']
            t_start = timestamps[0] - fragmentStarts[0] / float(
                entityHeader['WFrequency'])
            del timestamps, fragmentStarts

            if lazy:
                signal = [] * pq.mV
            else:
                signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                   shape=(entityHeader['NPointsWave']),
                                   offset=entityHeader['offset'])
                signal = signal.astype('f')
                signal *= entityHeader['ADtoMV']
                signal += entityHeader['MVOffset']
                signal = signal * pq.mV

            anaSig = AnalogSignal(
                signal=signal,
                t_start=t_start * pq.s,
                sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                name=entityHeader['name'],
                channel_index=entityHeader['WireNumber'])
            if lazy:
                anaSig.lazy_shape = entityHeader['NPointsWave']
            seg.analogsignals.append(anaSig)

        if entityHeader['type'] == 6:
            # markers : TO TEST
            if lazy:
                times = [] * pq.s
                labels = np.array([], dtype='S')
                markertype = None
            else:
                times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                  shape=(entityHeader['n']),
                                  offset=entityHeader['offset'])
                times = times.astype('f8') / globalHeader['freq'] * pq.s
                fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                markertype = fid.read(64).replace('\x00', '')
                labels = np.memmap(
                    self.filename,
                    np.dtype('S' + str(entityHeader['MarkerLength'])), 'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'] +
                    entityHeader['n'] * 4 + 64)
            ea = EventArray(times=times,
                            labels=labels.view(np.ndarray),
                            name=entityHeader['name'],
                            channel_index=entityHeader['WireNumber'],
                            marker_type=markertype)
            if lazy:
                ea.lazy_shape = entityHeader['n']
            seg.eventarrays.append(ea)

    seg.create_many_to_one_relationship()
    return seg
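# --- Illustration (not part of the reader) ----------------------------------
# NeuroExplorer stores analog data and waveforms as int16 ADC counts; the
# reader rescales them with the per-entity calibration
# signal * ADtoMV + MVOffset. The same conversion on synthetic values
# (the calibration numbers here are invented):
import numpy as np
import quantities as pq

raw = np.array([-2048, 0, 2047], dtype='i2')
ADtoMV, MVOffset = 0.01, 0.5   # assumed calibration from the entity header
signal = raw.astype('f') * ADtoMV + MVOffset
signal = signal * pq.mV
assert float(signal[1].magnitude) == 0.5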
def read_block(self, lazy=False, cascade=True):
    bl = Block()
    tankname = os.path.basename(self.dirname)
    bl.file_origin = tankname

    if not cascade:
        return bl

    for blockname in os.listdir(self.dirname):
        if blockname == 'TempBlk':
            continue
        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir):
            continue

        seg = Segment(name=blockname)
        bl.segments.append(seg)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir,
                                    tankname + '_' + blockname + '.tsq')
        dt = [('size', 'int32'),
              ('evtype', 'int32'),
              ('code', 'S4'),
              ('channel', 'uint16'),
              ('sortcode', 'uint16'),
              ('timestamp', 'float64'),
              ('eventoffset', 'int64'),
              ('dataformat', 'int32'),
              ('frequency', 'float32'),
              ]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        #0x8801: 'EVTYPE_MARK' gives the global t_start
        global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

        #TEV is the old data file
        if os.path.exists(os.path.join(subdir,
                                       tankname + '_' + blockname + '.tev')):
            tev_filename = os.path.join(subdir,
                                        tankname + '_' + blockname + '.tev')
            #tev_array = np.memmap(tev_filename, mode='r', dtype='uint8')
            # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        else:
            tev_filename = None

        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype'] == type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code'] == code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel'] == channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [] * pq.s
                            labels = np.array([], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] -
                                     global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view(
                                'float64').astype('S')
                        ea = EventArray(times=times,
                                        name=code,
                                        channel_index=int(channel),
                                        labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.eventarrays.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode'] == sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0] - 10

                            if lazy:
                                times = [] * pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] -
                                         global_t_start) * pq.s
                                dt = np.dtype(data_formats[
                                    tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(
                                    tsq[mask4]['size'],
                                    tsq[mask4]['eventoffset'],
                                    tev_array).view(dt)
                                waveforms = waveforms.reshape(
                                    nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV

                            if nb_spike > 0:
                                # t_start = (tsq['timestamp'][0] -
                                #            global_t_start) * pq.s
                                # this should work but does not
                                t_start = 0 * pq.s
                                t_stop = (tsq['timestamp'][-1] -
                                          global_t_start) * pq.s
                            else:
                                t_start = 0 * pq.s
                                t_stop = 0 * pq.s

                            st = SpikeTrain(
                                times=times,
                                name='Chan{} Code{}'.format(channel,
                                                            sortcode),
                                t_start=t_start,
                                t_stop=t_stop,
                                waveforms=waveforms,
                                left_sweep=waveformsize / 2. / sr * pq.s,
                                sampling_rate=sr * pq.Hz,
                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(data_formats[
                            tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size'] - 10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = []
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(
                                subdir,
                                tankname + '_' + blockname + '_' + signame
                                + '_ch' + str(channel) + '.sev')
                            if os.path.exists(sev_filename):
                                #sig_array = np.memmap(sev_filename,
                                #                      mode='r',
                                #                      dtype='uint8')
                                # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename,
                                                        dtype='uint8')
                            else:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],
                                                tsq[mask3]['eventoffset'],
                                                sig_array).view(dt)

                        anasig = AnalogSignal(
                            signal=signal * pq.V,
                            name='{} {}'.format(code, channel),
                            sampling_rate=sr * pq.Hz,
                            t_start=(tsq[mask3]['timestamp'][0] -
                                     global_t_start) * pq.s,
                            channel_index=int(channel))
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)

    bl.create_many_to_one_relationship()
    return bl
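# --- Illustration (not part of the reader) ----------------------------------
# get_chunks (defined elsewhere in this module) gathers, for every TSQ entry,
# the payload bytes located at 'eventoffset' in the TEV/SEV byte array. The
# 'size' field counts 32-bit words including a 10-word (40-byte) header, so
# each payload is size*4 - 40 bytes, and 'eventoffset' points directly at the
# payload. A minimal stand-in under those assumptions:
import numpy as np

def get_chunks_demo(sizes, offsets, big_array):
    # sizes are in int32 words; subtract the 40-byte packet header
    chunk_len = sizes * 4 - 40
    out = np.empty(chunk_len.sum(), dtype='uint8')
    pos = 0
    for n, offset in zip(chunk_len, offsets):
        out[pos:pos + n] = big_array[offset:offset + n]
        pos += n
    return out

big = np.arange(200, dtype='uint8')
chunks = get_chunks_demo(np.array([11, 11]), np.array([0, 100]), big)
assert chunks.size == 8  # two 4-byte payloads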