def setup_epocharrays(self):
    epocharrname11 = 'epocharr 1 1'
    epocharrname12 = 'epocharr 1 2'
    epocharrname21 = 'epocharr 2 1'
    epocharrname22 = 'epocharr 2 2'

    epocharrtime11 = np.arange(0, 10) * pq.ms
    epocharrtime12 = np.arange(10, 20) * pq.ms
    epocharrtime21 = np.arange(20, 30) * pq.s
    epocharrtime22 = np.arange(30, 40) * pq.s

    epocharrdur11 = np.arange(1, 11) * pq.s
    epocharrdur12 = np.arange(11, 21) * pq.s
    epocharrdur21 = np.arange(21, 31) * pq.ms
    epocharrdur22 = np.arange(31, 41) * pq.ms

    self.epocharrnames1 = [epocharrname11, epocharrname12]
    self.epocharrnames2 = [epocharrname21, epocharrname22]
    self.epocharrnames = [epocharrname11, epocharrname12,
                          epocharrname21, epocharrname22]

    epocharr11 = EpochArray(epocharrtime11, epocharrdur11,
                            label=epocharrname11, name=epocharrname11)
    epocharr12 = EpochArray(epocharrtime12, epocharrdur12,
                            label=epocharrname12, name=epocharrname12)
    epocharr21 = EpochArray(epocharrtime21, epocharrdur21,
                            label=epocharrname21, name=epocharrname21)
    epocharr22 = EpochArray(epocharrtime22, epocharrdur22,
                            label=epocharrname22, name=epocharrname22)

    self.epocharr1 = [epocharr11, epocharr12]
    self.epocharr2 = [epocharr21, epocharr22]
    self.epocharr = [epocharr11, epocharr12, epocharr21, epocharr22]
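# Illustrative sketch (not part of the original tests): building and checking
# one array the same way the fixture does, assuming the 0.3-era neo API where
# EpochArray is importable from neo.core.
import numpy as np
import quantities as pq
from neo.core import EpochArray

epocharr = EpochArray(np.arange(0, 10) * pq.ms,   # onset times, as in the fixture
                      np.arange(1, 11) * pq.s,    # matching durations
                      label='epocharr 1 1',
                      name='epocharr 1 1')
assert epocharr.name == 'epocharr 1 1'
assert epocharr.times.size == epocharr.durations.size == 10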
def read_epocharray(self, lazy=False, cascade=True, channel_index=0,
                    t_start=0., segment_duration=0.):
    """Read digital timestamps.

    This function reads event onsets and offsets and returns onsets and
    durations. To get only the onsets, use the event array function.
    """
    if lazy:
        epa = EpochArray(file_origin=self.filename,
                         times=None, durations=None, labels=None)
    else:
        # create temporary empty lists to store data
        tempNames = list()
        tempTimeStamp = list()
        durations = list()
        # get entity from file
        digEntity = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = digEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        endat = digEntity.get_index_by_time(
            float(segment_duration + t_start), -1)  # -1 means last index before time
        # run through the entity using only odd "i"s
        for i in range(startat, endat + 1, 1):
            if i % 2 == 1:
                # get in which digital bit the trigger was detected
                tempNames.append(digEntity.label[-8:])
                # get the time stamps of the even events
                tempData, onOrOff = digEntity.get_data(i - 1)
                # if this was an onset event, save it to the list.
                # On triggered recordings it seems that only onset events are
                # recorded. On continuous recordings both onset (== 1)
                # and offset (== 255) seem to be recorded.
                # if onOrOff == 1:
                # append the time stamp to the temporary list
                tempTimeStamp.append(tempData)
                # get the time stamps of the odd events
                tempData1, onOrOff = digEntity.get_data(i)
                # if onOrOff == 255:
                #     pass
                durations.append(tempData1 - tempData)
        epa = EpochArray(file_origin=self.filename,
                         times=np.array(tempTimeStamp) * pq.s,
                         durations=np.array(durations) * pq.s,
                         labels=np.array(tempNames, dtype="S"),
                         description="digital events with duration")
    return epa
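# Usage sketch (assumptions: the method above lives on neo's NeuroshareapiIO,
# which exposes a neuroshare file handle as self.fd; the filename below is
# hypothetical).
from neo.io import NeuroshareapiIO

reader = NeuroshareapiIO(filename='data.mcd')
epa = reader.read_epocharray(channel_index=0,
                             t_start=0.,            # seconds
                             segment_duration=10.)  # seconds read after t_start
print(epa.times, epa.durations, epa.labels)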
def read_segment(self, lazy=False, cascade=True):
    fid = open(self.filename, 'rb')
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    #~ print globalHeader
    #~ print 'version', globalHeader['version']
    seg = Segment()
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(neuroexplorer_version=globalHeader['version'])
    seg.annotate(comment=globalHeader['comment'])

    if not cascade:
        return seg

    offset = 544
    for i in range(globalHeader['nvar']):
        entityHeader = HeaderReader(
            fid, EntityHeader).read_f(offset=offset + i * 208)
        # decode() is needed under Python 3, where the raw name is bytes
        entityHeader['name'] = entityHeader['name'].decode().replace('\x00', '')
        #print 'i', i, entityHeader['type']

        if entityHeader['type'] == 0:
            # neuron
            if lazy:
                spike_times = [] * pq.s
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                spike_times = spike_times.astype('f8') / globalHeader['freq'] * pq.s
            sptr = SpikeTrain(
                times=spike_times,
                t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                name=entityHeader['name'],
            )
            if lazy:
                sptr.lazy_shape = entityHeader['n']
            sptr.annotate(channel_index=entityHeader['WireNumber'])
            seg.spiketrains.append(sptr)

        if entityHeader['type'] == 1:
            # event
            if lazy:
                event_times = [] * pq.s
            else:
                event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                event_times = event_times.astype('f8') / globalHeader['freq'] * pq.s
            labels = np.array([''] * event_times.size, dtype='S')
            evar = EventArray(times=event_times, labels=labels,
                              channel_name=entityHeader['name'])
            if lazy:
                evar.lazy_shape = entityHeader['n']
            seg.eventarrays.append(evar)

        if entityHeader['type'] == 2:
            # interval
            if lazy:
                start_times = [] * pq.s
                stop_times = [] * pq.s
            else:
                start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                start_times = start_times.astype('f8') / globalHeader['freq'] * pq.s
                stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'] + entityHeader['n'] * 4)
                stop_times = stop_times.astype('f') / globalHeader['freq'] * pq.s
            epar = EpochArray(times=start_times,
                              durations=stop_times - start_times,
                              labels=np.array([''] * start_times.size, dtype='S'),
                              channel_name=entityHeader['name'])
            if lazy:
                epar.lazy_shape = entityHeader['n']
            seg.epocharrays.append(epar)

        if entityHeader['type'] == 3:
            # spiketrain and waveforms
            if lazy:
                spike_times = [] * pq.s
                waveforms = None
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entityHeader['n']),
                                        offset=entityHeader['offset'])
                spike_times = spike_times.astype('f8') / globalHeader['freq'] * pq.s
                waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                      shape=(entityHeader['n'], 1,
                                             entityHeader['NPointsWave']),
                                      offset=entityHeader['offset'] + entityHeader['n'] * 4)
                waveforms = (waveforms.astype('f') * entityHeader['ADtoMV'] +
                             entityHeader['MVOffset']) * pq.mV
            t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
            if spike_times.size > 0:
                t_stop = max(t_stop, max(spike_times))
            sptr = SpikeTrain(
                times=spike_times,
                t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s, max(spike_times)),
                t_stop=t_stop,
                name=entityHeader['name'],
                waveforms=waveforms,
                sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                left_sweep=0 * pq.ms,
            )
            if lazy:
                sptr.lazy_shape = entityHeader['n']
            sptr.annotate(channel_index=entityHeader['WireNumber'])
            seg.spiketrains.append(sptr)

        if entityHeader['type'] == 4:
            # popvectors
            pass

        if entityHeader['type'] == 5:
            # analog
            timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                   shape=(entityHeader['n']),
                                   offset=entityHeader['offset'])
            timestamps = timestamps.astype('f8') / globalHeader['freq']
            fragmentStarts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'])
            fragmentStarts = fragmentStarts.astype('f8') / globalHeader['freq']
            t_start = timestamps[0] - fragmentStarts[0] / float(entityHeader['WFrequency'])
            del timestamps, fragmentStarts

            if lazy:
                signal = [] * pq.mV
            else:
                signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                   shape=(entityHeader['NPointsWave']),
                                   offset=entityHeader['offset'])
                signal = signal.astype('f')
                signal *= entityHeader['ADtoMV']
                signal += entityHeader['MVOffset']
                signal = signal * pq.mV

            anaSig = AnalogSignal(signal=signal,
                                  t_start=t_start * pq.s,
                                  sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                                  name=entityHeader['name'],
                                  channel_index=entityHeader['WireNumber'])
            if lazy:
                anaSig.lazy_shape = entityHeader['NPointsWave']
            seg.analogsignals.append(anaSig)

        if entityHeader['type'] == 6:
            # markers: TO TEST
            if lazy:
                times = [] * pq.s
                labels = np.array([], dtype='S')
                markertype = None
            else:
                times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                  shape=(entityHeader['n']),
                                  offset=entityHeader['offset'])
                times = times.astype('f8') / globalHeader['freq'] * pq.s
                fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                # decode() is needed under Python 3, where read() returns bytes
                markertype = fid.read(64).decode().replace('\x00', '')
                labels = np.memmap(self.filename,
                                   np.dtype('S' + str(entityHeader['MarkerLength'])), 'r',
                                   shape=(entityHeader['n']),
                                   offset=entityHeader['offset'] + entityHeader['n'] * 4 + 64)
            ea = EventArray(times=times,
                            labels=labels.view(np.ndarray),
                            name=entityHeader['name'],
                            channel_index=entityHeader['WireNumber'],
                            marker_type=markertype)
            if lazy:
                ea.lazy_shape = entityHeader['n']
            seg.eventarrays.append(ea)

    seg.create_many_to_one_relationship()
    return seg
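# Usage sketch (assumptions: the reader above is neo's NeuroExplorerIO for
# NeuroExplorer .nex files; the filename below is hypothetical).
from neo.io import NeuroExplorerIO

reader = NeuroExplorerIO(filename='recording.nex')
seg = reader.read_segment(lazy=False, cascade=True)
# one container per entity type parsed above
print(len(seg.spiketrains), len(seg.eventarrays),
      len(seg.epocharrays), len(seg.analogsignals))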
def read_segment(self, cascade=True, lazy=False):
    """
    Arguments:
    """
    f = struct_file(self.filename, 'rb')

    # Name
    f.seek(64, 0)
    surname = f.read(22)
    while surname[-1] == ' ':
        if len(surname) == 0:
            break
        surname = surname[:-1]
    firstname = f.read(20)
    while firstname[-1] == ' ':
        if len(firstname) == 0:
            break
        firstname = firstname[:-1]

    # Date
    f.seek(128, 0)
    day, month, year, hour, minute, sec = f.read_f('bbbbbb')
    rec_datetime = datetime.datetime(year + 1900, month, day,
                                     hour, minute, sec)

    f.seek(138, 0)
    Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f('IHHHH')
    #~ print Num_Chan, Bytes

    # header version
    f.seek(175, 0)
    header_version, = f.read_f('b')
    assert header_version == 4

    seg = Segment(name=firstname + ' ' + surname,
                  file_origin=os.path.basename(self.filename))
    seg.annotate(surname=surname)
    seg.annotate(firstname=firstname)
    seg.annotate(rec_datetime=rec_datetime)

    if not cascade:
        return seg

    # area
    f.seek(176, 0)
    zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA',
                  'IMPED_B', 'IMPED_E', 'MONTAGE', 'COMPRESS', 'AVERAGE',
                  'HISTORY', 'DVIDEO', 'EVENT A', 'EVENT B', 'TRIGGER']
    zones = {}
    for zname in zone_names:
        zname2, pos, length = f.read_f('8sII')
        zones[zname] = zname2, pos, length
        #~ print zname2, pos, length

    # reading raw data
    if not lazy:
        f.seek(Data_Start_Offset, 0)
        rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
        # integer division keeps the shape valid under Python 3 as well
        rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

    # Reading Code Info
    zname2, pos, length = zones['ORDER']
    f.seek(pos, 0)
    code = np.fromfile(f, dtype='u2', count=Num_Chan)

    units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1,
             100: pq.percent, 101: pq.dimensionless, 102: pq.dimensionless}

    for c in range(Num_Chan):
        zname2, pos, length = zones['LABCOD']
        f.seek(pos + code[c] * 128 + 2, 0)

        label = f.read(6).strip("\x00")
        ground = f.read(6).strip("\x00")
        (logical_min, logical_max, logical_ground,
         physical_min, physical_max) = f.read_f('iiiii')
        k, = f.read_f('h')
        if k in units.keys():
            unit = units[k]
        else:
            unit = pq.uV

        f.seek(8, 1)
        sampling_rate, = f.read_f('H') * pq.Hz
        sampling_rate *= Rate_Min

        if lazy:
            signal = [] * unit
        else:
            factor = float(physical_max - physical_min) / \
                     float(logical_max - logical_min + 1)
            signal = (rawdata[:, c].astype('f') - logical_ground) * factor * unit

        anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                              name=label, channel_index=c)
        if lazy:
            anaSig.lazy_shape = None
        anaSig.annotate(ground=ground)
        seg.analogsignals.append(anaSig)

    sampling_rate = np.mean([anaSig.sampling_rate
                             for anaSig in seg.analogsignals]) * pq.Hz

    # Read trigger and notes
    for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
        zname2, pos, length = zones[zname]
        f.seek(pos, 0)
        triggers = np.fromstring(f.read(length),
                                 dtype=[('pos', 'u4'), ('label', label_dtype)])
        ea = EventArray(name=zname[0] + zname[1:].lower())
        if not lazy:
            keep = ((triggers['pos'] >= triggers['pos'][0]) &
                    (triggers['pos'] < rawdata.shape[0]) &
                    (triggers['pos'] != 0))
            triggers = triggers[keep]
            ea.labels = triggers['label'].astype('S')
            ea.times = (triggers['pos'] / sampling_rate).rescale('s')
        else:
            ea.lazy_shape = triggers.size
        seg.eventarrays.append(ea)

    # Read Event A and B
    # Not so well tested
    for zname in ['EVENT A', 'EVENT B']:
        zname2, pos, length = zones[zname]
        f.seek(pos, 0)
        epochs = np.fromstring(f.read(length),
                               dtype=[('label', 'u4'), ('start', 'u4'),
                                      ('stop', 'u4')])
        ep = EpochArray(name=zname[0] + zname[1:].lower())
        if not lazy:
            keep = ((epochs['start'] > 0) &
                    (epochs['start'] < rawdata.shape[0]) &
                    (epochs['stop'] < rawdata.shape[0]))
            epochs = epochs[keep]
            ep.labels = epochs['label'].astype('S')
            ep.times = (epochs['start'] / sampling_rate).rescale('s')
            ep.durations = ((epochs['stop'] - epochs['start']) /
                            sampling_rate).rescale('s')
        else:
            # use the epoch count, not the trigger count left over from the
            # previous loop
            ep.lazy_shape = epochs.size
        seg.epocharrays.append(ep)

    seg.create_many_to_one_relationship()
    return seg
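# Usage sketch (assumptions: the reader above is neo's MicromedIO for Micromed
# .TRC files with header version 4; the filename below is hypothetical).
from neo.io import MicromedIO

reader = MicromedIO(filename='recording.TRC')
seg = reader.read_segment(cascade=True, lazy=False)
for anasig in seg.analogsignals:
    print(anasig.name, anasig.sampling_rate, anasig.annotations.get('ground'))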
def generate_one_simple_segment(
        seg_name='segment 0',
        supported_objects=[],
        nb_analogsignal=4,
        t_start=0. * pq.s,
        sampling_rate=10 * pq.kHz,
        duration=6. * pq.s,
        nb_spiketrain=6,
        spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
        event_array_types={'stim': ['a', 'b', 'c', 'd'],
                           'enter_zone': ['one', 'two'],
                           'color': ['black', 'yellow', 'green']},
        event_array_size_range=[5, 20],
        epoch_array_types={'animal state': ['Sleep', 'Freeze', 'Escape'],
                           'light': ['dark', 'lighted']},
        epoch_array_duration_range=[.5, 3.],
):
    seg = Segment(name=seg_name)

    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int(sampling_rate * duration)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  units=pq.mV,
                                  channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration,
                              t_start=t_start, t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if EventArray in supported_objects:
        for name, labels in iteritems(event_array_types):
            # draw an integer size so rand(ea_size) gets a valid argument
            ea_size = int(rand() * np.diff(event_array_size_range)[0]
                          + event_array_size_range[0])
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(ea_size) * len(labels)).astype('i')]
            ea = EventArray(times=rand(ea_size) * duration, labels=labels)
            seg.eventarrays.append(ea)

    if EpochArray in supported_objects:
        for name, labels in iteritems(epoch_array_types):
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                # draw a scalar duration so times and durations stay plain floats
                dur = (rand() * np.diff(epoch_array_duration_range)[0]
                       + epoch_array_duration_range[0])
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            epa = EpochArray(times=pq.Quantity(times, units=pq.s),
                             durations=pq.Quantity(durations, units=pq.s),
                             labels=labels)
            seg.epocharrays.append(epa)

    # TODO : Spike, Event, Epoch
    return seg
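# Usage sketch (assumption: a 0.3-era neo where AnalogSignal and SpikeTrain
# are importable from neo.core; the segment name is arbitrary).
from neo.core import AnalogSignal, SpikeTrain

seg = generate_one_simple_segment(seg_name='demo segment',
                                  supported_objects=[AnalogSignal, SpikeTrain],
                                  nb_analogsignal=2,
                                  nb_spiketrain=3)
print(len(seg.analogsignals), len(seg.spiketrains))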