def setup_units(self):
    '''Create two Unit fixtures wired up with spike trains and spikes.'''
    common = {'name': 'test', 'file_origin': 'test.file',
              'testarg1': 1, 'testarg2': 'yes', 'testarg3': True}
    self.unit1 = Unit(description='tester 1',
                      channel_indexes=np.array([1]), **common)
    self.unit2 = Unit(description='tester 2',
                      channel_indexes=np.array([2]), **common)

    # per-unit extra annotations (unit1 overrides its testarg1)
    self.unit1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
    self.unit2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

    # cross-assign the trains and spikes from the two source pairs
    self.unit1train = [self.train1[0], self.train2[1]]
    self.unit2train = [self.train1[1], self.train2[0]]
    self.unit1.spiketrains = self.unit1train
    self.unit2.spiketrains = self.unit2train

    self.unit1spike = [self.spike1[0], self.spike2[1]]
    self.unit2spike = [self.spike1[1], self.spike2[0]]
    self.unit1.spikes = self.unit1spike
    self.unit2.spikes = self.unit2spike

    for unit in (self.unit1, self.unit2):
        create_many_to_one_relationship(unit)
def proc_src_units(srcfile, filename):
    '''Get the units in an src file that has been processed by the official
    matlab function.  See proc_src for details'''
    # container group: one Unit per sorted cluster plus one catch-all
    rcg = RecordingChannelGroup(file_origin=filename)
    # catch-all unit for spikes never assigned to any cluster
    un_unit = Unit(name='UnassignedSpikes', file_origin=filename,
                   elliptic=[], boundaries=[], timestamp=[], max_valid=[])
    rcg.units.append(un_unit)

    # drill down through the matlab struct arrays (scalar structs are
    # stored as 1x1 object arrays, hence the repeated [0, 0] indexing)
    sortInfo = srcfile['sortInfo'][0, 0]
    timeslice = sortInfo['timeslice'][0, 0]
    maxValid = timeslice['maxValid'][0, 0]
    cluster = timeslice['cluster'][0, 0]
    if len(cluster):
        maxValid = maxValid[0, 0]

    # per-cluster sorting parameters, flattened out of the matlab cells
    elliptic = [res.flatten() for res in cluster['elliptic'].flatten()]
    boundaries = [res.flatten() for res in cluster['boundaries'].flatten()]
    fullclust = zip(elliptic, boundaries)
    for ielliptic, iboundaries in fullclust:
        # NOTE(review): key 'timeStamp' here vs 'timestamp' on the
        # unassigned unit above looks inconsistent -- confirm which
        # spelling downstream comparisons expect before unifying
        unit = Unit(file_origin=filename,
                    boundaries=[iboundaries],
                    elliptic=[ielliptic], timeStamp=[],
                    max_valid=[maxValid])
        rcg.units.append(unit)
    return rcg
def test__construct_subsegment_by_unit(self):
    '''Build a small fake dataset and slice a segment by a unit subset.'''
    n_segments = 3
    n_units = 7
    indexes_with_sig = [0, 2, 5]
    kinds = ['Vm', 'Conductances']
    n_samples = 100

    # recordingchannelgroups
    rcgs = [RecordingChannelGroup(name='Vm',
                                  channel_indexes=indexes_with_sig),
            RecordingChannelGroup(name='Conductance',
                                  channel_indexes=indexes_with_sig)]

    # Unit: one per index
    all_unit = [Unit(name='Unit #%d' % u, channel_indexes=[u])
                for u in range(n_units)]

    bl = Block()
    for s in range(n_segments):
        seg = Segment(name='Simulation %s' % s)
        for j in range(n_units):
            st = SpikeTrain([1, 2, 3], units='ms', t_start=0., t_stop=10)
            st.unit = all_unit[j]

        for kind in kinds:
            anasigarr = AnalogSignalArray(
                np.zeros((n_samples, len(indexes_with_sig))),
                units='nA',
                sampling_rate=1000. * pq.Hz,
                channel_indexes=indexes_with_sig)
            seg.analogsignalarrays.append(anasigarr)

    # what you want
    subseg = seg.construct_subsegment_by_unit(all_unit[:4])
def test__children(self):
    '''Spike attached to both a Segment and a Unit reports both parents.'''
    segment = Segment(name='seg1')
    segment.spikes = [self.spike1]
    segment.create_many_to_one_relationship()

    unit = Unit(name='unit1')
    unit.spikes = [self.spike1]
    unit.create_many_to_one_relationship()

    spk = self.spike1
    self.assertEqual(spk._single_parent_objects, ('Segment', 'Unit'))
    self.assertEqual(spk._multi_parent_objects, ())
    self.assertEqual(spk._single_parent_containers, ('segment', 'unit'))
    self.assertEqual(spk._multi_parent_containers, ())
    self.assertEqual(spk._parent_objects, ('Segment', 'Unit'))
    self.assertEqual(spk._parent_containers, ('segment', 'unit'))

    # exactly two parents, in (segment, unit) order
    self.assertEqual(len(spk.parents), 2)
    self.assertEqual(spk.parents[0].name, 'seg1')
    self.assertEqual(spk.parents[1].name, 'unit1')
    assert_neo_object_is_compliant(spk)
def read_block(self, lazy=False, cascade=True, **kargs):
    '''
    Reads a block from the simple spike data file "fname" generated
    with BrainWare.

    :param lazy: if True, data is not actually loaded.
    :param cascade: if False, return an empty Block immediately.
    :raises NotImplementedError: if any extra keyword argument is passed.
    '''
    # there are no keyargs implemented to so far.  If someone tries to pass
    # them they are expecting them to do something or making a mistake,
    # neither of which should pass silently
    if kargs:
        raise NotImplementedError('This method does not have any '
                                  'argument implemented yet')
    self._fsrc = None
    self.__lazy = lazy

    self._blk = Block(file_origin=self._filename)
    block = self._blk

    # if we aren't doing cascade, don't load anything
    if not cascade:
        return block

    # create the objects to store other objects
    # FIX: np.int was removed in numpy 1.24; the builtin int is the
    # equivalent default integer dtype specifier
    chx = ChannelIndex(file_origin=self._filename,
                       index=np.array([], dtype=int))
    self.__unit = Unit(file_origin=self._filename)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(self.__unit)

    # initialize values
    self.__t_stop = None
    self.__params = None
    self.__seg = None
    self.__spiketimes = None

    # open the file and keep reading segments until the file is done
    with open(self._path, 'rb') as self._fsrc:
        res = True
        while res:
            res = self.__read_id()

    block.create_many_to_one_relationship()

    # cleanup attributes so no stale state leaks between reads
    self._fsrc = None
    self.__lazy = False
    self._blk = None
    self.__t_stop = None
    self.__params = None
    self.__seg = None
    self.__spiketimes = None

    return block
def setup_unit(self):
    '''Create Unit fixtures plus the matching name lists used by tests.'''
    name11, name12 = 'unit 1 1', 'unit 1 2'
    name21, name22 = 'unit 2 1', 'unit 2 2'
    self.unitnames1 = [name11, name12]
    self.unitnames2 = [name21, name22, name11]
    self.unitnames = [name11, name12, name21, name22]

    def make(name, chan):
        # helper: a Unit bound to a single channel index
        return Unit(name=name, channel_indexes=np.array([chan]))

    unit11 = make(name11, 1)
    unit12 = make(name12, 2)
    unit21 = make(name21, 1)
    unit22 = make(name22, 2)
    unit23 = make(name11, 1)  # duplicate of unit11's name and channel

    self.units1 = [unit11, unit12]
    self.units2 = [unit21, unit22, unit23]
    self.units = [unit11, unit12, unit21, unit22]
def _read_unit(self, node, parent):
    """Rebuild a Unit from a file node, resolving spiketrain references."""
    attributes = self._get_standard_attributes(node)
    unit = Unit(**attributes)
    unit.channel_index = parent
    # resolve every child whose name marks it as a SpikeTrain via the
    # object_ref lookup table
    unit.spiketrains = [
        self.object_refs[child_node.attrs["object_ref"]]
        for name, child_node in node["spiketrains"].items()
        if "SpikeTrain" in name
    ]
    return unit
def read_channelindex(self, path, cascade=True, lazy=False,
                      read_waveforms=True):
    """
    Read one exdir channel group as a neo ChannelIndex.

    Attaches any LFP/MUA analog signals found in the group and builds
    Units either from sorted 'UnitTimes' data or, failing that, from the
    raw 'EventWaveform' spiketrain.
    """
    channel_group = self._exdir_directory[path]
    group_id = channel_group.attrs['electrode_group_id']
    chx = ChannelIndex(
        name='Channel group {}'.format(group_id),
        index=channel_group.attrs['electrode_idx'],
        channel_ids=channel_group.attrs['electrode_identities'],
        **{
            'group_id': group_id,
            'exdir_path': path
        })
    # local field potentials, one analogsignal per LFP subgroup
    if 'LFP' in channel_group:
        for lfp_group in channel_group['LFP'].values():
            ana = self.read_analogsignal(lfp_group.name,
                                         cascade=cascade, lazy=lazy)
            chx.analogsignals.append(ana)
            ana.channel_index = chx
    # multi-unit activity signals
    if 'MUA' in channel_group:
        for mua_group in channel_group['MUA'].values():
            ana = self.read_analogsignal(mua_group.name,
                                         cascade=cascade, lazy=lazy)
            chx.analogsignals.append(ana)
            ana.channel_index = chx
    sptrs = []  # NOTE(review): never populated or used below
    if 'UnitTimes' in channel_group:
        # sorted data: one Unit (with one spiketrain) per unit group
        for unit_group in channel_group['UnitTimes'].values():
            unit = self.read_unit(unit_group.name, cascade=cascade,
                                  lazy=lazy,
                                  read_waveforms=read_waveforms)
            unit.channel_index = chx
            chx.units.append(unit)
            sptr = unit.spiketrains[0]
            sptr.channel_index = chx
    elif 'EventWaveform' in channel_group:
        # unsorted data: wrap the raw spiketrain in a single Unit
        sptr = self.read_spiketrain(channel_group['EventWaveform'].name,
                                    cascade=cascade, lazy=lazy,
                                    read_waveforms=read_waveforms)
        unit = Unit(name=sptr.name, **sptr.annotations)
        unit.spiketrains.append(sptr)
        unit.channel_index = chx
        sptr.channel_index = chx
        chx.units.append(unit)
    return chx
def read_unit(self, path, cascade=True, lazy=False, cluster_num=None,
              read_waveforms=True):
    """Read a single Unit (and its one spiketrain) from an exdir path."""
    group = self._exdir_directory[path]
    # a unit must live directly under a UnitTimes group
    assert group.parent.object_name == 'UnitTimes'

    annotations = {'exdir_path': path}
    annotations.update(group.attrs.to_dict())
    unit = Unit(**annotations)

    spiketrain = self.read_spiketrain(path, cascade, lazy, cluster_num,
                                      read_waveforms)
    unit.spiketrains.append(spiketrain)
    return unit
def test__construct_subsegment_by_unit(self):
    '''Slice a segment down to a subset of units; result stays compliant.'''
    n_seg = 3
    n_unit = 7
    sig_channels = np.array([0, 2, 5])
    kinds = ['Vm', 'Conductances']
    n_samples = 100

    # channelindexes
    chxs = [ChannelIndex(name='Vm', index=sig_channels),
            ChannelIndex(name='Conductance', index=sig_channels)]

    # Unit: one per channel, each individually compliant
    all_unit = []
    for u in range(n_unit):
        unit = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
        assert_neo_object_is_compliant(unit)
        all_unit.append(unit)

    blk = Block()
    blk.channel_indexes = chxs
    for s in range(n_seg):
        seg = Segment(name='Simulation %s' % s)
        for j in range(n_unit):
            st = SpikeTrain([1, 2], units='ms', t_start=0., t_stop=10)
            st.unit = all_unit[j]

        for kind in kinds:
            sig = AnalogSignal(np.zeros((n_samples, len(sig_channels))),
                               units='nA',
                               sampling_rate=1000. * pq.Hz,
                               channel_indexes=sig_channels)
            seg.analogsignals.append(sig)

    blk.create_many_to_one_relationship()
    for unit in all_unit:
        assert_neo_object_is_compliant(unit)
    for chx in chxs:
        assert_neo_object_is_compliant(chx)
    assert_neo_object_is_compliant(blk)

    # what you want
    newseg = seg.construct_subsegment_by_unit(all_unit[:4])
    assert_neo_object_is_compliant(newseg)
def test_anonymous_objects_write(self):
    '''Write a full hierarchy of unnamed objects and verify the read-back.'''
    nblocks, nsegs = 2, 2
    nanasig, nirrseg = 4, 2
    nepochs, nevents, nspiketrains = 3, 4, 3
    nchx, nunits = 5, 10

    times = self.rquant(1, pq.s)
    signal = self.rquant(1, pq.V)

    blocks = []
    for _ in range(nblocks):
        blk = Block()
        blocks.append(blk)
        for _ in range(nsegs):
            seg = Segment()
            blk.segments.append(seg)
            for _ in range(nanasig):
                seg.analogsignals.append(
                    AnalogSignal(signal=signal, sampling_rate=pq.Hz))
            for _ in range(nirrseg):
                seg.irregularlysampledsignals.append(
                    IrregularlySampledSignal(times=times, signal=signal,
                                             time_units=pq.s))
            for _ in range(nepochs):
                seg.epochs.append(Epoch(times=times, durations=times))
            for _ in range(nevents):
                seg.events.append(Event(times=times))
            for _ in range(nspiketrains):
                seg.spiketrains.append(
                    SpikeTrain(times=times, t_stop=times[-1] + pq.s,
                               units=pq.s))
        # channel indexes with their (anonymous) units
        for chidx in range(nchx):
            chx = ChannelIndex(name="chx{}".format(chidx),
                               index=[1, 2],
                               channel_ids=[11, 22])
            blk.channel_indexes.append(chx)
            for _ in range(nunits):
                chx.units.append(Unit())

    self.writer.write_all_blocks(blocks)
    self.compare_blocks(blocks, self.reader.blocks)
def create_all_annotated(cls):
    """Build a Block containing one annotated instance of every neo type."""
    times = cls.rquant(1, pq.s)
    signal = cls.rquant(1, pq.V)

    # block and segment carry annotations plus rec/file datetimes
    blk = Block()
    blk.annotate(**cls.rdict(3))
    cls.populate_dates(blk)

    seg = Segment()
    seg.annotate(**cls.rdict(4))
    cls.populate_dates(seg)
    blk.segments.append(seg)

    # one of each data object, each with random annotations
    asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
    asig.annotate(**cls.rdict(2))
    seg.analogsignals.append(asig)

    isig = IrregularlySampledSignal(times=times, signal=signal,
                                    time_units=pq.s)
    isig.annotate(**cls.rdict(2))
    seg.irregularlysampledsignals.append(isig)

    epoch = Epoch(times=times, durations=times)
    epoch.annotate(**cls.rdict(4))
    seg.epochs.append(epoch)

    event = Event(times=times)
    event.annotate(**cls.rdict(4))
    seg.events.append(event)

    # spiketrain additionally gets quantity-valued annotations
    spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
    extra = cls.rdict(6)
    extra["quantity"] = pq.Quantity(10, "mV")
    extra["qarray"] = pq.Quantity(range(10), "mA")
    spiketrain.annotate(**extra)
    seg.spiketrains.append(spiketrain)

    chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
    chx.annotate(**cls.rdict(5))
    blk.channel_indexes.append(chx)

    unit = Unit()
    unit.annotate(**cls.rdict(2))
    chx.units.append(unit)

    return blk
def test_multiref_write(self):
    '''Children referenced by several parents must round-trip correctly.'''
    blk = Block("blk1")
    signal = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                          sampling_period=pq.Quantity(1, "ms"))
    othersignal = IrregularlySampledSignal(name="i1", signal=[0, 0, 0],
                                           units="mV", times=[1, 2, 3],
                                           time_units="ms")
    event = Event(name="Evee", times=[0.3, 0.42], units="year")
    epoch = Epoch(name="epoche", times=[0.1, 0.2] * pq.min,
                  durations=[0.5, 0.5] * pq.min)
    st = SpikeTrain(name="the train of spikes", times=[0.1, 0.2, 10.3],
                    t_stop=11, units="us")

    # attach the very same five children to three different segments
    for idx in range(3):
        seg = Segment("seg" + str(idx))
        blk.segments.append(seg)
        seg.analogsignals.append(signal)
        seg.irregularlysampledsignals.append(othersignal)
        seg.events.append(event)
        seg.epochs.append(epoch)
        seg.spiketrains.append(st)

    chidx = ChannelIndex([10, 20, 29])
    seg = blk.segments[0]
    st = SpikeTrain(name="choochoo", times=[10, 11, 80], t_stop=1000,
                    units="s")
    seg.spiketrains.append(st)
    blk.channel_indexes.append(chidx)

    # one spiketrain shared by six units
    for idx in range(6):
        unit = Unit("unit" + str(idx))
        chidx.units.append(unit)
        unit.spiketrains.append(st)

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)
def read_block(self, lazy=False, cascade=True, load_waveforms=False): """ """ # Create block bl = Block(file_origin=self.filename) if not cascade: return bl seg = self.read_segment(self.filename, lazy=lazy, cascade=cascade, load_waveforms=load_waveforms) bl.segments.append(seg) neo.io.tools.populate_RecordingChannel(bl, remove_from_annotation=False) # This create rc and RCG for attaching Units rcg0 = bl.recordingchannelgroups[0] def find_rc(chan): for rc in rcg0.recordingchannels: if rc.index == chan: return rc for st in seg.spiketrains: chan = st.annotations['channel_index'] rc = find_rc(chan) if rc is None: rc = RecordingChannel(index=chan) rcg0.recordingchannels.append(rc) rc.recordingchannelgroups.append(rcg0) if len(rc.recordingchannelgroups) == 1: rcg = RecordingChannelGroup(name='Group {}'.format(chan)) rcg.recordingchannels.append(rc) rc.recordingchannelgroups.append(rcg) bl.recordingchannelgroups.append(rcg) else: rcg = rc.recordingchannelgroups[1] unit = Unit(name=st.name) rcg.units.append(unit) unit.spiketrains.append(st) bl.create_many_to_one_relationship() return bl
def test_no_segment_write(self):
    """Objects reachable only through a ChannelIndex (no Segment) must
    still survive a write/read round-trip."""
    blk = Block("segmentless block")
    asig = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                        sampling_period=pq.Quantity(1, "ms"))
    irrsig = IrregularlySampledSignal(name="i1", signal=[0, 0, 0],
                                      units="mV", times=[1, 2, 3],
                                      time_units="ms")
    train_a = SpikeTrain(name="the train of spikes",
                         times=[0.1, 0.2, 10.3], t_stop=11, units="us")
    train_b = SpikeTrain(name="the train of spikes b",
                         times=[1.1, 2.2, 10.1], t_stop=100, units="ms")

    channel_idx = ChannelIndex([8, 13, 21])
    blk.channel_indexes.append(channel_idx)
    channel_idx.analogsignals.append(asig)
    channel_idx.irregularlysampledsignals.append(irrsig)

    unit = Unit()
    channel_idx.units.append(unit)
    for train in (train_a, train_b):
        unit.spiketrains.append(train)

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)
    self.writer.close()

    # re-open read-only and check the secondary structure survived
    reader = NixIO(self.filename, "ro")
    roundtrip = reader.read_block(neoname="segmentless block")
    chx = roundtrip.channel_indexes[0]
    self.assertEqual(len(chx.analogsignals), 1)
    self.assertEqual(len(chx.irregularlysampledsignals), 1)
    self.assertEqual(len(chx.units[0].spiketrains), 2)
def read_unit(fh, block_id, rcg_source_id, unit_id):
    """
    Read a Unit from a NIX file: attributes and annotations come from the
    unit's source metadata; spiketrains are resolved lazily via a proxy.
    """
    def read_spiketrains(nix_file):
        # collect every 'spiketrain' data array referencing this unit's
        # source (matched by name via the closure variable nsn below)
        strains = filter(lambda x: x.type == 'spiketrain',
                         nix_file.blocks[block_id].data_arrays)
        strains = [x for x in strains
                   if nsn in [y.name for y in x.sources]]
        return [Reader.read_spiketrain(fh, block_id, da.name)
                for da in strains]

    nix_block = fh.handle.blocks[block_id]
    nix_rcg_source = nix_block.sources[rcg_source_id]
    nix_source = nix_rcg_source.sources[unit_id]
    nsn = nix_source.name  # captured by read_spiketrains above

    # NOTE(review): the local name 'rcg' actually holds a Unit, not a
    # recording channel group -- likely copy-pasted; rename candidate
    rcg = Unit(nix_source.name)

    for key, value in Reader.Help.read_attributes(nix_source.metadata,
                                                  'unit').items():
        setattr(rcg, key, value)

    rcg.annotations = Reader.Help.read_annotations(nix_source.metadata,
                                                   'unit')

    # spiketrains resolve on first access through the proxy list
    setattr(rcg, 'spiketrains', ProxyList(fh, read_spiketrains))

    return rcg
def test__issue_285(self):
    '''Pickle round-trip keeps spiketrain->unit links and epoch types.'''
    train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
    unit = Unit()
    train.unit = unit
    unit.spiketrains.append(train)

    epoch = Epoch([0, 10, 20], [2, 2, 2], ["a", "b", "c"], units="ms")

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    seg.epochs.append(epoch)
    epoch.segment = seg
    blk.segments.append(seg)

    # write, then read back through a fresh IO instance
    writer = PickleIO(filename="blk.pkl")
    writer.write(blk)
    restored_seg = PickleIO(filename="blk.pkl").read_block().segments[0]
    self.assertIsInstance(restored_seg.spiketrains[0].unit, Unit)
    self.assertIsInstance(restored_seg.epochs[0], Epoch)
def test_multiref_write(self):
    '''A child shared by several parents is written once and re-linked.'''
    blk = Block("blk1")
    shared_sig = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                              sampling_period=pq.Quantity(1, "ms"))
    # the same signal goes into all three segments
    for idx in range(3):
        seg = Segment("seg" + str(idx))
        blk.segments.append(seg)
        seg.analogsignals.append(shared_sig)

    chidx = ChannelIndex([10, 20, 29])
    seg = blk.segments[0]
    shared_train = SpikeTrain(name="choochoo", times=[10, 11, 80],
                              t_stop=1000, units="s")
    seg.spiketrains.append(shared_train)
    blk.channel_indexes.append(chidx)

    # ...and the same spiketrain into all six units
    for idx in range(6):
        unit = Unit("unit" + str(idx))
        chidx.units.append(unit)
        unit.spiketrains.append(shared_train)

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)
def test__issue_285(self):
    '''
    Regression test for issue #285: links to parent objects
    (SpikeTrain.unit, Epoch/Event/Signal.segment) must survive a
    pickle round-trip.
    '''
    # Spiketrain
    train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
    unit = Unit()
    train.unit = unit
    unit.spiketrains.append(train)

    epoch = Epoch(np.array([0, 10, 20]), np.array([2, 2, 2]),
                  np.array(["a", "b", "c"]), units="ms")

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    seg.epochs.append(epoch)
    epoch.segment = seg
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)
    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
    self.assertIsInstance(r_seg.epochs[0], Epoch)
    os.remove('blk.pkl')

    # Epoch
    epoch = Epoch(times=np.arange(0, 30, 10) * pq.s,
                  durations=[10, 5, 7] * pq.ms,
                  labels=np.array(['btn0', 'btn1', 'btn2'], dtype='U'))
    epoch.segment = Segment()
    blk = Block()
    seg = Segment()
    seg.epochs.append(epoch)
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)
    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    # the epoch's own segment link must come back as a Segment
    self.assertIsInstance(r_seg.epochs[0].segment, Segment)
    os.remove('blk.pkl')

    # Event
    event = Event(np.arange(0, 30, 10) * pq.s,
                  labels=np.array(['trig0', 'trig1', 'trig2'], dtype='U'))
    event.segment = Segment()

    blk = Block()
    seg = Segment()
    seg.events.append(event)
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)
    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.events[0].segment, Segment)
    os.remove('blk.pkl')

    # IrregularlySampledSignal
    signal = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
                                      units='mV', time_units='ms')
    signal.segment = Segment()

    blk = Block()
    seg = Segment()
    seg.irregularlysampledsignals.append(signal)
    blk.segments.append(seg)
    blk.segments[0].block = blk

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)
    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.irregularlysampledsignals[0].segment,
                          Segment)
    os.remove('blk.pkl')
def read_block(self, block_index=0, lazy=False,
               signal_group_mode=None, units_group_mode=None,
               load_waveforms=False):
    """
    :param block_index: int default 0. In case of several blocks,
        block_index can be specified.

    :param lazy: False by default.

    :param signal_group_mode: 'split-all' or 'group-by-same-units'
        (default depends on the IO):
        This controls the behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity
              units are grouped in a 2D AnalogSignal

    :param units_group_mode: 'split-all' or 'all-in-one' (default depends
        on the IO). This controls the behavior for grouping Unit in
        ChannelIndex:
            * 'split-all': each neo.Unit is assigned to a new
              neo.ChannelIndex
            * 'all-in-one': all neo.Unit are grouped in the same
              neo.ChannelIndex (global spike sorting for instance)

    :param load_waveforms: False by default. Controls whether
        SpikeTrain.waveforms is None or not.
    """
    if signal_group_mode is None:
        signal_group_mode = self._prefered_signal_group_mode
        if self._prefered_signal_group_mode == 'split-all':
            self.logger.warning("the default signal_group_mode will change from "\
                "'split-all' to 'group-by-same-units' in next release")

    if units_group_mode is None:
        units_group_mode = self._prefered_units_group_mode

    # annotations
    bl_annotations = dict(self.raw_annotations['blocks'][block_index])
    bl_annotations.pop('segments')
    bl_annotations = check_annotations(bl_annotations)

    bl = Block(**bl_annotations)

    # ChannelIndex are split in 2 parts:
    #  * some for AnalogSignals
    #  * some for Units

    # ChannelIndex for AnalogSignals
    all_channels = self.header['signal_channels']
    channel_indexes_list = self.get_group_channel_indexes()
    for channel_index in channel_indexes_list:
        for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                channel_index,
                signal_group_mode=signal_group_mode).items():
            if signal_group_mode == "split-all":
                chidx_annotations = self.raw_annotations['signal_channels'][i]
            elif signal_group_mode == "group-by-same-units":
                # this should be done with array_annotation soon:
                # take keys from the first channel of the group
                keys = list(self.raw_annotations['signal_channels'][
                    ind_abs[0]].keys())
                chidx_annotations = {key: [] for key in keys}
                for j in ind_abs:
                    for key in keys:
                        v = self.raw_annotations['signal_channels'][j].get(
                            key, None)
                        chidx_annotations[key].append(v)
            # 'name' is a neo attribute, not an annotation
            if 'name' in list(chidx_annotations.keys()):
                chidx_annotations.pop('name')
            chidx_annotations = check_annotations(chidx_annotations)
            # this should be done with array_annotation soon:
            ch_names = all_channels[ind_abs]['name'].astype('S')
            neo_channel_index = ChannelIndex(
                index=ind_within,
                channel_names=ch_names,
                channel_ids=all_channels[ind_abs]['id'],
                name='Channel group {}'.format(i),
                **chidx_annotations)
            bl.channel_indexes.append(neo_channel_index)

    # ChannelIndex and Unit
    # 2 cases are possible; in neo, different IOs have chosen one or other:
    #  * All units are grouped in the same ChannelIndex and indexes are all
    #    channels: 'all-in-one'
    #  * Each unit is assigned to one ChannelIndex: 'split-all'
    # This is kept for compatibility
    unit_channels = self.header['unit_channels']
    if units_group_mode == 'all-in-one':
        if unit_channels.size > 0:
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for all Unit')
            bl.channel_indexes.append(channel_index)
        for c in range(unit_channels.size):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit_annotations = check_annotations(unit_annotations)
            unit = Unit(**unit_annotations)
            channel_index.units.append(unit)
    elif units_group_mode == 'split-all':
        for c in range(len(unit_channels)):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit_annotations = check_annotations(unit_annotations)
            unit = Unit(**unit_annotations)
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for Unit')
            channel_index.units.append(unit)
            bl.channel_indexes.append(channel_index)

    # Read all segments
    for seg_index in range(self.segment_count(block_index)):
        seg = self.read_segment(block_index=block_index,
                                seg_index=seg_index, lazy=lazy,
                                signal_group_mode=signal_group_mode,
                                load_waveforms=load_waveforms)
        bl.segments.append(seg)

    # create link to other containers ChannelIndex and Units
    for seg in bl.segments:
        for c, anasig in enumerate(seg.analogsignals):
            bl.channel_indexes[c].analogsignals.append(anasig)

        # unit channel indexes come after the nsig signal ones
        nsig = len(seg.analogsignals)
        for c, sptr in enumerate(seg.spiketrains):
            if units_group_mode == 'all-in-one':
                bl.channel_indexes[nsig].units[c].spiketrains.append(sptr)
            elif units_group_mode == 'split-all':
                bl.channel_indexes[nsig + c].units[0].spiketrains.append(sptr)

    bl.create_many_to_one_relationship()

    return bl
def read_block(self, block_index=0, lazy=False, cascade=True,
               signal_group_mode=None, units_group_mode=None,
               load_waveforms=False, time_slices=None):
    """
    :param block_index: int default 0. In case of several blocks,
        block_index can be specified.
    :param lazy: False by default.
    :param cascade: True by default.
    :param signal_group_mode: 'split-all' or 'group-by-same-units'
        (default depends on the IO):
        This controls the behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity
              units are grouped in a 2D AnalogSignal
    :param units_group_mode: 'split-all' or 'all-in-one' (default depends
        on the IO). This controls the behavior for grouping Unit in
        ChannelIndex:
            * 'split-all': each neo.Unit is assigned to a new
              neo.ChannelIndex
            * 'all-in-one': all neo.Unit are grouped in the same
              neo.ChannelIndex (global spike sorting for instance)
    :param load_waveforms: False by default. Controls whether
        SpikeTrain.waveforms is None or not.
    :param time_slices: None by default. List of time_slice; a time slice
        is (t_start, t_stop), both quantities. Each element leads to a
        fake neo.Segment, so len(block.segments) == len(time_slices).
        Every time_slice must lie within one original segment's range.
    :raises ValueError: if a time_slice is not contained in any segment.
    """
    if signal_group_mode is None:
        signal_group_mode = self._prefered_signal_group_mode
    if units_group_mode is None:
        units_group_mode = self._prefered_units_group_mode

    # annotations
    bl_annotations = dict(self.raw_annotations['blocks'][block_index])
    bl_annotations.pop('segments')
    bl_annotations = check_annotations(bl_annotations)

    bl = Block(**bl_annotations)

    if not cascade:
        return bl

    # ChannelIndex are split in 2 parts:
    #  * some for AnalogSignals
    #  * some for Units

    # ChannelIndex for AnalogSignals
    all_channels = self.header['signal_channels']
    channel_indexes_list = self.get_group_channel_indexes()
    for channel_index in channel_indexes_list:
        for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                channel_index, signal_group_mode=signal_group_mode).items():
            neo_channel_index = ChannelIndex(
                index=ind_within,
                channel_names=all_channels[ind_abs]['name'].astype('S'),
                channel_ids=all_channels[ind_abs]['id'],
                name='Channel group {}'.format(i))
            bl.channel_indexes.append(neo_channel_index)

    # ChannelIndex and Unit
    # 2 cases are possible; different IOs have chosen one or the other:
    #  * all units grouped in one ChannelIndex: 'all-in-one'
    #  * each unit gets its own ChannelIndex: 'split-all'
    # This is kept for compatibility
    unit_channels = self.header['unit_channels']
    if units_group_mode == 'all-in-one':
        if unit_channels.size > 0:
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for all Unit')
            bl.channel_indexes.append(channel_index)
        for c in range(unit_channels.size):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit = Unit(**unit_annotations)
            channel_index.units.append(unit)
    elif units_group_mode == 'split-all':
        for c in range(len(unit_channels)):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit = Unit(**unit_annotations)
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for Unit')
            channel_index.units.append(unit)
            bl.channel_indexes.append(channel_index)

    if time_slices is None:
        # Read the real segments
        for seg_index in range(self.segment_count(block_index)):
            seg = self.read_segment(block_index=block_index,
                                    seg_index=seg_index,
                                    lazy=lazy, cascade=cascade,
                                    signal_group_mode=signal_group_mode,
                                    load_waveforms=load_waveforms)
            bl.segments.append(seg)
    else:
        # return a fake segment list corresponding to time_slices
        for s, time_slice in enumerate(time_slices):
            # find which original segment contains this time_slice
            t_start, t_stop = time_slice
            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            related_seg_index = None
            for seg_index in range(self.segment_count(block_index)):
                seg_t_start = self.segment_t_start(block_index,
                                                   seg_index) * pq.s
                seg_t_stop = self.segment_t_stop(block_index,
                                                 seg_index) * pq.s
                if (seg_t_start <= t_start <= seg_t_stop) and \
                        (seg_t_start <= t_stop <= seg_t_stop):
                    related_seg_index = seg_index

            if related_seg_index is None:
                raise ValueError(
                    'time_slice not in any segment range {}'.format(
                        time_slice))

            seg = self.read_segment(block_index=block_index,
                                    seg_index=related_seg_index,
                                    lazy=lazy, cascade=cascade,
                                    signal_group_mode=signal_group_mode,
                                    load_waveforms=load_waveforms,
                                    time_slice=time_slice)
            seg.index = s
            bl.segments.append(seg)
            # BUGFIX: the analogsignal <-> ChannelIndex links are created
            # once for every segment in the loop below; also creating
            # them here linked each signal twice in the time_slices case.

    # create links to the other containers, ChannelIndex and Unit
    for seg in bl.segments:
        for c, anasig in enumerate(seg.analogsignals):
            bl.channel_indexes[c].analogsignals.append(anasig)

        # unit channel indexes come after the nsig signal ones
        nsig = len(seg.analogsignals)
        for c, sptr in enumerate(seg.spiketrains):
            if units_group_mode == 'all-in-one':
                bl.channel_indexes[nsig].units[c].spiketrains.append(sptr)
            elif units_group_mode == 'split-all':
                bl.channel_indexes[nsig + c].units[0].spiketrains.append(sptr)

    bl.create_many_to_one_relationship()

    return bl
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which is
    then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareF32IO to make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that
    pattern. 'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''
    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    # FIX: np.int was removed in numpy 1.24; plain int is equivalent
    chx = ChannelIndex(file_origin=filenameorig,
                       index=np.array([], dtype=int),
                       channel_names=np.array([], dtype='S'))
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            # FIX: NpzFile.items() is a non-subscriptable view in
            # Python 3; materialize before indexing
            f32file = list(f32obj.items())[0][1].flatten()
    except IOError as exc:
        # FIX: exc.message only existed in Python 2; str(exc) is portable
        if 'as a pickle' in str(exc):
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]
    sweeps = [res['spikes'].flatten() for res in f32file['sweep']
              if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            # one Segment per sweep, annotated with the stim parameters
            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms,
                               t_start=0, t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
def read_spiketrain(self):
    """Read the spike times and waveforms from every channel-group file
    (numeric ".N" suffix) found next to the base file and return one
    SpikeTrain per channel group, each linked to a ChannelIndex and a
    fresh Unit."""
    # TODO add parameter to allow user to read raw data or not?
    assert (SpikeTrain in self.readable_objects)
    spike_trains = []
    # channel-group files share the base filename and end in a number,
    # e.g. basename.1, basename.2, ...
    channel_group_files = glob.glob(
        os.path.join(self._path, self._base_filename) + ".[0-9]*")
    for raw_filename in sorted(channel_group_files):
        with open(raw_filename, "rb") as f:
            params = parse_header_and_leave_cursor(f)
            # the numeric file suffix identifies the channel group
            channel_group_index = int(raw_filename.split(".")[-1])
            # header fields with format defaults; NOTE(review): defaults
            # presumably match the Axona/Tint file spec — confirm
            bytes_per_timestamp = params.get("bytes_per_timestamp", 4)
            bytes_per_sample = params.get("bytes_per_sample", 1)
            num_spikes = params.get("num_spikes", 0)
            num_chans = params.get("num_chans", 1)
            samples_per_spike = params.get("samples_per_spike", 50)
            # "timebase" is a string like "96000 hz"; keep the number only
            timebase = int(
                params.get("timebase", "96000 hz").split(" ")[0]) * pq.Hz
            sampling_rate = params.get("rawrate", 48000) * pq.Hz
            bytes_per_spike_without_timestamp = \
                samples_per_spike * bytes_per_sample
            bytes_per_spike = \
                bytes_per_spike_without_timestamp + bytes_per_timestamp
            # timestamps are big-endian unsigned, samples little-endian signed
            timestamp_dtype = ">u" + str(bytes_per_timestamp)
            waveform_dtype = "<i" + str(bytes_per_sample)
            # one record = one timestamp + samples_per_spike waveform samples
            dtype = np.dtype([("times", (timestamp_dtype, 1), 1),
                              ("waveforms", (waveform_dtype, 1),
                               samples_per_spike)])
            # records are interleaved per channel: num_spikes * num_chans
            data = np.fromfile(f, dtype=dtype, count=num_spikes * num_chans)
            assert_end_of_data(f)

        # times are saved for each channel
        # (identical across channels, so keep every num_chans-th entry)
        times = data["times"][::num_chans] / timebase
        assert len(times) == num_spikes
        waveforms = data["waveforms"]
        waveforms = np.reshape(waveforms, (num_spikes, num_chans,
                                           samples_per_spike))
        # TODO HACK !!!! findout if recording is sig - ref or the other
        # way around, this determines the way of the peak which should be
        # possible to set in a parameter e.g. peak='negative'/'positive'
        waveforms = -waveforms.astype(float)
        # per-channel gain scaling before converting to physical units
        channel_gain_matrix = np.ones(waveforms.shape)
        for i in range(num_chans):
            channel_gain_matrix[:, i, :] *= self._channel_gain(
                channel_group_index, i)
        waveforms = scale_analog_signal(waveforms,
                                        channel_gain_matrix,
                                        self._adc_fullscale,
                                        bytes_per_sample)

        # TODO get left_sweep form setfile?
        # all header params are attached to the train as annotations
        spike_train = SpikeTrain(times, t_stop=self._duration,
                                 waveforms=waveforms * pq.uV,
                                 sampling_rate=sampling_rate,
                                 left_sweep=0.2 * pq.ms,
                                 **params)
        spike_trains.append(spike_train)
        channel_index = self._channel_group_to_channel_index[
            channel_group_index]
        spike_train.channel_index = channel_index
        unit = Unit()  # TODO unit can have several spiketrains from different segments, not necessarily relevant here though
        unit.spiketrains.append(spike_train)
        channel_index.units.append(unit)

    return spike_trains
def setUp(self):
    """Build the test fixture: a Block with two ChannelIndexes, three
    Units each holding one SpikeTrain with random waveforms, plus one
    AnalogSignal and one Epoch, after clearing any leftover exdir
    directory from a previous run."""
    self.fname = '/tmp/test.exdir'
    if os.path.exists(self.fname):
        shutil.rmtree(self.fname)
    self.n_channels = 5
    self.n_samples = 20
    self.n_spikes = 50
    blk = Block()
    seg = Segment()
    blk.segments.append(seg)
    chx1 = ChannelIndex(index=np.arange(self.n_channels),
                        channel_ids=np.arange(self.n_channels))
    chx2 = ChannelIndex(index=np.arange(self.n_channels),
                        channel_ids=np.arange(self.n_channels) * 2)
    blk.channel_indexes.extend([chx1, chx2])

    # fix: wf1/wf3 were computed but never used while each SpikeTrain drew
    # a second, identical-shape random array inline; pass the precomputed
    # arrays instead (equivalent for this unseeded random fixture)
    wf1 = np.random.random(
        (self.n_spikes, self.n_channels, self.n_samples))
    ts1 = np.sort(np.random.random(self.n_spikes))
    t_stop1 = np.ceil(ts1[-1])
    sptr1 = SpikeTrain(
        times=ts1, units='s',
        waveforms=wf1 * pq.V,
        name='spikes 1',
        description='sptr1',
        t_stop=t_stop1, **{'id': 1})
    sptr1.channel_index = chx1
    unit1 = Unit(name='unit 1')
    unit1.spiketrains.append(sptr1)
    chx1.units.append(unit1)
    seg.spiketrains.append(sptr1)

    wf2 = np.random.random(
        (self.n_spikes, self.n_channels, self.n_samples))
    ts2 = np.sort(np.random.random(self.n_spikes))
    t_stop2 = np.ceil(ts2[-1])
    sptr2 = SpikeTrain(
        times=ts2, units='s',
        waveforms=wf2 * pq.V,
        description='sptr2',
        name='spikes 2',
        t_stop=t_stop2, **{'id': 2})
    sptr2.channel_index = chx2
    unit2 = Unit(name='unit 2')
    unit2.spiketrains.append(sptr2)
    chx2.units.append(unit2)
    seg.spiketrains.append(sptr2)

    wf3 = np.random.random(
        (self.n_spikes, self.n_channels, self.n_samples))
    ts3 = np.sort(np.random.random(self.n_spikes))
    t_stop3 = np.ceil(ts3[-1])
    sptr3 = SpikeTrain(
        times=ts3, units='s',
        waveforms=wf3 * pq.V,
        description='sptr3',
        name='spikes 3',
        t_stop=t_stop3, **{'id': 3})
    sptr3.channel_index = chx2
    unit3 = Unit(name='unit 3')
    unit3.spiketrains.append(sptr3)
    chx2.units.append(unit3)
    seg.spiketrains.append(sptr3)

    # the signal spans the full recording, so its t_stop must equal the
    # latest spike-train t_stop
    t_stop = max([t_stop1, t_stop2, t_stop3]) * pq.s

    ana = AnalogSignal(np.random.random(self.n_samples),
                       sampling_rate=self.n_samples / t_stop,
                       units='V',
                       name='ana1',
                       description='LFP')
    assert t_stop == ana.t_stop
    seg.analogsignals.append(ana)
    epo = Epoch(np.random.random(self.n_samples),
                durations=[1] * self.n_samples * pq.s,
                units='s',
                name='epo1')
    seg.epochs.append(epo)
    self.blk = blk
def read_block(
    self,
    lazy=False,
    get_waveforms=True,
    cluster_group=None,
    raw_data_units='uV',
    get_raw_data=False,
):
    """
    Reads a block with segments and channel_indexes

    Parameters:
    get_waveforms: bool, default = True
        Whether or not to get the waveforms
    get_raw_data: bool, default = False
        Whether or not to get the raw traces
    raw_data_units: str, default = "uV"
        SI units of the raw trace according to voltage_gain given to klusta
    cluster_group: str, default = None
        Which clusters to load, possibilities are "noise", "unsorted",
        "good", if None all is loaded.
    """
    # fix: docstring previously claimed get_waveforms defaults to False,
    # contradicting the signature; also dropped the unused local
    # 'clusters = model.spike_clusters'
    assert not lazy, 'Do not support lazy'

    blk = Block()
    seg = Segment(file_origin=self.filename)
    blk.segments += [seg]
    for model in self.models:
        group_id = model.channel_group
        group_meta = {'group_id': group_id}
        group_meta.update(model.metadata)
        chx = ChannelIndex(name='channel group #{}'.format(group_id),
                           index=model.channels,
                           **group_meta)
        blk.channel_indexes.append(chx)
        for cluster_id in model.cluster_ids:
            meta = model.cluster_metadata[cluster_id]
            # when a cluster_group filter is given, keep only matching
            # clusters ("noise"/"unsorted"/"good")
            if cluster_group is not None and cluster_group != meta:
                continue
            sptr = self.read_spiketrain(cluster_id=cluster_id,
                                        model=model,
                                        get_waveforms=get_waveforms,
                                        raw_data_units=raw_data_units)
            sptr.annotations.update({
                'cluster_group': meta,
                'group_id': model.channel_group
            })
            sptr.channel_index = chx
            unit = Unit(cluster_group=meta,
                        group_id=model.channel_group,
                        name='unit #{}'.format(cluster_id))
            unit.spiketrains.append(sptr)
            chx.units.append(unit)
            unit.channel_index = chx
            seg.spiketrains.append(sptr)
        if get_raw_data:
            ana = self.read_analogsignal(model, units=raw_data_units)
            ana.channel_index = chx
            seg.analogsignals.append(ana)
        # NOTE(review): assumes every model reports the same duration;
        # the last model's value wins — confirm against klusta output
        seg.duration = model.duration * pq.s

    blk.create_many_to_one_relationship()
    return blk
def _source_unit_to_neo(self, nix_unit):
    """Build a neo Unit from a NIX unit source.

    The NIX attributes are translated to neo annotations/attributes and
    the new Unit is registered in the id-to-object map so later lookups
    of the same NIX source resolve to this very object.
    """
    attributes = self._nix_attr_to_neo(nix_unit)
    unit = Unit(**attributes)
    self._object_map[nix_unit.id] = unit
    return unit
def read_block(self, lazy=False, cascade=True):
    """Returns a Block containing spike information.

    There is no obvious way to infer the segment boundaries from
    raw spike times, so for now all spike times are returned in one
    big segment. The way around this would be to specify the segment
    boundaries, and then change this code to put the spikes in the right
    segments.
    """
    # Create block and segment to hold all the data
    block = Block()

    # Search data directory for KlustaKwik files.
    # If nothing found, return empty block
    self._fetfiles = self._fp.read_filenames('fet')
    self._clufiles = self._fp.read_filenames('clu')
    if len(self._fetfiles) == 0 or not cascade:
        return block

    # Create a single segment to hold all of the data
    seg = Segment(name='seg0', index=0, file_origin=self.filename)
    block.segments.append(seg)

    # Load spike times from each group and store in a dict, keyed
    # by group number
    # NOTE(review): this dict is initialized but never populated below —
    # verify whether callers rely on it
    self.spiketrains = dict()
    for group in sorted(self._fetfiles.keys()):
        # Load spike times
        fetfile = self._fetfiles[group]
        spks, features = self._load_spike_times(fetfile)

        # Load cluster ids or generate
        if group in self._clufiles:
            clufile = self._clufiles[group]
            uids = self._load_unit_id(clufile)
        else:
            # unclustered data, assume all zeros
            uids = np.zeros(spks.shape, dtype=np.int32)

        # error check
        if len(spks) != len(uids):
            raise ValueError("lengths of fet and clu files are different")

        # Create Unit for each cluster
        unique_unit_ids = np.unique(uids)
        for unit_id in sorted(unique_unit_ids):
            # Initialize the unit
            # NOTE(review): 'u' is linked to its spiketrains but never
            # attached to the block or a channel group — confirm intent
            u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                     index=unit_id, group=group)

            # Initialize a new SpikeTrain for the spikes from this unit
            if lazy:
                # lazy: empty times, only the would-be length is recorded
                st = SpikeTrain(
                    times=[],
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.lazy_shape = len(spks[uids == unit_id])
            else:
                # convert raw sample counts to seconds via sampling_rate
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
            st.annotations['cluster'] = unit_id
            st.annotations['group'] = group

            # put features in
            if not lazy and len(features) != 0:
                st.annotations['waveform_features'] = features

            # Link
            u.spiketrains.append(st)
            seg.spiketrains.append(st)

    block.create_many_to_one_relationship()
    return block