def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
    chx.analogsignals = [signal]
    chx.create_many_to_one_relationship()

    self.assertEqual(signal._single_parent_objects,
                     ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._multi_parent_objects, ())

    self.assertEqual(signal._single_parent_containers,
                     ('segment', 'channel_index'))
    self.assertEqual(signal._multi_parent_containers, ())

    self.assertEqual(signal._parent_objects,
                     ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._parent_containers,
                     ('segment', 'channel_index'))

    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'chx1')

    assert_neo_object_is_compliant(signal)
def _read_recordingchannelgroup(self, node, parent):
    # todo: handle Units
    attributes = self._get_standard_attributes(node)
    channel_indexes = node["channel_indexes"].value
    channel_names = node["channel_names"].value

    if channel_indexes.size:
        if len(node['recordingchannels']):
            raise MergeError("Cannot handle a RecordingChannelGroup which both has a "
                             "'channel_indexes' attribute and contains "
                             "RecordingChannel objects")
        raise NotImplementedError("todo")  # need to handle node['analogsignalarrays']
    else:
        channels = []
        for name, child_node in node['recordingchannels'].items():
            if "RecordingChannel" in name:
                channels.append(self._read_recordingchannel(child_node))
        channel_index = ChannelIndex(None, **attributes)
        channel_index._channels = channels
        # construction of the index is deferred until we have processed
        # all RecordingChannelGroup nodes
        units = []
        for name, child_node in node['units'].items():
            if "Unit" in name:
                units.append(self._read_unit(child_node,
                                             parent=channel_index))
        channel_index.units = units
    channel_index.block = parent
    return channel_index
def test__children(self):
    chx = ChannelIndex(index=np.arange(self.nchildren), name='chx1')
    chx.units = [self.unit1]
    chx.create_many_to_one_relationship()
    assert_neo_object_is_compliant(self.unit1)
    assert_neo_object_is_compliant(chx)

    self.assertEqual(self.unit1._container_child_objects, ())
    self.assertEqual(self.unit1._data_child_objects, ('SpikeTrain',))
    self.assertEqual(self.unit1._single_parent_objects, ('ChannelIndex',))
    self.assertEqual(self.unit1._multi_child_objects, ())
    self.assertEqual(self.unit1._multi_parent_objects, ())
    self.assertEqual(self.unit1._child_properties, ())

    self.assertEqual(self.unit1._single_child_objects, ('SpikeTrain',))

    self.assertEqual(self.unit1._container_child_containers, ())
    self.assertEqual(self.unit1._data_child_containers, ('spiketrains',))
    self.assertEqual(self.unit1._single_child_containers, ('spiketrains',))
    self.assertEqual(self.unit1._single_parent_containers,
                     ('channel_index',))
    self.assertEqual(self.unit1._multi_child_containers, ())
    self.assertEqual(self.unit1._multi_parent_containers, ())

    self.assertEqual(self.unit1._child_objects, ('SpikeTrain',))
    self.assertEqual(self.unit1._child_containers, ('spiketrains',))
    self.assertEqual(self.unit1._parent_objects, ('ChannelIndex',))
    self.assertEqual(self.unit1._parent_containers, ('channel_index',))

    self.assertEqual(len(self.unit1._single_children), self.nchildren)
    self.assertEqual(len(self.unit1._multi_children), 0)
    self.assertEqual(len(self.unit1.data_children), self.nchildren)
    self.assertEqual(len(self.unit1.data_children_recur), self.nchildren)
    self.assertEqual(len(self.unit1.container_children), 0)
    self.assertEqual(len(self.unit1.container_children_recur), 0)
    self.assertEqual(len(self.unit1.children), self.nchildren)
    self.assertEqual(len(self.unit1.children_recur), self.nchildren)

    self.assertEqual(self.unit1._multi_children, ())
    self.assertEqual(self.unit1.container_children, ())
    self.assertEqual(self.unit1.container_children_recur, ())

    assert_same_sub_schema(list(self.unit1._single_children), self.trains1a)
    assert_same_sub_schema(list(self.unit1.data_children), self.trains1a)
    assert_same_sub_schema(list(self.unit1.data_children_recur),
                           self.trains1a)
    assert_same_sub_schema(list(self.unit1.children), self.trains1a)
    assert_same_sub_schema(list(self.unit1.children_recur), self.trains1a)

    self.assertEqual(len(self.unit1.parents), 1)
    self.assertEqual(self.unit1.parents[0].name, 'chx1')
def test_channel_index_write(self):
    block = Block(name=self.rword())
    chx = ChannelIndex(name=self.rword(),
                       description=self.rsentence(),
                       index=[1, 2, 3, 5, 8, 13])
    block.channel_indexes.append(chx)
    self.write_and_compare([block])

    chx.annotate(**self.rdict(3))
    self.write_and_compare([block])
def read_block(self, lazy=False, cascade=True):
    """
    """
    tree = ElementTree.parse(self.filename)
    root = tree.getroot()
    acq = root.find('acquisitionSystem')
    nbits = int(acq.find('nBits').text)
    nbchannel = int(acq.find('nChannels').text)
    sampling_rate = float(acq.find('samplingRate').text) * pq.Hz
    voltage_range = float(acq.find('voltageRange').text)
    # offset = int(acq.find('offset').text)
    amplification = float(acq.find('amplification').text)

    bl = Block(file_origin=os.path.basename(self.filename).replace('.xml', ''))
    if cascade:
        seg = Segment()
        bl.segments.append(seg)

        # RCG
        for i, xml_chx in enumerate(
                root.find('anatomicalDescription').find('channelGroups').findall('group')):
            n_channels = len(xml_chx)
            chx = ChannelIndex(name='Group {0}'.format(i),
                               index=np.arange(n_channels, dtype=int))
            chx.channel_ids = np.array([int(xml_rc.text)
                                        for xml_rc in xml_chx])
            chx.channel_names = np.array(['Channel{0}'.format(id)
                                          for id in chx.channel_ids],
                                         dtype='S')
            bl.channel_indexes.append(chx)

        # AnalogSignals
        reader = RawBinarySignalIO(filename=self.filename.replace('.xml', '.dat'))
        seg2 = reader.read_segment(cascade=True, lazy=lazy,
                                   sampling_rate=sampling_rate,
                                   t_start=0. * pq.s,
                                   unit=pq.V, nbchannel=nbchannel,
                                   bytesoffset=0,
                                   dtype=np.int16 if nbits <= 16 else np.int32,
                                   rangemin=-voltage_range / 2.,
                                   rangemax=voltage_range / 2.)
        for s, sig in enumerate(seg2.analogsignals):
            if not lazy:
                sig /= amplification
            sig.segment = seg
            seg.analogsignals.append(sig)
            chx.analogsignals.append(sig)

    bl.create_many_to_one_relationship()
    return bl
def test_channel_index_write(self):
    block = Block(name=self.rword())
    chx = ChannelIndex(name=self.rword(),
                       description=self.rsentence(),
                       channel_ids=[10, 20, 30, 50, 80, 130],
                       index=[1, 2, 3, 5, 8, 13])
    block.channel_indexes.append(chx)
    self.write_and_compare([block])

    chx.annotate(**self.rdict(3))
    self.write_and_compare([block])

    chx.channel_names = ["one", "two", "three", "five", "eight", "xiii"]
    self.write_and_compare([block])
def proc_src_units(srcfile, filename):
    '''Get the units in an src file that has been processed by the official
    matlab function.  See proc_src for details'''
    chx = ChannelIndex(file_origin=filename,
                       index=np.array([], dtype=int))
    un_unit = Unit(name='UnassignedSpikes', file_origin=filename,
                   elliptic=[], boundaries=[], timestamp=[], max_valid=[])

    chx.units.append(un_unit)

    sortInfo = srcfile['sortInfo'][0, 0]
    timeslice = sortInfo['timeslice'][0, 0]
    maxValid = timeslice['maxValid'][0, 0]
    cluster = timeslice['cluster'][0, 0]
    if len(cluster):
        maxValid = maxValid[0, 0]
        elliptic = [res.flatten() for res in cluster['elliptic'].flatten()]
        boundaries = [res.flatten() for res in
                      cluster['boundaries'].flatten()]
        fullclust = zip(elliptic, boundaries)
        for ielliptic, iboundaries in fullclust:
            unit = Unit(file_origin=filename,
                        boundaries=[iboundaries],
                        elliptic=[ielliptic], timeStamp=[],
                        max_valid=[maxValid])
            chx.units.append(unit)
    return chx
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Arguments:
        channel_index: can be int, iterable or None to select one,
                       many or all channel(s)
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        if channel_index:
            if type(channel_index) is int:
                channel_index = [channel_index]
            if type(channel_index) is list:
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        chx = ChannelIndex(name='all channels',
                           index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy,
                                     cascade=cascade)
        ana.channel_index = chx
        seg.duration = (self._attrs['shape'][0]
                        / self._attrs['kwik']['sample_rate']) * pq.s

    # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
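# Usage sketch (an added illustration, not from the source): the
# channel_index argument above accepts an int, an iterable, or None.
# The reader class name and file path are assumed placeholders.
#
# reader = KwikIO('experiment.kwik')
# blk_all = reader.read_block()                           # all channels
# blk_one = reader.read_block(channel_index=3)            # single channel
# blk_some = reader.read_block(channel_index=[0, 2, 5])   # a subset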
def test__construct_subsegment_by_unit(self):
    nb_seg = 3
    nb_unit = 7
    unit_with_sig = np.array([0, 2, 5])
    signal_types = ['Vm', 'Conductances']
    sig_len = 100

    # channelindexes
    chxs = [ChannelIndex(name='Vm', index=unit_with_sig),
            ChannelIndex(name='Conductance', index=unit_with_sig)]

    # Unit
    all_unit = []
    for u in range(nb_unit):
        un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
        assert_neo_object_is_compliant(un)
        all_unit.append(un)

    blk = Block()
    blk.channel_indexes = chxs
    for s in range(nb_seg):
        seg = Segment(name='Simulation %s' % s)
        for j in range(nb_unit):
            st = SpikeTrain([1, 2], units='ms', t_start=0., t_stop=10)
            st.unit = all_unit[j]

        for t in signal_types:
            anasigarr = AnalogSignal(np.zeros((sig_len,
                                               len(unit_with_sig))),
                                     units='nA',
                                     sampling_rate=1000. * pq.Hz,
                                     channel_indexes=unit_with_sig)
            seg.analogsignals.append(anasigarr)

    blk.create_many_to_one_relationship()

    for unit in all_unit:
        assert_neo_object_is_compliant(unit)
    for chx in chxs:
        assert_neo_object_is_compliant(chx)
    assert_neo_object_is_compliant(blk)

    # what you want
    newseg = seg.construct_subsegment_by_unit(all_unit[:4])
    assert_neo_object_is_compliant(newseg)
def create_all_annotated(cls):
    times = cls.rquant(1, pq.s)
    signal = cls.rquant(1, pq.V)
    blk = Block()
    blk.annotate(**cls.rdict(3))
    cls.populate_dates(blk)

    seg = Segment()
    seg.annotate(**cls.rdict(4))
    cls.populate_dates(seg)
    blk.segments.append(seg)

    asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
    asig.annotate(**cls.rdict(2))
    seg.analogsignals.append(asig)

    isig = IrregularlySampledSignal(times=times, signal=signal,
                                    time_units=pq.s)
    isig.annotate(**cls.rdict(2))
    seg.irregularlysampledsignals.append(isig)

    epoch = Epoch(times=times, durations=times)
    epoch.annotate(**cls.rdict(4))
    seg.epochs.append(epoch)

    event = Event(times=times)
    event.annotate(**cls.rdict(4))
    seg.events.append(event)

    spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
    d = cls.rdict(6)
    d["quantity"] = pq.Quantity(10, "mV")
    d["qarray"] = pq.Quantity(range(10), "mA")
    spiketrain.annotate(**d)
    seg.spiketrains.append(spiketrain)

    chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
    chx.annotate(**cls.rdict(5))
    blk.channel_indexes.append(chx)

    unit = Unit()
    unit.annotate(**cls.rdict(2))
    chx.units.append(unit)

    return blk
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'.  This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that
    pattern.  'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = damobj.items()[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    chx = ChannelIndex(file_origin=filename,
                       index=np.array([0]),
                       channel_ids=np.array([1]),
                       channel_names=np.array(['Chan1'], dtype='S'))

    block.channel_indexes.append(chx)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
def read_block(self, lazy=False, cascade=True, **kargs):
    '''
    Reads a block from the simple spike data file "fname" generated
    with BrainWare
    '''

    # there are no keyword arguments implemented so far.  If someone tries
    # to pass them, they are expecting them to do something or making a
    # mistake, neither of which should pass silently
    if kargs:
        raise NotImplementedError('This method does not have any '
                                  'arguments implemented yet')
    self._fsrc = None
    self.__lazy = lazy

    self._blk = Block(file_origin=self._filename)
    block = self._blk

    # if we aren't doing cascade, don't load anything
    if not cascade:
        return block

    # create the objects to store other objects
    chx = ChannelIndex(file_origin=self._filename,
                       index=np.array([], dtype=np.int))
    self.__unit = Unit(file_origin=self._filename)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(self.__unit)

    # initialize values
    self.__t_stop = None
    self.__params = None
    self.__seg = None
    self.__spiketimes = None

    # open the file
    with open(self._path, 'rb') as self._fsrc:
        res = True
        # while the file is not done keep reading segments
        while res:
            res = self.__read_id()

    block.create_many_to_one_relationship()

    # cleanup attributes
    self._fsrc = None
    self.__lazy = False
    self._blk = None
    self.__t_stop = None
    self.__params = None
    self.__seg = None
    self.__spiketimes = None

    return block
def create_all_annotated(cls):
    times = cls.rquant(1, pq.s)
    signal = cls.rquant(1, pq.V)
    blk = Block()
    blk.annotate(**cls.rdict(3))

    seg = Segment()
    seg.annotate(**cls.rdict(4))
    blk.segments.append(seg)

    asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
    asig.annotate(**cls.rdict(2))
    seg.analogsignals.append(asig)

    isig = IrregularlySampledSignal(times=times, signal=signal,
                                    time_units=pq.s)
    isig.annotate(**cls.rdict(2))
    seg.irregularlysampledsignals.append(isig)

    epoch = Epoch(times=times, durations=times)
    epoch.annotate(**cls.rdict(4))
    seg.epochs.append(epoch)

    event = Event(times=times)
    event.annotate(**cls.rdict(4))
    seg.events.append(event)

    spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
    d = cls.rdict(6)
    d["quantity"] = pq.Quantity(10, "mV")
    d["qarray"] = pq.Quantity(range(10), "mA")
    spiketrain.annotate(**d)
    seg.spiketrains.append(spiketrain)

    chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
    chx.annotate(**cls.rdict(5))
    blk.channel_indexes.append(chx)

    unit = Unit()
    unit.annotate(**cls.rdict(2))
    chx.units.append(unit)

    return blk
def read_channelindex(self, path, cascade=True, lazy=False,
                      read_waveforms=True):
    channel_group = self._exdir_directory[path]
    group_id = channel_group.attrs['electrode_group_id']
    chx = ChannelIndex(
        name='Channel group {}'.format(group_id),
        index=channel_group.attrs['electrode_idx'],
        channel_ids=channel_group.attrs['electrode_identities'],
        **{'group_id': group_id,
           'exdir_path': path}
    )
    if 'LFP' in channel_group:
        for lfp_group in channel_group['LFP'].values():
            ana = self.read_analogsignal(lfp_group.name,
                                         cascade=cascade, lazy=lazy)
            chx.analogsignals.append(ana)
            ana.channel_index = chx
    if 'MUA' in channel_group:
        for mua_group in channel_group['MUA'].values():
            ana = self.read_analogsignal(mua_group.name,
                                         cascade=cascade, lazy=lazy)
            chx.analogsignals.append(ana)
            ana.channel_index = chx
    sptrs = []
    if 'UnitTimes' in channel_group:
        for unit_group in channel_group['UnitTimes'].values():
            unit = self.read_unit(unit_group.name, cascade=cascade,
                                  lazy=lazy, read_waveforms=read_waveforms)
            unit.channel_index = chx
            chx.units.append(unit)
            sptr = unit.spiketrains[0]
            sptr.channel_index = chx
    elif 'EventWaveform' in channel_group:
        sptr = self.read_spiketrain(channel_group['EventWaveform'].name,
                                    cascade=cascade, lazy=lazy,
                                    read_waveforms=read_waveforms)
        unit = Unit(name=sptr.name, **sptr.annotations)
        unit.spiketrains.append(sptr)
        unit.channel_index = chx
        sptr.channel_index = chx
        chx.units.append(unit)
    return chx
def setUp(self):
    self.data1 = np.arange(10.0)
    self.data1quant = self.data1 * pq.mV
    self.time1 = np.logspace(1, 5, 10)
    self.time1quant = self.time1 * pq.ms
    self.signal1 = IrregularlySampledSignal(self.time1quant,
                                            signal=self.data1quant,
                                            name='spam',
                                            description='eggs',
                                            file_origin='testfile.txt',
                                            arg1='test')
    self.signal1.segment = Segment()
    self.signal1.channel_index = ChannelIndex([0])
def create_channelindex(self, parent=None, name='ChannelIndex',
                        analogsignals=None):
    channels_num = min([signal.shape[1] for signal in analogsignals])
    channelindex = ChannelIndex(index=np.arange(channels_num),
                                channel_names=['Channel{}'.format(i)
                                               for i in range(channels_num)],
                                channel_ids=np.arange(channels_num),
                                coordinates=([[1.87, -5.2, 4.0]]
                                             * channels_num) * pq.cm)

    for signal in analogsignals:
        channelindex.analogsignals.append(signal)

    self._assign_basic_attributes(channelindex, name)
    self._assign_annotations(channelindex)

    return channelindex
def _source_chx_to_neo(self, nix_source):
    neo_attrs = self._nix_attr_to_neo(nix_source)
    chx = list(self._nix_attr_to_neo(c)
               for c in nix_source.sources
               if c.type == "neo.channelindex")
    chan_names = list(c["neo_name"] for c in chx if "neo_name" in c)
    if chan_names:
        neo_attrs["channel_names"] = chan_names
    neo_attrs["index"] = np.array([c["index"] for c in chx])
    if "coordinates" in chx[0]:
        coord_units = chx[0]["coordinates.units"]
        coord_values = list(c["coordinates"] for c in chx)
        neo_attrs["coordinates"] = pq.Quantity(coord_values, coord_units)
    rcg = ChannelIndex(**neo_attrs)
    self._object_map[nix_source.id] = rcg
    return rcg
def read_block(self, lazy=False, cascade=True, **kargs):
    '''
    Reads a block from the raw data file "fname" generated
    with BrainWare
    '''

    # there are no keyword arguments implemented so far.  If someone tries
    # to pass them, they are expecting them to do something or making a
    # mistake, neither of which should pass silently
    if kargs:
        raise NotImplementedError('This method does not have any '
                                  'arguments implemented yet')
    self._fsrc = None

    block = Block(file_origin=self._filename)

    # if we aren't doing cascade, don't load anything
    if not cascade:
        return block

    # create the objects to store other objects
    chx = ChannelIndex(file_origin=self._filename,
                       channel_ids=np.array([1]),
                       index=np.array([0]),
                       channel_names=np.array(['Chan1'], dtype='S'))

    # load objects into their containers
    block.channel_indexes.append(chx)

    # open the file
    with open(self._path, 'rb') as fobject:
        # while the file is not done keep reading segments
        while True:
            seg = self._read_segment(fobject, lazy)
            # if there are no more Segments, stop
            if not seg:
                break

            # store the segment and signals
            seg.analogsignals[0].channel_index = chx
            block.segments.append(seg)

    # remove the file object
    self._fsrc = None

    block.create_many_to_one_relationship()
    return block
def test_anonymous_objects_write(self):
    nblocks = 2
    nsegs = 2
    nanasig = 4
    nirrseg = 2
    nepochs = 3
    nevents = 4
    nspiketrains = 3
    nchx = 5
    nunits = 10

    times = self.rquant(1, pq.s)
    signal = self.rquant(1, pq.V)
    blocks = []
    for blkidx in range(nblocks):
        blk = Block()
        blocks.append(blk)
        for segidx in range(nsegs):
            seg = Segment()
            blk.segments.append(seg)
            for anaidx in range(nanasig):
                seg.analogsignals.append(AnalogSignal(signal=signal,
                                                      sampling_rate=pq.Hz))
            for irridx in range(nirrseg):
                seg.irregularlysampledsignals.append(
                    IrregularlySampledSignal(times=times,
                                             signal=signal,
                                             time_units=pq.s)
                )
            for epidx in range(nepochs):
                seg.epochs.append(Epoch(times=times, durations=times))
            for evidx in range(nevents):
                seg.events.append(Event(times=times))
            for stidx in range(nspiketrains):
                seg.spiketrains.append(SpikeTrain(times=times,
                                                  t_stop=times[-1] + pq.s,
                                                  units=pq.s))
        for chidx in range(nchx):
            chx = ChannelIndex(name="chx{}".format(chidx),
                               index=[1, 2],
                               channel_ids=[11, 22])
            blk.channel_indexes.append(chx)
            for unidx in range(nunits):
                unit = Unit()
                chx.units.append(unit)
    self.writer.write_all_blocks(blocks)
    self.compare_blocks(blocks, self.reader.blocks)
def test_multiref_write(self):
    blk = Block("blk1")
    signal = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                          sampling_period=pq.Quantity(1, "ms"))
    othersignal = IrregularlySampledSignal(name="i1", signal=[0, 0, 0],
                                           units="mV", times=[1, 2, 3],
                                           time_units="ms")
    event = Event(name="Evee", times=[0.3, 0.42], units="year")
    epoch = Epoch(name="epoche", times=[0.1, 0.2] * pq.min,
                  durations=[0.5, 0.5] * pq.min)
    st = SpikeTrain(name="the train of spikes", times=[0.1, 0.2, 10.3],
                    t_stop=11, units="us")

    for idx in range(3):
        segname = "seg" + str(idx)
        seg = Segment(segname)
        blk.segments.append(seg)
        seg.analogsignals.append(signal)
        seg.irregularlysampledsignals.append(othersignal)
        seg.events.append(event)
        seg.epochs.append(epoch)
        seg.spiketrains.append(st)

    chidx = ChannelIndex([10, 20, 29])
    seg = blk.segments[0]
    st = SpikeTrain(name="choochoo", times=[10, 11, 80], t_stop=1000,
                    units="s")
    seg.spiketrains.append(st)
    blk.channel_indexes.append(chidx)
    for idx in range(6):
        unit = Unit("unit" + str(idx))
        chidx.units.append(unit)
        unit.spiketrains.append(st)

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)
def test_no_segment_write(self):
    # Tests storing AnalogSignal, IrregularlySampledSignal, and SpikeTrain
    # objects in the secondary (ChannelIndex) substructure without them
    # being attached to a Segment.
    blk = Block("segmentless block")
    signal = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                          sampling_period=pq.Quantity(1, "ms"))
    othersignal = IrregularlySampledSignal(name="i1", signal=[0, 0, 0],
                                           units="mV", times=[1, 2, 3],
                                           time_units="ms")
    sta = SpikeTrain(name="the train of spikes", times=[0.1, 0.2, 10.3],
                     t_stop=11, units="us")
    stb = SpikeTrain(name="the train of spikes b", times=[1.1, 2.2, 10.1],
                     t_stop=100, units="ms")

    chidx = ChannelIndex([8, 13, 21])
    blk.channel_indexes.append(chidx)
    chidx.analogsignals.append(signal)
    chidx.irregularlysampledsignals.append(othersignal)

    unit = Unit()
    chidx.units.append(unit)
    unit.spiketrains.extend([sta, stb])

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)

    self.writer.close()
    reader = NixIO(self.filename, "ro")
    blk = reader.read_block(neoname="segmentless block")
    chx = blk.channel_indexes[0]
    self.assertEqual(len(chx.analogsignals), 1)
    self.assertEqual(len(chx.irregularlysampledsignals), 1)
    self.assertEqual(len(chx.units[0].spiketrains), 2)
def test_channel_index_write(self):
    block = Block(name=self.rword())
    chx = ChannelIndex(name=self.rword(),
                       description=self.rsentence(),
                       channel_ids=[10, 20, 30, 50, 80, 130],
                       index=[1, 2, 3, 5, 8, 13])
    block.channel_indexes.append(chx)
    self.write_and_compare([block])

    chx.annotate(**self.rdict(3))
    self.write_and_compare([block])

    chx.channel_names = ["one", "two", "three", "five", "eight", "xiii"]
    chx.coordinates = self.rquant((6, 3), pq.um)
    self.write_and_compare([block])

    # add an empty channel index and check again
    newchx = ChannelIndex(np.array([]))
    block.channel_indexes.append(newchx)
    self.write_and_compare([block])
def test_multiref_write(self):
    blk = Block("blk1")
    signal = AnalogSignal(name="sig1", signal=[0, 1, 2], units="mV",
                          sampling_period=pq.Quantity(1, "ms"))

    for idx in range(3):
        segname = "seg" + str(idx)
        seg = Segment(segname)
        blk.segments.append(seg)
        seg.analogsignals.append(signal)

    chidx = ChannelIndex([10, 20, 29])
    seg = blk.segments[0]
    st = SpikeTrain(name="choochoo", times=[10, 11, 80], t_stop=1000,
                    units="s")
    seg.spiketrains.append(st)
    blk.channel_indexes.append(chidx)
    for idx in range(6):
        unit = Unit("unit" + str(idx))
        chidx.units.append(unit)
        unit.spiketrains.append(st)

    self.writer.write_block(blk)
    self.compare_blocks([blk], self.reader.blocks)
def _create_channelindex(self, group):
    return ChannelIndex(index=self._read_array(group, 'index'),
                        channel_names=self._read_array(group,
                                                       'channel_names'),
                        channel_ids=self._read_array(group, 'channel_ids'),
                        coordinates=self._read_array(group, 'coordinates'))
def read_block(self, lazy=False, cascade=True, t_starts=None, t_stops=None,
               electrode_list=None, unit_list=None, analogsignals=True,
               events=False, waveforms=False):
    """
    Reads data in a requested time window and returns block with a single
    segment containing these data.

    Arguments:
        lazy : Postpone actual reading of the data files. Default 'False'.
        cascade : Do not postpone reading subsequent neo types (segments).
                  Default 'True'.
        t_starts : list of quantities or quantity describing the start of
                   the requested time window to load. If None or [None]
                   the complete session is loaded. Default 'None'.
        t_stops : list of quantities or quantity describing the end of the
                  requested time window to load. Has to contain the same
                  number of values as t_starts. If None or [None] the
                  complete session is loaded. Default 'None'.
        electrode_list : list of integers containing the IDs of the
                         requested electrodes to load. If [] or None all
                         available electrodes will be loaded. If False,
                         no electrode will be loaded. Default: None.
        unit_list : list of integers containing the IDs of the requested
                    units to load. If [] all available units will be
                    loaded. Default: None.
        events : Loading events. If True all available events in the given
                 time window will be read. Default: False.
        waveforms : Load waveforms for spikes in the requested time window.
                    Default: False.

    Returns:
        Block object containing the requested data in neo structures.

    Usage:
        from neo import io
        import quantities as pq
        import matplotlib.pyplot as plt

        session_folder = '../Data/2014-07-24_10-31-02'
        NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
        block = NIO.read_block(lazy=False, cascade=True,
                               t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                               electrode_list=[1, 5, 10],
                               unit_list=[1, 2, 3],
                               events=True, waveforms=True)
    """
    # Load neo block
    block = neo.io.NeuralynxIO.read_block(self, lazy=lazy, cascade=cascade,
                                          t_starts=t_starts,
                                          t_stops=t_stops,
                                          electrode_list=electrode_list,
                                          unit_list=unit_list,
                                          analogsignals=analogsignals,
                                          events=events,
                                          waveforms=waveforms)

    # TODO: odML <-> data files consistency checks? Low priority

    # Add annotations of odML meta data info
    if self.odML_avail:
        """
        TODO:
        * Add electroporation area to recording channel group
        """
        area_dict = self.get_electrodes_by_area()
        electroporated_areas = self.get_electroporation()
        channel_indexes = [r for r in block.channel_indexes
                           if r.name == 'all channels']

        for area, channels in area_dict.items():
            electroporated, expression = False, None
            if area in electroporated_areas.keys():
                electroporated = True
                expression = electroporated_areas[area]
            chidx = ChannelIndex(name='%s channels' % area,
                                 channel_indexes=channels,
                                 channel_names=['channel %i' % i
                                                for i in channels],
                                 electroporated=electroporated,
                                 expression=expression)
            chidx.block = block
            block.channel_indexes.append(chidx)

        # raise NotImplementedError('neo block annotation using odmls is '
        #                           'not implemented yet.')

        # ########### Annotate information of 'Recording' Section ############
        # # Annotate Amplifier Information
        # amp_properties = ['LowpassCutoff', 'HighpassCutoff', 'SamplingRate']
        # ff = lambda x: x.name in amp_properties and 'Amplifier' in \
        #     x.parent.get_path()
        # pobj = {p.name: p.value.data for p in
        #         self.odML_doc.iterproperties(filter_func=ff)}
        # block.annotate(amplifier=pobj)
        #
        # # Consistency Check with Analogsignal Sampling Rate
        # if any([pobj['SamplingRate'] != asa.annotations['SamplingFrequency']
        #         for asa in block.segments[0].analogsignalarrays]):
        #     raise ValueError('Inconsistent sampling rates detected in odml'
        #                      ' and original data files (%s / %s)' % (
        #                          pobj['SamplingRate'],
        #                          [asa.annotations['SamplingFrequency']
        #                           for asa in
        #                           block.segments[0].analogsignalarray]))
        #
        # # Annotate different Recording Areas
        # # Extracting Recording Area sections
        # ff = lambda x: 'RecordingArea' in x.name and 'Probe' in x.sections
        # recording_secs = [p for p in
        #                   self.odML_doc.itersections(filter_func=ff)]
        # rec_properties = ['Hemisphere', 'Probe ID', 'Channels',
        #                   'SpikingChannels', 'BrokenChannels', 'Quality']
        # ff2 = lambda x: x.name in rec_properties
        # area_dict = {}
        # for recording_sec in recording_secs:
        #     # extracting specific properties of each recording area section
        #     area_dict[recording_sec.name] = {
        #         a.name: a.value.data for a in
        #         recording_sec.iterproperties(filter_func=ff2)}
        #     # adding two 'area' properties manually as they have the
        #     # same name
        #     area_dict[recording_sec.name]['RecordingArea'] = \
        #         recording_sec.properties['Area'].value.data
        #     area_dict[recording_sec.name]['ReferenceArea'] = \
        #         recording_sec.get_property_by_path(
        #             'Reference:Area').value.data
        # block.annotate(recordingareas=area_dict)

    return block
def read_block(self, block_index=0, lazy=False, signal_group_mode=None,
               units_group_mode=None, load_waveforms=False):
    """
    :param block_index: int, default 0. In case of several blocks, the
        block_index can be specified.
    :param lazy: False by default.
    :param signal_group_mode: 'split-all' or 'group-by-same-units' (the
        default depends on the IO). This controls the behavior for grouping
        channels in AnalogSignal.
        * 'split-all': each channel will give an AnalogSignal
        * 'group-by-same-units': all channels sharing the same quantity
          units are grouped in a 2D AnalogSignal
    :param units_group_mode: 'split-all' or 'all-in-one' (the default
        depends on the IO). This controls the behavior for grouping Units
        in ChannelIndex:
        * 'split-all': each neo.Unit is assigned to a new neo.ChannelIndex
        * 'all-in-one': all neo.Units are grouped in the same
          neo.ChannelIndex (global spike sorting for instance)
    :param load_waveforms: False by default. Controls whether
        SpikeTrain.waveforms is None or not.
    """
    if signal_group_mode is None:
        signal_group_mode = self._prefered_signal_group_mode
        if self._prefered_signal_group_mode == 'split-all':
            self.logger.warning("the default signal_group_mode will change "
                                "from 'split-all' to 'group-by-same-units' "
                                "in next release")

    if units_group_mode is None:
        units_group_mode = self._prefered_units_group_mode

    # annotations
    bl_annotations = dict(self.raw_annotations['blocks'][block_index])
    bl_annotations.pop('segments')
    bl_annotations = check_annotations(bl_annotations)

    bl = Block(**bl_annotations)

    # ChannelIndexes are split in 2 parts:
    #  * some for AnalogSignals
    #  * some for Units

    # ChannelIndex for AnalogSignals
    all_channels = self.header['signal_channels']
    channel_indexes_list = self.get_group_channel_indexes()
    for channel_index in channel_indexes_list:
        for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                channel_index,
                signal_group_mode=signal_group_mode).items():
            if signal_group_mode == "split-all":
                chidx_annotations = self.raw_annotations['signal_channels'][i]
            elif signal_group_mode == "group-by-same-units":
                # this should be done with array_annotation soon:
                # take keys from first channel of the group
                keys = list(self.raw_annotations['signal_channels'][
                    ind_abs[0]].keys())
                chidx_annotations = {key: [] for key in keys}
                for j in ind_abs:
                    for key in keys:
                        v = self.raw_annotations['signal_channels'][j].get(
                            key, None)
                        chidx_annotations[key].append(v)
            if 'name' in list(chidx_annotations.keys()):
                chidx_annotations.pop('name')
            chidx_annotations = check_annotations(chidx_annotations)
            # this should be done with array_annotation soon:
            ch_names = all_channels[ind_abs]['name'].astype('S')
            neo_channel_index = ChannelIndex(
                index=ind_within,
                channel_names=ch_names,
                channel_ids=all_channels[ind_abs]['id'],
                name='Channel group {}'.format(i),
                **chidx_annotations)
            bl.channel_indexes.append(neo_channel_index)

    # ChannelIndex and Unit
    # 2 cases are possible in neo; different IOs have chosen one or the
    # other:
    #  * All units are grouped in the same ChannelIndex and indexes are
    #    all channels: 'all-in-one'
    #  * Each unit is assigned to one ChannelIndex: 'split-all'
    # This is kept for compatibility
    unit_channels = self.header['unit_channels']
    if units_group_mode == 'all-in-one':
        if unit_channels.size > 0:
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for all Unit')
            bl.channel_indexes.append(channel_index)
        for c in range(unit_channels.size):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit_annotations = check_annotations(unit_annotations)
            unit = Unit(**unit_annotations)
            channel_index.units.append(unit)
    elif units_group_mode == 'split-all':
        for c in range(len(unit_channels)):
            unit_annotations = self.raw_annotations['unit_channels'][c]
            unit_annotations = check_annotations(unit_annotations)
            unit = Unit(**unit_annotations)
            channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                         name='ChannelIndex for Unit')
            channel_index.units.append(unit)
            bl.channel_indexes.append(channel_index)

    # Read all segments
    for seg_index in range(self.segment_count(block_index)):
        seg = self.read_segment(block_index=block_index,
                                seg_index=seg_index, lazy=lazy,
                                signal_group_mode=signal_group_mode,
                                load_waveforms=load_waveforms)
        bl.segments.append(seg)

    # create links to other containers: ChannelIndex and Units
    for seg in bl.segments:
        for c, anasig in enumerate(seg.analogsignals):
            bl.channel_indexes[c].analogsignals.append(anasig)

        nsig = len(seg.analogsignals)
        for c, sptr in enumerate(seg.spiketrains):
            if units_group_mode == 'all-in-one':
                bl.channel_indexes[nsig].units[c].spiketrains.append(sptr)
            elif units_group_mode == 'split-all':
                bl.channel_indexes[nsig + c].units[0].spiketrains.append(sptr)

    bl.create_many_to_one_relationship()

    return bl
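# Usage sketch (an added illustration, not from the source): how the two
# grouping modes above affect the resulting Block structure. `MyRawIO` is a
# placeholder for any RawIO-based reader and `datafile` a hypothetical path.
#
# reader = MyRawIO(filename=datafile)
# reader.parse_header()
#
# # one 2D AnalogSignal per group of channels sharing units,
# # all Units gathered in a single ChannelIndex
# blk = reader.read_block(signal_group_mode='group-by-same-units',
#                         units_group_mode='all-in-one')
#
# # one AnalogSignal per channel, one ChannelIndex per Unit
# blk = reader.read_block(signal_group_mode='split-all',
#                         units_group_mode='split-all')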
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which is
    then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareF32IO to make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'.  This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that
    pattern.  'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''
    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    chx = ChannelIndex(file_origin=filenameorig,
                       index=np.array([], dtype=np.int),
                       channel_names=np.array([], dtype='S'))
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = f32obj.items()[0][1].flatten()
    except IOError as exc:
        # str(exc) works on both python 2 and 3 (exc.message is py2-only)
        if 'as a pickle' in str(exc):
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]
    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms,
                               t_start=0, t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
def test__issue_285(self):
    # SpikeTrain
    train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
    unit = Unit()
    train.unit = unit
    unit.spiketrains.append(train)

    epoch = Epoch([0, 10, 20], [2, 2, 2], ["a", "b", "c"], units="ms")

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    seg.epochs.append(epoch)
    epoch.segment = seg
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)

    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
    self.assertIsInstance(r_seg.epochs[0], Epoch)
    os.remove('blk.pkl')

    # Epoch
    train = Epoch(times=np.arange(0, 30, 10) * pq.s,
                  durations=[10, 5, 7] * pq.ms,
                  labels=np.array(['btn0', 'btn1', 'btn2'], dtype='S'))
    train.segment = Segment()
    unit = Unit()
    unit.spiketrains.append(train)

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)

    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
    os.remove('blk.pkl')

    # Event
    train = Event(np.arange(0, 30, 10) * pq.s,
                  labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
    train.segment = Segment()
    unit = Unit()
    unit.spiketrains.append(train)

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    blk.segments.append(seg)

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)

    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
    os.remove('blk.pkl')

    # IrregularlySampledSignal
    train = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
                                     units='mV', time_units='ms')
    train.segment = Segment()
    unit = Unit()
    train.channel_index = ChannelIndex(1)
    unit.spiketrains.append(train)

    blk = Block()
    seg = Segment()
    seg.spiketrains.append(train)
    blk.segments.append(seg)
    blk.segments[0].block = blk

    reader = PickleIO(filename="blk.pkl")
    reader.write(blk)

    reader = PickleIO(filename="blk.pkl")
    r_blk = reader.read_block()
    r_seg = r_blk.segments[0]
    self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
    self.assertIsInstance(r_seg.spiketrains[0].channel_index, ChannelIndex)
    os.remove('blk.pkl')
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal)
from quantities import nA, kHz
import numpy as np
import neo
import pickle

blk = Block()
seg = Segment(name='segment foo')
blk.segments.append(seg)

source_ids = np.arange(64)
channel_ids = source_ids + 42
chx = ChannelIndex(name='Array probe',
                   index=np.arange(64),
                   channel_ids=channel_ids,
                   channel_names=['Channel %i' % chid
                                  for chid in channel_ids])
blk.channel_indexes.append(chx)

a = AnalogSignal(np.random.randn(10000, 64) * nA,
                 sampling_rate=10 * kHz)
# link AnalogSignal and ID providing channel_index
a.channel_index = chx

chx.analogsignals.append(a)
seg.analogsignals.append(a)

seg1 = blk.segments[0]
a1 = seg1.analogsignals[0]
chx1 = a1.channel_index
print(chx1)
print(chx1.index)

io = neo.io.PickleIO(filename="test.pickle")
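# A possible continuation (an added sketch, not in the source): round-trip
# the block through PickleIO and confirm the AnalogSignal keeps its
# ChannelIndex link after deserialization.
io.write(blk)
io2 = neo.io.PickleIO(filename="test.pickle")
blk2 = io2.read_block()
print(blk2.segments[0].analogsignals[0].channel_index.index)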
def __init__(self, filename):
    """
    Arguments:
        filename : the filename
    """
    BaseIO.__init__(self)
    self._absolute_filename = filename
    self._path, relative_filename = os.path.split(filename)
    self._base_filename, extension = os.path.splitext(relative_filename)

    if extension != ".set":
        raise ValueError("file extension must be '.set'")

    with open(self._absolute_filename, "r") as f:
        text = f.read()

    params = parse_params(text)

    self._adc_fullscale = (float(params["ADC_fullscale_mv"])
                           * 1000.0 * pq.uV)
    # TODO convert from samples to seconds
    self._duration = float(params["duration"]) * pq.s
    self._tracked_spots_count = int(params["tracked_spots"])
    self._params = params

    # TODO this file reading can be removed, perhaps?
    channel_group_files = glob.glob(
        os.path.join(self._path, self._base_filename) + ".[0-9]*")
    self._channel_to_channel_index = {}
    self._channel_group_to_channel_index = {}
    self._channel_count = 0
    self._channel_group_count = 0
    self._channel_indexes = []
    for channel_group_file in channel_group_files:
        # increment before, because channel_groups start at 1
        self._channel_group_count += 1
        group_id = self._channel_group_count  # TODO count from 0?
        with open(channel_group_file, "rb") as f:
            channel_group_params = parse_header_and_leave_cursor(f)
        num_chans = channel_group_params["num_chans"]
        channel_ids = []
        channel_names = []
        for i in range(num_chans):
            channel_id = self._channel_count + i
            channel_ids.append(channel_id)
            channel_names.append("channel_{}_group_{}_internal_{}".format(
                channel_id, group_id, i))
        chan_name = 'group_id #{}'.format(group_id)

        channel_names = np.array(channel_names, dtype="S")
        channel_index = ChannelIndex(name=chan_name,
                                     channel_names=channel_names,
                                     index=np.arange(num_chans),
                                     channel_ids=np.array(channel_ids),
                                     **{'group_id': group_id})
        self._channel_indexes.append(channel_index)
        self._channel_group_to_channel_index[group_id] = channel_index

        for i in range(num_chans):
            channel_id = self._channel_count + i
            self._channel_to_channel_index[channel_id] = channel_index
        # increment after, because channels start at 0
        self._channel_count += num_chans

    # TODO add channels only for files that exist
    self._channel_ids = np.arange(self._channel_count)
def read_block(self, lazy=False, get_waveforms=True, cluster_group=None,
               raw_data_units='uV', get_raw_data=False):
    """
    Reads a block with segments and channel_indexes

    Parameters:
        get_waveforms: bool, default = True
            Whether or not to get the waveforms
        get_raw_data: bool, default = False
            Whether or not to get the raw traces
        raw_data_units: str, default = "uV"
            SI units of the raw trace according to voltage_gain given
            to klusta
        cluster_group: str, default = None
            Which clusters to load; possibilities are "noise", "unsorted",
            "good". If None, all are loaded.
    """
    assert not lazy, 'Do not support lazy'

    blk = Block()
    seg = Segment(file_origin=self.filename)
    blk.segments += [seg]
    for model in self.models:
        group_id = model.channel_group
        group_meta = {'group_id': group_id}
        group_meta.update(model.metadata)
        chx = ChannelIndex(name='channel group #{}'.format(group_id),
                           index=model.channels,
                           **group_meta)
        blk.channel_indexes.append(chx)
        clusters = model.spike_clusters
        for cluster_id in model.cluster_ids:
            meta = model.cluster_metadata[cluster_id]
            if cluster_group is None:
                pass
            elif cluster_group != meta:
                continue
            sptr = self.read_spiketrain(cluster_id=cluster_id,
                                        model=model,
                                        get_waveforms=get_waveforms,
                                        raw_data_units=raw_data_units)
            sptr.annotations.update({'cluster_group': meta,
                                     'group_id': model.channel_group})
            sptr.channel_index = chx
            unit = Unit(cluster_group=meta,
                        group_id=model.channel_group,
                        name='unit #{}'.format(cluster_id))
            unit.spiketrains.append(sptr)
            chx.units.append(unit)
            unit.channel_index = chx
            seg.spiketrains.append(sptr)
        if get_raw_data:
            ana = self.read_analogsignal(model, units=raw_data_units)
            ana.channel_index = chx
            seg.analogsignals.append(ana)
    seg.duration = model.duration * pq.s

    blk.create_many_to_one_relationship()
    return blk
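# Usage sketch (an added illustration, not from the source): load only
# clusters the sorter marked "good", skipping waveforms. The reader class
# name and the .kwik path are assumed placeholders.
#
# reader = KlustaIO('experiment.kwik')
# blk = reader.read_block(get_waveforms=False, cluster_group='good')
# for chx in blk.channel_indexes:
#     print(chx.name, len(chx.units))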