def test__children(self):
    """Verify parent bookkeeping for a signal attached to both a
    Segment and a RecordingChannel."""
    sig = self.signals[0]

    seg = Segment(name='seg1')
    seg.analogsignals = [sig]
    seg.create_many_to_one_relationship()

    chan = RecordingChannel(name='rchan1')
    chan.analogsignals = [sig]
    chan.create_many_to_one_relationship()

    # class-level relationship descriptors
    self.assertEqual(sig._single_parent_objects,
                     ('Segment', 'RecordingChannel'))
    self.assertEqual(sig._multi_parent_objects, ())
    self.assertEqual(sig._single_parent_containers,
                     ('segment', 'recordingchannel'))
    self.assertEqual(sig._multi_parent_containers, ())
    self.assertEqual(sig._parent_objects, ('Segment', 'RecordingChannel'))
    self.assertEqual(sig._parent_containers,
                     ('segment', 'recordingchannel'))

    # instance-level parent links set up above
    self.assertEqual(len(sig.parents), 2)
    self.assertEqual(sig.parents[0].name, 'seg1')
    self.assertEqual(sig.parents[1].name, 'rchan1')

    assert_neo_object_is_compliant(sig)
def proc_dam(filename):
    '''Load an dam file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which is
    then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareDamIO to make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that
    pattern. 'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # BUG FIX: on Python 3, dict-style .items() returns a view that
        # cannot be indexed; wrap in list() so both py2 and py3 work
        # (the docstring promises py3 support).
        damfile = list(damobj.items())[0][1].flatten()

    # strip the '_dam_py?.npz' suffix (12 chars) and restore '.dam'
    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    # single group with a single channel, mirroring BrainwareDamIO output
    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    # unpack per-stimulus parameter names and values into dicts
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
def populate_RecordingChannel(bl, remove_from_annotation=True):
    """
    When a Block is Block>Segment>AnalogSIgnal
    this function auto create all RecordingChannel following these rules:
      * when 'channel_index ' is in AnalogSIgnal the corresponding
        RecordingChannel is created.
      * 'channel_index ' is then set to None if remove_from_annotation
      * only one RecordingChannelGroup is created

    It is a utility at the end of creating a Block for IO.

    Usage:
    >>> populate_RecordingChannel(a_block)
    """
    channels = {}
    for segment in bl.segments:
        for anasig in segment.analogsignals:
            chan_idx = getattr(anasig, 'channel_index', None)
            if chan_idx is None:
                continue
            chan_idx = int(chan_idx)
            if chan_idx not in channels:
                channels[chan_idx] = RecordingChannel(index=chan_idx)
            # carry a 'channel_name' annotation over onto the channel
            if 'channel_name' in anasig.annotations:
                channels[chan_idx].name = anasig.annotations['channel_name']
                if remove_from_annotation:
                    anasig.annotations.pop('channel_name')
            channels[chan_idx].analogsignals.append(anasig)
            anasig.recordingchannel = channels[chan_idx]
            if remove_from_annotation:
                anasig.channel_index = None

    indexes = np.sort(list(channels.keys())).astype('i')
    names = np.array([channels[idx].name for idx in indexes], dtype='S')
    rcg = RecordingChannelGroup(name='all channels',
                                channel_indexes=indexes,
                                channel_names=names)
    bl.recordingchannelgroups.append(rcg)
    for chan_idx in indexes:
        # many to many relationship
        rcg.recordingchannels.append(channels[chan_idx])
        channels[chan_idx].recordingchannelgroups.append(rcg)
def setup_recordingchannels(self):
    """Build RecordingChannel fixtures: two name groups (the second
    re-uses the first group's first name) and matching channel objects."""
    name11, name12 = 'chan 1 1', 'chan 1 2'
    name21, name22 = 'chan 2 1', 'chan 2 2'

    self.rchannames1 = [name11, name12]
    self.rchannames2 = [name21, name22, name11]
    self.rchannames = [name11, name12, name21, name22]

    chan11 = RecordingChannel(name=name11)
    chan12 = RecordingChannel(name=name12)
    chan21 = RecordingChannel(name=name21)
    chan22 = RecordingChannel(name=name22)
    # distinct object deliberately sharing chan11's name
    chan23 = RecordingChannel(name=name11)

    self.rchan1 = [chan11, chan12]
    self.rchan2 = [chan21, chan22, chan23]
    self.rchan = [chan11, chan12, chan21, chan22]
def test__children(self):
    """Exhaustively check every relationship descriptor of a signal
    attached to one Segment and one RecordingChannel."""
    sig = self.signals[0]

    seg = Segment(name="seg1")
    seg.analogsignals = [sig]
    seg.create_many_to_one_relationship()

    chan = RecordingChannel(name="rchan1")
    chan.analogsignals = [sig]
    chan.create_many_to_one_relationship()

    # a signal has no children of any kind...
    self.assertEqual(sig._container_child_objects, ())
    self.assertEqual(sig._data_child_objects, ())
    self.assertEqual(sig._multi_child_objects, ())
    self.assertEqual(sig._child_properties, ())
    self.assertEqual(sig._single_child_objects, ())
    self.assertEqual(sig._container_child_containers, ())
    self.assertEqual(sig._data_child_containers, ())
    self.assertEqual(sig._single_child_containers, ())
    self.assertEqual(sig._multi_child_containers, ())
    self.assertEqual(sig._child_objects, ())
    self.assertEqual(sig._child_containers, ())
    self.assertEqual(sig.children, ())

    # ...but exactly two single parents
    self.assertEqual(sig._single_parent_objects,
                     ("Segment", "RecordingChannel"))
    self.assertEqual(sig._multi_parent_objects, ())
    self.assertEqual(sig._single_parent_containers,
                     ("segment", "recordingchannel"))
    self.assertEqual(sig._multi_parent_containers, ())
    self.assertEqual(sig._parent_objects,
                     ("Segment", "RecordingChannel"))
    self.assertEqual(sig._parent_containers,
                     ("segment", "recordingchannel"))

    self.assertEqual(len(sig.parents), 2)
    self.assertEqual(sig.parents[0].name, "seg1")
    self.assertEqual(sig.parents[1].name, "rchan1")

    # relationship creation on a childless object must be a no-op
    sig.create_many_to_one_relationship()
    sig.create_many_to_many_relationship()
    sig.create_relationship()
    assert_neo_object_is_compliant(sig)
def proc_src(filename):
    '''Load an src file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which is
    then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareSrcIO to make sure BrainwareSrcIO is working properly

    block = proc_src(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_src_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.src', so the filename to compare should fit that
    pattern. 'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_src_py2.npz'
             src file name = 'file1.src'
    '''
    with np.load(filename) as srcobj:
        # BUG FIX: on Python 3, .items() returns a non-indexable view;
        # wrap in list() so both py2 and py3 work (the docstring promises
        # py3 support).
        srcfile = list(srcobj.items())[0][1]

    # strip the '_src_py?.npz' suffix (12 chars) and restore '.src'
    filename = os.path.basename(filename[:-12] + '.src')

    block = Block(file_origin=filename)

    NChannels = srcfile['NChannels'][0, 0][0, 0]
    side = str(srcfile['side'][0, 0][0])
    ADperiod = srcfile['ADperiod'][0, 0][0, 0]

    comm_seg = proc_src_comments(srcfile, filename)
    block.segments.append(comm_seg)

    rcg = proc_src_units(srcfile, filename)
    chan_nums = np.arange(NChannels, dtype='int')
    chan_names = []
    for i in chan_nums:
        name = 'Chan' + str(i)
        chan_names.append(name)
        # BUG FIX: the original passed the literal string 'filename'
        # instead of the filename variable as file_origin
        chan = RecordingChannel(file_origin=filename, name=name,
                                index=int(i))
        rcg.recordingchannels.append(chan)
    rcg.channel_indexes = chan_nums
    rcg.channel_names = np.array(chan_names, dtype='string_')
    block.recordingchannelgroups.append(rcg)

    for rep in srcfile['sets'][0, 0].flatten():
        proc_src_condition(rep, filename, ADperiod, side, block)

    block.create_many_to_one_relationship()

    return block
def read_block(self, lazy=False, cascade=True, **kargs):
    '''
    Reads a block from the raw data file "fname" generated
    with BrainWare
    '''
    # no keyword arguments are implemented so far -- refuse them loudly
    # rather than silently ignore a caller's expectation or mistake
    if kargs:
        raise NotImplementedError('This method does not have any '
                                  'argument implemented yet')
    self._fsrc = None

    block = Block(file_origin=self._filename)

    # without cascade, return the bare block immediately
    if not cascade:
        return block

    # one channel group holding a single channel
    rcg = RecordingChannelGroup(file_origin=self._filename)
    rchan = RecordingChannel(file_origin=self._filename,
                             index=1, name='Chan1')

    rcg.recordingchannels.append(rchan)
    block.recordingchannelgroups.append(rcg)
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    with open(self._path, 'rb') as fobject:
        # keep reading segments until the file runs out
        seg = self._read_segment(fobject, lazy)
        while seg:
            block.segments.append(seg)
            rchan.analogsignals.append(seg.analogsignals[0])
            seg = self._read_segment(fobject, lazy)

    # drop the file object reference
    self._fsrc = None

    create_many_to_one_relationship(block)
    return block
def read_block(self, lazy=False, cascade=True, load_waveforms=False):
    """Read the file into a Block, grouping each spiketrain's Unit into
    a per-channel RecordingChannelGroup."""
    bl = Block(file_origin=self.filename)
    if not cascade:
        return bl

    seg = self.read_segment(self.filename, lazy=lazy, cascade=cascade,
                            load_waveforms=load_waveforms)
    bl.segments.append(seg)

    # This create rc and RCG for attaching Units
    neo.io.tools.populate_RecordingChannel(bl, remove_from_annotation=False)
    rcg0 = bl.recordingchannelgroups[0]

    for st in seg.spiketrains:
        chan = st.annotations['channel_index']
        # look the channel up in the global group; returns None if absent
        rc = next((c for c in rcg0.recordingchannels if c.index == chan),
                  None)
        if rc is None:
            rc = RecordingChannel(index=chan)
            rcg0.recordingchannels.append(rc)
            rc.recordingchannelgroups.append(rcg0)

        if len(rc.recordingchannelgroups) == 1:
            # first unit on this channel: create its dedicated group
            rcg = RecordingChannelGroup(name='Group {}'.format(chan))
            rcg.recordingchannels.append(rc)
            rc.recordingchannelgroups.append(rcg)
            bl.recordingchannelgroups.append(rcg)
        else:
            # dedicated group already exists (second entry after rcg0)
            rcg = rc.recordingchannelgroups[1]

        unit = Unit(name=st.name)
        rcg.units.append(unit)
        unit.spiketrains.append(st)

    bl.create_many_to_one_relationship()
    return bl
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Arguments:
        Channel_index: can be int, iterable or None to select one,
        many or all channel(s)
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUG FIX: compare against None explicitly -- a plain truth test
        # would treat channel_index=0 (a valid single-channel selection)
        # as "select all channels"
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        rcg = RecordingChannelGroup(name='all channels',
                                    channel_indexes=channel_index)
        blk.recordingchannelgroups.append(rcg)

        for idx in channel_index:
            # read nested analosignal
            ana = self.read_analogsignal(
                channel_index=idx,
                lazy=lazy,
                cascade=cascade,
            )
            chan = RecordingChannel(index=int(idx))
            seg.analogsignals += [ana]
            chan.analogsignals += [ana]
            rcg.recordingchannels.append(chan)
        seg.duration = (self._attrs['shape'][0] /
                        self._attrs['kwik']['sample_rate']) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(
    self,
    lazy=False,
    cascade=True,
):
    """Read a NeuroScope session into a Block.

    Parses the XML parameter file (``self.filename``) for the
    acquisition settings and the anatomical channel groups, then loads
    the raw signals from the companion ``.dat`` file via
    RawBinarySignalIO and attaches each signal to its channel.
    """
    # acquisition settings from the XML parameter file
    tree = ElementTree.parse(self.filename)
    root = tree.getroot()
    acq = root.find('acquisitionSystem')
    nbits = int(acq.find('nBits').text)
    nbchannel = int(acq.find('nChannels').text)
    sampling_rate = float(acq.find('samplingRate').text) * pq.Hz
    voltage_range = float(acq.find('voltageRange').text)
    #offset = int(acq.find('offset').text)
    amplification = float(acq.find('amplification').text)

    bl = Block(
        file_origin=os.path.basename(self.filename).replace('.xml', ''))
    if cascade:
        seg = Segment()
        bl.segments.append(seg)

        # RC and RCG: one RecordingChannelGroup per anatomical group,
        # one RecordingChannel per channel entry; rc_list keeps the
        # channels in file order so signal s maps to rc_list[s] below
        rc_list = []
        for i, xml_rcg in enumerate(
                root.find('anatomicalDescription').find(
                    'channelGroups').findall('group')):
            rcg = RecordingChannelGroup(name='Group {}'.format(i))
            bl.recordingchannelgroups.append(rcg)
            for xml_rc in xml_rcg:
                rc = RecordingChannel(index=int(xml_rc.text))
                rc_list.append(rc)
                rcg.recordingchannels.append(rc)
                rc.recordingchannelgroups.append(rcg)
            rcg.channel_indexes = np.array(
                [rc.index for rc in rcg.recordingchannels], dtype=int)
            rcg.channel_names = np.array([
                'Channel{}'.format(rc.index)
                for rc in rcg.recordingchannels
            ], dtype='S')

        # AnalogSignals: raw samples live in the sibling '.dat' file;
        # sample width depends on nBits, range is centered on zero
        reader = RawBinarySignalIO(
            filename=self.filename.replace('.xml', '.dat'))
        seg2 = reader.read_segment(
            cascade=True,
            lazy=lazy,
            sampling_rate=sampling_rate,
            t_start=0. * pq.s,
            unit=pq.V,
            nbchannel=nbchannel,
            bytesoffset=0,
            dtype=np.int16 if nbits <= 16 else np.int32,
            rangemin=-voltage_range / 2.,
            rangemax=voltage_range / 2.,
        )
        for s, sig in enumerate(seg2.analogsignals):
            if not lazy:
                # undo the recording amplification (skipped for lazy
                # signals, which carry no data)
                sig /= amplification
            sig.segment = seg
            seg.analogsignals.append(sig)
            # NOTE(review): assumes the .dat file has exactly as many
            # channels as the XML declares -- verify against the file
            rc_list[s].analogsignals.append(sig)

    create_many_to_one_relationship(bl)
    return bl