def test_read_nse_data(self):
    t_start, t_stop = None, None  # in samples
    nio = NeuralynxIO(self.sn, use_cache='never')
    seg = Segment('testsegment')

    for el_id, el_dict in nio.parameters_nse.items():
        filepath = nio.parameters_nse[el_id]['recording_file_name']
        filename = filepath.split('/')[-1].split('\\')[-1].split('.')[0]
        nio.read_nse(filename, seg, t_start=t_start, t_stop=t_stop,
                     waveforms=True)
        spiketrain = seg.filter({'electrode_id': el_id},
                                objects=SpikeTrain)[0]

        # read the expected values from the accompanying plain-text dump
        # target_data = np.zeros((500, 32))
        # timestamps = np.zeros(500)
        entries = []
        with open(self.pd + '/%s.txt' % filename) as datafile:
            for i, line in enumerate(datafile):
                line = line.strip('\xef\xbb\xbf')  # strip UTF-8 BOM
                entries.append(line.split())
        entries = np.asarray(entries, dtype=float)
        target_data = entries[:-1, 11:]
        timestamps = entries[:-1, 0]
        timestamps = (timestamps * pq.microsecond
                      - nio.parameters_global['t_start'])

        np.testing.assert_array_equal(timestamps.magnitude,
                                      spiketrain.magnitude)
        np.testing.assert_array_equal(target_data, spiketrain.waveforms)
def _handle_epochs_group(self, block):
    # Note that an NWB Epoch corresponds to a Neo Segment, not to a Neo Epoch.
    epochs = self._file.get('epochs')
    # todo: handle epochs.attrs.get('tags')
    for name, epoch in epochs.items():
        # todo: handle epoch.attrs.get('links')
        timeseries = []
        for key, value in epoch.items():
            if key == 'start_time':
                t_start = value * pq.second
            elif key == 'stop_time':
                t_stop = value * pq.second
            else:
                # todo: handle value['count']
                # todo: handle value['idx_start']
                timeseries.append(
                    self._handle_timeseries(key, value.get('timeseries')))
        segment = Segment(name=name)
        for obj in timeseries:
            obj.segment = segment
            if isinstance(obj, AnalogSignal):
                segment.analogsignals.append(obj)
            elif isinstance(obj, IrregularlySampledSignal):
                segment.irregularlysampledsignals.append(obj)
            elif isinstance(obj, Event):
                segment.events.append(obj)
            elif isinstance(obj, Epoch):
                segment.epochs.append(obj)
        segment.block = block
        block.segments.append(segment)
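# A minimal caller-side sketch of the epoch-to-segment mapping implemented
# above (hypothetical file name; assumes this method belongs to an NWB reader
# that exposes a read_block() entry point, which is not shown in this excerpt):
#
#     io = NWBIO('recording.nwb')        # hypothetical reader and path
#     block = io.read_block()
#     for segment in block.segments:     # one Segment per NWB epoch
#         print(segment.name,
#               len(segment.analogsignals), len(segment.events))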
def test__children(self):
    params = {"test2": "y1", "test3": True}
    evt = Event(
        1.5 * pq.ms,
        label="test epoch",
        name="test",
        description="tester",
        file_origin="test.file",
        test1=1,
        **params
    )
    evt.annotate(test1=1.1, test0=[1, 2])
    assert_neo_object_is_compliant(evt)

    segment = Segment(name="seg1")
    segment.events = [evt]
    segment.create_many_to_one_relationship()

    self.assertEqual(evt._single_parent_objects, ("Segment",))
    self.assertEqual(evt._multi_parent_objects, ())
    self.assertEqual(evt._single_parent_containers, ("segment",))
    self.assertEqual(evt._multi_parent_containers, ())
    self.assertEqual(evt._parent_objects, ("Segment",))
    self.assertEqual(evt._parent_containers, ("segment",))
    self.assertEqual(len(evt.parents), 1)
    self.assertEqual(evt.parents[0].name, "seg1")
    assert_neo_object_is_compliant(evt)
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    rchan = RecordingChannel(name='rchan1')
    rchan.analogsignals = [signal]
    rchan.create_many_to_one_relationship()

    self.assertEqual(signal._single_parent_objects,
                     ('Segment', 'RecordingChannel'))
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._single_parent_containers,
                     ('segment', 'recordingchannel'))
    self.assertEqual(signal._multi_parent_containers, ())
    self.assertEqual(signal._parent_objects,
                     ('Segment', 'RecordingChannel'))
    self.assertEqual(signal._parent_containers,
                     ('segment', 'recordingchannel'))
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'rchan1')
    assert_neo_object_is_compliant(signal)
def test__children(self):
    segment = Segment(name='seg1')
    segment.spikes = [self.spike1]
    segment.create_many_to_one_relationship()

    unit = Unit(name='unit1')
    unit.spikes = [self.spike1]
    unit.create_many_to_one_relationship()

    self.assertEqual(self.spike1._single_parent_objects,
                     ('Segment', 'Unit'))
    self.assertEqual(self.spike1._multi_parent_objects, ())
    self.assertEqual(self.spike1._single_parent_containers,
                     ('segment', 'unit'))
    self.assertEqual(self.spike1._multi_parent_containers, ())
    self.assertEqual(self.spike1._parent_objects, ('Segment', 'Unit'))
    self.assertEqual(self.spike1._parent_containers, ('segment', 'unit'))
    self.assertEqual(len(self.spike1.parents), 2)
    self.assertEqual(self.spike1.parents[0].name, 'seg1')
    self.assertEqual(self.spike1.parents[1].name, 'unit1')
    assert_neo_object_is_compliant(self.spike1)
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
    chx.analogsignals = [signal]
    chx.create_many_to_one_relationship()

    self.assertEqual(signal._single_parent_objects,
                     ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._single_parent_containers,
                     ('segment', 'channel_index'))
    self.assertEqual(signal._multi_parent_containers, ())
    self.assertEqual(signal._parent_objects,
                     ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._parent_containers,
                     ('segment', 'channel_index'))
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'chx1')
    assert_neo_object_is_compliant(signal)
def read_segment(self, lazy=False, cascade=True, group=0, series=0):
    seg = Segment(name='test')
    if cascade:
        tree = getbyroute(self.pul.tree, [0, group, series])
        for sw, sweep in enumerate(tree['children']):
            if sw == 0:
                starttime = pq.Quantity(float(sweep['contents'].swTimer), 's')
            for ch, channel in enumerate(sweep['children']):
                sig = self.read_analogsignal(group=group, series=series,
                                             sweep=sw, channel=ch)
                # annotate the signal with every sweep attribute except the
                # internal 'readlist' bookkeeping entry
                annotations = list(sweep['contents'].__dict__.keys())
                annotations.remove('readlist')
                for a in annotations:
                    d = {a: str(sweep['contents'].__dict__[a])}
                    sig.annotate(**d)
                sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),
                                          's') - starttime
                seg.analogsignals.append(sig)
        annotations = list(tree['contents'].__dict__.keys())
        annotations.remove('readlist')
        for a in annotations:
            d = {a: str(tree['contents'].__dict__[a])}
            seg.annotate(**d)
    create_many_to_one_relationship(seg)
    return seg
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Arguments:
        channel_index: can be int, iterable or None to select one,
            many or all channel(s) respectively
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # normalise channel_index to a numpy array of channel numbers
        if channel_index:
            if type(channel_index) is int:
                channel_index = [channel_index]
            if type(channel_index) is list:
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs["shape"][1])

        chx = ChannelIndex(name="all channels", index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy, cascade=cascade)
        ana.channel_index = chx
        seg.duration = (self._attrs["shape"][0]
                        / self._attrs["kwik"]["sample_rate"]) * pq.s

    # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
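# Minimal usage sketch for read_block() above (hypothetical file path; the
# KwikIO class name and constructor signature are assumed, not shown here):
#
#     io = KwikIO(filename='experiment.kwik')
#     blk = io.read_block(channel_index=3)           # a single channel
#     blk = io.read_block(channel_index=[0, 1, 4])   # a subset of channels
#     blk = io.read_block(channel_index=None)        # all channels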
def test__children(self):
    params = {'test2': 'y1', 'test3': True}
    epc = Epoch([1.1, 1.5, 1.7] * pq.ms,
                durations=[20, 40, 60] * pq.ns,
                labels=np.array(['test epoch 1',
                                 'test epoch 2',
                                 'test epoch 3'], dtype='S'),
                name='test', description='tester',
                file_origin='test.file', test1=1, **params)
    epc.annotate(test1=1.1, test0=[1, 2])
    assert_neo_object_is_compliant(epc)

    segment = Segment(name='seg1')
    segment.epochs = [epc]
    segment.create_many_to_one_relationship()

    self.assertEqual(epc._single_parent_objects, ('Segment',))
    self.assertEqual(epc._multi_parent_objects, ())
    self.assertEqual(epc._single_parent_containers, ('segment',))
    self.assertEqual(epc._multi_parent_containers, ())
    self.assertEqual(epc._parent_objects, ('Segment',))
    self.assertEqual(epc._parent_containers, ('segment',))
    self.assertEqual(len(epc.parents), 1)
    self.assertEqual(epc.parents[0].name, 'seg1')
    assert_neo_object_is_compliant(epc)
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which
    is then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareDamIO to make sure BrainwareDamIO is working properly.

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'.  This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that
    pattern.  'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
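# Usage sketch for proc_dam() (hypothetical file name following the
# '*_dam_py?.npz' convention documented above):
#
#     block = proc_dam('file1_dam_py3.npz')
#     assert block.file_origin == 'file1.dam'
#     for segment in block.segments:   # one Segment per stimulus presentation
#         print(segment.index, segment.analogsignals[0].t_start)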
def _group_to_neo(self, nix_group):
    neo_attrs = self._nix_attr_to_neo(nix_group)
    neo_segment = Segment(**neo_attrs)
    neo_segment.rec_datetime = datetime.fromtimestamp(
        nix_group.created_at
    )
    self._neo_map[nix_group.name] = neo_segment
    return neo_segment
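# Sketch of how this mapping might be exercised (assumes a nixio file handle
# and that this method belongs to a NIX reader instance `io`; the path and
# indices are hypothetical):
#
#     import nixio
#     nf = nixio.File.open('data.nix', nixio.FileMode.ReadOnly)
#     group = nf.blocks[0].groups[0]
#     seg = io._group_to_neo(group)  # Segment; rec_datetime from created_at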
def test_segment_write(self):
    block = Block(name=self.rword())
    segment = Segment(name=self.rword(), description=self.rword())
    block.segments.append(segment)
    self.write_and_compare([block])

    segment.annotate(**self.rdict(2))
    self.write_and_compare([block])
def read_segment(self, n_start, n_stop, chlist=None, lazy=False,
                 cascade=True):
    """Reads a Segment from the file and stores in database.

    The Segment will contain one AnalogSignal for each channel
    and will go from n_start to n_stop (in samples).

    Arguments:
        n_start : time in samples that the Segment begins
        n_stop : time in samples that the Segment ends

    Python indexing is used, so n_stop is not inclusive.

    Returns a Segment object containing the data.
    """
    # If no channel numbers provided, get all of them
    if chlist is None:
        chlist = self.loader.get_neural_channel_numbers()

    # Conversion from bits to full_range units
    conversion = self.full_range / 2 ** (8 * self.header.sample_width)

    # Create the Segment
    seg = Segment(file_origin=self.filename)
    t_start = float(n_start) / self.header.f_samp
    t_stop = float(n_stop) / self.header.f_samp
    seg.annotate(t_start=t_start)
    seg.annotate(t_stop=t_stop)

    # Load data from each channel and store
    for ch in chlist:
        if lazy:
            sig = np.array([]) * conversion
        else:
            # Get the data from the loader
            sig = np.array(
                self.loader._get_channel(ch)[n_start:n_stop]) * conversion

        # Create an AnalogSignal with the data in it
        anasig = AnalogSignal(signal=sig,
                              sampling_rate=self.header.f_samp * pq.Hz,
                              t_start=t_start * pq.s,
                              file_origin=self.filename,
                              description='Channel %d from %f to %f'
                                          % (ch, t_start, t_stop),
                              channel_index=int(ch))

        if lazy:
            anasig.lazy_shape = n_stop - n_start

        # Link the signal to the segment
        seg.analogsignals.append(anasig)

        # Link the signal to the recording channel from which it came
        # rc = self.channel_number_to_recording_channel[ch]
        # rc.analogsignals.append(anasig)

    return seg
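# Usage sketch (assumes a reader instance `io` of the class this method
# belongs to; the sampling rate is hypothetical).  n_start/n_stop are in
# samples, so convert from seconds via header.f_samp:
#
#     f_samp = io.header.f_samp                        # e.g. 30000.0 Hz
#     seg = io.read_segment(n_start=0,
#                           n_stop=int(2.0 * f_samp))  # first two seconds
#     seg = io.read_segment(n_start=0, n_stop=1000,
#                           chlist=[1, 2, 3])          # three channels only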
def read_segment(self, lazy=False, cascade=True, gdf_id_list=None,
                 time_unit=pq.ms, t_start=None, t_stop=None,
                 id_column=0, time_column=1, **args):
    """
    Read a Segment which contains SpikeTrain(s) with specified neuron IDs
    from the GDF data.

    Parameters
    ----------
    lazy : bool, optional, default: False
    cascade : bool, optional, default: True
    gdf_id_list : list or tuple, default: None
        Can be either a list of GDF IDs for which to return SpikeTrain(s),
        or a tuple specifying a range of GDF IDs (both boundaries
        [start, stop] are included). Must be specified if the GDF file
        contains neuron IDs; the default None then raises an error.
        Specify an empty list [] to retrieve the spike trains of all
        neurons with at least one spike.
    time_unit : Quantity (time), optional, default: quantities.ms
        The time unit of recorded time stamps.
    t_start : Quantity (time), default: None
        Start time of SpikeTrain. t_start must be specified; the default
        None raises an error.
    t_stop : Quantity (time), default: None
        Stop time of SpikeTrain. t_stop must be specified; the default
        None raises an error.
    id_column : int, optional, default: 0
        Column index of neuron IDs.
    time_column : int, optional, default: 1
        Column index of time stamps.

    Returns
    -------
    seg : Segment
        The Segment contains one SpikeTrain for each ID in gdf_id_list.
    """
    if isinstance(gdf_id_list, tuple):
        # expand an inclusive (start, stop) range into an explicit list
        gdf_id_list = list(range(gdf_id_list[0], gdf_id_list[1] + 1))

    # __read_spiketrains() needs a list of IDs
    if gdf_id_list is None:
        gdf_id_list = [None]

    # create an empty Segment and fill in the spike trains
    seg = Segment()
    seg.spiketrains = self.__read_spiketrains(gdf_id_list, time_unit,
                                              t_start, t_stop,
                                              id_column, time_column,
                                              **args)

    return seg
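# Usage sketch for the GDF reader above (file name and IDs hypothetical;
# the reader class name, here GdfIO, is assumed and not shown in this
# excerpt):
#
#     io = GdfIO('spikes.gdf')
#     seg = io.read_segment(gdf_id_list=(10, 19),   # IDs 10..19, inclusive
#                           t_start=0. * pq.ms, t_stop=1000. * pq.ms)
#     seg = io.read_segment(gdf_id_list=[],         # every neuron that spiked
#                           t_start=0. * pq.ms, t_stop=1000. * pq.ms)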
def _read_segment(self, node, parent):
    attributes = self._get_standard_attributes(node)
    segment = Segment(**attributes)

    signals = []
    for name, child_node in node['analogsignals'].items():
        if "AnalogSignal" in name:
            signals.append(self._read_analogsignal(child_node,
                                                   parent=segment))
    if signals and self.merge_singles:
        # signals will be merged later
        segment.unmerged_analogsignals = signals
        signals = []
    for name, child_node in node['analogsignalarrays'].items():
        if "AnalogSignalArray" in name:
            signals.append(self._read_analogsignalarray(child_node,
                                                        parent=segment))
    segment.analogsignals = signals

    irr_signals = []
    for name, child_node in node['irregularlysampledsignals'].items():
        if "IrregularlySampledSignal" in name:
            irr_signals.append(
                self._read_irregularlysampledsignal(child_node,
                                                    parent=segment))
    if irr_signals and self.merge_singles:
        segment.unmerged_irregularlysampledsignals = irr_signals
        irr_signals = []
    segment.irregularlysampledsignals = irr_signals

    epochs = []
    for name, child_node in node['epochs'].items():
        if "Epoch" in name:
            epochs.append(self._read_epoch(child_node, parent=segment))
    if self.merge_singles:
        epochs = self._merge_data_objects(epochs)
    for name, child_node in node['epocharrays'].items():
        if "EpochArray" in name:
            epochs.append(self._read_epocharray(child_node,
                                                parent=segment))
    segment.epochs = epochs

    events = []
    for name, child_node in node['events'].items():
        if "Event" in name:
            events.append(self._read_event(child_node, parent=segment))
    if self.merge_singles:
        events = self._merge_data_objects(events)
    for name, child_node in node['eventarrays'].items():
        if "EventArray" in name:
            events.append(self._read_eventarray(child_node,
                                                parent=segment))
    segment.events = events

    spiketrains = []
    for name, child_node in node['spikes'].items():
        raise NotImplementedError('Spike objects not yet handled.')
    for name, child_node in node['spiketrains'].items():
        if "SpikeTrain" in name:
            spiketrains.append(self._read_spiketrain(child_node,
                                                     parent=segment))
    segment.spiketrains = spiketrains

    segment.block = parent
    return segment
def create_segment(self, parent=None, name='Segment'):
    segment = Segment()
    segment.block = parent

    self._assign_basic_attributes(segment, name=name)
    self._assign_datetime_attributes(segment)
    self._assign_index_attribute(segment)

    self._create_segment_children(segment)

    self._assign_annotations(segment)

    return segment
def read_segment(self, lazy=False, cascade=True, group=0, series=0):
    seg = Segment(name='test')
    if cascade:
        tree = getbyroute(self.pul.tree, [0, group, series])
        for sw, sweep in enumerate(tree['children']):
            if sw == 0:
                starttime = pq.Quantity(float(sweep['contents'].swTimer), 's')
            for ch, channel in enumerate(sweep['children']):
                sig = self.read_analogsignal(group=group, series=series,
                                             sweep=sw, channel=ch)
                annotations = list(sweep['contents'].__dict__.keys())
                annotations.remove('readlist')
                for a in annotations:
                    d = {a: str(sweep['contents'].__dict__[a])}
                    sig.annotate(**d)
                sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),
                                          's') - starttime
                seg.analogsignals.append(sig)
        annotations = list(tree['contents'].__dict__.keys())
        annotations.remove('readlist')
        for a in annotations:
            d = {a: str(tree['contents'].__dict__[a])}
            seg.annotate(**d)
    create_many_to_one_relationship(seg)

    # add protocols to signals
    for sig_index, sig in enumerate(seg.analogsignals):
        pgf_index = sig.annotations['pgf_index']
        st_rec = self.pgf.tree['children'][pgf_index]['contents']
        chnls = [ch for ch in self.pgf.tree['children'][pgf_index]['children']]
        for ch_index, chnl in enumerate(chnls):
            ep_start = sig.t_start
            for se_epoch_index, se_epoch in enumerate(chnl['children']):
                se_rec = se_epoch['contents']
                se_duration = pq.Quantity(float(se_rec.seDuration), 's')
                if not int(se_rec.seVoltageSource):
                    se_voltage = pq.Quantity(float(se_rec.seVoltage), 'V')
                else:
                    se_voltage = pq.Quantity(
                        float(chnl['contents'].chHolding), 'V')
                epoch = neo.Epoch(ep_start, se_duration, 'protocol_epoch',
                                  value=se_voltage, channel_index=ch_index)
                fully_annototate(chnl, epoch)
                epoch.annotations['sig_index'] = sig_index
                ep_start = ep_start + se_duration
                seg.epochs.append(epoch)
    return seg
def read_segment(self, lazy=False, cascade=True):
    data, metadata = self._read_file_contents()
    annotations = dict((k, metadata.get(k, 'unknown'))
                       for k in ("label", "variable",
                                 "first_id", "last_id"))
    seg = Segment(**annotations)
    if cascade:
        if metadata['variable'] == 'spikes':
            for i in range(metadata['first_index'],
                           metadata['last_index'] + 1):
                spiketrain = self._extract_spikes(data, metadata, i, lazy)
                if spiketrain is not None:
                    seg.spiketrains.append(spiketrain)
            # store dt for SpikeTrains only, as it can be retrieved from
            # sampling_period for AnalogSignal
            seg.annotate(dt=metadata['dt'])
        else:
            signal = self._extract_signals(data, metadata, lazy)
            if signal is not None:
                seg.analogsignals.append(signal)
        seg.create_many_to_one_relationship()
    return seg
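# Usage sketch (hypothetical file; assumes a PyNN-style text reader class,
# here called PyNNTextIO, whose constructor is not shown in this excerpt):
#
#     io = PyNNTextIO(filename='population0.dat')
#     seg = io.read_segment()
#     if seg.annotations['variable'] == 'spikes':
#         print(len(seg.spiketrains), seg.annotations['dt'])
#     else:
#         print(seg.analogsignals[0].sampling_period)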
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignalarrays = [signal]
    segment.create_many_to_one_relationship()

    rcg = RecordingChannelGroup(name='rcg1')
    rcg.analogsignalarrays = [signal]
    rcg.create_many_to_one_relationship()

    self.assertEqual(signal._container_child_objects, ())
    self.assertEqual(signal._data_child_objects, ())
    self.assertEqual(signal._single_parent_objects,
                     ('Segment', 'RecordingChannelGroup'))
    self.assertEqual(signal._multi_child_objects, ())
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._child_properties, ())

    self.assertEqual(signal._single_child_objects, ())

    self.assertEqual(signal._container_child_containers, ())
    self.assertEqual(signal._data_child_containers, ())
    self.assertEqual(signal._single_child_containers, ())
    self.assertEqual(signal._single_parent_containers,
                     ('segment', 'recordingchannelgroup'))
    self.assertEqual(signal._multi_child_containers, ())
    self.assertEqual(signal._multi_parent_containers, ())

    self.assertEqual(signal._child_objects, ())
    self.assertEqual(signal._child_containers, ())
    self.assertEqual(signal._parent_objects,
                     ('Segment', 'RecordingChannelGroup'))
    self.assertEqual(signal._parent_containers,
                     ('segment', 'recordingchannelgroup'))

    self.assertEqual(signal.children, ())
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'rcg1')

    signal.create_many_to_one_relationship()
    signal.create_many_to_many_relationship()
    signal.create_relationship()
    assert_neo_object_is_compliant(signal)
def test__children(self):
    params = {'testarg2': 'yes', 'testarg3': True}
    evta = EventArray([1.1, 1.5, 1.7] * pq.ms,
                      labels=np.array(['test event 1',
                                       'test event 2',
                                       'test event 3'], dtype='S'),
                      name='test', description='tester',
                      file_origin='test.file', testarg1=1, **params)
    evta.annotate(testarg1=1.1, testarg0=[1, 2, 3])
    assert_neo_object_is_compliant(evta)

    segment = Segment(name='seg1')
    segment.eventarrays = [evta]
    segment.create_many_to_one_relationship()

    self.assertEqual(evta._container_child_objects, ())
    self.assertEqual(evta._data_child_objects, ())
    self.assertEqual(evta._single_parent_objects, ('Segment',))
    self.assertEqual(evta._multi_child_objects, ())
    self.assertEqual(evta._multi_parent_objects, ())
    self.assertEqual(evta._child_properties, ())

    self.assertEqual(evta._single_child_objects, ())

    self.assertEqual(evta._container_child_containers, ())
    self.assertEqual(evta._data_child_containers, ())
    self.assertEqual(evta._single_child_containers, ())
    self.assertEqual(evta._single_parent_containers, ('segment',))
    self.assertEqual(evta._multi_child_containers, ())
    self.assertEqual(evta._multi_parent_containers, ())

    self.assertEqual(evta._child_objects, ())
    self.assertEqual(evta._child_containers, ())
    self.assertEqual(evta._parent_objects, ('Segment',))
    self.assertEqual(evta._parent_containers, ('segment',))

    self.assertEqual(evta.children, ())
    self.assertEqual(len(evta.parents), 1)
    self.assertEqual(evta.parents[0].name, 'seg1')

    evta.create_many_to_one_relationship()
    evta.create_many_to_many_relationship()
    evta.create_relationship()
    assert_neo_object_is_compliant(evta)
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Arguments:
        channel_index: can be int, iterable or None to select one,
            many or all channel(s) respectively
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        if channel_index:
            if type(channel_index) is int:
                channel_index = [channel_index]
            if type(channel_index) is list:
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        rcg = RecordingChannelGroup(name='all channels',
                                    channel_indexes=channel_index)
        blk.recordingchannelgroups.append(rcg)

        for idx in channel_index:
            # read nested analogsignal
            ana = self.read_analogsignal(channel_index=idx,
                                         lazy=lazy,
                                         cascade=cascade)
            chan = RecordingChannel(index=int(idx))
            seg.analogsignals += [ana]
            chan.analogsignals += [ana]
            rcg.recordingchannels.append(chan)

        seg.duration = (self._attrs['shape'][0]
                        / self._attrs['kwik']['sample_rate']) * pq.s

    # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def test_read_nev_data(self):
    t_start, t_stop = 0 * pq.s, 1000 * pq.s
    nio = NeuralynxIO(self.sn, use_cache='never')
    seg = Segment('testsegment')

    filename = 'Events'
    nio.read_nev(filename + '.nev', seg, t_start=t_start, t_stop=t_stop)

    # read the expected values from the accompanying plain-text dump
    timestamps = []
    nttls = []
    names = []
    event_ids = []
    with open(self.pd + '/%s.txt' % filename) as datafile:
        for i, line in enumerate(datafile):
            line = line.strip('\xef\xbb\xbf')  # strip UTF-8 BOM
            entries = line.split('\t')
            nttls.append(int(entries[5]))
            timestamps.append(int(entries[3]))
            names.append(entries[10].rstrip('\r\n'))
            event_ids.append(int(entries[4]))

    timestamps = (np.array(timestamps) * pq.microsecond
                  - nio.parameters_global['t_start'])

    # mask out everything outside the requested time window
    mask = np.where(timestamps < t_stop)[0]
    # return if no event fits the criteria
    if len(mask) == 0:
        return
    timestamps = timestamps[mask]
    nttls = np.asarray(nttls)[mask]
    names = np.asarray(names)[mask]
    event_ids = np.asarray(event_ids)[mask]

    for i in range(len(timestamps)):
        events = seg.filter({'nttl': nttls[i]}, objects=Event)
        events = [e for e in events
                  if (e.annotations['marker_id'] == event_ids[i]
                      and e.labels == names[i])]
        self.assertTrue(len(events) == 1)
        self.assertTrue(timestamps[i] in events[0].times)
def create_all_annotated(cls):
    times = cls.rquant(1, pq.s)
    signal = cls.rquant(1, pq.V)

    blk = Block()
    blk.annotate(**cls.rdict(3))

    seg = Segment()
    seg.annotate(**cls.rdict(4))
    blk.segments.append(seg)

    asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
    asig.annotate(**cls.rdict(2))
    seg.analogsignals.append(asig)

    isig = IrregularlySampledSignal(times=times, signal=signal,
                                    time_units=pq.s)
    isig.annotate(**cls.rdict(2))
    seg.irregularlysampledsignals.append(isig)

    epoch = Epoch(times=times, durations=times)
    epoch.annotate(**cls.rdict(4))
    seg.epochs.append(epoch)

    event = Event(times=times)
    event.annotate(**cls.rdict(4))
    seg.events.append(event)

    spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
    d = cls.rdict(6)
    d["quantity"] = pq.Quantity(10, "mV")
    d["qarray"] = pq.Quantity(range(10), "mA")
    spiketrain.annotate(**d)
    seg.spiketrains.append(spiketrain)

    chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
    chx.annotate(**cls.rdict(5))
    blk.channel_indexes.append(chx)

    unit = Unit()
    unit.annotate(**cls.rdict(2))
    chx.units.append(unit)

    return blk
def test__children(self):
    params = {'testarg2': 'yes', 'testarg3': True}
    epc = Epoch(1.5 * pq.ms, duration=20 * pq.ns, label='test epoch',
                name='test', description='tester',
                file_origin='test.file', testarg1=1, **params)
    epc.annotate(testarg1=1.1, testarg0=[1, 2, 3])
    assert_neo_object_is_compliant(epc)

    segment = Segment(name='seg1')
    segment.epochs = [epc]
    segment.create_many_to_one_relationship()

    self.assertEqual(epc._container_child_objects, ())
    self.assertEqual(epc._data_child_objects, ())
    self.assertEqual(epc._single_parent_objects, ('Segment',))
    self.assertEqual(epc._multi_child_objects, ())
    self.assertEqual(epc._multi_parent_objects, ())
    self.assertEqual(epc._child_properties, ())

    self.assertEqual(epc._single_child_objects, ())

    self.assertEqual(epc._container_child_containers, ())
    self.assertEqual(epc._data_child_containers, ())
    self.assertEqual(epc._single_child_containers, ())
    self.assertEqual(epc._single_parent_containers, ('segment',))
    self.assertEqual(epc._multi_child_containers, ())
    self.assertEqual(epc._multi_parent_containers, ())

    self.assertEqual(epc._child_objects, ())
    self.assertEqual(epc._child_containers, ())
    self.assertEqual(epc._parent_objects, ('Segment',))
    self.assertEqual(epc._parent_containers, ('segment',))

    self.assertEqual(epc.children, ())
    self.assertEqual(len(epc.parents), 1)
    self.assertEqual(epc.parents[0].name, 'seg1')

    epc.create_many_to_one_relationship()
    epc.create_many_to_many_relationship()
    epc.create_relationship()
    assert_neo_object_is_compliant(epc)
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name="seg1")
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    rchan = RecordingChannel(name="rchan1")
    rchan.analogsignals = [signal]
    rchan.create_many_to_one_relationship()

    self.assertEqual(signal._container_child_objects, ())
    self.assertEqual(signal._data_child_objects, ())
    self.assertEqual(signal._single_parent_objects,
                     ("Segment", "RecordingChannel"))
    self.assertEqual(signal._multi_child_objects, ())
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._child_properties, ())

    self.assertEqual(signal._single_child_objects, ())

    self.assertEqual(signal._container_child_containers, ())
    self.assertEqual(signal._data_child_containers, ())
    self.assertEqual(signal._single_child_containers, ())
    self.assertEqual(signal._single_parent_containers,
                     ("segment", "recordingchannel"))
    self.assertEqual(signal._multi_child_containers, ())
    self.assertEqual(signal._multi_parent_containers, ())

    self.assertEqual(signal._child_objects, ())
    self.assertEqual(signal._child_containers, ())
    self.assertEqual(signal._parent_objects,
                     ("Segment", "RecordingChannel"))
    self.assertEqual(signal._parent_containers,
                     ("segment", "recordingchannel"))

    self.assertEqual(signal.children, ())
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, "seg1")
    self.assertEqual(signal.parents[1].name, "rchan1")

    signal.create_many_to_one_relationship()
    signal.create_many_to_many_relationship()
    signal.create_relationship()
    assert_neo_object_is_compliant(signal)
def read_segment(self,
                 lazy=False,
                 cascade=True,
                 delimiter='\t',
                 t_start=0. * pq.s,
                 unit=pq.s,
                 ):
    """
    Arguments:
        delimiter : column delimiter in the file: '\t', one space,
            two spaces, ',' or ';'
        t_start : start time of all spiketrains (0 by default)
        unit : unit of spike times, can be a str or directly a Quantity
    """
    unit = pq.Quantity(1, unit)

    seg = Segment(file_origin=os.path.basename(self.filename))
    if not cascade:
        return seg

    # one row of spike times per spike train
    # (universal newlines are the default in Python 3 text mode,
    # so the old 'Ur' open mode is no longer needed)
    f = open(self.filename, 'r')
    for i, line in enumerate(f):
        alldata = line[:-1].split(delimiter)
        if alldata[-1] == '':
            alldata = alldata[:-1]
        if alldata[0] == '':
            alldata = alldata[1:]

        if lazy:
            spike_times = []
            t_stop = t_start
        else:
            spike_times = np.array(alldata).astype('f')
            t_stop = spike_times.max() * unit

        sptr = SpikeTrain(spike_times * unit,
                          t_start=t_start, t_stop=t_stop)
        if lazy:
            sptr.lazy_shape = len(alldata)

        sptr.annotate(channel_index=i)
        seg.spiketrains.append(sptr)
    f.close()

    seg.create_many_to_one_relationship()
    return seg
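# Usage sketch for the ASCII spike-train reader above (hypothetical file;
# the reader class name is assumed).  Each text row becomes one SpikeTrain
# annotated with its row index as 'channel_index':
#
#     io = AsciiSpikeTrainIO(filename='spikes.txt')
#     seg = io.read_segment(delimiter=' ', t_start=0. * pq.s, unit=pq.ms)
#     for st in seg.spiketrains:
#         print(st.annotations['channel_index'], st.t_stop)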
def proc_src_comments(srcfile, filename):
    '''Get the comments in an src file that has been processed by the
    official matlab function.  See proc_src for details.'''
    comm_seg = Segment(name='Comments', file_origin=filename)
    commentarray = srcfile['comments'].flatten()[0]
    senders = [res[0] for res in commentarray['sender'].flatten()]
    texts = [res[0] for res in commentarray['text'].flatten()]
    timeStamps = [res[0, 0] for res in commentarray['timeStamp'].flatten()]

    timeStamps = np.array(timeStamps, dtype=np.float32)
    t_start = timeStamps.min()
    timeStamps = pq.Quantity(timeStamps - t_start, units=pq.d).rescale(pq.s)
    texts = np.array(texts, dtype='S')
    senders = np.array(senders, dtype='S')
    t_start = brainwaresrcio.convert_brainwaresrc_timestamp(t_start.tolist())

    comments = Event(times=timeStamps, labels=texts, senders=senders)
    comm_seg.events = [comments]
    comm_seg.rec_datetime = t_start

    return comm_seg
def test__children(self):
    segment = Segment(name="seg1")
    segment.spikes = [self.spike1]
    segment.create_many_to_one_relationship()

    unit = Unit(name="unit1")
    unit.spikes = [self.spike1]
    unit.create_many_to_one_relationship()

    self.assertEqual(self.spike1._container_child_objects, ())
    self.assertEqual(self.spike1._data_child_objects, ())
    self.assertEqual(self.spike1._single_parent_objects,
                     ("Segment", "Unit"))
    self.assertEqual(self.spike1._multi_child_objects, ())
    self.assertEqual(self.spike1._multi_parent_objects, ())
    self.assertEqual(self.spike1._child_properties, ())

    self.assertEqual(self.spike1._single_child_objects, ())

    self.assertEqual(self.spike1._container_child_containers, ())
    self.assertEqual(self.spike1._data_child_containers, ())
    self.assertEqual(self.spike1._single_child_containers, ())
    self.assertEqual(self.spike1._single_parent_containers,
                     ("segment", "unit"))
    self.assertEqual(self.spike1._multi_child_containers, ())
    self.assertEqual(self.spike1._multi_parent_containers, ())

    self.assertEqual(self.spike1._child_objects, ())
    self.assertEqual(self.spike1._child_containers, ())
    self.assertEqual(self.spike1._parent_objects, ("Segment", "Unit"))
    self.assertEqual(self.spike1._parent_containers, ("segment", "unit"))

    self.assertEqual(self.spike1.children, ())
    self.assertEqual(len(self.spike1.parents), 2)
    self.assertEqual(self.spike1.parents[0].name, "seg1")
    self.assertEqual(self.spike1.parents[1].name, "unit1")

    self.spike1.create_many_to_one_relationship()
    self.spike1.create_many_to_many_relationship()
    self.spike1.create_relationship()
    assert_neo_object_is_compliant(self.spike1)
def test_read_ncs_data(self):
    t_start, t_stop = 0, 500 * 512  # in samples
    nio = NeuralynxIO(self.sn, use_cache='never')
    seg = Segment('testsegment')

    for el_id, el_dict in nio.parameters_ncs.items():
        filepath = nio.parameters_ncs[el_id]['recording_file_name']
        filename = filepath.split('/')[-1].split('\\')[-1].split('.')[0]
        nio.read_ncs(filename, seg, t_start=t_start, t_stop=t_stop)
        anasig = seg.filter({'electrode_id': el_id},
                            objects=AnalogSignal)[0]

        # read the expected values from the accompanying plain-text dump
        target_data = np.zeros((16679, 512))
        with open(self.pd + '/%s.txt' % filename) as datafile:
            for i, line in enumerate(datafile):
                line = line.strip('\xef\xbb\xbf')  # strip UTF-8 BOM
                entries = line.split()
                target_data[i, :] = np.asarray(entries[4:])

        target_data = target_data.reshape((-1, 1)) * el_dict['ADBitVolts']

        np.testing.assert_array_equal(target_data[:len(anasig)],
                                      anasig.magnitude)
def read_segment(self, import_neuroshare_segment=True,
                 lazy=False, cascade=True):
    """
    Arguments:
        import_neuroshare_segment: whether to import neuroshare 'segment'
            entities as SpikeTrains with associated waveforms, or to skip
            them entirely.
    """
    seg = Segment(file_origin=os.path.basename(self.filename), )

    # load the vendor neuroshare library
    if sys.platform.startswith('win'):
        neuroshare = ctypes.windll.LoadLibrary(self.dllname)
    elif sys.platform.startswith('linux'):
        neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
    neuroshare = DllWithError(neuroshare)

    # elif sys.platform.startswith('darwin'):

    # API version
    info = ns_LIBRARYINFO()
    neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
    seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.'
                                    + str(info.dwAPIVersionMin))

    if not cascade:
        return seg

    # open file
    hFile = ctypes.c_uint32(0)
    neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                           ctypes.byref(hFile))
    fileinfo = ns_FILEINFO()
    neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                              ctypes.sizeof(fileinfo))

    # read all entities
    for dwEntityID in range(fileinfo.dwEntityCount):
        entityInfo = ns_ENTITYINFO()
        neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                    ctypes.byref(entityInfo),
                                    ctypes.sizeof(entityInfo))

        # EVENT
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
            pEventInfo = ns_EVENTINFO()
            neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                       ctypes.byref(pEventInfo),
                                       ctypes.sizeof(pEventInfo))

            if pEventInfo.dwEventType == 0:  # TEXT
                pData = ctypes.create_string_buffer(
                    pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 1:  # CSV
                pData = ctypes.create_string_buffer(
                    pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 2:  # 8-bit
                pData = ctypes.c_byte(0)
            elif pEventInfo.dwEventType == 3:  # 16-bit
                pData = ctypes.c_int16(0)
            elif pEventInfo.dwEventType == 4:  # 32-bit
                pData = ctypes.c_int32(0)
            pdTimeStamp = ctypes.c_double(0.)
            pdwDataRetSize = ctypes.c_uint32(0)

            ea = Event(name=str(entityInfo.szEntityLabel), )
            if not lazy:
                times = []
                labels = []
                for dwIndex in range(entityInfo.dwItemCount):
                    neuroshare.ns_GetEventData(
                        hFile, dwEntityID, dwIndex,
                        ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                        ctypes.sizeof(pData),
                        ctypes.byref(pdwDataRetSize))
                    times.append(pdTimeStamp.value)
                    labels.append(str(pData.value))
                ea.times = times * pq.s
                ea.labels = np.array(labels, dtype='S')
            else:
                ea.lazy_shape = entityInfo.dwItemCount
            seg.eventarrays.append(ea)

        # analog
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
            pAnalogInfo = ns_ANALOGINFO()
            neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                        ctypes.byref(pAnalogInfo),
                                        ctypes.sizeof(pAnalogInfo))
            dwIndexCount = entityInfo.dwItemCount

            if lazy:
                signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
            else:
                pdwContCount = ctypes.c_uint32(0)
                pData = np.zeros((entityInfo.dwItemCount,),
                                 dtype='float64')
                total_read = 0
                while total_read < entityInfo.dwItemCount:
                    dwStartIndex = ctypes.c_uint32(total_read)
                    dwStopIndex = ctypes.c_uint32(
                        entityInfo.dwItemCount - total_read)
                    neuroshare.ns_GetAnalogData(
                        hFile, dwEntityID, dwStartIndex, dwStopIndex,
                        ctypes.byref(pdwContCount),
                        pData[total_read:].ctypes.data_as(
                            ctypes.POINTER(ctypes.c_double)))
                    total_read += pdwContCount.value

                signal = pq.Quantity(pData, units=pAnalogInfo.szUnits,
                                     copy=False)

            # t_start
            dwIndex = 0
            pdTime = ctypes.c_double(0)
            neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                         ctypes.byref(pdTime))

            anaSig = AnalogSignal(
                signal,
                sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                t_start=pdTime.value * pq.s,
                name=str(entityInfo.szEntityLabel),
            )
            anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
            if lazy:
                anaSig.lazy_shape = entityInfo.dwItemCount
            seg.analogsignals.append(anaSig)

        # segment
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' \
                and import_neuroshare_segment:

            pdwSegmentInfo = ns_SEGMENTINFO()
            if not str(entityInfo.szEntityLabel).startswith('spks'):
                continue

            neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                         ctypes.byref(pdwSegmentInfo),
                                         ctypes.sizeof(pdwSegmentInfo))
            nsource = pdwSegmentInfo.dwSourceCount

            pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
            neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

            for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                pSourceInfo = ns_SEGSOURCEINFO()
                neuroshare.ns_GetSegmentSourceInfo(
                    hFile, dwEntityID, dwSourceID,
                    ctypes.byref(pSourceInfo),
                    ctypes.sizeof(pSourceInfo))

            if lazy:
                sptr = SpikeTrain(times,
                                  name=str(entityInfo.szEntityLabel),
                                  t_stop=0. * pq.s)
                sptr.lazy_shape = entityInfo.dwItemCount
            else:
                pdTimeStamp = ctypes.c_double(0.)
                dwDataBufferSize = (pdwSegmentInfo.dwMaxSampleCount
                                    * pdwSegmentInfo.dwSourceCount)
                pData = np.zeros((dwDataBufferSize), dtype='float64')
                pdwSampleCount = ctypes.c_uint32(0)
                pdwUnitID = ctypes.c_uint32(0)

                nsample = int(dwDataBufferSize)
                times = np.empty((entityInfo.dwItemCount), dtype='f')
                waveforms = np.empty(
                    (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                for dwIndex in range(entityInfo.dwItemCount):
                    neuroshare.ns_GetSegmentData(
                        hFile, dwEntityID, dwIndex,
                        ctypes.byref(pdTimeStamp),
                        pData.ctypes.data_as(
                            ctypes.POINTER(ctypes.c_double)),
                        dwDataBufferSize * 8,
                        ctypes.byref(pdwSampleCount),
                        ctypes.byref(pdwUnitID))

                    times[dwIndex] = pdTimeStamp.value
                    waveforms[dwIndex, :, :] = \
                        pData[:nsample * nsource].reshape(
                            nsample, nsource).transpose()

                sptr = SpikeTrain(
                    times=pq.Quantity(times, units='s', copy=False),
                    t_stop=times.max(),
                    waveforms=pq.Quantity(
                        waveforms, units=str(pdwSegmentInfo.szUnits),
                        copy=False),
                    left_sweep=(nsample / 2.
                                / float(pdwSegmentInfo.dSampleRate)) * pq.s,
                    sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                    name=str(entityInfo.szEntityLabel),
                )
            seg.spiketrains.append(sptr)

        # neuralevent
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':
            pNeuralInfo = ns_NEURALINFO()
            neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                        ctypes.byref(pNeuralInfo),
                                        ctypes.sizeof(pNeuralInfo))

            if lazy:
                times = [] * pq.s
                t_stop = 0 * pq.s
            else:
                pData = np.zeros((entityInfo.dwItemCount,),
                                 dtype='float64')
                dwStartIndex = 0
                dwIndexCount = entityInfo.dwItemCount
                neuroshare.ns_GetNeuralData(
                    hFile, dwEntityID, dwStartIndex, dwIndexCount,
                    pData.ctypes.data_as(
                        ctypes.POINTER(ctypes.c_double)))
                times = pData * pq.s
                t_stop = times.max()

            sptr = SpikeTrain(times, t_stop=t_stop,
                              name=str(entityInfo.szEntityLabel), )
            if lazy:
                sptr.lazy_shape = entityInfo.dwItemCount
            seg.spiketrains.append(sptr)

    # close
    neuroshare.ns_CloseFile(hFile)

    seg.create_many_to_one_relationship()
    return seg
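# Usage sketch (hypothetical vendor DLL and data file; the wrapping reader
# class name is assumed, not shown in this excerpt):
#
#     io = NeuroshareIO(filename='data.mcd', dllname='nsMCDLibrary.so')
#     seg = io.read_segment(import_neuroshare_segment=True)
#     print(len(seg.analogsignals), len(seg.spiketrains),
#           len(seg.eventarrays))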