def _read_recordingchannelgroup(self, node, parent):
    # todo: handle Units
    """Convert an HDF5 'RecordingChannelGroup' node into a ChannelIndex.

    Parameters
    ----------
    node : HDF5 group representing a RecordingChannelGroup.
    parent : the Block object the resulting ChannelIndex belongs to.

    Returns
    -------
    ChannelIndex with ``units`` populated and ``block`` set to *parent*.
    Construction of the actual index is deferred until all
    RecordingChannelGroup nodes have been processed.

    Raises
    ------
    MergeError
        If the node has both a 'channel_indexes' attribute and child
        RecordingChannel objects.
    NotImplementedError
        If the node carries a 'channel_indexes' attribute; handling of
        its 'analogsignalarrays' is not written yet.
    """
    attributes = self._get_standard_attributes(node)
    channel_indexes = node["channel_indexes"].value
    # NOTE(review): read but currently unused -- presumably intended for
    # the deferred index construction; confirm before removing.
    channel_names = node["channel_names"].value
    if channel_indexes.size:
        if len(node['recordingchannels']):
            raise MergeError(
                "Cannot handle a RecordingChannelGroup which both has a "
                "'channel_indexes' attribute and contains "
                "RecordingChannel objects")
        # need to handle node['analogsignalarrays']
        raise NotImplementedError(
            "Reading a RecordingChannelGroup with a 'channel_indexes' "
            "attribute (including its 'analogsignalarrays') is not "
            "implemented yet")
    else:
        # Only child nodes whose names mark them as RecordingChannels.
        channels = [
            self._read_recordingchannel(child_node)
            for name, child_node in node['recordingchannels'].items()
            if "RecordingChannel" in name]
        channel_index = ChannelIndex(None, **attributes)
        channel_index._channels = channels
        # construction of the index is deferred until we have processed
        # all RecordingChannelGroup nodes
        channel_index.units = [
            self._read_unit(child_node, parent=channel_index)
            for name, child_node in node['units'].items()
            if "Unit" in name]
        channel_index.block = parent
        return channel_index
def _read_recordingchannelgroup(self, node, parent):
    # todo: handle Units
    """Build a ChannelIndex from an HDF5 RecordingChannelGroup node.

    Rejects nodes that mix a 'channel_indexes' attribute with child
    RecordingChannel objects; attribute-based groups are not yet
    supported. Otherwise collects the child RecordingChannels and
    Units into a new ChannelIndex attached to *parent*.
    """
    attrs = self._get_standard_attributes(node)
    chan_indexes = node["channel_indexes"].value
    chan_names = node["channel_names"].value
    if chan_indexes.size:
        if len(node['recordingchannels']):
            raise MergeError(
                "Cannot handle a RecordingChannelGroup which both has a "
                "'channel_indexes' attribute and contains "
                "RecordingChannel objects")
        # need to handle node['analogsignalarrays']
        raise NotImplementedError("todo")
    rec_channels = []
    for child_name, child in node['recordingchannels'].items():
        if "RecordingChannel" in child_name:
            rec_channels.append(self._read_recordingchannel(child))
    result = ChannelIndex(None, **attrs)
    result._channels = rec_channels
    # construction of the index is deferred until we have processed
    # all RecordingChannelGroup nodes
    unit_objs = [self._read_unit(child, parent=result)
                 for child_name, child in node['units'].items()
                 if "Unit" in child_name]
    result.units = unit_objs
    result.block = parent
    return result
def read_block(self, lazy=False, cascade=True, t_starts=None,
               t_stops=None, electrode_list=None, unit_list=None,
               analogsignals=True, events=False, waveforms=False):
    """
    Reads data in a requested time window and returns block with a
    single segment containing these data.

    Arguments:
        lazy : Postpone actual reading of the data files.
            Default 'False'.
        cascade : Do not postpone reading subsequent neo types
            (segments). Default 'True'.
        t_starts : list of quantities or quantity describing the start
            of the requested time window to load. If None or [None]
            the complete session is loaded. Default 'None'.
        t_stops : list of quantities or quantity describing the end of
            the requested time window to load. Has to contain the same
            number of values as t_starts. If None or [None] the
            complete session is loaded. Default 'None'.
        electrode_list : list of integers containing the IDs of the
            requested channels/electrodes to load. If [] or None all
            available channels will be loaded. If False, no channel
            will be loaded. Default: None.
        unit_list : list of integers containing the IDs of the
            requested units to load. If [] all available units will be
            loaded. Default: None.
        analogsignals : Load analog signals in the given time window.
            Default: True.
        events : Loading events. If True all available events in the
            given time window will be read. Default: False.
        waveforms : Load waveforms for spikes in the requested time
            window. Default: False.

    Returns:
        Block object containing the requested data in neo structures.

    Usage:
        from neo import io
        import quantities as pq
        import matplotlib.pyplot as plt

        session_folder = '../Data/2014-07-24_10-31-02'
        NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
        block = NIO.read_block(lazy=False, cascade=True,
                               t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                               electrode_list=[1, 5, 10],
                               unit_list=[1, 2, 3],
                               events=True, waveforms=True)
    """
    # Load the neo block via the parent NeuralynxIO implementation.
    block = neo.io.NeuralynxIO.read_block(
        self, lazy=lazy, cascade=cascade,
        t_starts=t_starts, t_stops=t_stops,
        electrode_list=electrode_list, unit_list=unit_list,
        analogsignals=analogsignals, events=events,
        waveforms=waveforms)

    # TODO: odML <-> data files consistency checks? Low priority

    # Annotate the block with odML metadata when an odML file is
    # available: one ChannelIndex per recording area, flagged with its
    # electroporation status and expressed construct (if any).
    if self.odML_avail:
        # TODO: add electroporation area to recording channel group
        area_dict = self.get_electrodes_by_area()
        electroporated_areas = self.get_electroporation()

        for area, channels in area_dict.items():
            electroporated, expression = False, None
            if area in electroporated_areas:
                electroporated = True
                expression = electroporated_areas[area]
            chidx = ChannelIndex(
                name='%s channels' % area,
                channel_indexes=channels,
                channel_names=['channel %i' % i for i in channels],
                electroporated=electroporated,
                expression=expression)
            chidx.block = block
            block.channel_indexes.append(chidx)

    # ########### Annotate information of 'Recording' Section ############
    # # Annotate Amplifier Information
    # amp_properties = ['LowpassCutoff','HighpassCutoff','SamplingRate']
    # ff = lambda x: x.name in amp_properties and 'Amplifier' in x.parent.get_path()
    # pobj = {p.name:p.value.data for p in self.odML_doc.iterproperties(filter_func=ff)}
    # block.annotate(amplifier= pobj)
    #
    # # Consistency Check with Analogsignal Sampling Rate
    # if any([pobj['SamplingRate'] != asa.annotations['SamplingFrequency']
    #         for asa in block.segments[0].analogsignalarrays]):
    #     raise ValueError('Inconsistent sampling rates detected in odml'
    #                      ' and original data files (%s / %s)'%(
    #                       pobj['SamplingRate'],
    #                       [asa.annotations['SamplingFrequency'] for asa in
    #                        block.segments[0].analogsignalarray]))
    #
    # # Annotate different Recording Areas
    # # Extracting Recording Area sections
    # ff = lambda x: 'RecordingArea' in x.name and 'Probe' in x.sections
    # recording_secs = [p for p in self.odML_doc.itersections(filter_func=ff)]
    # rec_properties = ['Hemisphere','Probe ID','Channels',
    #                   'SpikingChannels','BrokenChannels','Quality']
    # ff2 = lambda x: x.name in rec_properties
    # area_dict = {}
    # for recording_sec in recording_secs:
    #     # extracting specific properties of each recording area section
    #     area_dict[recording_sec.name] = {a.name:a.value.data for a in
    #                                      recording_sec.iterproperties(filter_func=ff2)}
    #     # adding two 'area' properties manually as they have the same name
    #     area_dict[recording_sec.name]['RecordingArea'] = \
    #         recording_sec.properties['Area'].value.data
    #     area_dict[recording_sec.name]['ReferenceArea'] = \
    #         recording_sec.get_property_by_path('Reference:Area').value.data
    # block.annotate(recordingareas=area_dict)

    return block