def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Read a Block with one Segment and a ChannelIndex covering the
    selected channels.

    Parameters
    ----------
    lazy : bool
        Passed through to ``read_analogsignal``.
    cascade : bool
        If False, return an empty Block (only relationships are built).
    channel_index : int, iterable of int, or None
        Select one, many or all channel(s) respectively.
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUGFIX: test against None explicitly — ``if channel_index:``
        # treated channel 0 (falsy) as "not given" and loaded all channels.
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs["shape"][1])

        chx = ChannelIndex(name="all channels", index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy,
                                     cascade=cascade)
        ana.channel_index = chx
        # Duration = number of samples / sampling rate, as seconds.
        seg.duration = (self._attrs["shape"][0] /
                        self._attrs["kwik"]["sample_rate"]) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(self, lazy=False, cascade=True):
    """
    Read a Block holding a single Segment with the analog signals,
    tracking signals and spike trains found in the file.
    """
    block = Block()
    if cascade:
        segment = Segment(file_origin=self._absolute_filename)
        block.channel_indexes = self._channel_indexes
        block.segments += [segment]
        segment.analogsignals = self.read_analogsignal(lazy=lazy,
                                                       cascade=cascade)
        # Tracking data is optional; keep going if it cannot be read.
        try:
            segment.irregularlysampledsignals = self.read_tracking()
        except Exception as e:
            print('Warning: unable to read tracking')
            print(e)
        segment.spiketrains = self.read_spiketrain()
        # TODO Call all other read functions
        segment.duration = self._duration
        # TODO May need to "populate_RecordingChannel"
    block.create_many_to_one_relationship()
    return block
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Read a Block with one Segment and a ChannelIndex covering the
    selected channels.

    Parameters
    ----------
    lazy : bool
        Passed through to ``read_analogsignal``.
    cascade : bool
        If False, return an empty Block (only relationships are built).
    channel_index : int, iterable of int, or None
        Select one, many or all channel(s) respectively.
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUGFIX: test against None explicitly — ``if channel_index:``
        # treated channel 0 (falsy) as "not given" and loaded all channels.
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        chx = ChannelIndex(name='all channels', index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy,
                                     cascade=cascade)
        ana.channel_index = chx
        # Duration = number of samples / sampling rate, as seconds.
        seg.duration = (self._attrs['shape'][0] /
                        self._attrs['kwik']['sample_rate']) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Read a Block with one Segment and one RecordingChannelGroup; each
    selected channel yields a nested AnalogSignal and RecordingChannel.

    Parameters
    ----------
    lazy : bool
        Passed through to ``read_analogsignal``.
    cascade : bool
        If False, return an empty Block (only relationships are built).
    channel_index : int, iterable of int, or None
        Select one, many or all channel(s) respectively.
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUGFIX: test against None explicitly — ``if channel_index:``
        # treated channel 0 (falsy) as "not given" and loaded all channels.
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        rcg = RecordingChannelGroup(name='all channels',
                                    channel_indexes=channel_index)
        blk.recordingchannelgroups.append(rcg)

        for idx in channel_index:
            # read nested analogsignal
            ana = self.read_analogsignal(channel_index=idx,
                                         lazy=lazy,
                                         cascade=cascade,
                                         )
            chan = RecordingChannel(index=int(idx))
            seg.analogsignals += [ana]
            chan.analogsignals += [ana]
            rcg.recordingchannels.append(chan)

        # Duration = number of samples / sampling rate, as seconds.
        seg.duration = (self._attrs['shape'][0] /
                        self._attrs['kwik']['sample_rate']) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Read a Block with one Segment and one RecordingChannelGroup; each
    selected channel yields a nested AnalogSignal and RecordingChannel.

    Parameters
    ----------
    lazy : bool
        Passed through to ``read_analogsignal``.
    cascade : bool
        If False, return an empty Block (only relationships are built).
    channel_index : int, iterable of int, or None
        Select one, many or all channel(s) respectively.
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUGFIX: test against None explicitly — ``if channel_index:``
        # treated channel 0 (falsy) as "not given" and loaded all channels.
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs['shape'][1])

        rcg = RecordingChannelGroup(name='all channels',
                                    channel_indexes=channel_index)
        blk.recordingchannelgroups.append(rcg)

        for idx in channel_index:
            # read nested analogsignal
            ana = self.read_analogsignal(channel_index=idx,
                                         lazy=lazy,
                                         cascade=cascade,
                                         )
            chan = RecordingChannel(index=int(idx))
            seg.analogsignals += [ana]
            chan.analogsignals += [ana]
            rcg.recordingchannels.append(chan)

        # Duration = number of samples / sampling rate, as seconds.
        seg.duration = (self._attrs['shape'][0] /
                        self._attrs['kwik']['sample_rate']) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(self,
               lazy=False,
               cascade=True,
               channel_index=None,
               tracking=False,
               tracking_ttl_chan=None,
               stim_ttl_chan=None,
               ):
    """
    Read a Block with one Segment holding tracking signals and stimulus
    epochs.

    Parameters
    ----------
    lazy : bool
        If True, only the Segment duration is set; no data is loaded.
    cascade : bool
        If False, return an empty Block (only relationships are built).
    channel_index : int, iterable of int, or None
        Can be int, iterable or None to select one, many or all
        channel(s) respectively.  Currently unused — analog-signal
        reading is disabled in this reader.
    tracking : bool
        If True, read tracking data into irregularly sampled signals.
    tracking_ttl_chan : int or None
        TTL channel for tracking; when given, the tracking events are
        also attached to the Segment.
    stim_ttl_chan : int, iterable of int, or None
        TTL channel(s) carrying the stimulus; each yields one Epoch.

    # TODO multiple stimulus channels
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._path)
        blk.segments += [seg]

        # Duration = number of samples / sampling rate, as seconds.
        seg.duration = (self._attrs['shape'][0] /
                        self._attrs['kwe']['sample_rate']) * pq.s

        if not lazy:
            if tracking:
                if tracking_ttl_chan is not None:
                    events, irsigs = self._get_tracking(
                        channel=tracking_ttl_chan, conversion=1)
                    # BUGFIX: was ``seg.Events`` — Segment has no
                    # ``Events`` attribute, so ``+=`` raised
                    # AttributeError; the events container is ``events``.
                    seg.events += [events]
                else:
                    irsigs = self._get_tracking(channel=tracking_ttl_chan,
                                                conversion=1)
                for irsig in irsigs:
                    seg.irregularlysampledsignals += [irsig]

            if stim_ttl_chan is not None:
                # Accept a single channel or an iterable of channels.
                # Decide by iterability up front: the previous bare
                # ``except:`` also swallowed real errors raised inside
                # ``_get_stim`` and then retried with the whole argument.
                try:
                    channels = list(stim_ttl_chan)
                except TypeError:
                    channels = [stim_ttl_chan]
                for chan in channels:
                    epo = self._get_stim(channel=chan)
                    seg.epochs += [epo]

    # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
def read_block(self, lazy=False, cascade=True,
               get_waveforms=True,
               cluster_metadata='all',
               raw_data_units='uV',
               get_raw_data=False,
               ):
    """
    Reads a block with segments and channel_indexes

    Parameters:
    get_waveforms: bool, default = True
        Wether or not to get the waveforms
    get_raw_data: bool, default = False
        Wether or not to get the raw traces
    raw_data_units: str, default = "uV"
        SI units of the raw trace according to voltage_gain given to klusta
    cluster_metadata: str, default = "all"
        Which clusters to load, possibilities are "noise", "unsorted",
        "good", "all", if all is selected noise is omitted.
    """
    assert isinstance(cluster_metadata, str)
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self.filename)
        blk.segments += [seg]
        # One ChannelIndex per klusta channel group (shank).
        for model in self.models:
            group_id = model.channel_group
            group_meta = {'group_id': group_id}
            group_meta.update(model.metadata)
            chx = ChannelIndex(name='channel group #{}'.format(group_id),
                               index=model.channels,
                               **group_meta)
            blk.channel_indexes.append(chx)
            # NOTE: removed unused local ``clusters = model.spike_clusters``.
            for cluster_id in model.cluster_ids:
                meta = model.cluster_metadata[cluster_id]
                # "all" loads everything except noise; otherwise only
                # clusters whose metadata matches the requested group.
                if cluster_metadata == 'all':
                    if meta == 'noise':
                        continue
                elif cluster_metadata != meta:
                    continue
                sptr = self.read_spiketrain(cluster_id=cluster_id,
                                            model=model, lazy=lazy,
                                            cascade=cascade,
                                            get_waveforms=get_waveforms)
                sptr.annotations.update({'cluster_metadata': meta,
                                         'group_id': model.channel_group})
                sptr.channel_index = chx
                unit = Unit()
                unit.spiketrains.append(sptr)
                chx.units.append(unit)
                unit.channel_index = chx
                seg.spiketrains.append(sptr)
            if get_raw_data:
                ana = self.read_analogsignal(model, raw_data_units,
                                             lazy, cascade)
                ana.channel_index = chx
                seg.analogsignals.append(ana)
            # Set inside the loop so an empty ``self.models`` cannot
            # raise NameError; the last model's duration wins, as before.
            seg.duration = model.duration * pq.s
    blk.create_many_to_one_relationship()
    return blk
def read_block(
    self,
    lazy=False,
    get_waveforms=True,
    cluster_group=None,
    raw_data_units='uV',
    get_raw_data=False,
):
    """
    Reads a block with segments and channel_indexes

    Parameters:
    get_waveforms: bool, default = True
        Wether or not to get the waveforms
    get_raw_data: bool, default = False
        Wether or not to get the raw traces
    raw_data_units: str, default = "uV"
        SI units of the raw trace according to voltage_gain given to klusta
    cluster_group: str, default = None
        Which clusters to load, possibilities are "noise", "unsorted",
        "good", if None all is loaded.
    """
    assert not lazy, 'Do not support lazy'
    blk = Block()
    seg = Segment(file_origin=self.filename)
    blk.segments += [seg]
    # One ChannelIndex per klusta channel group (shank).
    for model in self.models:
        group_id = model.channel_group
        group_meta = {'group_id': group_id}
        group_meta.update(model.metadata)
        chx = ChannelIndex(name='channel group #{}'.format(group_id),
                           index=model.channels,
                           **group_meta)
        blk.channel_indexes.append(chx)
        # NOTE: removed unused local ``clusters = model.spike_clusters``.
        for cluster_id in model.cluster_ids:
            meta = model.cluster_metadata[cluster_id]
            # None loads every cluster; otherwise only clusters whose
            # metadata matches the requested group.
            if cluster_group is None:
                pass
            elif cluster_group != meta:
                continue
            sptr = self.read_spiketrain(cluster_id=cluster_id,
                                        model=model,
                                        get_waveforms=get_waveforms,
                                        raw_data_units=raw_data_units)
            sptr.annotations.update({
                'cluster_group': meta,
                'group_id': model.channel_group
            })
            sptr.channel_index = chx
            unit = Unit(cluster_group=meta,
                        group_id=model.channel_group,
                        name='unit #{}'.format(cluster_id))
            unit.spiketrains.append(sptr)
            chx.units.append(unit)
            unit.channel_index = chx
            seg.spiketrains.append(sptr)
        if get_raw_data:
            ana = self.read_analogsignal(model, units=raw_data_units)
            ana.channel_index = chx
            seg.analogsignals.append(ana)
        # Set inside the loop so an empty ``self.models`` cannot raise
        # NameError; the last model's duration wins, as before.
        seg.duration = model.duration * pq.s
    blk.create_many_to_one_relationship()
    return blk