def __init__(self, datfile, samplerate, numchan, dtype, recording_channels=None, frames_first=True, geom=None, offset=0, gain=None):
    """Build a recording extractor backed by a flat binary (.dat) file.

    The raw traces are memory-mapped/loaded via ``read_binary`` with
    ``numchan`` channels of ``dtype`` samples starting at byte ``offset``.
    ``recording_channels``, when given, must name one id per data row;
    ``geom`` (indexed ``geom[i, :]``) attaches a 'location' property per
    channel.
    """
    RecordingExtractor.__init__(self)
    self._datfile = Path(datfile)
    self._frame_first = frames_first
    self._dtype = str(dtype)
    self._timeseries = read_binary(self._datfile, numchan, dtype, frames_first, offset)
    self._samplerate = float(samplerate)
    self._gain = gain
    self._geom = geom
    n_rows = self._timeseries.shape[0]
    if recording_channels is None:
        # default: channel ids are simply 0..n_rows-1
        self._channels = list(range(n_rows))
    else:
        assert len(recording_channels) == n_rows, \
            'Provided recording channels have the wrong length'
        self._channels = recording_channels
    if geom is not None:
        for ch in range(n_rows):
            self.set_channel_property(ch, 'location', self._geom[ch, :])
def __init__(self, npx_file, x_pitch=None, y_pitch=None):
    """Build an extractor for a SpikeGLX Neuropixels AP-band binary file.

    Locates the matching '.meta' file in the same folder (matched on the
    file-name root plus the substrings 'meta' and 'ap'), parses channel
    counts, sampling rate, and electrode locations from it, then loads the
    traces and keeps only the first ``ap_chan`` rows.
    """
    RecordingExtractor.__init__(self)
    self._npxfile = Path(npx_file)
    numchan = 385
    dtype = 'int16'
    root = str(self._npxfile.stem).split('.')[0]
    # Look for the companion metafile next to the binary.
    candidates = [p for p in self._npxfile.parent.iterdir()
                  if 'meta' in str(p) and root in str(p) and 'ap' in str(p)]
    if not candidates:
        raise Exception(
            "'meta' file for ap traces should be in the same folder.")
    metafile = candidates[0]
    tot_chan, ap_chan, samplerate, locations = _parse_spikeglx_metafile(
        metafile, x_pitch, y_pitch)
    frames_first = True
    self._timeseries = read_binary(self._npxfile, tot_chan, dtype, frames_first, offset=0)
    self._samplerate = float(samplerate)
    # Keep only the AP rows when the file carries extra channels
    # (presumably sync/aux rows at the end — TODO confirm).
    if ap_chan < tot_chan:
        self._timeseries = self._timeseries[:ap_chan]
    n_rows = self._timeseries.shape[0]
    self._channels = list(range(n_rows))
    if len(locations) > 0:
        for ch in range(n_rows):
            self.set_channel_property(ch, 'location', locations[ch])
def __init__(self, file_path, sampling_frequency, numchan, dtype, recording_channels=None, time_axis=0, geom=None, offset=0, gain=None):
    """Build a recording extractor backed by a flat binary file.

    ``read_binary`` loads ``numchan`` channels of ``dtype`` samples from
    ``file_path`` starting at byte ``offset``, with ``time_axis`` selecting
    the data layout. Optional ``recording_channels`` supplies explicit
    channel ids (one per data row); optional ``geom`` (indexed
    ``geom[i, :]``) attaches a 'location' property to each channel.
    """
    RecordingExtractor.__init__(self)
    self._datfile = Path(file_path)
    self._time_axis = time_axis
    self._dtype = str(dtype)
    self._timeseries = read_binary(self._datfile, numchan, dtype, time_axis, offset)
    self._sampling_frequency = float(sampling_frequency)
    self._gain = gain
    self._geom = geom
    n_rows = self._timeseries.shape[0]
    if recording_channels is None:
        self._channels = list(range(n_rows))
    else:
        assert len(recording_channels) == n_rows, \
            'Provided recording channels have the wrong length'
        self._channels = recording_channels
    if geom is not None:
        for ch in range(n_rows):
            self.set_channel_property(ch, 'location', self._geom[ch, :])
def __init__(self, file_path, sampling_frequency, numchan, dtype, recording_channels=None, time_axis=0, geom=None, offset=0, gain=None, is_filtered=None):
    """Build a recording extractor backed by a flat binary file.

    Same loading behavior as the plain binary extractor, plus an
    ``is_filtered`` flag that is remembered for dumping, and a
    ``self._kwargs`` dict capturing the constructor arguments so the
    extractor can be re-instantiated later.
    """
    RecordingExtractor.__init__(self)
    self._datfile = Path(file_path)
    self._time_axis = time_axis
    self._dtype = str(dtype)
    self._sampling_frequency = float(sampling_frequency)
    self._gain = gain
    self._numchan = numchan
    self._geom = geom
    self._offset = offset
    self._timeseries = read_binary(self._datfile, numchan, dtype, time_axis, offset)
    # keep track of filter status when dumping
    self.is_filtered = False if is_filtered is None else is_filtered
    n_rows = self._timeseries.shape[0]
    if recording_channels is None:
        self._channels = list(range(n_rows))
    else:
        assert len(recording_channels) == n_rows, \
            'Provided recording channels have the wrong length'
        self._channels = recording_channels
    if geom is not None:
        for row, chan_id in enumerate(self._channels):
            self.set_channel_property(chan_id, 'location', self._geom[row, :])
    # Serialize the dtype as a plain string: "<class 'numpy.int16'>" -> "int16".
    dtype_str = str(dtype)
    if 'numpy' in dtype_str:
        dtype_str = dtype_str.replace("<class '", "").replace("'>", "")
        dtype_str = dtype_str.split('.')[1]
    self._kwargs = {
        'file_path': str(Path(file_path).absolute()),
        'sampling_frequency': sampling_frequency,
        'numchan': numchan,
        'dtype': dtype_str,
        'recording_channels': recording_channels,
        'time_axis': time_axis,
        'geom': geom,
        'offset': offset,
        'gain': gain,
        'is_filtered': is_filtered
    }
def __init__(self, file_path: PathType, sampling_frequency: float, numchan: int, dtype: DtypeType, time_axis: int = 0, recording_channels: Optional[list] = None, geom: Optional[ArrayType] = None, offset: Optional[float] = 0, gain: Optional[float] = None, gain_first: bool = True, is_filtered: Optional[bool] = None):
    """Build a recording extractor backed by a flat binary file.

    Parameters
    ----------
    file_path : PathType
        Path to the raw binary file.
    sampling_frequency : float
        Sampling frequency in Hz.
    numchan : int
        Number of channels stored in the file.
    dtype : DtypeType
        Sample dtype of the binary data.
    time_axis : int
        Axis layout passed to ``read_binary``.
    recording_channels : list, optional
        Explicit channel ids. May be a subset of the stored channels
        (length <= number of data rows; ids must be valid row indices).
    geom : ArrayType, optional
        Channel locations, forwarded to ``set_channel_locations``.
    offset : float, optional
        Byte offset at which the data start in the file — presumably an
        integer byte count; forwarded to ``read_binary``.
    gain : float, optional
        Scaling factor stored on the extractor.
    gain_first : bool
        Stored on the extractor as ``self._gain_first``.
    is_filtered : bool, optional
        Filter status, remembered for dumping (defaults to False).
    """
    RecordingExtractor.__init__(self)
    self._datfile = Path(file_path)
    self._time_axis = time_axis
    self._dtype = str(dtype)
    self._sampling_frequency = float(sampling_frequency)
    self._gain = gain
    self._numchan = numchan
    self._geom = geom
    self._offset = offset
    self._gain_first = gain_first
    self._timeseries = read_binary(self._datfile, numchan, dtype, time_axis, offset)
    # keep track of filter status when dumping
    if is_filtered is not None:
        self.is_filtered = is_filtered
    else:
        self.is_filtered = False
    if recording_channels is not None:
        assert len(recording_channels) <= self._timeseries.shape[0], \
            'Provided recording channels have the wrong length'
        self._channels = recording_channels
    else:
        self._channels = list(range(self._timeseries.shape[0]))
    # A partial channel list is allowed as long as every id indexes a real row.
    if len(self._channels) == self._timeseries.shape[0]:
        self._complete_channels = True
    else:
        assert max(self._channels) < self._timeseries.shape[0], "Channel ids exceed the number of " \
                                                                "available channels"
        self._complete_channels = False
    if geom is not None:
        self.set_channel_locations(self._geom)
    # Serialize the dtype as a plain string: "<class 'numpy.int16'>" -> "int16".
    if 'numpy' in str(dtype):
        dtype_str = str(dtype).replace("<class '", "").replace("'>", "")  # drop 'numpy
        dtype_str = dtype_str.split('.')[1]
    else:
        dtype_str = str(dtype)
    # FIX: 'gain_first' was missing from _kwargs, so dumping and re-loading
    # this extractor silently dropped a non-default gain_first value.
    self._kwargs = {'file_path': str(Path(file_path).absolute()), 'sampling_frequency': sampling_frequency,
                    'numchan': numchan, 'dtype': dtype_str, 'recording_channels': recording_channels,
                    'time_axis': time_axis, 'geom': geom, 'offset': offset, 'gain': gain,
                    'gain_first': gain_first, 'is_filtered': is_filtered}