    def __init__(self, stream_id=None, **neo_kwargs):

        _NeoBaseExtractor.__init__(self, **neo_kwargs)

        # check channel
        # TODO: propose a mechanism to select the appropriate channel groups
        # in neo, one channel group has the same dtype/sampling_rate/group_id
        # ~ channel_indexes_list = self.neo_reader.get_group_signal_channel_indexes()
        stream_channels = self.neo_reader.header['signal_streams']
        stream_ids = stream_channels['id']
        if stream_id is None:
            if stream_channels.size > 1:
                raise ValueError(f'This reader has several streams ({stream_ids}); specify one with stream_id=')
            else:
                stream_id = stream_ids[0]
        else:
            assert stream_id in stream_ids, f'stream_id {stream_id} is not in {stream_ids}'

        self.stream_index = list(stream_ids).index(stream_id)
        self.stream_id = stream_id

        # need neo 0.10.0
        signal_channels = self.neo_reader.header['signal_channels']
        mask = signal_channels['stream_id'] == stream_id
        signal_channels = signal_channels[mask]

        # check channel groups
        chan_ids = signal_channels['id']

        sampling_frequency = self.neo_reader.get_signal_sampling_rate(stream_index=self.stream_index)
        dtype = signal_channels['dtype'][0]
        BaseRecording.__init__(self, sampling_frequency, chan_ids, dtype)

        # find the gain to uV
        gains = signal_channels['gain']
        offsets = signal_channels['offset']

        units = signal_channels['units']
        if not np.all(np.isin(units, ['V', 'Volt', 'mV', 'uV'])):
            # warn when units are not V, mV or uV
            error = f'This extractor, based on neo.{self.NeoRawIOClass}, has unexpected units not in (V, mV, uV): {units}'
            print(error)
        additional_gain = np.ones(units.size, dtype='float')
        additional_gain[units == 'V'] = 1e6
        additional_gain[units == 'Volt'] = 1e6
        additional_gain[units == 'mV'] = 1e3
        additional_gain[units == 'uV'] = 1.

        final_gains = gains * additional_gain
        final_offsets = offsets * additional_gain

        self.set_property('gain_to_uV', final_gains)
        self.set_property('offset_to_uV', final_offsets)
        self.set_property('channel_name', signal_channels["name"])

        nseg = self.neo_reader.segment_count(block_index=0)
        for segment_index in range(nseg):
            rec_segment = NeoRecordingSegment(self.neo_reader, segment_index, self.stream_index)
            self.add_recording_segment(rec_segment)
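# --- Hedged standalone sketch (not part of the extractor above): the unit-to-uV
# conversion that the Neo-based example applies to per-channel gains/offsets.
# The unit strings and gain values below are made up for illustration.
import numpy as np

units = np.array(['V', 'mV', 'uV', 'uV'])
gains = np.array([1.0, 0.5, 0.195, 0.195])    # raw -> physical unit
offsets = np.zeros(units.size)

additional_gain = np.ones(units.size, dtype='float')
additional_gain[np.isin(units, ['V', 'Volt'])] = 1e6   # volts -> microvolts
additional_gain[units == 'mV'] = 1e3                   # millivolts -> microvolts
additional_gain[units == 'uV'] = 1.                    # already microvolts

gain_to_uV = gains * additional_gain
offset_to_uV = offsets * additional_gain
print(gain_to_uV)   # [1.00e+06 5.00e+02 1.95e-01 1.95e-01]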
Example #2
    def __init__(self, traces_list, sampling_frequency, channel_ids=None):
        if isinstance(traces_list, list):
            assert all(isinstance(e, np.ndarray)
                       for e in traces_list), 'must give a list of numpy arrays'
        else:
            assert isinstance(traces_list,
                              np.ndarray), 'must give a numpy array or a list of numpy arrays'
            traces_list = [traces_list]

        dtype = traces_list[0].dtype
        assert all(dtype == ts.dtype for ts in traces_list)

        if channel_ids is None:
            channel_ids = np.arange(traces_list[0].shape[1])
        else:
            channel_ids = np.asarray(channel_ids)
            assert channel_ids.size == traces_list[0].shape[1]
        BaseRecording.__init__(self, sampling_frequency, channel_ids, dtype)

        self.is_dumpable = False

        for traces in traces_list:
            rec_segment = NumpyRecordingSegment(traces)
            self.add_recording_segment(rec_segment)

        self._kwargs = {
            'traces_list': traces_list,
            'sampling_frequency': sampling_frequency,
        }
    def __init__(self, oldapi_recording_extractor):
        BaseRecording.__init__(
            self, oldapi_recording_extractor.get_sampling_frequency(),
            oldapi_recording_extractor.get_channel_ids(),
            oldapi_recording_extractor.get_dtype(return_scaled=False))

        # set is_dumpable to False to use dumping mechanism of old extractor
        self.is_dumpable = False
        self.annotate(is_filtered=oldapi_recording_extractor.is_filtered)

        # add old recording as a recording segment
        recording_segment = OldToNewRecordingSegment(
            oldapi_recording_extractor)
        self.add_recording_segment(recording_segment)
        self.set_channel_locations(
            oldapi_recording_extractor.get_channel_locations())

        # add old properties
        copy_properties(oldapi_extractor=oldapi_recording_extractor,
                        new_extractor=self,
                        skip_properties=["gain", "offset"])
        # set correct gains and offsets
        gains, offsets = find_old_gains_offsets_recursively(
            oldapi_recording_extractor.dump_to_dict())
        if gains is not None:
            if np.any(gains != 1):
                self.set_channel_gains(gains)
        if offsets is not None:
            if np.any(offsets != 0):
                self.set_channel_offsets(offsets)

        self._kwargs = {
            'oldapi_recording_extractor': oldapi_recording_extractor
        }
Example #4
    def __init__(self, traces_list, sampling_frequency, t_starts=None, channel_ids=None):
        if isinstance(traces_list, list):
            assert all(isinstance(e, np.ndarray) for e in traces_list), 'must give a list of numpy arrays'
        else:
            assert isinstance(traces_list, np.ndarray), 'must give a numpy array or a list of numpy arrays'
            traces_list = [traces_list]

        dtype = traces_list[0].dtype
        assert all(dtype == ts.dtype for ts in traces_list)

        if channel_ids is None:
            channel_ids = np.arange(traces_list[0].shape[1])
        else:
            channel_ids = np.asarray(channel_ids)
            assert channel_ids.size == traces_list[0].shape[1]
        BaseRecording.__init__(self, sampling_frequency, channel_ids, dtype)

        if t_starts is not None:
            assert len(t_starts) == len(traces_list), 't_starts must be a list of the same size as traces_list'
            t_starts = [float(t_start) for t_start in t_starts]

        self.is_dumpable = False

        for i, traces in enumerate(traces_list):
            if t_starts is None:
                t_start = None
            else:
                t_start = t_starts[i]
            rec_segment = NumpyRecordingSegment(traces, sampling_frequency, t_start)
            self.add_recording_segment(rec_segment)

        self._kwargs = {'traces_list': traces_list, 't_starts': t_starts,
                        'sampling_frequency': sampling_frequency,
                        }
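# --- Hedged usage sketch: this __init__ matches spikeinterface's NumpyRecording
# (the class name and import path are assumptions, not shown above).
# Two in-memory segments with different start times.
import numpy as np
from spikeinterface.core import NumpyRecording   # assumed import path

traces0 = np.random.randn(20_000, 4).astype('float32')   # 2 s at 10 kHz, 4 channels
traces1 = np.random.randn(10_000, 4).astype('float32')   # 1 s segment

recording = NumpyRecording(traces_list=[traces0, traces1],
                           sampling_frequency=10_000.,
                           t_starts=[0., 100.],
                           channel_ids=['ch0', 'ch1', 'ch2', 'ch3'])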
Example #5
 def __init__(self,
              folder_path,
              raw_fname='raw.mda',
              params_fname='params.json',
              geom_fname='geom.csv'):
     folder_path = Path(folder_path)
     self._folder_path = folder_path
     self._dataset_params = read_dataset_params(self._folder_path,
                                                params_fname)
     self._timeseries_path = self._folder_path / raw_fname
     geom = np.loadtxt(self._folder_path / geom_fname,
                       delimiter=',',
                       ndmin=2)
     self._diskreadmda = DiskReadMda(str(self._timeseries_path))
     dtype = self._diskreadmda.dt()
     num_channels = self._diskreadmda.N1()
     assert geom.shape[0] == self._diskreadmda.N1(), f'Incompatible dimensions between geom.csv and timeseries ' \
                                                     f'file: {geom.shape[0]} <> {self._diskreadmda.N1()}'
     BaseRecording.__init__(
         self,
         sampling_frequency=self._dataset_params['samplerate'] * 1.0,
         channel_ids=np.arange(num_channels),
         dtype=dtype)
     rec_segment = MdaRecordingSegment(self._diskreadmda)
     self.add_recording_segment(rec_segment)
     self.set_dummy_probe_from_locations(geom)
     self._kwargs = {
         'folder_path': str(Path(folder_path).absolute()),
         'raw_fname': raw_fname,
         'params_fname': params_fname,
         'geom_fname': geom_fname
     }
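# --- Hedged sketch of the on-disk layout the MDA reader above expects; the
# default file names come from its signature, and 'samplerate' is the only
# params.json key the constructor uses here. The folder name is hypothetical.
import json
import numpy as np
from pathlib import Path

folder = Path('mda_dataset')
folder.mkdir(exist_ok=True)

# params.json must provide the sampling rate in Hz
with open(folder / 'params.json', 'w') as f:
    json.dump({'samplerate': 30000}, f)

# geom.csv: one comma-separated row of coordinates per channel; the constructor
# asserts that its row count matches the channel count of raw.mda
geom = np.array([[0., 0.], [0., 25.], [0., 50.], [0., 75.]])
np.savetxt(folder / 'geom.csv', geom, delimiter=',')

# raw.mda itself must be written with an MDA writer (not shown here)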
    def __init__(self, recording, sampling_frequency=None, channel_ids=None, dtype=None):
        assert isinstance(recording, BaseRecording), "'recording' must be a RecordingExtractor"

        self._parent_recording = recording
        if sampling_frequency is None:
            sampling_frequency = recording.get_sampling_frequency()
        if channel_ids is None:
            channel_ids = recording.channel_ids
        if dtype is None:
            dtype = recording.get_dtype()

        BaseRecording.__init__(self, sampling_frequency, channel_ids, dtype)
        recording.copy_metadata(self, only_main=False, ids=None)
Example #7
 def __init__(self, raw_path: str, params: dict, geom):
     self._dataset_params = params
     self._timeseries_path = raw_path
     self._diskreadmda = DiskReadMda(str(self._timeseries_path))
     dtype = self._diskreadmda.dt()
     num_channels = self._diskreadmda.N1()
     sampling_frequency = float(self._dataset_params['samplerate'])
     BaseRecording.__init__(self, sampling_frequency=sampling_frequency,
                            channel_ids=np.arange(num_channels), dtype=dtype)
     rec_segment = MdaRecordingSegment(self._diskreadmda, sampling_frequency)
     self.add_recording_segment(rec_segment)
     self.set_dummy_probe_from_locations(np.array(geom))
     self._kwargs = {'raw_path': str(Path(raw_path).absolute()),
                     'params': params,
                     'geom': geom}
Example #8
    def __init__(self, oldapi_recording_extractor):
        BaseRecording.__init__(
            self, oldapi_recording_extractor.get_sampling_frequency(),
            oldapi_recording_extractor.get_channel_ids(),
            oldapi_recording_extractor.get_dtype())

        # get properties from old recording
        self.is_dumpable = oldapi_recording_extractor.is_dumpable
        self.annotate(is_filtered=oldapi_recording_extractor.is_filtered)

        # add old recording as a recording segment
        recording_segment = OldToNewRecordingSegment(
            oldapi_recording_extractor)
        self.add_recording_segment(recording_segment)
        self.set_channel_locations(
            oldapi_recording_extractor.get_channel_locations())
    def __init__(self,
                 file_path: PathType,
                 electrical_series_name: str = None):
        """
        Load an NWBFile as a RecordingExtractor.

        Parameters
        ----------
        file_path: path to the NWB file
        electrical_series_name: str, optional
            Name of the ElectricalSeries in acquisition to load. If None and the file
            contains a single acquisition, that one is used.
        """
        assert self.HAVE_NWB, self.installation_mesg
        self._file_path = str(file_path)
        with NWBHDF5IO(self._file_path, 'r') as io:
            nwbfile = io.read()
            if electrical_series_name is None:
                a_names = list(nwbfile.acquisition)
                if len(a_names) > 1:
                    raise ValueError(
                        "More than one acquisition found! You must specify 'electrical_series_name'."
                    )
                if len(a_names) == 0:
                    raise ValueError("No acquisitions found in the .nwb file.")
                electrical_series_name = a_names[0]
            es = nwbfile.acquisition[electrical_series_name]
            if hasattr(es, 'timestamps') and es.timestamps is not None:
                sampling_frequency = 1. / np.median(np.diff(es.timestamps))
                recording_start_time = es.timestamps[0]
            else:
                sampling_frequency = es.rate
                if hasattr(es, 'starting_time'):
                    recording_start_time = es.starting_time
                else:
                    recording_start_time = 0.

            num_frames = int(es.data.shape[0])
            num_channels = len(es.electrodes.data)

            # Channel gains - for RecordingExtractor, these are the values used to cast traces to uV
            if es.channel_conversion is not None:
                gains = es.conversion * es.channel_conversion[:] * 1e6
            else:
                gains = es.conversion * np.ones(num_channels) * 1e6
            # Extractors channel groups must be integers, but Nwb electrodes group_name can be strings
            if 'group_name' in nwbfile.electrodes.colnames:
                unique_grp_names = list(
                    np.unique(nwbfile.electrodes['group_name'][:]))

            # Fill channel properties dictionary from electrodes table
            channel_ids = [
                es.electrodes.table.id[x] for x in es.electrodes.data
            ]

            BaseRecording.__init__(self,
                                   channel_ids=channel_ids,
                                   sampling_frequency=sampling_frequency,
                                   dtype=es.data.dtype)
            recording_segment = NwbRecordingSegment(
                path=self._file_path,
                electrical_series_name=electrical_series_name,
                num_frames=num_frames)
            self.add_recording_segment(recording_segment)

            # If gains are not 1, set channel gains (traces can then be scaled to uV)
            if np.any(gains != 1):
                self.set_channel_gains(gains)

            # Add properties
            properties = dict()
            for es_ind, (channel_id, electrode_table_index) in enumerate(
                    zip(channel_ids, es.electrodes.data)):
                if 'rel_x' in nwbfile.electrodes:
                    if 'location' not in properties:
                        properties['location'] = np.zeros(
                            (self.get_num_channels(), 2), dtype=float)
                    properties['location'][
                        es_ind,
                        0] = nwbfile.electrodes['rel_x'][electrode_table_index]
                    if 'rel_y' in nwbfile.electrodes:
                        properties['location'][es_ind, 1] = nwbfile.electrodes[
                            'rel_y'][electrode_table_index]

                for col in nwbfile.electrodes.colnames:
                    if isinstance(
                            nwbfile.electrodes[col][electrode_table_index],
                            ElectrodeGroup):
                        continue
                    elif col == 'group_name':
                        group = unique_grp_names.index(
                            nwbfile.electrodes[col][electrode_table_index])
                        if 'group' not in properties:
                            properties['group'] = np.zeros(
                                self.get_num_channels(), dtype=type(group))
                        properties['group'][es_ind] = group
                    elif col == 'location':
                        brain_area = nwbfile.electrodes[col][
                            electrode_table_index]
                        if 'brain_area' not in properties:
                            properties['brain_area'] = np.zeros(
                                self.get_num_channels(),
                                dtype=type(brain_area))
                        properties['brain_area'][es_ind] = brain_area
                    elif col == 'offset':
                        offset = nwbfile.electrodes[col][electrode_table_index]
                        if 'offset' not in properties:
                            properties['offset'] = np.zeros(
                                self.get_num_channels(), dtype=type(offset))
                        properties['offset'][es_ind] = offset
                    elif col in ['x', 'y', 'z', 'rel_x', 'rel_y']:
                        continue
                    else:
                        val = nwbfile.electrodes[col][electrode_table_index]
                        if col not in properties:
                            properties[col] = np.zeros(self.get_num_channels(),
                                                       dtype=type(val))
                        properties[col][es_ind] = val

            for prop_name, values in properties.items():
                if prop_name == "location":
                    self.set_dummy_probe_from_locations(values)
                elif prop_name == "group":
                    self.set_channel_groups(values)
                else:
                    self.set_property(prop_name, values)

            self._kwargs = {
                'file_path': str(Path(file_path).absolute()),
                'electrical_series_name': electrical_series_name
            }
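# --- Hedged standalone sketch: how the NWB examples derive gain_to_uV from
# ElectricalSeries.conversion (volts per raw unit) and the optional per-channel
# channel_conversion. The numeric values are made up for illustration.
import numpy as np

conversion = 1.95e-7                                    # volts per raw unit
channel_conversion = np.array([1.0, 1.0, 0.5, 2.0])     # per-channel factor, may be None
num_channels = 4

if channel_conversion is not None:
    gains = conversion * channel_conversion * 1e6       # -> microvolts per raw unit
else:
    gains = conversion * np.ones(num_channels) * 1e6
print(gains)   # [0.195  0.195  0.0975 0.39 ]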
Example #10
    def __init__(self,
                 file_path: PathType,
                 electrical_series_name: str = None,
                 load_time_vector: bool = False,
                 samples_for_rate_estimation: int = 100000):
        check_nwb_install()
        self._file_path = str(file_path)
        self._electrical_series_name = electrical_series_name

        io = NWBHDF5IO(self._file_path, mode='r', load_namespaces=True)
        self._nwbfile = io.read()
        self._es = get_electrical_series(self._nwbfile,
                                         self._electrical_series_name)

        sampling_frequency = None
        if hasattr(self._es, 'rate'):
            sampling_frequency = self._es.rate

        if hasattr(self._es, 'starting_time'):
            t_start = self._es.starting_time
        else:
            t_start = None

        timestamps = None
        if hasattr(self._es, 'timestamps'):
            if self._es.timestamps is not None:
                timestamps = self._es.timestamps
                t_start = self._es.timestamps[0]

        # if rate is unknown, estimate from timestamps
        if sampling_frequency is None:
            assert timestamps is not None, "Could not find rate information as both 'rate' and "\
                                           "'timestamps' are missing from the file. "\
                                           "Use the 'sampling_frequency' argument."
            sampling_frequency = 1. / np.median(
                np.diff(timestamps[:samples_for_rate_estimation]))

        if load_time_vector and timestamps is not None:
            times_kwargs = dict(time_vector=self._es.timestamps)
        else:
            times_kwargs = dict(sampling_frequency=sampling_frequency,
                                t_start=t_start)

        num_frames = int(self._es.data.shape[0])
        num_channels = len(self._es.electrodes.data)

        # Channel gains - for RecordingExtractor, these are the values used to cast traces to uV
        if self._es.channel_conversion is not None:
            gains = self._es.conversion * self._es.channel_conversion[:] * 1e6
        else:
            gains = self._es.conversion * np.ones(num_channels) * 1e6
        # Extractors channel groups must be integers, but Nwb electrodes group_name can be strings
        if 'group_name' in self._nwbfile.electrodes.colnames:
            unique_grp_names = list(
                np.unique(self._nwbfile.electrodes['group_name'][:]))

        # Fill channel properties dictionary from electrodes table
        channel_ids = [
            self._es.electrodes.table.id[x] for x in self._es.electrodes.data
        ]

        dtype = self._es.data.dtype

        BaseRecording.__init__(self,
                               channel_ids=channel_ids,
                               sampling_frequency=sampling_frequency,
                               dtype=dtype)
        recording_segment = NwbRecordingSegment(
            nwbfile=self._nwbfile,
            electrical_series_name=self._electrical_series_name,
            num_frames=num_frames,
            times_kwargs=times_kwargs)
        self.add_recording_segment(recording_segment)

        # If gains are not 1, set channel gains (traces can then be scaled to uV)
        if np.any(gains != 1):
            self.set_channel_gains(gains)

        # Add properties
        properties = dict()
        for es_ind, (channel_id, electrode_table_index) in enumerate(
                zip(channel_ids, self._es.electrodes.data)):
            if 'rel_x' in self._nwbfile.electrodes:
                if 'location' not in properties:
                    properties['location'] = np.zeros(
                        (self.get_num_channels(), 2), dtype=float)
                properties['location'][es_ind, 0] = self._nwbfile.electrodes[
                    'rel_x'][electrode_table_index]
                if 'rel_y' in self._nwbfile.electrodes:
                    properties['location'][es_ind,
                                           1] = self._nwbfile.electrodes[
                                               'rel_y'][electrode_table_index]

            for col in self._nwbfile.electrodes.colnames:
                if isinstance(
                        self._nwbfile.electrodes[col][electrode_table_index],
                        ElectrodeGroup):
                    continue
                elif col == 'group_name':
                    group = unique_grp_names.index(
                        self._nwbfile.electrodes[col][electrode_table_index])
                    if 'group' not in properties:
                        properties['group'] = np.zeros(self.get_num_channels(),
                                                       dtype=type(group))
                    properties['group'][es_ind] = group
                elif col == 'location':
                    brain_area = self._nwbfile.electrodes[col][
                        electrode_table_index]
                    if 'brain_area' not in properties:
                        properties['brain_area'] = np.zeros(
                            self.get_num_channels(), dtype=type(brain_area))
                    properties['brain_area'][es_ind] = brain_area
                elif col == 'offset':
                    offset = self._nwbfile.electrodes[col][
                        electrode_table_index]
                    if 'offset' not in properties:
                        properties['offset'] = np.zeros(
                            self.get_num_channels(), dtype=type(offset))
                    properties['offset'][es_ind] = offset
                elif col in ['x', 'y', 'z', 'rel_x', 'rel_y']:
                    continue
                else:
                    val = self._nwbfile.electrodes[col][electrode_table_index]
                    if col not in properties:
                        properties[col] = np.zeros(self.get_num_channels(),
                                                   dtype=type(val))
                    properties[col][es_ind] = val

        for prop_name, values in properties.items():
            if prop_name == "location":
                self.set_dummy_probe_from_locations(values)
            elif prop_name == "group":
                if np.isscalar(values):
                    groups = [values] * len(channel_ids)
                else:
                    groups = values
                self.set_channel_groups(groups)
            else:
                self.set_property(prop_name, values)

        self._kwargs = {
            'file_path': str(Path(file_path).absolute()),
            'electrical_series_name': self._electrical_series_name,
            'load_time_vector': load_time_vector,
            'samples_for_rate_estimation': samples_for_rate_estimation
        }
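# --- Hedged standalone sketch: the sampling-rate fallback used above when the
# ElectricalSeries carries timestamps but no 'rate'. Timestamps are synthetic.
import numpy as np

samples_for_rate_estimation = 100_000
timestamps = np.arange(250_000) / 30_000. + 12.3   # fake 30 kHz clock starting at 12.3 s

sampling_frequency = 1. / np.median(np.diff(timestamps[:samples_for_rate_estimation]))
t_start = timestamps[0]
print(round(sampling_frequency), t_start)           # 30000 12.3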