def __init__(self, parent_sorting, curation_steps=None):
        SortingExtractor.__init__(self)
        self._parent_sorting = parent_sorting
        self._original_unit_ids = list(np.copy(parent_sorting.get_unit_ids()))
        self._all_ids = list(np.copy(parent_sorting.get_unit_ids()))
        self._sampling_frequency = parent_sorting.get_sampling_frequency()
        self.set_tmp_folder(parent_sorting.get_tmp_folder())

        # Create and store roots with original unit ids and cached spiketrains
        self._roots = []
        for unit_id in self._original_unit_ids:
            root = Unit(unit_id)
            root.set_spike_train(parent_sorting.get_unit_spike_train(unit_id))
            self._roots.append(root)
        '''
        Copies over properties and spike features from parent_sorting.
        Only spike features are preserved through merges and splits; properties
        cannot be resolved in these cases.
        '''
        self.copy_unit_properties(parent_sorting)
        self.copy_unit_spike_features(parent_sorting)
        self.copy_epochs(parent_sorting)
        self.copy_times(parent_sorting)

        # _kwargs records the curation steps exactly as passed in;
        # self.curation_steps is rebuilt below as the steps are replayed
        self._kwargs = {
            'parent_sorting': parent_sorting.make_serialized_dict(),
            'curation_steps': curation_steps
        }

        self.curation_steps = []
        if curation_steps is not None:
            assert isinstance(
                curation_steps, list
            ), "curation_steps must be a list of previous curation commands"
            for curation_step in curation_steps:
                command, arguments = curation_step
                if command == 'exclude_units':
                    assert len(
                        arguments
                    ) == 1, "Length of arguments must be 1 for exclude_units"
                    unit_ids = arguments[0]
                    self.exclude_units(unit_ids=unit_ids)
                elif command == 'merge_units':
                    assert len(
                        arguments
                    ) == 1, "Length of arguments must be 1 for merge_units"
                    unit_ids = arguments[0]
                    self.merge_units(unit_ids=unit_ids)
                elif command == 'split_unit':
                    assert len(
                        arguments
                    ) == 2, "Length of arguments must be 2 for split_unit"
                    unit_id = arguments[0]
                    indices = arguments[1]

                    self.split_unit(unit_id=unit_id, indices=indices)
                else:
                    raise ValueError(
                        "{} is not a valid curation command".format(command))
Example #2
 def __init__(self, sorting, save_path=None):
     SortingExtractor.__init__(
         self)  # init tmp folder before constructing NpzSortingExtractor
     tmp_folder = self.get_tmp_folder()
     self._sorting = sorting
     if save_path is None:
         self._is_tmp = True
         self._tmp_file = tempfile.NamedTemporaryFile(suffix=".npz",
                                                      dir=tmp_folder).name
     else:
         save_path = Path(save_path)
         if save_path.suffix != '.npz':
             save_path = save_path.with_suffix('.npz')
         if not save_path.parent.is_dir():
             os.makedirs(save_path.parent)
         self._is_tmp = False
         self._tmp_file = save_path
     NpzSortingExtractor.write_sorting(self._sorting, self._tmp_file)
     NpzSortingExtractor.__init__(self, self._tmp_file)
     # keep Npz kwargs
     self._npz_kwargs = deepcopy(self._kwargs)
     self.set_tmp_folder(tmp_folder)
     self.copy_unit_properties(sorting)
     self.copy_unit_spike_features(sorting)
     self._kwargs = {'sorting': sorting}
Example #3
    def __init__(self, folder_path, chan_grp=None):
        assert HAVE_TDC, self.installation_mesg
        tdc_folder = Path(folder_path)
        SortingExtractor.__init__(self)

        dataio = tdc.DataIO(str(tdc_folder))
        if chan_grp is None:
            # if chan_grp is not provided, take the first one if unique
            chan_grps = list(dataio.channel_groups.keys())
            assert len(
                chan_grps
            ) == 1, 'There are several groups in the folder, specify chan_grp=...'
            chan_grp = chan_grps[0]

        self.chan_grp = chan_grp

        catalogue = dataio.load_catalogue(name='initial', chan_grp=chan_grp)

        labels = catalogue['clusters']['cluster_label']
        labels = labels[labels >= 0]
        self._unit_ids = list(labels)
        # load all spikes into memory (this avoids locking the folder with a memmap through dataio)
        self._all_spikes = dataio.get_spikes(seg_num=0,
                                             chan_grp=self.chan_grp,
                                             i_start=None,
                                             i_stop=None).copy()

        self._sampling_frequency = dataio.sample_rate
        self._kwargs = {
            'folder_path': str(Path(folder_path).absolute()),
            'chan_grp': chan_grp
        }
Example #4
    def __init__(self, spikes_matfile_path: PathType):
        SortingExtractor.__init__(self)

        spikes_matfile_path = Path(spikes_matfile_path)
        assert spikes_matfile_path.is_file(), \
            f"The spikes_matfile_path ({spikes_matfile_path}) must exist!"
        folder_path = spikes_matfile_path.parent
        sorting_id = spikes_matfile_path.name.split(".")[0]

        session_info_matfile_path = folder_path / f"{sorting_id}.sessionInfo.mat"
        assert session_info_matfile_path.is_file(), \
            "No sessionInfo.mat file found in the folder!"
        session_info_mat = loadmat(
            file_name=str(session_info_matfile_path.absolute()))
        assert session_info_mat['sessionInfo']['rates'][0][0]['wideband'], \
            "The sessionInfo.mat file must contain a 'sessionInfo' struct " \
            "with field 'rates' containing field 'wideband'!"
        # careful not to confuse this with lfpsamplingrate; reported in Hz
        self._sampling_frequency = float(
            session_info_mat['sessionInfo']['rates'][0][0]['wideband'][0][0][0][0]
        )

        spikes_mat = loadmat(file_name=str(spikes_matfile_path.absolute()))
        assert np.all(np.isin(['UID', 'times'], spikes_mat['spikes'].dtype.names)), \
            "The spikes.cellinfo.mat file must contain a 'spikes' struct with fields 'UID' and 'times'!"

        self._unit_ids = np.asarray(spikes_mat['spikes']['UID'][0][0][0],
                                    dtype=int)
        # CellExplorer reports spike times in seconds; SpikeExtractors uses sampling frames.
        # Rounding is necessary to prevent data loss from int-casting floating-point errors.
        self._spiketrains = [(np.array([y[0] for y in x]) *
                              self._sampling_frequency).round().astype(int)
                             for x in spikes_mat['spikes']['times'][0][0][0]]

        self._kwargs = dict(
            spikes_matfile_path=str(spikes_matfile_path.absolute()))
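A quick numeric sketch of why the .round() above matters (the 20000 Hz rate is assumed for illustration):

import numpy as np
fs = 20000.0                         # assumed wideband rate, Hz
times_s = np.array([1.5, 1 - 0.9])   # 1 - 0.9 == 0.09999999999999998
(times_s * fs).astype(int)           # -> [30000, 1999]: truncation drops a frame
(times_s * fs).round().astype(int)   # -> [30000, 2000]: rounding recovers it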
Example #5
    def __init__(self, phy_folder):
        SortingExtractor.__init__(self)
        phy_folder = Path(phy_folder)

        spike_times = np.load(phy_folder / 'spike_times.npy')
        spike_templates = np.load(phy_folder / 'spike_templates.npy')
        amplitudes = np.load(phy_folder / 'amplitudes.npy')
        pc_features = np.load(phy_folder / 'pc_features.npy')

        if (phy_folder / 'spike_clusters.npy').is_file():
            spike_clusters = np.load(phy_folder / 'spike_clusters.npy')
        else:
            spike_clusters = spike_templates

        self._spiketrains = []
        self._amps = []
        self._pc_features = []
        clust_id = np.unique(spike_clusters)
        self._unit_ids = list(clust_id)
        spike_times = spike_times.astype(int)  # astype returns a copy; keep the result

        for clust in self._unit_ids:
            idx = np.where(spike_clusters == clust)[0]
            self._spiketrains.append(spike_times[idx])
            self._amps.append(amplitudes[idx])
            self._pc_features.append(pc_features[idx])

        # set features
        for u_i, unit in enumerate(self.getUnitIds()):
            self.setUnitSpikeFeatures(unit, 'amplitudes', self._amps[u_i])
            self.setUnitSpikeFeatures(unit, 'pc_features',
                                      self._pc_features[u_i])
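A sketch of a minimal Phy-style folder this reader would accept, using only the file names loaded above (the array contents are made up; spike_clusters.npy is optional and falls back to spike_templates):

import numpy as np
from pathlib import Path

folder = Path('phy_example')
folder.mkdir(exist_ok=True)
np.save(folder / 'spike_times.npy', np.array([10, 50, 90, 130]))
np.save(folder / 'spike_templates.npy', np.array([0, 1, 0, 1]))
np.save(folder / 'amplitudes.npy', np.ones(4))
np.save(folder / 'pc_features.npy', np.zeros((4, 3, 4)))
# no spike_clusters.npy here: the extractor falls back to spike_templates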
Example #6
    def __init__(self, exdir_file, sample_rate=None):
        exdir, pq = _load_required_modules()

        SortingExtractor.__init__(self)
        self._exdir_file = exdir_file
        exdir_group = exdir.File(exdir_file, plugins=exdir.plugins.quantities)

        if 'acquisition' in exdir_group.keys():
            if 'timeseries' in exdir_group['acquisition'].keys():
                sample_rate = exdir_group['acquisition']['timeseries'].attrs['sample_rate']
        else:
            if sample_rate is None:
                raise Exception("Provide 'sample_rate' argument (Hz)")
            else:
                sample_rate = sample_rate * pq.Hz

        electrophysiology = exdir_group['processing']['electrophysiology']
        self._unit_ids = []
        current_unit = 1
        self._spike_trains = []
        for chan_name, channel in electrophysiology.items():
            group = int(chan_name.split('_')[-1])
            for units, unit_times in channel['UnitTimes'].items():
                self._unit_ids.append(current_unit)
                self._spike_trains.append((unit_times['times'].data.rescale('s')*sample_rate).magnitude)
                self.setUnitProperty(current_unit, 'group', group)
                current_unit += 1
Example #7
    def __init__(self, firings_file):
        SortingExtractor.__init__(self)
        self._firings_path = firings_file

        self._firings = readmda(self._firings_path)
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
Example #8
 def __init__(self, recording_file):
     SortingExtractor.__init__(self)
     self._recording_file = recording_file
     self._rf = h5py.File(self._recording_file, mode='r')
     self._unit_ids = set(self._rf['cluster_id'][()])  # h5py 3 removed .value; [()] reads the dataset
     if 'centres' in self._rf.keys():
         self._unit_locs = self._rf['centres'][()]  # cache for faster access
Example #9
 def __init__(self, recording_path):
     SortingExtractor.__init__(self)
     self._recording_path = recording_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
Example #10
    def __init__(self, h5_path):
        SortingExtractor.__init__(self)
        self._h5_path = h5_path
        self._loaded_spike_trains = {}

        with h5py.File(self._h5_path, 'r') as f:
            self._unit_ids = np.array(f.get('unit_ids'))
            self._sampling_frequency = np.array(f.get('sampling_frequency'))[0]
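Based only on the two datasets read above, a compatible file can be sketched like this (dataset names come from the code; the values are assumptions):

import h5py
import numpy as np

with h5py.File('sorting.h5', 'w') as f:
    f.create_dataset('unit_ids', data=np.array([1, 2, 3]))
    # stored as a length-1 array because the reader indexes [0]
    f.create_dataset('sampling_frequency', data=np.array([30000.0]))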
Example #11
 def __init__(self, recording_file, *, experiment_id=0, recording_id=0):
     assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
     SortingExtractor.__init__(self)
     self._recording_file = recording_file
     self._recording = pyopenephys.File(
         recording_file).experiments[experiment_id].recordings[recording_id]
     self._spiketrains = self._recording.spiketrains
     self._unit_ids = list(
         [np.unique(st.clusters)[0] for st in self._spiketrains])
Example #12
 def __init__(self, file_path):
     SortingExtractor.__init__(self)
     self._recording_path = file_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
Example #13
 def __init__(self, file_path):
     assert self.installed, self.installation_mesg
     SortingExtractor.__init__(self)
     self._file_path = file_path
     self._filehandle = None
     self._mapping = None
     self._version = None
     self._initialize()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
Example #14
 def __init__(self, file_path):
     SortingExtractor.__init__(self)
     self._file = nix.File.open(file_path, nix.FileMode.ReadOnly)
     md = self._file.sections
     if "sampling_frequency" in md:
         sfreq = md["sampling_frequency"]
         self._sampling_frequency = sfreq
     self._load_properties()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
Example #15
 def __init__(self, recording_path):
     neo, pq, h5py, yaml = _load_required_modules()
     SortingExtractor.__init__(self)
     self._recording_path = recording_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
Example #16
    def __init__(self, kwik_file_or_folder):
        assert HAVE_KLSX, "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n"
        SortingExtractor.__init__(self)
        kwik_file_or_folder = Path(kwik_file_or_folder)
        kwikfile = None
        klustafolder = None
        if kwik_file_or_folder.is_file():
            assert kwik_file_or_folder.suffix == '.kwik', "Not a '.kwik' file"
            kwikfile = Path(kwik_file_or_folder).absolute()
            klustafolder = kwikfile.parent
        elif kwik_file_or_folder.is_dir():
            klustafolder = kwik_file_or_folder
            kwikfiles = [f for f in kwik_file_or_folder.iterdir() if f.suffix == '.kwik']
            if len(kwikfiles) == 1:
                kwikfile = kwikfiles[0]
        assert kwikfile is not None, "Could not load '.kwik' file"

        try:
            config_file = [f for f in klustafolder.iterdir() if f.suffix == '.prm'][0]
            config = read_python(str(config_file))
            sample_rate = config['traces']['sample_rate']
            self._sampling_frequency = sample_rate
        except Exception:
            print("Could not load sampling frequency info")

        F = h5py.File(kwikfile, 'r')
        channel_groups = F.get('channel_groups')
        self._spiketrains = []
        self._unit_ids = []
        unique_units = []
        klusta_units = []
        groups = []
        unit = 0
        for cgroup in channel_groups:
            group_id = int(cgroup)
            try:
                cluster_ids = channel_groups[cgroup]['clusters']['main']
            except Exception:
                print('Unable to extract clusters from', kwikfile)
                continue
            for cluster_id in channel_groups[cgroup]['clusters']['main']:
                clusters = np.array(channel_groups[cgroup]['spikes']['clusters']['main'])
                idx = np.nonzero(clusters == int(cluster_id))
                st = np.array(channel_groups[cgroup]['spikes']['time_samples'])[idx]
                self._spiketrains.append(st)
                klusta_units.append(int(cluster_id))
                unique_units.append(unit)
                unit += 1
                groups.append(group_id)
        if len(np.unique(klusta_units)) == len(np.unique(unique_units)):
            self._unit_ids = klusta_units
        else:
            print('Klusta units are not unique! Using unique unit ids')
            self._unit_ids = unique_units
        for i, u in enumerate(self._unit_ids):
            self.set_unit_property(u, 'group', groups[i])
Example #17
    def __init__(self, file_path, delimiter=','):
        SortingExtractor.__init__(self)

        if os.path.isfile(file_path):
            self._spike_clusters = sbio.SpikeClusters()
            self._spike_clusters.fromCSV(file_path, None, delimiter=delimiter)
        else:
            raise FileNotFoundError(
                'the ground truth file "{}" could not be found'.format(
                    file_path))
Example #18
    def __init__(self, file_path, delimiter=','):
        assert HAVE_SBEX, self.installation_mesg
        SortingExtractor.__init__(self)

        if os.path.isfile(file_path):
            self._spike_clusters = sbio.SpikeClusters()
            self._spike_clusters.fromCSV(file_path, None, delimiter=delimiter)
        else:
            raise FileNotFoundError('the ground truth file "{}" could not be found'.format(file_path))
        self._kwargs = {'file_path': str(Path(file_path).absolute()), 'delimiter': delimiter}
Example #19
 def __init__(self, file_path, *, experiment_id=0, recording_id=0):
     assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
     SortingExtractor.__init__(self)
     self._recording_file = file_path
     self._recording = pyopenephys.File(
         file_path).experiments[experiment_id].recordings[recording_id]
     self._spiketrains = self._recording.spiketrains
     self._unit_ids = list(
         [np.unique(st.clusters)[0] for st in self._spiketrains])
     self._sampling_frequency = float(
         self._recording.sample_rate.rescale('Hz').magnitude)
Example #20
    def __init__(self, folder_path, *, experiment_id=0, recording_id=0):
        assert HAVE_OE, self.installation_mesg
        SortingExtractor.__init__(self)
        self._recording_file = folder_path
        self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
        self._spiketrains = self._recording.spiketrains
        self._unit_ids = list([np.unique(st.clusters)[0] for st in self._spiketrains])
        self._sampling_frequency = float(self._recording.sample_rate.rescale('Hz').magnitude)

        self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
                        'recording_id': recording_id}
Example #21
    def __init__(self, folder_path):
        assert HAVE_SCSX, self.installation_mesg
        SortingExtractor.__init__(self)
        spykingcircus_folder = Path(folder_path)
        listfiles = spykingcircus_folder.iterdir()
        results = None
        sample_rate = None

        parent_folder = None
        result_folder = None
        for f in listfiles:
            if f.is_dir():
                if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                    parent_folder = spykingcircus_folder
                    result_folder = f

        if parent_folder is None:
            parent_folder = spykingcircus_folder.parent
            for f in parent_folder.iterdir():
                if f.is_dir():
                    if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                        result_folder = spykingcircus_folder

        assert isinstance(parent_folder, Path) and isinstance(
            result_folder, Path), "Not a valid spyking circus folder"

        # load files
        for f in result_folder.iterdir():
            if 'result.hdf5' in str(f):
                results = f
            if 'result-merged.hdf5' in str(f):
                results = f
                break

        # load params
        for f in parent_folder.iterdir():
            if f.suffix == '.params':
                sample_rate = _load_sample_rate(f)

        if sample_rate is not None:
            self._sampling_frequency = sample_rate

        if results is None:
            raise Exception(
                "{} is not a spyking circus folder".format(spykingcircus_folder))
        f_results = h5py.File(results, 'r')
        self._spiketrains = []
        self._unit_ids = []
        for temp in f_results['spiketimes'].keys():
            self._spiketrains.append(
                np.array(f_results['spiketimes'][temp]).astype('int64'))
            self._unit_ids.append(int(temp.split('_')[-1]))

        self._kwargs = {'folder_path': str(Path(folder_path).absolute())}
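The two detection loops above imply the following layout (an assumed sketch; folder_path may point at either level):

# recording_folder/              <- parent folder: holds the .params file
#     recording.params
#     results_subfolder/         <- result folder: holds the .hdf5 output
#         recording.result.hdf5  (recording.result-merged.hdf5 wins when present)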
Example #22
    def __init__(self, folder_path, sampling_frequency=None, channel_group=None, load_waveforms=False):
        assert HAVE_EXDIR, "To use the ExdirExtractors run:\n\n pip install exdir\n\n"
        SortingExtractor.__init__(self)
        self._exdir_file = folder_path
        exdir_group = exdir.File(folder_path, plugins=exdir.plugins.quantities)

        electrophysiology = None
        sf = copy(sampling_frequency)
        if 'processing' in exdir_group.keys():
            if 'electrophysiology' in exdir_group['processing']:
                electrophysiology = exdir_group['processing']['electrophysiology']
                ephys_attrs = electrophysiology.attrs
                if 'sample_rate' in ephys_attrs:
                    sf = ephys_attrs['sample_rate']
        else:
            if sf is None:
                raise Exception("Sampling rate information not found. Please provide it with the 'sampling_frequency' "
                                "argument")
            else:
                sf = sf * pq.Hz
        self._sampling_frequency = float(sf.rescale('Hz').magnitude)

        if electrophysiology is None:
            raise Exception("'electrophysiology' group not found!")

        self._unit_ids = []
        current_unit = 1
        self._spike_trains = []
        for chan_name, channel in electrophysiology.items():
            if 'channel' in chan_name:
                group = int(chan_name.split('_')[-1])
                if channel_group is not None:
                    if group != channel_group:
                        continue
                nums = None
                waveforms = None
                if load_waveforms:
                    if 'Clustering' in channel.keys() and 'EventWaveform' in channel.keys():
                        clustering = channel.require_group('Clustering')
                        eventwaveform = channel.require_group('EventWaveform')
                        nums = clustering['nums'].data
                        waveforms = eventwaveform.require_group('waveform_timeseries')['data'].data
                if 'UnitTimes' in channel.keys():
                    for unit, unit_times in channel['UnitTimes'].items():
                        self._unit_ids.append(current_unit)
                        self._spike_trains.append((unit_times['times'].data.rescale('s') * sf).magnitude)
                        attrs = unit_times.attrs
                        for k, v in attrs.items():
                            self.set_unit_property(current_unit, k, v)
                        if load_waveforms and nums is not None:
                            unit_idxs = np.where(nums == int(unit))
                            wf = waveforms[unit_idxs]
                            self.set_unit_spike_features(current_unit, 'waveforms', wf)
                        current_unit += 1
        self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'sampling_frequency': sampling_frequency,
                        'channel_group': channel_group, 'load_waveforms': load_waveforms}
Example #23
    def __init__(self, firings_file, samplerate):
        SortingExtractor.__init__(self)
        self._firings_path = ka.load_file(firings_file)
        if not self._firings_path:
            raise Exception('Unable to load firings file: ' + firings_file)

        self._firings = readmda(self._firings_path)
        self._sampling_frequency = samplerate
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
Example #24
 def __init__(self, file_path, well_name='well000', rec_name='rec0000'):
     assert self.installed, self.installation_mesg
     SortingExtractor.__init__(self)
     self._file_path = file_path
     self._well_name = well_name
     self._rec_name = rec_name
     self._filehandle = None
     self._mapping = None
     self._version = None
     self._initialize()
     self._sampling_frequency = self._fs
     self._kwargs = {'file_path': str(Path(file_path).absolute()), 'well_name': well_name, 'rec_name': rec_name}
Example #25
    def __init__(self, tdc_folder, chan_grp=None):
        assert HAVE_TDC, "must install tridesclous"
        tdc_folder = Path(tdc_folder)
        SortingExtractor.__init__(self)
        self.dataio = tdc.DataIO(str(tdc_folder))
        if chan_grp is None:
            # if chan_grp is not provided, take the first one if unique
            chan_grps = list(self.dataio.channel_groups.keys())
            assert len(chan_grps) == 1, 'There are several chan_grps in the folder, specify chan_grp=...'
            chan_grp = chan_grps[0]

        self.chan_grp = chan_grp
        self.catalogue = self.dataio.load_catalogue(name='initial', chan_grp=chan_grp)
Example #26
    def __init__(self, spykingcircus_folder):
        assert HAVE_SCSX, "To use the SpykingCircusSortingExtractor install h5py: \n\n pip install h5py\n\n"
        SortingExtractor.__init__(self)
        spykingcircus_folder = Path(spykingcircus_folder)
        listfiles = spykingcircus_folder.iterdir()
        results = None
        sample_rate = None

        parent_folder = None
        result_folder = None
        for f in listfiles:
            if f.is_dir():
                if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                    parent_folder = spykingcircus_folder
                    result_folder = f

        if parent_folder is None:
            parent_folder = spykingcircus_folder.parent
            for f in parent_folder.iterdir():
                if f.is_dir():
                    if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                        result_folder = spykingcircus_folder

        assert isinstance(parent_folder, Path) and isinstance(
            result_folder, Path), "Not a valid spyking circus folder"

        # load files
        for f in result_folder.iterdir():
            if 'result.hdf5' in str(f):
                results = f
            if 'result-merged.hdf5' in str(f):
                results = f
                break

        # load params
        for f in parent_folder.iterdir():
            if f.suffix == '.params':
                sample_rate = _load_sample_rate(f)

        if sample_rate is not None:
            self._sampling_frequency = sample_rate

        if results is None:
            raise Exception(
                "{} is not a spyking circus folder".format(spykingcircus_folder))
        f_results = h5py.File(results, 'r')
        self._spiketrains = []
        self._unit_ids = []
        for temp in f_results['spiketimes'].keys():
            self._spiketrains.append(f_results['spiketimes'][temp])
            self._unit_ids.append(int(temp.split('_')[-1]))
Example #27
    def __init__(self, file_path, sampling_frequency=None):

        SortingExtractor.__init__(self)
        self._firings_path = file_path
        self._firings = readmda(self._firings_path)
        self._max_channels = self._firings[0, :]
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
        self._sampling_frequency = sampling_frequency
        for unit_id in self._unit_ids:
            inds = np.where(self._labels == unit_id)
            max_channels = self._max_channels[inds].astype(int)
            self.set_unit_property(unit_id, 'max_channel', max_channels[0])
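The row indexing above fixes the layout of the firings array: row 0 is the max channel per event, row 1 the spike sample, row 2 the unit label. A tiny assumed stand-in for readmda(...):

import numpy as np
firings = np.array([
    [3,   1,   3,   2],      # row 0: max channel of each event
    [100, 250, 400, 900],    # row 1: spike time in samples
    [1,   2,   1,   2],      # row 2: unit label
])
unit_ids = np.unique(firings[2, :]).astype(int)  # -> [1, 2]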
Example #28
    def __init__(self, h5_path):
        SortingExtractor.__init__(self)
        self._h5_path = h5_path
        self._loaded_spike_trains = {}

        with h5py.File(self._h5_path, 'r') as f:
            self._unit_ids = np.array(f.get('unit_ids'))
            self._sampling_frequency = np.array(
                f.get('sampling_frequency'))[0].item()
            if np.isnan(self._sampling_frequency):
                print(
                    'WARNING: sampling frequency is nan. Using 30000 for now. Please correct the snippets file.'
                )
                self._sampling_frequency = 30000
Example #29
    def __init__(self, block_index=None, seg_index=None, **kargs):
        SortingExtractor.__init__(self)
        _NeoBaseExtractor.__init__(self, block_index=block_index, seg_index=seg_index, **kargs)

        # The sampling frequency is tricky here because neo handles spike times
        # in s or ms. Internally, many formats store spike time stamps at the
        # same speed as the signal, but some use a higher clock speed. In
        # spikeinterface we need spike indices at the same speed as the signal:
        # it makes no sense to have spikes sampled at 50 kHz when the signal is
        # at 10 kHz. neo handles this, but spikeextractors does not.
        self._handle_sampling_frequency()
Example #30
 def __init__(self, resfile, clufile):
     SortingExtractor.__init__(self)
     res = np.loadtxt(resfile, dtype=np.int64, usecols=0, ndmin=1)
     clu = np.loadtxt(clufile, dtype=np.int64, usecols=0, ndmin=1)
     if len(res) > 0:
         n_clu = clu[0]
         clu = np.delete(clu, 0)
         self._spiketrains = []
         self._unit_ids = list(x + 1 for x in range(n_clu))
         for s_id in self._unit_ids:
             self._spiketrains.append(res[(clu == s_id).nonzero()])
     else:
         self._spiketrains = []
         self._unit_ids = []
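Reconstructed from the reader above: the .res file holds one spike sample per line, and the .clu file holds the cluster count on its first line followed by one label per spike. A minimal assumed pair:

import numpy as np

res = np.array([120, 340, 560, 780])  # spike samples, one per line
clu = np.array([2, 1, 2, 1, 2])       # first entry: n_clu = 2, then labels
np.savetxt('example.res.1', res, fmt='%d')
np.savetxt('example.clu.1', clu, fmt='%d')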