def __init__(self, phy_folder):
        SortingExtractor.__init__(self)
        phy_folder = Path(phy_folder)

        spike_times = np.load(phy_folder / 'spike_times.npy')
        spike_templates = np.load(phy_folder / 'spike_templates.npy')
        amplitudes = np.load(phy_folder / 'amplitudes.npy')
        pc_features = np.load(phy_folder / 'pc_features.npy')

        if (phy_folder / 'spike_clusters.npy').is_file():
            spike_clusters = np.load(phy_folder / 'spike_clusters.npy')
        else:
            spike_clusters = spike_templates

        self._spiketrains = []
        self._amps = []
        self._pc_features = []
        clust_id = np.unique(spike_clusters)
        self._unit_ids = list(clust_id)
        spike_times = spike_times.astype(int)

        for clust in self._unit_ids:
            idx = np.where(spike_clusters == clust)[0]
            self._spiketrains.append(spike_times[idx])
            self._amps.append(amplitudes[idx])
            self._pc_features.append(pc_features[idx])

        # set features
        for u_i, unit in enumerate(self.getUnitIds()):
            self.setUnitSpikeFeatures(unit, 'amplitudes', self._amps[u_i])
            self.setUnitSpikeFeatures(unit, 'pc_features',
                                      self._pc_features[u_i])
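
# A minimal usage sketch (hypothetical: assumes this __init__ belongs to a
# class like PhySortingExtractor and that phy_folder contains the standard
# Phy output files loaded above):
#
#     sorting = PhySortingExtractor('path/to/phy_output')
#     print(sorting.getUnitIds())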
Example #2
 def __init__(self, sorting, save_path=None):
     SortingExtractor.__init__(
         self)  # init tmp folder before constructing NpzSortingExtractor
     tmp_folder = self.get_tmp_folder()
     self._sorting = sorting
     if save_path is None:
         self._is_tmp = True
         self._tmp_file = tempfile.NamedTemporaryFile(suffix=".npz",
                                                      dir=tmp_folder).name
     else:
         save_path = Path(save_path)
         if save_path.suffix != '.npz':
             save_path = save_path.with_suffix('.npz')
         if not save_path.parent.is_dir():
             os.makedirs(save_path.parent)
         self._is_tmp = False
         self._tmp_file = save_path
     NpzSortingExtractor.write_sorting(self._sorting, self._tmp_file)
     NpzSortingExtractor.__init__(self, self._tmp_file)
     # keep Npz kwargs
     self._npz_kwargs = deepcopy(self._kwargs)
     self.set_tmp_folder(tmp_folder)
     self.copy_unit_properties(sorting)
     self.copy_unit_spike_features(sorting)
     self._kwargs = {'sorting': sorting}
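
# A minimal usage sketch (hypothetical class name CacheSortingExtractor; the
# snippet above caches any SortingExtractor to an .npz file, either a
# temporary one or an explicit save_path):
#
#     cached = CacheSortingExtractor(sorting, save_path='results/sorting.npz')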
Example #3
 def add_sorting(self, *, recording_id: str, label: str,
                 sorting: se.SortingExtractor):
     sorting_id = 'S-' + _random_id()
     if recording_id not in self._recordings:
         raise Exception(f'Recording not found: {recording_id}')
     if sorting_id in self._sortings:
         raise Exception(f'Duplicate sorting ID: {sorting_id}')
     le_recording = self._recordings[recording_id]
     x = {
         'sortingId': sorting_id,
         'sortingLabel': label,
         'sortingPath': kp.store_object(sorting.object(), basename=f'{label}.json'),
         'sortingObject': sorting.object(),
         'recordingId': recording_id,
         'recordingPath': le_recording['recordingPath'],
         'recordingObject': le_recording['recordingObject'],
         'description': f'Imported from Python: {label}'
     }
     sortings_subfeed = self._feed.get_subfeed(
         dict(workspaceName=self._workspace_name, key='sortings'))
     _import_le_sorting(sortings_subfeed, x)
     self._sortings[sorting_id] = x
     return x
Example #4
    def write_sorting(sorting: SortingExtractor, save_path: PathType):
        # if multiple groups, use the NeuroscopeMultiSortingExtractor write function
        if 'group' in sorting.get_shared_unit_property_names():
            NeuroscopeMultiSortingExtractor.write_sorting(sorting, save_path)
        else:
            save_path.mkdir(parents=True, exist_ok=True)

            if save_path.suffix == '':
                sorting_name = save_path.name
            else:
                sorting_name = save_path.stem
            xml_name = sorting_name
            save_xml_filepath = save_path / (str(xml_name) + '.xml')

            # create a parameters file, refusing to overwrite an existing one
            if save_xml_filepath.is_file():
                raise FileExistsError(f'{save_xml_filepath} already exists!')

            xml_root = et.Element('xml')
            et.SubElement(xml_root, 'acquisitionSystem')
            et.SubElement(xml_root.find('acquisitionSystem'), 'samplingRate')
            xml_root.find('acquisitionSystem').find('samplingRate').text = str(
                sorting.get_sampling_frequency())
            et.ElementTree(xml_root).write(str(save_xml_filepath.absolute()),
                                           pretty_print=True)

            # Create and save .res and .clu files from the current sorting object
            save_res = save_path / f'{sorting_name}.res'
            save_clu = save_path / f'{sorting_name}.clu'

            res, clu = _extract_res_clu_arrays(sorting)

            np.savetxt(save_res, res, fmt='%i')
            np.savetxt(save_clu, clu, fmt='%i')
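
# A minimal usage sketch (assuming this staticmethod sits on
# NeuroscopeSortingExtractor; save_path is treated as a directory, so
# Path('out/mysorting') produces mysorting.xml, mysorting.res and
# mysorting.clu inside it):
#
#     NeuroscopeSortingExtractor.write_sorting(sorting, Path('out/mysorting'))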
    def __init__(self, folder_path, chan_grp=None):
        assert HAVE_TDC, self.installation_mesg
        tdc_folder = Path(folder_path)
        SortingExtractor.__init__(self)

        dataio = tdc.DataIO(str(tdc_folder))
        if chan_grp is None:
            # if chan_grp is not provided, take the first one if unique
            chan_grps = list(dataio.channel_groups.keys())
            assert len(chan_grps) == 1, 'There are several groups in the folder, specify chan_grp=...'
            chan_grp = chan_grps[0]

        self.chan_grp = chan_grp

        catalogue = dataio.load_catalogue(name='initial', chan_grp=chan_grp)

        labels = catalogue['clusters']['cluster_label']
        labels = labels[labels >= 0]
        self._unit_ids = list(labels)
        # load all spikes into memory (this avoids locking the folder with a memmap through dataio)
        self._all_spikes = dataio.get_spikes(seg_num=0,
                                             chan_grp=self.chan_grp,
                                             i_start=None,
                                             i_stop=None).copy()

        self._sampling_frequency = dataio.sample_rate
        self._kwargs = {
            'folder_path': str(Path(folder_path).absolute()),
            'chan_grp': chan_grp
        }
    def write_sorting(sorting: SortingExtractor, save_path: PathType):
        assert save_path.suffixes == [
            ".spikes",
            ".cellinfo",
            ".mat",
        ], "The save_path must correspond to the CellExplorer format of sorting_id.spikes.cellinfo.mat!"

        base_path = save_path.parent
        sorting_id = save_path.name.split(".")[0]
        session_info_save_path = base_path / f"{sorting_id}.sessionInfo.mat"
        spikes_save_path = save_path
        base_path.mkdir(parents=True, exist_ok=True)

        sampling_frequency = sorting.get_sampling_frequency()
        session_info_mat_dict = dict(sessionInfo=dict(rates=dict(
            wideband=sampling_frequency)))

        scipy.io.savemat(file_name=session_info_save_path,
                         mdict=session_info_mat_dict)

        spikes_mat_dict = dict(spikes=dict(
            UID=sorting.get_unit_ids(),
            times=[[[y / sampling_frequency] for y in x]
                   for x in sorting.get_units_spike_train()],
        ))
        # If, in the future, it is ever desired to allow this to write unit properties, they must conform
        # to the format here: https://cellexplorer.org/datastructure/data-structure-and-format/
        scipy.io.savemat(file_name=spikes_save_path, mdict=spikes_mat_dict)
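
# A minimal usage sketch (assuming this is the CellExplorer write_sorting;
# save_path must end in .spikes.cellinfo.mat, and a matching
# <sorting_id>.sessionInfo.mat is written next to it):
#
#     write_sorting(sorting, Path('out/session1.spikes.cellinfo.mat'))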
    def __init__(self, exdir_file, sample_rate=None):
        exdir, pq = _load_required_modules()

        SortingExtractor.__init__(self)
        self._exdir_file = exdir_file
        exdir_group = exdir.File(exdir_file, plugins=exdir.plugins.quantities)

        if 'acquisition' in exdir_group.keys():
            if 'timeseries' in exdir_group['acquisition'].keys():
                sample_rate = exdir_group['acquisition']['timeseries'].attrs['sample_rate']
        else:
            if sample_rate is None:
                raise Exception("Provide 'sample_rate' argument (Hz)")
            else:
                sample_rate = sample_rate * pq.Hz

        electrophysiology = exdir_group['processing']['electrophysiology']
        self._unit_ids = []
        current_unit = 1
        self._spike_trains = []
        for chan_name, channel in electrophysiology.items():
            group = int(chan_name.split('_')[-1])
            for units, unit_times in channel['UnitTimes'].items():
                self._unit_ids.append(current_unit)
                self._spike_trains.append((unit_times['times'].data.rescale('s')*sample_rate).magnitude)
                self.setUnitProperty(current_unit, 'group', group)
                current_unit += 1
def import_sorting(*, feed: kp.Feed, workspace_name: str,
                   recording: se.RecordingExtractor,
                   sorting: se.SortingExtractor, recording_id: str,
                   sorting_label: str):
    sorting_id = 'S-' + random_id()
    x = {
        'sortingId': sorting_id,
        'sortingLabel': sorting_label,
        'sortingPath': ka.store_object(sorting.object(), basename=f'{sorting_label}.json'),
        'sortingObject': sorting.object(),
        'recordingId': recording_id,
        'recordingPath': ka.store_object(recording.object(), basename=f'{recording_id}.json'),
        'recordingObject': recording.object(),
        'description': f'Imported from Python: {sorting_label}'
    }
    sortings_subfeed = feed.get_subfeed(
        dict(workspaceName=workspace_name, key='sortings'))
    _import_le_sorting(sortings_subfeed, x)
    return x
    def __init__(self, spikes_matfile_path: PathType):
        SortingExtractor.__init__(self)

        spikes_matfile_path = Path(spikes_matfile_path)
        assert spikes_matfile_path.is_file(), \
            f"The spikes_matfile_path ({spikes_matfile_path}) must exist!"
        folder_path = spikes_matfile_path.parent
        sorting_id = spikes_matfile_path.name.split(".")[0]

        session_info_matfile_path = folder_path / f"{sorting_id}.sessionInfo.mat"
        assert session_info_matfile_path.is_file(), \
            "No sessionInfo.mat file found in the folder!"
        session_info_mat = loadmat(
            file_name=str(session_info_matfile_path.absolute()))
        assert session_info_mat['sessionInfo']['rates'][0][0]['wideband'], "The sessionInfo.mat file must contain " \
            "a 'sessionInfo' struct with field 'rates' containing field 'wideband'!"
        # careful not to confuse this with the lfpsamplingrate; reported in Hz
        self._sampling_frequency = float(
            session_info_mat['sessionInfo']['rates'][0][0]['wideband'][0][0][0][0])

        spikes_mat = loadmat(file_name=str(spikes_matfile_path.absolute()))
        assert np.all(np.isin(['UID', 'times'], spikes_mat['spikes'].dtype.names)), \
            "The spikes.cellinfo.mat file must contain a 'spikes' struct with fields 'UID' and 'times'!"

        self._unit_ids = np.asarray(spikes_mat['spikes']['UID'][0][0][0],
                                    dtype=int)
        # CellExplorer reports spike times in units seconds; SpikeExtractors uses time units of sampling frames
        # Rounding is necessary to prevent data loss from int-casting floating point errors
        self._spiketrains = [(np.array([y[0] for y in x]) *
                              self._sampling_frequency).round().astype(int)
                             for x in spikes_mat['spikes']['times'][0][0][0]]

        self._kwargs = dict(
            spikes_matfile_path=str(spikes_matfile_path.absolute()))
    def __init__(self, parent_sorting, curation_steps=None):
        SortingExtractor.__init__(self)
        self._parent_sorting = parent_sorting
        self._original_unit_ids = list(np.copy(parent_sorting.get_unit_ids()))
        self._all_ids = list(np.copy(parent_sorting.get_unit_ids()))
        self._sampling_frequency = parent_sorting.get_sampling_frequency()
        self.set_tmp_folder(parent_sorting.get_tmp_folder())

        # Create and store roots with original unit ids and cached spiketrains
        self._roots = []
        for unit_id in self._original_unit_ids:
            root = Unit(unit_id)
            root.set_spike_train(parent_sorting.get_unit_spike_train(unit_id))
            self._roots.append(root)
        # Copy properties and spike features over from parent_sorting. Only
        # spike features survive merges and splits; properties cannot be
        # resolved in those cases.
        self.copy_unit_properties(parent_sorting)
        self.copy_unit_spike_features(parent_sorting)
        self.copy_epochs(parent_sorting)
        self.copy_times(parent_sorting)

        self.curation_steps = curation_steps
        self._kwargs = {
            'parent_sorting': parent_sorting.make_serialized_dict(),
            'curation_steps': self.curation_steps
        }

        self.curation_steps = []
        if curation_steps is not None:
            assert isinstance(curation_steps, list), \
                "curation_steps must be a list of curation commands"
            for i, curation_step in enumerate(curation_steps):
                command, arguments = curation_step
                if command == 'exclude_units':
                    assert len(arguments) == 1, "Length of arguments must be 1 for exclude_units"
                    unit_ids = arguments[0]
                    self.exclude_units(unit_ids=unit_ids)
                elif command == 'merge_units':
                    assert len(arguments) == 1, "Length of arguments must be 1 for merge_units"
                    unit_ids = arguments[0]
                    self.merge_units(unit_ids=unit_ids)
                elif command == 'split_unit':
                    assert len(arguments) == 2, "Length of arguments must be 2 for split_unit"
                    unit_id = arguments[0]
                    indices = arguments[1]

                    self.split_unit(unit_id=unit_id, indices=indices)
                else:
                    raise ValueError(
                        "{} is not a valid curation command".format(command))
 def __init__(self, recording_file):
     SortingExtractor.__init__(self)
     self._recording_file = recording_file
     self._rf = h5py.File(self._recording_file, mode='r')
     self._unit_ids = set(self._rf['cluster_id'][()])
     if 'centres' in self._rf.keys():
         self._unit_locs = self._rf['centres'][()]  # cache for faster access
    def __init__(self, firings_file):
        SortingExtractor.__init__(self)
        self._firings_path = firings_file

        self._firings = readmda(self._firings_path)
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
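
# Note (assumed MountainSort convention for firings.mda): readmda returns a
# (3, n_events) array with row 0 the primary channel, row 1 the spike sample
# index, and row 2 the unit label, which is why rows 1 and 2 are read above.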
 def __init__(self, recording_path):
     SortingExtractor.__init__(self)
     self._recording_path = recording_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
Example #14
    def __init__(self, h5_path):
        SortingExtractor.__init__(self)
        self._h5_path = h5_path
        self._loaded_spike_trains = {}

        with h5py.File(self._h5_path, 'r') as f:
            self._unit_ids = np.array(f.get('unit_ids'))
            self._sampling_frequency = np.array(f.get('sampling_frequency'))[0]
Example #15
 def __init__(self, recording_file, *, experiment_id=0, recording_id=0):
     assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
     SortingExtractor.__init__(self)
     self._recording_file = recording_file
     self._recording = pyopenephys.File(
         recording_file).experiments[experiment_id].recordings[recording_id]
     self._spiketrains = self._recording.spiketrains
     self._unit_ids = list(
         [np.unique(st.clusters)[0] for st in self._spiketrains])
Example #16
 def __init__(self, file_path):
     assert self.installed, self.installation_mesg
     SortingExtractor.__init__(self)
     self._file_path = file_path
     self._filehandle = None
     self._mapping = None
     self._version = None
     self._initialize()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
 def __init__(self, file_path):
     SortingExtractor.__init__(self)
     self._recording_path = file_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
Example #18
 def __init__(self, file_path):
     SortingExtractor.__init__(self)
     self._file = nix.File.open(file_path, nix.FileMode.ReadOnly)
     md = self._file.sections
     if "sampling_frequency" in md:
         sfreq = md["sampling_frequency"]
         self._sampling_frequency = sfreq
     self._load_properties()
     self._kwargs = {'file_path': str(Path(file_path).absolute())}
Example #19
 def __init__(self, recording_path):
     neo, pq, h5py, yaml = _load_required_modules()
     SortingExtractor.__init__(self)
     self._recording_path = recording_path
     self._num_units = None
     self._spike_trains = None
     self._unit_ids = None
     self._fs = None
     self._initialize()
    def __init__(self, kwik_file_or_folder):
        assert HAVE_KLSX, "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n"
        SortingExtractor.__init__(self)
        kwik_file_or_folder = Path(kwik_file_or_folder)
        kwikfile = None
        klustafolder = None
        if kwik_file_or_folder.is_file():
            assert kwik_file_or_folder.suffix == '.kwik', "Not a '.kwik' file"
            kwikfile = Path(kwik_file_or_folder).absolute()
            klustafolder = kwikfile.parent
        elif kwik_file_or_folder.is_dir():
            klustafolder = kwik_file_or_folder
            kwikfiles = [f for f in kwik_file_or_folder.iterdir() if f.suffix == '.kwik']
            if len(kwikfiles) == 1:
                kwikfile = kwikfiles[0]
        assert kwikfile is not None, "Could not load '.kwik' file"

        try:
            config_file = [f for f in klustafolder.iterdir() if f.suffix == '.prm'][0]
            config = read_python(str(config_file))
            sample_rate = config['traces']['sample_rate']
            self._sampling_frequency = sample_rate
        except Exception as e:
            print("Could not load sampling frequency info")

        F = h5py.File(kwikfile, 'r')
        channel_groups = F.get('channel_groups')
        self._spiketrains = []
        self._unit_ids = []
        unique_units = []
        klusta_units = []
        groups = []
        unit = 0
        for cgroup in channel_groups:
            group_id = int(cgroup)
            try:
                cluster_ids = channel_groups[cgroup]['clusters']['main']
            except Exception as e:
                print('Unable to extract clusters from', kwikfile)
                continue
            for cluster_id in channel_groups[cgroup]['clusters']['main']:
                clusters = np.array(channel_groups[cgroup]['spikes']['clusters']['main'])
                idx = np.nonzero(clusters == int(cluster_id))
                st = np.array(channel_groups[cgroup]['spikes']['time_samples'])[idx]
                self._spiketrains.append(st)
                klusta_units.append(int(cluster_id))
                unique_units.append(unit)
                unit += 1
                groups.append(group_id)
        if len(np.unique(klusta_units)) == len(np.unique(unique_units)):
            self._unit_ids = klusta_units
        else:
            print('Klusta units are not unique! Using unique unit ids')
            self._unit_ids = unique_units
        for i, u in enumerate(self._unit_ids):
            self.set_unit_property(u, 'group', groups[i])
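
# The .kwik HDF5 layout assumed by the loop above:
#     /channel_groups/<group>/clusters/main          (cluster ids)
#     /channel_groups/<group>/spikes/clusters/main   (per-spike cluster id)
#     /channel_groups/<group>/spikes/time_samples    (per-spike sample index)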
Example #21
    def __init__(self, file_path, delimiter=','):
        SortingExtractor.__init__(self)

        if os.path.isfile(file_path):
            self._spike_clusters = sbio.SpikeClusters()
            self._spike_clusters.fromCSV(file_path, None, delimiter=delimiter)
        else:
            raise FileNotFoundError(
                'the ground truth file "{}" could not be found'.format(
                    file_path))
    def __init__(self, file_path, delimiter=','):
        assert HAVE_SBEX, self.installation_mesg
        SortingExtractor.__init__(self)

        if os.path.isfile(file_path):
            self._spike_clusters = sbio.SpikeClusters()
            self._spike_clusters.fromCSV(file_path, None, delimiter=delimiter)
        else:
            raise FileNotFoundError('the ground truth file "{}" could not be found'.format(file_path))
        self._kwargs = {'file_path': str(Path(file_path).absolute()), 'delimiter': delimiter}
 def __init__(self, file_path, *, experiment_id=0, recording_id=0):
     assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
     SortingExtractor.__init__(self)
     self._recording_file = file_path
     self._recording = pyopenephys.File(
         file_path).experiments[experiment_id].recordings[recording_id]
     self._spiketrains = self._recording.spiketrains
     self._unit_ids = list(
         [np.unique(st.clusters)[0] for st in self._spiketrains])
     self._sampling_frequency = float(
         self._recording.sample_rate.rescale('Hz').magnitude)
    def __init__(self, folder_path, *, experiment_id=0, recording_id=0):
        assert HAVE_OE, self.installation_mesg
        SortingExtractor.__init__(self)
        self._recording_file = folder_path
        self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
        self._spiketrains = self._recording.spiketrains
        self._unit_ids = list([np.unique(st.clusters)[0] for st in self._spiketrains])
        self._sampling_frequency = float(self._recording.sample_rate.rescale('Hz').magnitude)

        self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
                        'recording_id': recording_id}
Example #25
    def __init__(self, firings_file, samplerate):
        SortingExtractor.__init__(self)
        self._firings_path = ka.load_file(firings_file)
        if not self._firings_path:
            raise Exception('Unable to load firings file: ' + firings_file)

        self._firings = readmda(self._firings_path)
        self._sampling_frequency = samplerate
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
Example #26
    def __init__(self, folder_path, sampling_frequency=None, channel_group=None, load_waveforms=False):
        assert HAVE_EXDIR, "To use the ExdirExtractors run:\n\n pip install exdir\n\n"
        SortingExtractor.__init__(self)
        self._exdir_file = folder_path
        exdir_group = exdir.File(folder_path, plugins=exdir.plugins.quantities)

        electrophysiology = None
        sf = copy(sampling_frequency)
        if 'processing' in exdir_group.keys():
            if 'electrophysiology' in exdir_group['processing']:
                electrophysiology = exdir_group['processing']['electrophysiology']
                ephys_attrs = electrophysiology.attrs
                if 'sample_rate' in ephys_attrs:
                    sf = ephys_attrs['sample_rate']
        else:
            if sf is None:
                raise Exception("Sampling rate information not found. Please provide it with the 'sampling_frequency' "
                                "argument")
            else:
                sf = sf * pq.Hz
        self._sampling_frequency = float(sf.rescale('Hz').magnitude)

        if electrophysiology is None:
            raise Exception("'electrophysiology' group not found!")

        self._unit_ids = []
        current_unit = 1
        self._spike_trains = []
        for chan_name, channel in electrophysiology.items():
            if 'channel' in chan_name:
                group = int(chan_name.split('_')[-1])
                if channel_group is not None:
                    if group != channel_group:
                        continue
                if load_waveforms:
                    if 'Clustering' in channel.keys() and 'EventWaveform' in channel.keys():
                        clustering = channel.require_group('Clustering')
                        eventwaveform = channel.require_group('EventWaveform')
                        nums = clustering['nums'].data
                        waveforms = eventwaveform.require_group('waveform_timeseries')['data'].data
                if 'UnitTimes' in channel.keys():
                    for unit, unit_times in channel['UnitTimes'].items():
                        self._unit_ids.append(current_unit)
                        self._spike_trains.append((unit_times['times'].data.rescale('s') * sf).magnitude)
                        attrs = unit_times.attrs
                        for k, v in attrs.items():
                            self.set_unit_property(current_unit, k, v)
                        if load_waveforms:
                            unit_idxs = np.where(nums == int(unit))
                            wf = waveforms[unit_idxs]
                            self.set_unit_spike_features(current_unit, 'waveforms', wf)
                        current_unit += 1
        self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'sampling_frequency': sampling_frequency,
                        'channel_group': channel_group, 'load_waveforms': load_waveforms}
Example #27
    def __init__(self, folder_path):
        assert HAVE_SCSX, self.installation_mesg
        SortingExtractor.__init__(self)
        spykingcircus_folder = Path(folder_path)
        listfiles = spykingcircus_folder.iterdir()
        results = None
        sample_rate = None

        parent_folder = None
        result_folder = None
        for f in listfiles:
            if f.is_dir():
                if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                    parent_folder = spykingcircus_folder
                    result_folder = f

        if parent_folder is None:
            parent_folder = spykingcircus_folder.parent
            for f in parent_folder.iterdir():
                if f.is_dir():
                    if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                        result_folder = spykingcircus_folder

        assert isinstance(parent_folder, Path) and isinstance(
            result_folder, Path), "Not a valid spyking circus folder"

        # load files
        for f in result_folder.iterdir():
            if 'result.hdf5' in str(f):
                results = f
            if 'result-merged.hdf5' in str(f):
                results = f
                break

        # load params
        for f in parent_folder.iterdir():
            if f.suffix == '.params':
                sample_rate = _load_sample_rate(f)

        if sample_rate is not None:
            self._sampling_frequency = sample_rate

        if results is None:
            raise Exception(spykingcircus_folder,
                            " is not a spyking circus folder")
        f_results = h5py.File(results, 'r')
        self._spiketrains = []
        self._unit_ids = []
        for temp in f_results['spiketimes'].keys():
            self._spiketrains.append(
                np.array(f_results['spiketimes'][temp]).astype('int64'))
            self._unit_ids.append(int(temp.split('_')[-1]))

        self._kwargs = {'folder_path': str(Path(folder_path).absolute())}
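
# Folder layout assumed by the search above: either folder_path contains a
# subfolder holding the .hdf5 result files, or folder_path is itself that
# subfolder and the .params file lives in its parent.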
Example #28
 def __init__(self, file_path, well_name='well000', rec_name='rec0000'):
     assert self.installed, self.installation_mesg
     SortingExtractor.__init__(self)
     self._file_path = file_path
     self._well_name = well_name
     self._rec_name = rec_name
     self._filehandle = None
     self._mapping = None
     self._version = None
     self._initialize()
     self._sampling_frequency = self._fs
     self._kwargs = {'file_path': str(Path(file_path).absolute()), 'well_name': well_name, 'rec_name': rec_name}
    def __init__(self, tdc_folder, chan_grp=None):
        assert HAVE_TDC, "must install tridesclous"
        tdc_folder = Path(tdc_folder)
        SortingExtractor.__init__(self)
        self.dataio = tdc.DataIO(str(tdc_folder))
        if chan_grp is None:
            # if chan_grp is not provided, take the first one if unique
            chan_grps = list(self.dataio.channel_groups.keys())
            assert len(chan_grps) == 1, 'There are several chan_grp in the folder, specify chan_grp=...'
            chan_grp = chan_grps[0]

        self.chan_grp = chan_grp
        self.catalogue = self.dataio.load_catalogue(name='initial', chan_grp=chan_grp)
    def __init__(self, spykingcircus_folder):
        assert HAVE_SCSX, "To use the SpykingCircusSortingExtractor install h5py: \n\n pip install h5py\n\n"
        SortingExtractor.__init__(self)
        spykingcircus_folder = Path(spykingcircus_folder)
        listfiles = spykingcircus_folder.iterdir()
        results = None
        sample_rate = None

        parent_folder = None
        result_folder = None
        for f in listfiles:
            if f.is_dir():
                if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                    parent_folder = spykingcircus_folder
                    result_folder = f

        if parent_folder is None:
            parent_folder = spykingcircus_folder.parent
            for f in parent_folder.iterdir():
                if f.is_dir():
                    if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
                        result_folder = spykingcircus_folder

        assert isinstance(parent_folder, Path) and isinstance(
            result_folder, Path), "Not a valid spyking circus folder"

        # load files
        for f in result_folder.iterdir():
            if 'result.hdf5' in str(f):
                results = f
            if 'result-merged.hdf5' in str(f):
                results = f
                break

        # load params
        for f in parent_folder.iterdir():
            if f.suffix == '.params':
                sample_rate = _load_sample_rate(f)

        if sample_rate is not None:
            self._sampling_frequency = sample_rate

        if results is None:
            raise Exception(spykingcircus_folder,
                            " is not a spyking circus folder")
        f_results = h5py.File(results, 'r')
        self._spiketrains = []
        self._unit_ids = []
        for temp in f_results['spiketimes'].keys():
            self._spiketrains.append(
                np.array(f_results['spiketimes'][temp]).astype('int64'))
            self._unit_ids.append(int(temp.split('_')[-1]))