Example 1
    @classmethod
    def _run_from_folder(cls, output_folder, params, verbose):
        import mountainsort4

        recording = load_extractor(output_folder /
                                   'spikeinterface_recording.json')

        # alias to params
        p = params

        samplerate = recording.get_sampling_frequency()

        # Bandpass filter
        if p['filter'] and p['freq_min'] is not None and p['freq_max'] is not None:
            if verbose:
                print('filtering')
            recording = bandpass_filter(recording=recording,
                                        freq_min=p['freq_min'],
                                        freq_max=p['freq_max'])

        # Whiten
        if p['whiten']:
            if verbose:
                print('whitening')
            recording = whiten(recording=recording)

        print('Mountainsort4 uses the OLD spikeextractors API, '
              'mapped with RecordingExtractorOldAPI')
        old_api_recording = RecordingExtractorOldAPI(recording)

        # Checking channel locations is no longer needed here; it is done in basesorter
        old_api_sorting = mountainsort4.mountainsort4(
            recording=old_api_recording,
            detect_sign=p['detect_sign'],
            adjacency_radius=p['adjacency_radius'],
            clip_size=p['clip_size'],
            detect_threshold=p['detect_threshold'],
            detect_interval=p['detect_interval'],
            num_workers=p['num_workers'],
            verbose=verbose)

        # Curate
        if p['noise_overlap_threshold'] is not None and p['curation'] is True:
            if verbose:
                print('Curating')
            old_api_sorting = mountainsort4.mountainsort4_curation(
                recording=old_api_recording,
                sorting=old_api_sorting,
                noise_overlap_threshold=p['noise_overlap_threshold'])

        # Convert the sorting to the new API and save it.
        # The old API is single-segment, so the list holds a single dict
        # mapping unit_id -> spike train.
        unit_ids = old_api_sorting.get_unit_ids()
        units_dict_list = [{u: old_api_sorting.get_unit_spike_train(u)
                            for u in unit_ids}]
        new_api_sorting = NumpySorting.from_dict(units_dict_list, samplerate)
        NpzSortingExtractor.write_sorting(new_api_sorting,
                                          str(output_folder / 'firings.npz'))
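
The result is written to firings.npz and can be read back with the matching
extractor. A minimal sketch, assuming NpzSortingExtractor is importable from
spikeinterface.core and that output_folder is the same path used above:

from spikeinterface.core import NpzSortingExtractor

# reload the sorting saved by _run_from_folder
sorting = NpzSortingExtractor(output_folder / 'firings.npz')
print(sorting.get_unit_ids())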
Example 2
print(probe)

from probeinterface.plotting import plot_probe

plot_probe(probe)

##############################################################################
# Using the :code:`toolkit`, you can perform preprocessing on the recordings.
# Each preprocessing function also returns a :code:`RecordingExtractor`,
# which makes it easy to build pipelines. Here, we filter the recording and
# apply a common median reference (CMR).
# All these preprocessing steps are "lazy": the computation is performed on
# demand when we call :code:`recording.get_traces(...)` or when we save the
# object to disk.

recording_f = st.bandpass_filter(recording, freq_min=300, freq_max=6000)
print(recording_f)
recording_cmr = st.common_reference(recording_f, reference='global', operator='median')
print(recording_cmr)

# this computes and saves the recording after applying the preprocessing chain
recording_preprocessed = recording_cmr.save(format='binary')
print(recording_preprocessed)
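
##############################################################################
# The same chain can also be saved to an explicit folder and reloaded later
# without recomputing anything. A minimal sketch, assuming :code:`spikeinterface`
# was imported as :code:`si` earlier in the tutorial:

recording_saved = recording_cmr.save(folder='preprocessed', format='binary')
recording_loaded = si.load_extractor('preprocessed')
print(recording_loaded)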

##############################################################################
# Now you are ready to spike sort using the :code:`sorters` module!
# Let's first check which sorters are implemented and which are installed.

print('Available sorters', ss.available_sorters())
print('Installed sorters', ss.installed_sorters())
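
##############################################################################
# A minimal sketch of launching one of the installed sorters on the
# preprocessed recording ('tridesclous' is only an assumed example here;
# substitute any name returned by :code:`ss.installed_sorters()`):

sorting_output = ss.run_sorter('tridesclous', recording_preprocessed,
                               output_folder='results_tdc')
print(sorting_output)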
Example 3
    @classmethod
    def _run_from_folder(cls, output_folder, params, verbose):
        import herdingspikes as hs

        recording = load_extractor(output_folder /
                                   'spikeinterface_recording.json')

        p = params

        # Bandpass filter
        if p['filter'] and p['freq_min'] is not None and p['freq_max'] is not None:
            recording = st.bandpass_filter(recording=recording,
                                           freq_min=p['freq_min'],
                                           freq_max=p['freq_max'])

        if p['pre_scale']:
            recording = st.normalize_by_quantile(recording=recording,
                                                 scale=p['pre_scale_value'],
                                                 median=0.0,
                                                 q1=0.05,
                                                 q2=0.95)

        print('Herdingspikes uses the OLD spikeextractors API '
              'with RecordingExtractorOldAPI')
        old_api_recording = RecordingExtractorOldAPI(recording)

        # note: 'RecordingExtractor' here is herdingspikes' probe class;
        # the name should be changed
        Probe = hs.probe.RecordingExtractor(
            old_api_recording,
            masked_channels=p['probe_masked_channels'],
            inner_radius=p['probe_inner_radius'],
            neighbor_radius=p['probe_neighbor_radius'],
            event_length=p['probe_event_length'],
            peak_jitter=p['probe_peak_jitter'])

        H = hs.HSDetection(Probe,
                           file_directory_name=str(output_folder),
                           left_cutout_time=p['left_cutout_time'],
                           right_cutout_time=p['right_cutout_time'],
                           threshold=p['detect_threshold'],
                           to_localize=True,
                           num_com_centers=p['num_com_centers'],
                           maa=p['maa'],
                           ahpthr=p['ahpthr'],
                           out_file_name=p['out_file_name'],
                           decay_filtering=p['decay_filtering'],
                           save_all=p['save_all'],
                           amp_evaluation_time=p['amp_evaluation_time'],
                           spk_evaluation_time=p['spk_evaluation_time'])

        H.DetectFromRaw(load=True, tInc=int(p['t_inc']))

        sorted_file = str(output_folder / 'HS2_sorted.hdf5')
        # Build the clustering object; PCA and clustering only run if any
        # spikes were detected.
        C = hs.HSClustering(H)
        if not H.spikes.empty:
            C.ShapePCA(pca_ncomponents=p['pca_ncomponents'],
                       pca_whiten=p['pca_whiten'])
            C.CombinedClustering(alpha=p['clustering_alpha'],
                                 cluster_subset=p['clustering_subset'],
                                 bandwidth=p['clustering_bandwidth'],
                                 bin_seeding=p['clustering_bin_seeding'],
                                 n_jobs=p['clustering_n_jobs'],
                                 min_bin_freq=p['clustering_min_bin_freq'])

        if p['filter_duplicates']:
            # Within each cluster, drop spikes that follow the previous one by
            # less than spk_evaluation_time (converted from ms to samples).
            for u in C.spikes.cl.unique():
                too_close = (C.spikes[C.spikes.cl == u].t.diff()
                             < p['spk_evaluation_time'] / 1000 * Probe.fps)
                C.spikes = C.spikes.drop(too_close.index[too_close])

        if verbose:
            print('Saving to', sorted_file)
        C.SaveHDF5(sorted_file, sampling=Probe.fps)
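
The HS2 output can be read back as a sorting object. A minimal sketch, assuming
spikeinterface.extractors exposes HerdingspikesSortingExtractor for HS2 HDF5
files (adjust the import to your version):

from spikeinterface.extractors import HerdingspikesSortingExtractor

# reload the result written by SaveHDF5 above
sorting = HerdingspikesSortingExtractor(sorted_file)
print(sorting.get_unit_ids())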