Beispiel #1
0
                          load_if_exists=True,
                          ms_before=1,
                          ms_after=2.,
                          max_spikes_per_unit=500,
                          n_jobs=1,
                          chunk_size=30000)
print(we)

##############################################################################
# The :code:`spikeinterface.toolkit.qualitymetrics` submodule has a set of functions that allow users to compute
# metrics in a compact and easy way. To compute a single metric, one can simply run one of the
# quality metric functions as shown below. Each function has a variety of adjustable parameters that can be tuned.
# (Per the module docs quoted later in this file, these functions return a
# dictionary with the unit ids as keys.)

firing_rates = st.compute_firing_rate(we)
print(firing_rates)
# NOTE(review): here compute_isi_violations is unpacked into three values,
# while the other example in this file unpacks only two — confirm which
# signature matches the installed spikeinterface version.
isi_violation_ratio, isi_violations_rate, isi_violations_count = st.compute_isi_violations(
    we)
print(isi_violation_ratio)
snrs = st.compute_snrs(we)
print(snrs)

##############################################################################
# Some metrics are based on the principal component scores, so they require a
# :code:`WaveformsPrincipalComponent` object as input:

pc = st.compute_principal_components(we,
                                     load_if_exists=True,
                                     n_components=3,
                                     mode='by_channel_local')
print(pc)

pc_metrics = st.calculate_pc_metrics(pc, metric_names=['nearest_neighbor'])
Beispiel #2
0
##############################################################################
# Then you can run the template-gui with: :code:`phy template-gui phy/params.py`
# and manually curate the results.


##############################################################################
# Quality metrics for the spike sorting output are very important to assess the spike sorting performance.
# The :code:`spikeinterface.toolkit.qualitymetrics` module implements several quality metrics
# to assess the goodness of sorted units. Among those, for example,
# are signal-to-noise ratio, ISI violation ratio, isolation distance, and many more.
# These metrics are built on top of the WaveformExtractor class and return a dictionary with the unit ids as keys:

snrs = st.compute_snrs(we_TDC)
print(snrs)
isi_violations_rate, isi_violations_count = st.compute_isi_violations(we_TDC, isi_threshold_ms=1.5)
print(isi_violations_rate)
print(isi_violations_count)

##############################################################################
# All these quality metrics can be computed in one shot and returned as
# a :code:`pandas.Dataframe`

metrics = st.compute_quality_metrics(we_TDC, metric_names=['snr', 'isi_violation', 'amplitude_cutoff'])
print(metrics)

##############################################################################
# Quality metrics can also be used to automatically curate the spike sorting
# output. For example, you can select sorted units with a SNR above a
# certain threshold:
Beispiel #3
0
def _do_recovery_loop(task_args):
    """Run one unit-recovery task for a (recording, sorter) pair.

    Reloads the previously saved recording and sorting from
    ``output_folder / 'back_recording'``, selects "good" units (against
    ground truth when ``compare`` is True, otherwise by ISI-violation and
    firing-rate thresholds), subtracts the selected units' templates from
    the recording, runs an ICA-based sorter on the residual, cleans
    correlated ICA sources, and saves the back-projected recording.

    Parameters
    ----------
    task_args : tuple
        ``(key, well_detected_score, isi_thr, fr_thr, sample_window_ms,
        percentage_spikes, balance_spikes, detect_threshold, method,
        skew_thr, n_jobs, we_params, compare, output_folder, job_kwargs)``.
        ``key`` is a pair of folder-name strings (presumably
        (recording name, sorter name) — confirm against the caller);
        ``fr_thr`` is a (low, high) firing-rate window; ``we_params`` is a
        dict of ``extract_waveforms`` options; ``job_kwargs`` is forwarded
        to the chunked/parallel helpers.

    Returns
    -------
    None
        The result is persisted under
        ``output_folder / 'back_recording' / key[0] / key[1]``.
    """
    # Single-tuple argument: this function is shaped as a pool/map task.
    key, well_detected_score, isi_thr, fr_thr, sample_window_ms, \
    percentage_spikes, balance_spikes, detect_threshold, method, skew_thr, n_jobs, we_params, compare, \
    output_folder, job_kwargs = task_args
    # NOTE(review): the recording is loaded from .../key[1]/key[0] while the
    # sorting below is loaded from .../key[0]/key[1] — confirm this index
    # swap is intentional and matches the layout written by the caller.
    recording = load_extractor(output_folder / 'back_recording' / key[1] /
                               key[0])
    if compare is True:
        # Ground-truth sorting is stored alongside with a '_gt' suffix.
        gt = load_extractor(output_folder / 'back_recording' / key[1] /
                            (key[0] + '_gt'))
    else:
        gt = None
    # Pre-curation sorting output ('_pre' suffix).
    sorting = load_extractor(output_folder / 'back_recording' / key[0] /
                             (key[1] + '_pre'))
    # Re-extract waveforms with the caller-supplied options; overwrite=True
    # discards any stale waveform folder from a previous run.
    we = extract_waveforms(
        recording,
        sorting,
        folder=output_folder / 'waveforms' / key[0] / key[1],
        load_if_exists=we_params['load_if_exists'],
        ms_before=we_params['ms_before'],
        ms_after=we_params['ms_after'],
        max_spikes_per_unit=we_params['max_spikes_per_unit'],
        return_scaled=we_params['return_scaled'],
        dtype=we_params['dtype'],
        overwrite=True,
        **job_kwargs)
    if gt is not None:
        # Ground truth available: keep only units the comparison marks as
        # well detected at the requested agreement score.
        comparison = sc.compare_sorter_to_ground_truth(tested_sorting=sorting,
                                                       gt_sorting=gt)
        selected_units = comparison.get_well_detected_units(
            well_detected_score)
        print(key[1][:-1])  # NOTE(review): looks like a leftover debug print
        if key[1] == 'hdsort':
            # hdsort unit ids appear to carry a +1000 offset that is undone
            # here — TODO confirm against the hdsort wrapper.
            selected_units = [unit - 1000 for unit in selected_units]
    else:
        # No ground truth: keep units whose ISI-violation metric is below
        # isi_thr and whose firing rate lies inside (fr_thr[0], fr_thr[1]).
        isi_violation = st.compute_isi_violations(we)[0]
        good_isi = np.argwhere(
            np.array(list(isi_violation.values())) < isi_thr)[:, 0]

        firing_rate = st.compute_firing_rate(we)
        good_fr_idx_up = np.argwhere(
            np.array(list(firing_rate.values())) < fr_thr[1])[:, 0]
        good_fr_idx_down = np.argwhere(
            np.array(list(firing_rate.values())) > fr_thr[0])[:, 0]

        # good_* hold positions in the metric arrays; this intersection
        # assumes unit ids enumerate 0..num_units-1 in the same order as the
        # metric dicts — TODO confirm.
        selected_units = [
            unit for unit in range(sorting.get_num_units())
            if unit in good_fr_idx_up and unit in good_fr_idx_down
            and unit in good_isi
        ]

    # Map each selected unit id (as a string key) to its template.
    # NOTE(review): ``templates[unit - 1]`` treats unit ids as 1-based row
    # indices, while the no-ground-truth branch above generates 0-based ids —
    # confirm this is consistent for both branches.
    templates = we.get_all_templates()
    templates_dict = {
        str(unit): templates[unit - 1]
        for unit in selected_units
    }

    # Subtract the selected units' templates from the raw traces, leaving a
    # residual recording to mine for missed units.
    recording_subtracted = subtract_templates(recording, sorting,
                                              templates_dict, we.nbefore,
                                              selected_units)

    # Run the ICA-based sorter on the residual.
    sorter = SpyICASorter(recording_subtracted)
    sorter.mask_traces(sample_window_ms=sample_window_ms,
                       percent_spikes=percentage_spikes,
                       balance_spikes_on_channel=balance_spikes,
                       detect_threshold=detect_threshold,
                       method=method,
                       **job_kwargs)
    sorter.compute_ica(n_comp='all')
    # cleaning_result[0]: indices of ICA sources to keep;
    # cleaning_result[1]: indices of sources whose sign is flipped below.
    # chunk_size splits the whole trace evenly across the n_jobs workers.
    cleaning_result = clean_correlated_sources(
        recording,
        sorter.W_ica,
        skew_thresh=skew_thr,
        n_jobs=n_jobs,
        chunk_size=recording.get_num_samples(0) // n_jobs,
        **job_kwargs)
    # Flip sign of the flagged components in both mixing (A) and unmixing (W)
    # matrices, then keep only the retained sources.
    sorter.A_ica[cleaning_result[1]] = -sorter.A_ica[cleaning_result[1]]
    sorter.W_ica[cleaning_result[1]] = -sorter.W_ica[cleaning_result[1]]
    sorter.source_idx = cleaning_result[0]
    sorter.cleaned_A_ica = sorter.A_ica[cleaning_result[0]]
    sorter.cleaned_W_ica = sorter.W_ica[cleaning_result[0]]

    # Project the subtracted recording into the cleaned ICA space and back,
    # then persist the reconstructed recording for the next pipeline stage.
    ica_recording = st.preprocessing.lin_map(recording_subtracted,
                                             sorter.cleaned_W_ica)
    recording_back = st.preprocessing.lin_map(ica_recording,
                                              sorter.cleaned_A_ica.T)
    recording_back.save_to_folder(folder=output_folder / 'back_recording' /
                                  key[0] / key[1])