Example #1
def test_run_sorter_singularity():
    mearec_filename = download_dataset(remote_path='mearec/mearec_test_10s.h5',
                                       unlock=True)
    output_folder = 'sorting_tdc_singularity'

    recording, sorting_true = read_mearec(mearec_filename)

    sorter_params = {'detect_threshold': 4.9}

    singularity_image = 'spikeinterface/tridesclous-base:1.6.4-1'

    sorting = run_sorter('tridesclous',
                         recording,
                         output_folder=output_folder,
                         remove_existing_folder=True,
                         delete_output_folder=False,
                         verbose=True,
                         raise_error=True,
                         singularity_image=singularity_image,
                         **sorter_params)
    print(sorting)

    # basic check to confirm sorting was successful
    assert 'Tridesclous' in sorting.to_dict()['class']
    assert len(sorting.get_unit_ids()) > 0
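
    # Hedged extension (an assumption, not part of the original test): compare
    # against the ground-truth sorting loaded above, which is otherwise unused.
    from spikeinterface.comparison import compare_sorter_to_ground_truth
    cmp_gt = compare_sorter_to_ground_truth(sorting_true, sorting)
    print(cmp_gt.get_performance())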
Example #2
def test_run_sorter_local():
    local_path = download_dataset(remote_path='mearec/mearec_test_10s.h5')
    recording, sorting_true = read_mearec(local_path)

    sorter_params = {'detect_threshold': 4.9}

    sorting = run_sorter('tridesclous', recording, output_folder='sorting_tdc_local',
                         remove_existing_folder=True, delete_output_folder=False,
                         verbose=True, raise_error=True, docker_image=None,
                         **sorter_params)
    print(sorting)
Example #3
    def test_with_run(self):
        # some sorters (TDC, KS, KS2, ...) work by default with the raw binary
        # format as input, to avoid a copy when the recording is already in this format

        recording = self.recording

        sorter_name = self.SorterClass.sorter_name

        sorter_params = self.SorterClass.default_params()

        sorting = run_sorter(sorter_name, recording, output_folder=None,
                             remove_existing_folder=True, delete_output_folder=False,
                             verbose=False, raise_error=True, **sorter_params)

        del sorting
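
# A hedged aside (assumptions noted): the generic machinery above can also be
# driven by name through module-level helpers. `installed_sorters()` and
# `get_default_params()` are from the classic spikesorters API; newer
# SpikeInterface releases expose similar helpers in spikeinterface.sorters.
import spikesorters as ss

for name in ss.installed_sorters():
    print(name, ss.get_default_params(name))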
Example #4
def test_run_sorter_docker():
    mearec_filename = download_dataset(remote_path='mearec/mearec_test_10s.h5',
                                       unlock=True)
    output_folder = 'sorting_tdc_docker'

    recording, sorting_true = read_mearec(mearec_filename)

    sorter_params = {'detect_threshold': 4.9}

    docker_image = 'spikeinterface/tridesclous-base:1.6.4-1'

    sorting = run_sorter('tridesclous',
                         recording,
                         output_folder=output_folder,
                         remove_existing_folder=True,
                         delete_output_folder=False,
                         verbose=True,
                         raise_error=True,
                         docker_image=docker_image,
                         with_output=False,
                         **sorter_params)
    assert sorting is None
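
    # Hedged follow-up (an assumption about the installed version): with
    # `with_output=False` nothing is returned, but the result can typically be
    # loaded from disk afterwards via the sorter class, e.g.:
    #   from spikeinterface.sorters import TridesclousSorter
    #   sorting = TridesclousSorter.get_result_from_folder(output_folder)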
Example #5
##############################################################################
# First, let's create the usual toy example:

recording, sorting_true = se.example_datasets.toy_example(duration=10, seed=0)

##############################################################################
# The launcher enables calling any spike sorter through the same functions: :code:`run_sorter` and :code:`run_sorters`.
# To run multiple sorters on the same recording extractor, or on a collection of them, the :code:`run_sorters`
# function can be used.
#
# Let's first see how to run a single sorter, for example, Klusta:

# The sorter name can now be a parameter, e.g. chosen with a command line interface or a GUI
sorter_name = 'klusta'
sorting_KL = ss.run_sorter(sorter_name_or_class=sorter_name,
                           recording=recording,
                           output_folder='my_sorter_output')
print(sorting_KL.get_unit_ids())

##############################################################################
# This will launch the klusta sorter on the recording object.
#
# You can also run multiple sorters on the same recording:

recording_list = [recording]
sorter_list = ['klusta', 'mountainsort4', 'tridesclous']
sorting_output = ss.run_sorters(sorter_list,
                                recording_list,
                                working_folder='tmp_some_sorters',
                                mode='overwrite')
Example #6
# We can load 'group' information using the '.prb' file:

recording_tetrodes = recording_tetrodes.load_probe_file('tetrode_16.prb')
print(recording_tetrodes.get_shared_channel_property_names())

##############################################################################
# We can now use the launcher to spike sort by the property 'group'. The different groups can also be sorted in
# parallel, and the output sorting extractor will have the same property used for sorting. Running in parallel
# (in separate threads) can speed up the computations.
#
# Let's first run the four channel groups sequentially:

t_start = time.time()
sorting_tetrodes = ss.run_sorter('klusta',
                                 recording_tetrodes,
                                 output_folder='tmp_tetrodes',
                                 grouping_property='group',
                                 parallel=False)
print('Elapsed time: ', time.time() - t_start)

##############################################################################
# then in parallel:

t_start = time.time()
sorting_tetrodes_p = ss.run_sorter('klusta',
                                   recording_tetrodes,
                                   output_folder='tmp_tetrodes_par',
                                   grouping_property='group',
                                   parallel=True)
print('Elapsed time parallel: ', time.time() - t_start)
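
##############################################################################
# As noted above, the output sorting extractor keeps the property used for
# sorting. A minimal hedged sketch of checking it (classic spikeextractors
# API assumed, matching the extractors used in this example):

for unit_id in sorting_tetrodes_p.get_unit_ids():
    print(unit_id, sorting_tetrodes_p.get_unit_property(unit_id, 'group'))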
Example #7
def run(location,
        sorter="klusta",
        output_folder="result",
        verbose=False,
        view=False,
        phy_out_folder="phy",
        remove_last_chan=False,
        do_validate=False,
        do_parallel=False,
        do_plot_waveforms=True,
        transposed=False,
        **sorting_kwargs):
    """
    Run spike interface on a _shuff.bin file.

    if verbose is True prints more information.

    """
    # Do setup
    o_start = time()
    print("Starting the sorting pipeline from bin data on {}".format(
        os.path.basename(location)))
    in_dir = os.path.dirname(location)
    o_dir = os.path.join(in_dir, output_folder)
    print("Writing result to {}".format(o_dir))
    probe_loc = os.path.join(o_dir, "channel_map.prb")

    # Load the recording data
    start_time = time()
    if transposed:
        time_axis = 0
    else:
        time_axis = 1
    recording = se.BinDatRecordingExtractor(file_path=location,
                                            offset=0,
                                            dtype=np.int16,
                                            sampling_frequency=48000,
                                            numchan=64,
                                            time_axis=time_axis)
    recording_prb = recording.load_probe_file(probe_loc)
    get_info(recording, probe_loc)
    plot_trace(recording_prb, o_dir)

    # Do the pre-processing pipeline
    print("Running preprocessing")
    preproc_recording = st.preprocessing.bandpass_filter(recording_prb,
                                                         freq_min=300,
                                                         freq_max=6000)
    if remove_last_chan:
        bad_chans = [
            i for i in range(3, 64, 4)
            if i in preproc_recording.get_channel_ids()
        ]
        print("Removing {}".format(bad_chans))
        preproc_recording = st.preprocessing.remove_bad_channels(
            preproc_recording, bad_channel_ids=bad_chans)
        print('Channel ids after preprocess:',
              preproc_recording.get_channel_ids())
        print('Channel groups after preprocess:',
              preproc_recording.get_channel_groups())
        chans_per_tet = 3
    else:
        chans_per_tet = 4

    # Get sorting params and run the sorting
    params = custom_default_params_list(sorter, check=False)
    for k, v in sorting_kwargs.items():
        params[k] = v
    print("Loaded and preprocessed data in {:.2f}s".format(time() -
                                                           start_time))
    start_time = time()
    print("Running {} with parameters {}".format(sorter, params))
    sorted_s = ss.run_sorter(sorter,
                             preproc_recording,
                             grouping_property="group",
                             output_folder=o_dir,
                             parallel=do_parallel,
                             verbose=verbose,
                             **params)
    print("Sorted in {:.2f}mins".format((time() - start_time) / 60.0))

    # Some validation statistics
    if do_validate:
        print("Spike sorting completed, running validation")
        start_time = time()
        sorting_curated_snr = validation_fn(recording, sorted_s)
        print("Validated in {:.2f}mins".format((time() - start_time) / 60.0))
    else:
        sorting_curated_snr = sorted_s

    # Export the result to phy for manual curation
    if (len(sorting_curated_snr.get_unit_ids()) == 0):
        print("Found no units in sorting, quitting now")
        return

    start_time = time()
    phy_out = os.path.join(in_dir, phy_out_folder)
    print("Exporting to phy")
    st.postprocessing.export_to_phy(recording,
                                    sorting_curated_snr,
                                    output_folder=phy_out,
                                    grouping_property='group',
                                    verbose=verbose,
                                    ms_before=0.2,
                                    ms_after=0.8,
                                    dtype=None,
                                    max_channels_per_template=8,
                                    max_spikes_for_pca=5000)
    print("Exported in {:.2f}s".format(time() - start_time))
    pipeline_time = (time() - o_start) / 60.0
    print("Whole pipeline took {:.2f}mins".format(pipeline_time))

    do_plot = False
    print("Showing some extra information")
    start_time = time()
    unit_ids = sorting_curated_snr.get_unit_ids()
    print("Found", len(unit_ids), 'units')
    if do_plot_waveforms:
        print("Plotting waveforms (can set this off in config)")
        plot_all_forms(sorting_curated_snr,
                       recording_prb,
                       o_dir,
                       channels_per_group=chans_per_tet)
        print("Summarised recording in {:.2f}mins".format(
            (time() - start_time) / 60.0))

    phy_final = os.path.join(phy_out, "params.py")
    if view:
        subprocess.run(["phy", "template-gui", phy_final])
    else:
        print("To view the data in phy, run: phy template-gui {}".format(
            phy_final))
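
# Hypothetical usage sketch for the pipeline above (the file path and the
# keyword arguments shown are assumptions for illustration):
#
#     run("/path/to/recording_shuff.bin", sorter="klusta", verbose=True,
#         remove_last_chan=True, do_parallel=False, view=False)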
Example #8
# Let's cache this recording to make it "dumpable"

recording = recording.save(name='toy')
print(recording)

##############################################################################
# The launcher enables calling any spike sorter through the same functions: :code:`run_sorter` and :code:`run_sorters`.
# To run multiple sorters on the same recording extractor, or on a collection of them, the :code:`run_sorters`
# function can be used.
#
# Let's first see how to run a single sorter, for example, HerdingSpikes:

# The sorter name can now be a parameter, e.g. chosen with a command line interface or a GUI
sorter_name = 'herdingspikes'
sorting_HS = ss.run_sorter(sorter_name=sorter_name,
                           recording=recording,
                           output_folder='my_sorter_output',
                           clustering_bandwidth=8)
print(sorting_HS.get_unit_ids())

##############################################################################
#
# You can also run multiple sorters on the same recording:

recordings = {'toy': recording}
sorter_list = ['herdingspikes', 'tridesclous']
sorter_params = {'herdingspikes': {'clustering_bandwidth': 8}}
sorting_output = ss.run_sorters(sorter_list,
                                recordings,
                                working_folder='tmp_some_sorters',
                                mode_if_folder_exists='overwrite',
                                sorter_params=sorter_params)
Example #9
##############################################################################
# Let's cache this recording to make it "dumpable"

recording = recording.save(name='toy')
print(recording)

##############################################################################
# The launcher enables calling any spike sorter through the same functions: :code:`run_sorter` and :code:`run_sorters`.
# To run multiple sorters on the same recording extractor, or on a collection of them, the :code:`run_sorters`
# function can be used.
#
# Let's first see how to run a single sorter, for example, HerdingSpikes:

# The sorter name can now be a parameter, e.g. chosen with a command line interface or a GUI
sorter_name = 'herdingspikes'
sorting_HS = ss.run_sorter(sorter_name=sorter_name, recording=recording, output_folder='my_sorter_output')
print(sorting_HS.get_unit_ids())

##############################################################################
#
# You can also run multiple sorters on the same recording:

recordings = {'toy': recording}
sorter_list = ['herdingspikes', 'tridesclous']
sorting_output = ss.run_sorters(sorter_list, recordings, working_folder='tmp_some_sorters', mode_if_folder_exists='overwrite')

##############################################################################
# The 'mode_if_folder_exists' argument allows one to 'overwrite' the 'working_folder' (if it exists), 'raise' an
# Exception, or 'keep' the folder and skip the spike sorting run.
#
# The 'sorting_output' is a dictionary that has (recording, sorter) pairs as keys and the corresponding
# :code:`SortingExtractor` objects as values.
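
##############################################################################
# A minimal hedged sketch of accessing the output, relying only on the
# (recording, sorter) key convention stated above:

for (rec_name, sorter_name), sorting in sorting_output.items():
    print(rec_name, sorter_name, ':', sorting.get_unit_ids())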