Example #1
0
def test_run_sorters_multiprocessing():
    """Run two sorters on eight toy recordings with the multiprocessing engine."""
    # Build eight dumpable toy recordings keyed 'rec_0' ... 'rec_7'.
    recording_dict = {
        f'rec_{index}': se.example_datasets.toy_example(num_channels=8,
                                                        duration=30,
                                                        seed=0,
                                                        dumpable=True)[0]
        for index in range(8)
    }

    sorter_list = ['tridesclous', 'klusta']

    # Start from a clean output folder.
    working_folder = 'test_run_sorters_mp'
    if os.path.exists(working_folder):
        shutil.rmtree(working_folder)

    # Time the run across 4 worker processes.
    start = time.perf_counter()
    run_sorters(sorter_list,
                recording_dict,
                working_folder,
                engine='multiprocessing',
                engine_kwargs={'processes': 4})
    print(time.perf_counter() - start)
Example #2
0
def run_study_sorters(study_folder,
                      sorter_list,
                      sorter_params=None,
                      mode='keep',
                      engine='loop',
                      engine_kargs=None,
                      verbose=False):
    """
    Run all sorters on all recordings of a study.

    Wrapper on top of st.sorter.run_sorters(...).

    Parameters
    ----------
    study_folder: str
        The study folder.

    sorter_list: list of str
        Names of the sorters to run.

    sorter_params: dict of dict with sorter_name as key, optional
        Allows overwriting the default params of a sorter.

    mode: 'raise_if_exists' or 'overwrite' or 'keep'
        Behaviour when the recording/sorter subfolder already exists:
            * 'raise' : raise an error if the subfolder exists
            * 'overwrite' : force recompute
            * 'keep' : do not compute again if the subfolder exists and the log is OK

    engine: str
        'loop' or 'multiprocessing'

    engine_kargs: dict, optional
        Kwargs specific to the launcher engine:
            * 'loop' : no kwargs
            * 'multiprocessing' : {'processes': int} number of processes

    verbose: bool
        If True, print progress information.
    """
    # None defaults avoid the shared-mutable-default-argument pitfall.
    if sorter_params is None:
        sorter_params = {}
    if engine_kargs is None:
        engine_kargs = {}

    study_folder = Path(study_folder)
    sorter_folders = study_folder / 'sorter_folders'

    recording_dict = get_recordings(study_folder)

    run_sorters(sorter_list,
                recording_dict,
                sorter_folders,
                sorter_params=sorter_params,
                grouping_property=None,
                mode=mode,
                engine=engine,
                engine_kargs=engine_kargs,
                with_output=False,
                verbose=verbose)

    # Results are copied to npz files so the heavy sorter_folders can be removed.
    copy_sortings_to_npz(study_folder)
Example #3
0
def test_run_sorters_with_list():
    """Run one sorter on a plain list (not a dict) of toy recordings."""
    tetrode_rec, _ = se.example_datasets.toy_example(num_channels=4,
                                                     duration=30,
                                                     seed=0)
    octotrode_rec, _ = se.example_datasets.toy_example(num_channels=8,
                                                       duration=30,
                                                       seed=0)

    # Start from a clean output folder.
    working_folder = 'test_run_sorters_list'
    if os.path.exists(working_folder):
        shutil.rmtree(working_folder)

    run_sorters(['tridesclous'],
                [tetrode_rec, octotrode_rec],
                working_folder,
                verbose=False)
Example #4
0
def test_run_sorters_with_dict():
    """Run two sorters on a dict of recordings, then re-run with mode='keep'."""
    rec_tetrode, _ = se.example_datasets.toy_example(num_channels=4,
                                                     duration=30,
                                                     seed=0)
    rec_octotrode, _ = se.example_datasets.toy_example(num_channels=8,
                                                       duration=30,
                                                       seed=0)
    recording_dict = {'toy_tetrode': rec_tetrode, 'toy_octotrode': rec_octotrode}

    sorter_list = ['tridesclous', 'herdingspikes']

    # Per-sorter overrides of the default detection threshold.
    sorter_params = {
        'tridesclous': dict(detect_threshold=5.6),
        'herdingspikes': dict(detect_threshold=20.1),
    }

    # Start from a clean output folder.
    working_folder = 'test_run_sorters_dict'
    if os.path.exists(working_folder):
        shutil.rmtree(working_folder)

    # First pass: plain sequential loop, timed.
    start = time.perf_counter()
    results = run_sorters(sorter_list,
                          recording_dict,
                          working_folder,
                          sorter_params=sorter_params,
                          engine=None)
    print(time.perf_counter() - start)
    print(results)

    # Delete one result folder and re-run with mode='keep': only the removed
    # recording/sorter pair should be recomputed.
    shutil.rmtree(working_folder + '/toy_tetrode/tridesclous')
    results = run_sorters(sorter_list,
                          recording_dict,
                          working_folder,
                          engine=None,
                          sorter_params=sorter_params,
                          mode='keep')
Example #5
0
def test_run_sorters_dask():
    """Run a sorter through a dask client backed by a SLURM cluster."""
    from dask.distributed import Client
    from dask_jobqueue import SLURMCluster

    # Spin up a SLURM-backed dask cluster scaled to 5 workers.
    python = '/home/samuel.garcia/.virtualenvs/py36/bin/python3.6'
    cluster = SLURMCluster(processes=1,
                           cores=1,
                           memory="12GB",
                           python=python,
                           walltime='12:00:00')
    cluster.scale(5)
    client = Client(cluster)

    # Eight dumpable toy recordings keyed 'rec_0' ... 'rec_7'.
    recording_dict = {
        f'rec_{index}': se.example_datasets.toy_example(num_channels=8,
                                                        duration=30,
                                                        seed=0,
                                                        dumpable=True)[0]
        for index in range(8)
    }

    sorter_list = ['tridesclous']

    # Start from a clean output folder.
    working_folder = 'test_run_sorters_dask'
    if os.path.exists(working_folder):
        shutil.rmtree(working_folder)

    start = time.perf_counter()
    results = run_sorters(sorter_list,
                          recording_dict,
                          working_folder,
                          engine='dask',
                          engine_kwargs={'client': client},
                          with_output=True)
    # The dask engine does not return results: it is always None.
    assert results is None
    print(time.perf_counter() - start)
Example #6
0
def run_study_sorters(study_folder,
                      sorter_list,
                      sorter_params=None,
                      mode='keep',
                      engine='loop',
                      engine_kwargs=None,
                      verbose=False,
                      run_sorter_kwargs=None):
    """
    Run all sorters on all recordings of a study.

    Wrapper on top of st.sorter.run_sorters(...).

    Parameters
    ----------
    study_folder: str
        The study folder.

    sorter_list: list of str
        Names of the sorters to run.

    sorter_params: dict of dict with sorter_name as key, optional
        Allows overwriting the default params of a sorter.

    mode: 'raise_if_exists' or 'overwrite' or 'keep'
        Behaviour when the recording/sorter subfolder already exists:
            * 'raise' : raise an error if the subfolder exists
            * 'overwrite' : force recompute
            * 'keep' : do not compute again if the subfolder exists and the log is OK

    engine: str
        'loop', 'multiprocessing', or 'dask'

    engine_kwargs: dict, optional
        Kwargs specific to the launcher engine:
            * 'loop' : no kwargs
            * 'multiprocessing' : {'processes': int} number of processes
            * 'dask' : {'client':} the dask client for submitting tasks

    verbose: bool
        If True, print progress information.

    run_sorter_kwargs: dict, optional
        Kwargs specific to the run_sorter function:
            * 'raise_error' : bool
            * 'parallel' : bool
            * 'n_jobs' : int
            * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading'
    """
    # None defaults avoid the shared-mutable-default-argument pitfall.
    if sorter_params is None:
        sorter_params = {}
    if engine_kwargs is None:
        engine_kwargs = {}
    if run_sorter_kwargs is None:
        run_sorter_kwargs = {}

    study_folder = Path(study_folder)
    sorter_folders = study_folder / 'sorter_folders'

    recording_dict = get_recordings(study_folder)

    run_sorters(sorter_list,
                recording_dict,
                sorter_folders,
                sorter_params=sorter_params,
                grouping_property=None,
                mode=mode,
                engine=engine,
                engine_kwargs=engine_kwargs,
                with_output=False,
                verbose=verbose,
                run_sorter_kwargs=run_sorter_kwargs)

    # Results are copied to npz files so the heavy sorter_folders can be removed.
    copy_sortings_to_npz(study_folder)