def kilosort2(recording, sorting_out):
    """Sort a recording with Kilosort2 using a fixed parameter set.

    The sorting result is written to ``sorting_out`` via
    ``AutoSortingExtractor.write_sorting``.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosort2sorter import Kilosort2Sorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rec = AutoRecordingExtractor(dict(path=recording), download=True)
    # rec = se.SubRecordingExtractor(parent_recording=rec, start_frame=0, end_frame=30000 * 10)

    # Sorting
    print('Sorting...')
    ks2 = Kilosort2Sorter(
        recording=rec,
        output_folder='/tmp/tmp_kilosort2_' + _random_string(8),
        delete_output_folder=True
    )
    ks2.set_params(
        detect_sign=-1,
        detect_threshold=5,
        freq_min=150,
        pc_per_chan=3
    )
    elapsed = ks2.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=ks2.get_result(), save_path=sorting_out)
def tridesclous(recording_path, sorting_out):
    """Sort a recording with Tridesclous (default sorter parameters).

    Parameters
    ----------
    recording_path :
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out :
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    """
    # Fix: dropped the unused `import spiketoolkit as st` (nothing in this
    # function references it) and deleted the stale commented-out NUM_WORKERS code.
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    output_folder = '/tmp/tmp_tridesclous_' + _random_string(8)
    # NOTE(review): HS2_PROBE_PATH is a HerdingSpikes setting; setting it here
    # looks copy-pasted from herdingspikes2 — harmless, kept for behavior
    # compatibility, but confirm it is actually needed for Tridesclous.
    os.environ['HS2_PROBE_PATH'] = output_folder  # important for when we are in a container
    sorter = ss.TridesclousSorter(
        recording=recording,
        output_folder=output_folder,
        delete_output_folder=True,
        verbose=True,
    )
    sorter.set_params()
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def mountainsort4(recording: str, sorting_out: str) -> None:
    """Bandpass-filter, whiten, and sort a recording with MountainSort4.

    Fix: the return annotation was ``-> str`` but the function returns nothing;
    corrected to ``-> None`` (no runtime behavior change).

    Parameters
    ----------
    recording : str
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out : str
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    """
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)
    # recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=0, end_frame=30000 * 10)

    # Preprocessing
    print('Preprocessing...')
    recording = st.preprocessing.bandpass_filter(recording, freq_min=300, freq_max=6000)
    recording = st.preprocessing.whiten(recording)

    # Sorting
    print('Sorting...')
    sorter = ss.Mountainsort4Sorter(
        recording=recording,
        output_folder='/tmp/tmp_mountainsort4_' + _random_string(8),
        delete_output_folder=True
    )
    sorter.set_params(detect_sign=-1, adjacency_radius=50, detect_threshold=4)
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def waveclus(recording_path, sorting_out):
    """Sort a recording with WaveClus using the sorter's default parameters,
    writing the result to ``sorting_out``.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._waveclussorter import WaveclusSorter

    rec = AutoRecordingExtractor(dict(path=recording_path), download=True)
    # rec = se.SubRecordingExtractor(parent_recording=rec, start_frame=0, end_frame=30000 * 10)

    # Sorting
    print('Sorting...')
    wc = WaveclusSorter(
        recording=rec,
        output_folder='/tmp/tmp_waveclus_' + _random_string(8),
        delete_output_folder=True
    )
    wc.set_params()
    elapsed = wc.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=wc.get_result(), save_path=sorting_out)
def herdingspikes2(recording_path, sorting_out, filter=True, pre_scale=True, pre_scale_value=20):
    """Sort a recording with HerdingSpikes2.

    Fix: dropped the unused ``import spiketoolkit as st`` (nothing in this
    function references it).

    Parameters
    ----------
    recording_path :
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out :
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    filter, pre_scale, pre_scale_value :
        Forwarded to ``HerdingspikesSorter.set_params``.
    """
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    output_folder = '/tmp/tmp_herdingspikes2_' + _random_string(8)
    os.environ['HS2_PROBE_PATH'] = output_folder  # important for when we are in a container
    sorter = ss.HerdingspikesSorter(
        recording=recording,
        output_folder=output_folder,
        delete_output_folder=True
    )
    # NUM_WORKERS env var controls clustering parallelism; defaults to 1 worker.
    num_workers = os.environ.get('NUM_WORKERS', None)
    if not num_workers:
        num_workers = '1'
    num_workers = int(num_workers)
    sorter.set_params(
        filter=filter,
        pre_scale=pre_scale,
        pre_scale_value=pre_scale_value,
        clustering_n_jobs=num_workers
    )
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def kilosort(
    recording_path,
    sorting_out,
    detect_threshold=6,
    freq_min=300,
    freq_max=6000,
    Nt=128 * 1024 * 5 + 64  # batch size for kilosort
):
    """Sort a recording with KiloSort and write the result to ``sorting_out``.

    NOTE(review): ``Nt`` is accepted but never forwarded to ``set_params``,
    so it currently has no effect — confirm whether KilosortSorter supports
    an ``Nt`` parameter and forward it if so.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosortsorter import KilosortSorter

    rec = AutoRecordingExtractor(dict(path=recording_path), download=True)
    # rec = se.SubRecordingExtractor(parent_recording=rec, start_frame=0, end_frame=30000 * 10)

    # Sorting
    print('Sorting...')
    ks = KilosortSorter(
        recording=rec,
        output_folder='/tmp/tmp_kilosort_' + _random_string(8),
        delete_output_folder=True
    )
    ks.set_params(
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        car=True
    )
    elapsed = ks.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=ks.get_result(), save_path=sorting_out)
def mountainsort4(
    recording_path: str,
    sorting_out: str,
    detect_sign=-1,
    adjacency_radius=50,
    clip_size=50,
    detect_threshold=3,
    detect_interval=10,
    freq_min=300,
    freq_max=6000
):
    """Sort a recording with MountainSort4 (filtering/whitening done by the
    sorter itself via ``filter=True`` / ``whiten=True``).

    Fix: dropped the unused ``import spiketoolkit as st`` — it was only
    referenced from commented-out preprocessing code, which is removed too.

    Parameters
    ----------
    recording_path : str
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out : str
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    detect_sign, adjacency_radius, clip_size, detect_threshold,
    detect_interval, freq_min, freq_max :
        Forwarded to ``Mountainsort4Sorter.set_params``.
    """
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # for quick testing:
    # import spikeextractors as se
    # recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=0, end_frame=30000 * 1)

    # Sorting
    print('Sorting...')
    sorter = ss.Mountainsort4Sorter(
        recording=recording,
        output_folder='/tmp/tmp_mountainsort4_' + _random_string(8),
        delete_output_folder=True
    )
    # NUM_WORKERS env var sets the worker count; 0 when unset (the sorter
    # interprets 0 — presumably auto; confirm against Mountainsort4Sorter).
    num_workers = os.environ.get('NUM_WORKERS', None)
    if num_workers:
        num_workers = int(num_workers)
    else:
        num_workers = 0
    sorter.set_params(
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        clip_size=clip_size,
        detect_threshold=detect_threshold,
        detect_interval=detect_interval,
        num_workers=num_workers,
        curation=False,
        whiten=True,
        filter=True,
        freq_min=freq_min,
        freq_max=freq_max
    )
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def jrclust(
    recording_path,
    sorting_out,
    detect_sign=-1,  # use -1, 0, or 1, depending on the sign of the spikes in the recording
    adjacency_radius=50,
    detect_threshold=4.5,  # detection threshold
    freq_min=300,
    freq_max=3000,
    merge_thresh=0.98,
    pc_per_chan=1,
    filter_type='bandpass',  # {none, bandpass, wiener, fftdiff, ndiff}
    nDiffOrder='none',
    min_count=30,
    fGpu=0,
    fParfor=0,
    feature_type='gpca'  # {gpca, pca, vpp, vmin, vminmax, cov, energy, xcov}
):
    """Sort a recording with JRCLUST, forwarding all keyword parameters to the
    sorter, and write the result to ``sorting_out``.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._jrclustsorter import JRClustSorter

    rec = AutoRecordingExtractor(dict(path=recording_path), download=True)
    # rec = se.SubRecordingExtractor(parent_recording=rec, start_frame=0, end_frame=30000 * 10)

    # Sorting
    print('Sorting...')
    jrc = JRClustSorter(
        recording=rec,
        output_folder='/tmp/tmp_jrclust_' + _random_string(8),
        delete_output_folder=True
    )
    jrc.set_params(
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        merge_thresh=merge_thresh,
        pc_per_chan=pc_per_chan,
        filter_type=filter_type,
        nDiffOrder=nDiffOrder,
        min_count=min_count,
        fGpu=fGpu,
        fParfor=fParfor,
        feature_type=feature_type
    )
    elapsed = jrc.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=jrc.get_result(), save_path=sorting_out)
def ironclust(recording, sorting_out):
    """Sort a recording with IronClust using a fixed parameter set and write
    the result to ``sorting_out``.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._ironclustsorter import IronClustSorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rec = AutoRecordingExtractor(dict(path=recording), download=True)

    # Sorting
    print('Sorting...')
    ic = IronClustSorter(
        recording=rec,
        output_folder='/tmp/tmp_ironclust_' + _random_string(8),
        delete_output_folder=True
    )
    # Fixed IronClust parameter set for this wrapper.
    ic_params = {
        'detect_sign': -1,
        'adjacency_radius': 50,
        'adjacency_radius_out': 75,
        'detect_threshold': 4,
        'prm_template_name': '',
        'freq_min': 300,
        'freq_max': 8000,
        'merge_thresh': 0.99,
        'pc_per_chan': 0,
        'whiten': False,
        'filter_type': 'bandpass',
        'filter_detect_type': 'none',
        'common_ref_type': 'mean',
        'batch_sec_drift': 300,
        'step_sec_drift': 20,
        'knn': 30,
        'min_count': 30,
        'fGpu': True,
        'fft_thresh': 8,
        'fft_thresh_low': 0,
        'nSites_whiten': 32,
        'feature_type': 'gpca',
        'delta_cut': 1,
        'post_merge_mode': 1,
        'sort_mode': 1,
    }
    ic.set_params(**ic_params)
    elapsed = ic.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=ic.get_result(), save_path=sorting_out)
def klusta(
    recording_path,
    sorting_out,
    adjacency_radius=None,
    detect_sign=-1,
    threshold_strong_std_factor=5,
    threshold_weak_std_factor=2,
    n_features_per_channel=3,
    num_starting_clusters=3,
    extract_s_before=16,
    extract_s_after=32
):
    """Sort a recording with Klusta, forwarding all keyword parameters to the
    sorter, and write the result to ``sorting_out``.
    """
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    rec = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    kl = ss.KlustaSorter(
        recording=rec,
        output_folder='/tmp/tmp_klusta_' + _random_string(8),
        delete_output_folder=True
    )
    kl.set_params(
        adjacency_radius=adjacency_radius,
        detect_sign=detect_sign,
        threshold_strong_std_factor=threshold_strong_std_factor,
        threshold_weak_std_factor=threshold_weak_std_factor,
        n_features_per_channel=n_features_per_channel,
        num_starting_clusters=num_starting_clusters,
        extract_s_before=extract_s_before,
        extract_s_after=extract_s_after
    )
    elapsed = kl.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=kl.get_result(), save_path=sorting_out)
def spykingcircus(recording_path, sorting_out, detect_sign=-1, adjacency_radius=200, detect_threshold=6, template_width_ms=3, filter=True, merge_spikes=True, auto_merge=0.75, whitening_max_elts=1000, clustering_max_elts=10000):
    """Sort a recording with SpyKING CIRCUS.

    Fix: dropped the unused ``import spiketoolkit as st`` (nothing in this
    function references it).

    Parameters
    ----------
    recording_path :
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out :
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    detect_sign, adjacency_radius, detect_threshold, template_width_ms,
    filter, merge_spikes, auto_merge, whitening_max_elts, clustering_max_elts :
        Forwarded to ``SpykingcircusSorter.set_params``.
    """
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    sorter = ss.SpykingcircusSorter(
        recording=recording,
        output_folder='/tmp/tmp_spykingcircus_' + _random_string(8),
        delete_output_folder=True
    )
    # NUM_WORKERS env var controls parallelism; defaults to 1 worker.
    num_workers = os.environ.get('NUM_WORKERS', None)
    if not num_workers:
        num_workers = '1'
    num_workers = int(num_workers)
    sorter.set_params(
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        detect_threshold=detect_threshold,
        template_width_ms=template_width_ms,
        filter=filter,
        merge_spikes=merge_spikes,
        auto_merge=auto_merge,
        num_workers=num_workers,
        whitening_max_elts=whitening_max_elts,
        clustering_max_elts=clustering_max_elts
    )
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def kilosort2(
    recording_path,
    sorting_out,
    detect_threshold=6,
    car=True,  # whether to do common average referencing
    minFR=1 / 50,  # minimum spike rate (Hz); clusters falling below this for too long get removed
    freq_min=150,  # min. bp filter freq (Hz), use 0 for no filter
    sigmaMask=30,  # sigmaMask
    nPCs=3,  # PCs per channel? -- TODO confirm
):
    """Sort a recording with Kilosort2, forwarding all keyword parameters to
    the sorter, and write the result to ``sorting_out``.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosort2sorter import Kilosort2Sorter

    rec = AutoRecordingExtractor(dict(path=recording_path), download=True)
    # rec = se.SubRecordingExtractor(parent_recording=rec, start_frame=0, end_frame=30000 * 10)

    # Sorting
    print('Sorting...')
    ks2 = Kilosort2Sorter(
        recording=rec,
        output_folder='/tmp/tmp_kilosort2_' + _random_string(8),
        delete_output_folder=True
    )
    ks2.set_params(
        detect_threshold=detect_threshold,
        car=car,
        minFR=minFR,
        freq_min=freq_min,
        sigmaMask=sigmaMask,
        nPCs=nPCs
    )
    elapsed = ks2.run()
    # Runtime marker consumed elsewhere (same format as the other wrappers).
    print(f'#SF-SORTER-RUNTIME#{elapsed:.3f}#')
    AutoSortingExtractor.write_sorting(sorting=ks2.get_result(), save_path=sorting_out)
def spykingcircus(recording, sorting_out):
    """Sort a recording with SpyKING CIRCUS using the sorter's default
    parameters.

    Fix: dropped the unused ``import spiketoolkit as st`` (nothing in this
    function references it).

    Parameters
    ----------
    recording :
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out :
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    """
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)

    # Sorting
    print('Sorting...')
    sorter = ss.SpykingcircusSorter(
        recording=recording,
        output_folder='/tmp/tmp_spykingcircus_' + _random_string(8),
        delete_output_folder=True
    )
    sorter.set_params()
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
def ironclust(recording_path, sorting_out, detect_threshold=4, freq_min=300, freq_max=0, detect_sign=-1, adjacency_radius=50, whiten=False, adjacency_radius_out=100, merge_thresh=0.95, fft_thresh=8, knn=30, min_count=30, delta_cut=1, pc_per_chan=6, batch_sec_drift=600, step_sec_drift=20, common_ref_type='trimmean', fGpu=True, clip_pre=0.25, clip_post=0.75, merge_thresh_cc=1):
    """Sort a recording with IronClust, forwarding all keyword parameters.

    Fix: restored the ``#SF-SORTER-RUNTIME#`` print, which was commented out
    here while every other sorter wrapper emits it — presumably the marker is
    parsed downstream for runtime reporting.

    Parameters
    ----------
    recording_path :
        Path/URI resolvable by ``AutoRecordingExtractor`` (downloaded locally).
    sorting_out :
        Destination passed to ``AutoSortingExtractor.write_sorting``.
    Remaining keyword arguments are forwarded to ``IronClustSorter.set_params``
    alongside a fixed set of non-configurable defaults (see below).
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._ironclustsorter import IronClustSorter

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    sorter = IronClustSorter(
        recording=recording,
        output_folder='/tmp/tmp_ironclust_' + _random_string(8),
        delete_output_folder=True
    )
    sorter.set_params(
        # fixed (non-configurable) parameters
        fft_thresh_low=0,
        nSites_whiten=32,
        feature_type='gpca',
        post_merge_mode=1,
        sort_mode=1,
        prm_template_name='',
        filter_type='bandpass',
        filter_detect_type='none',
        # caller-configurable parameters
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        whiten=whiten,
        adjacency_radius_out=adjacency_radius_out,
        merge_thresh=merge_thresh,
        fft_thresh=fft_thresh,
        knn=knn,
        min_count=min_count,
        delta_cut=delta_cut,
        pc_per_chan=pc_per_chan,
        batch_sec_drift=batch_sec_drift,
        step_sec_drift=step_sec_drift,
        common_ref_type=common_ref_type,
        fGpu=fGpu,
        clip_pre=clip_pre,
        clip_post=clip_post,
        merge_thresh_cc=merge_thresh_cc
    )
    timer = sorter.run()
    # Runtime marker in the same format as the other sorter wrappers (restored).
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()
    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)