コード例 #1
0
ファイル: _waveclus.py プロジェクト: mhhennig/spikeforest2
def waveclus(
    recording_path,
    sorting_out
):
    """Run the WaveClus sorter on a recording and persist the sorting.

    Parameters
    ----------
    recording_path : str
        Path/URI of the recording, resolved via AutoRecordingExtractor.
    sorting_out : str
        Destination path for the written sorting.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._waveclussorter import WaveclusSorter

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    work_dir = '/tmp/tmp_waveclus_' + _random_string(8)
    sorter = WaveclusSorter(
        recording=recording,
        output_folder=work_dir,
        delete_output_folder=True
    )
    sorter.set_params()

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #2
0
ファイル: _kilosort2.py プロジェクト: yarikoptic/spikeforest2
def kilosort2(recording, sorting_out):
    """Run KiloSort2 with a fixed parameter set and write the sorting to sorting_out."""
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosort2sorter import Kilosort2Sorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rx = AutoRecordingExtractor(dict(path=recording), download=True)

    print('Sorting...')
    sorter = Kilosort2Sorter(
        recording=rx,
        output_folder='/tmp/tmp_kilosort2_' + _random_string(8),
        delete_output_folder=True)
    sorter.set_params(
        detect_sign=-1,
        detect_threshold=5,
        freq_min=150,
        pc_per_chan=3)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #3
0
ファイル: _tridesclous.py プロジェクト: mhhennig/spikeforest2
def tridesclous(
    recording_path,
    sorting_out
):
    """Run the Tridesclous sorter on a recording and persist the sorting."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    work_dir = '/tmp/tmp_tridesclous_' + _random_string(8)
    # NOTE(review): HS2_PROBE_PATH belongs to HerdingSpikes; setting it in the
    # tridesclous wrapper looks copy-pasted — confirm it is actually needed here.
    os.environ['HS2_PROBE_PATH'] = work_dir  # important for when we are in a container
    sorter = ss.TridesclousSorter(
        recording=recording,
        output_folder=work_dir,
        delete_output_folder=True,
        verbose=True,
    )
    sorter.set_params()

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #4
0
def mountainsort4(recording: str, sorting_out: str) -> str:
    """Bandpass-filter and whiten a recording, run MountainSort4, write the sorting."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rx = AutoRecordingExtractor(dict(path=recording), download=True)

    # Preprocessing: bandpass to the spike band, then whiten across channels.
    print('Preprocessing...')
    rx = st.preprocessing.bandpass_filter(rx, freq_min=300, freq_max=6000)
    rx = st.preprocessing.whiten(rx)

    print('Sorting...')
    sorter = ss.Mountainsort4Sorter(
        recording=rx,
        output_folder='/tmp/tmp_mountainsort4_' + _random_string(8),
        delete_output_folder=True)
    sorter.set_params(detect_sign=-1, adjacency_radius=50, detect_threshold=4)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #5
0
ファイル: _kilosort.py プロジェクト: mhhennig/spikeforest2
def kilosort(
        recording_path,
        sorting_out,
        detect_threshold=6,
        freq_min=300,
        freq_max=6000,
        Nt=128 * 1024 * 5 + 64  # batch size for kilosort
):
    """Run KiloSort and write the sorting to sorting_out.

    NOTE(review): the ``Nt`` parameter is accepted but never forwarded to
    ``set_params`` — confirm whether the sorter should receive it.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosortsorter import KilosortSorter

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = KilosortSorter(
        recording=recording,
        output_folder='/tmp/tmp_kilosort_' + _random_string(8),
        delete_output_folder=True)
    sorter.set_params(
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        car=True)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #6
0
def herdingspikes2(recording_path,
                   sorting_out,
                   filter=True,
                   pre_scale=True,
                   pre_scale_value=20):
    """Run HerdingSpikes2 and write the sorting; NUM_WORKERS env sets clustering jobs."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    work_dir = '/tmp/tmp_herdingspikes2_' + _random_string(8)
    # important for when we are in a container
    os.environ['HS2_PROBE_PATH'] = work_dir
    sorter = ss.HerdingspikesSorter(
        recording=recording,
        output_folder=work_dir,
        delete_output_folder=True)

    # Default to a single worker when NUM_WORKERS is unset or empty.
    n_jobs = int(os.environ.get('NUM_WORKERS') or '1')

    sorter.set_params(filter=filter,
                      pre_scale=pre_scale,
                      pre_scale_value=pre_scale_value,
                      clustering_n_jobs=n_jobs)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #7
0
def mountainsort4(
    recording_path: str,
    sorting_out: str,
    detect_sign=-1,
    adjacency_radius=50,
    clip_size=50,
    detect_threshold=3,
    detect_interval=10,
    freq_min=300,
    freq_max=6000
):
    """Run MountainSort4 (with its built-in filter + whiten) and write the sorting."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = ss.Mountainsort4Sorter(
        recording=recording,
        output_folder='/tmp/tmp_mountainsort4_' + _random_string(8),
        delete_output_folder=True
    )

    # NUM_WORKERS env var overrides worker count; falls back to 0 otherwise.
    env_workers = os.environ.get('NUM_WORKERS', None)
    n_workers = int(env_workers) if env_workers else 0

    sorter.set_params(
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        clip_size=clip_size,
        detect_threshold=detect_threshold,
        detect_interval=detect_interval,
        num_workers=n_workers,
        curation=False,
        whiten=True,
        filter=True,
        freq_min=freq_min,
        freq_max=freq_max
    )

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #8
0
def compare_with_truth(sorting_path, sorting_true_path, json_out):
    """Compare a sorting against ground truth and dump the comparison table as JSON."""
    from spikeforest2_utils import SortingComparison
    sorting = AutoSortingExtractor(sorting_path)
    sorting_true = AutoSortingExtractor(sorting_true_path)
    comparison = SortingComparison(sorting_true, sorting, delta_tp=30)
    frame = _get_comparison_data_frame(comparison=comparison)
    # Transpose so each row of the frame becomes one JSON entry.
    obj = frame.transpose().to_dict()
    with open(json_out, 'w') as f:
        json.dump(obj, f, indent=4)
コード例 #9
0
ファイル: _jrclust.py プロジェクト: mhhennig/spikeforest2
def jrclust(
    recording_path,
    sorting_out,
    detect_sign=-1, # Use -1, 0, or 1, depending on the sign of the spikes in the recording')
    adjacency_radius=50,
    detect_threshold=4.5, # detection threshold
    freq_min=300,
    freq_max=3000,
    merge_thresh=0.98,
    pc_per_chan=1,
    filter_type='bandpass', # {none, bandpass, wiener, fftdiff, ndiff}
    nDiffOrder='none',
    min_count=30,
    fGpu=0,
    fParfor=0,
    feature_type='gpca' #  # {gpca, pca, vpp, vmin, vminmax, cov, energy, xcov}')
):
    """Run JRClust and write the sorting to sorting_out."""
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._jrclustsorter import JRClustSorter

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = JRClustSorter(
        recording=recording,
        output_folder='/tmp/tmp_jrclust_' + _random_string(8),
        delete_output_folder=True
    )

    # Forward every tunable to the sorter in one shot.
    params = dict(
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        merge_thresh=merge_thresh,
        pc_per_chan=pc_per_chan,
        filter_type=filter_type,
        nDiffOrder=nDiffOrder,
        min_count=min_count,
        fGpu=fGpu,
        fParfor=fParfor,
        feature_type=feature_type,
    )
    sorter.set_params(**params)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #10
0
ファイル: _ironclust.py プロジェクト: yarikoptic/spikeforest2
def ironclust(recording, sorting_out):
    """Run IronClust with a fixed parameter set and write the sorting."""
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._ironclustsorter import IronClustSorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rx = AutoRecordingExtractor(dict(path=recording), download=True)

    print('Sorting...')
    sorter = IronClustSorter(
        recording=rx,
        output_folder='/tmp/tmp_ironclust_' + _random_string(8),
        delete_output_folder=True)

    sorter.set_params(
        detect_sign=-1,
        adjacency_radius=50,
        adjacency_radius_out=75,
        detect_threshold=4,
        prm_template_name='',
        freq_min=300,
        freq_max=8000,
        merge_thresh=0.99,
        pc_per_chan=0,
        whiten=False,
        filter_type='bandpass',
        filter_detect_type='none',
        common_ref_type='mean',
        batch_sec_drift=300,
        step_sec_drift=20,
        knn=30,
        min_count=30,
        fGpu=True,
        fft_thresh=8,
        fft_thresh_low=0,
        nSites_whiten=32,
        feature_type='gpca',
        delta_cut=1,
        post_merge_mode=1,
        sort_mode=1)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #11
0
def klusta(
    recording_path,
    sorting_out,
    adjacency_radius=None,
    detect_sign=-1,
    threshold_strong_std_factor=5,
    threshold_weak_std_factor=2,
    n_features_per_channel=3,
    num_starting_clusters=3,
    extract_s_before=16,
    extract_s_after=32
):
    """Run Klusta and write the sorting to sorting_out."""
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = ss.KlustaSorter(
        recording=recording,
        output_folder='/tmp/tmp_klusta_' + _random_string(8),
        delete_output_folder=True
    )

    # Forward every tunable to the sorter in one shot.
    params = dict(
        adjacency_radius=adjacency_radius,
        detect_sign=detect_sign,
        threshold_strong_std_factor=threshold_strong_std_factor,
        threshold_weak_std_factor=threshold_weak_std_factor,
        n_features_per_channel=n_features_per_channel,
        num_starting_clusters=num_starting_clusters,
        extract_s_before=extract_s_before,
        extract_s_after=extract_s_after,
    )
    sorter.set_params(**params)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #12
0
def spykingcircus(recording_path,
                  sorting_out,
                  detect_sign=-1,
                  adjacency_radius=200,
                  detect_threshold=6,
                  template_width_ms=3,
                  filter=True,
                  merge_spikes=True,
                  auto_merge=0.75,
                  whitening_max_elts=1000,
                  clustering_max_elts=10000):
    """Run SpykingCircus and write the sorting; NUM_WORKERS env sets parallelism."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = ss.SpykingcircusSorter(
        recording=recording,
        output_folder='/tmp/tmp_spykingcircus_' + _random_string(8),
        delete_output_folder=True)

    # Default to a single worker when NUM_WORKERS is unset or empty.
    n_workers = int(os.environ.get('NUM_WORKERS') or '1')

    sorter.set_params(detect_sign=detect_sign,
                      adjacency_radius=adjacency_radius,
                      detect_threshold=detect_threshold,
                      template_width_ms=template_width_ms,
                      filter=filter,
                      merge_spikes=merge_spikes,
                      auto_merge=auto_merge,
                      num_workers=n_workers,
                      whitening_max_elts=whitening_max_elts,
                      clustering_max_elts=clustering_max_elts)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #13
0
ファイル: extractor.py プロジェクト: AkiHase/HTsort
def load_spikeforest_data(recording_path: str,
                          sorting_true_path: str,
                          download=True):
    """Load a SpikeForest recording plus its ground-truth sorting, print a summary.

    Returns the tuple (recording, sorting_GT).
    """
    recording = AutoRecordingExtractor(recording_path, download=download)
    sorting_GT = AutoSortingExtractor(sorting_true_path)

    # Recording summary
    fs = recording.get_sampling_frequency()
    n_frames = recording.get_num_frames()
    print(f'Sampling frequency:{fs}')
    print(f'Channel ids:{recording.get_channel_ids()}')
    print(f'channel location:{recording.get_channel_locations()}')
    print(f'frame num:{n_frames}')
    print(f'recording duration:{recording.frame_to_time(n_frames)}')

    # Ground-truth summary
    print(f'unit ids:{sorting_GT.get_unit_ids()}')
    return recording, sorting_GT
コード例 #14
0
def kilosort2(
        recording_path,
        sorting_out,
        detect_threshold=6,
        car=True,  # whether to do common average referencing
        minFR=1 / 50,  # minimum spike rate (Hz); clusters below this for too long are removed
        freq_min=150,  # min. bandpass filter freq (Hz); use 0 for no filter
        sigmaMask=30,  # sigmaMask
        nPCs=3,  # PCs per channel?
):
    """Run KiloSort2 with the given tunables and write the sorting."""
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosort2sorter import Kilosort2Sorter

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    print('Sorting...')
    sorter = Kilosort2Sorter(
        recording=recording,
        output_folder='/tmp/tmp_kilosort2_' + _random_string(8),
        delete_output_folder=True)
    sorter.set_params(
        detect_threshold=detect_threshold,
        car=car,
        minFR=minFR,
        freq_min=freq_min,
        sigmaMask=sigmaMask,
        nPCs=nPCs)

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #15
0
def spykingcircus(recording, sorting_out):
    """Run SpykingCircus with default parameters and write the sorting."""
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    rx = AutoRecordingExtractor(dict(path=recording), download=True)

    print('Sorting...')
    sorter = ss.SpykingcircusSorter(
        recording=rx,
        output_folder='/tmp/tmp_spykingcircus_' + _random_string(8),
        delete_output_folder=True)
    sorter.set_params()

    elapsed = sorter.run()
    # Marker line parsed downstream to recover the sorter runtime.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(elapsed))

    AutoSortingExtractor.write_sorting(
        sorting=sorter.get_result(), save_path=sorting_out)
コード例 #16
0
def prepare_dataset_from_hash(
    recording_paths: Union[str, List[str]],
    gt_paths: Union[str, List[str]],
    sorter_names: List[str],
    metric_names: List[str],
    cache_path: Path,
):
    """Build an (X, y) dataset from one or more recording/ground-truth hash pairs.

    Single strings are promoted to one-element lists. Raises ValueError when the
    two lists differ in length. Returns vertically/horizontally stacked arrays.
    """
    if isinstance(recording_paths, str):
        recording_paths = [recording_paths]
    if isinstance(gt_paths, str):
        gt_paths = [gt_paths]

    if len(recording_paths) != len(gt_paths):
        raise ValueError(
            f"You have provided {len(recording_paths)} recording hashes and {len(gt_paths)} ground truth hashes! These must be the same."
        )

    xs, ys = [], []
    for recording_path, gt_path in zip(recording_paths, gt_paths):
        # Cache under the part of the hash path after the 'scheme://' prefix.
        c_path = cache_path / recording_path.split('//')[1]

        recording = AutoRecordingExtractor(recording_path, download=True)
        gt_sorting = AutoSortingExtractor(gt_path)
        session = SpikeSession(recording, gt_sorting, cache_path=c_path)

        X, y = prepare_fp_dataset(session,
                                  sorter_names=sorter_names,
                                  metric_names=metric_names)
        xs.append(X)
        ys.append(y)

    return np.vstack(xs), np.hstack(ys)
コード例 #17
0
# Prefetch the recording's files (raw traces, params, geometry, ground truth)
# into the local kachery cache.
ka.load_file(recordingZero['directory'] + '/raw.mda')
ka.load_file(recordingZero['directory'] + '/params.json')
ka.load_file(recordingZero['directory'] + '/geom.csv')
ka.load_file(recordingZero['directory'] + '/firings_true.mda')

# Attach a container for per-sorter results
recordingZero['results'] = dict()

# Plot the recording traces
recordingInput = AutoRecordingExtractor(dict(path=recordingPath),
                                        download=True)
w_ts = sw.plot_timeseries(recordingInput)
w_ts.figure.suptitle("Recording by group")
w_ts.ax.set_ylabel("Channel_ids")

gtOutput = AutoSortingExtractor(sortingPath)
# Only getting a part of the recording
#gtOutput.add_epoch(epoch_name="first_half", start_frame=0, end_frame=recordingInput.get_num_frames()/2) #set

#subsorting = gtOutput.get_epoch("first_half")
#w_rs_gt = sw.plot_rasters(gtOutput,sampling_frequency=sampleRate)

#w_wf_gt = sw.plot_unit_waveforms(recordingInput,gtOutput, max_spikes_per_unit=100)

# We will also try to plot the raster plot for the ground truth

# We will be trying to get a subset of the recording
#recording4 = se.SubRecordingExtractor(parent_recording=recordingInput, channel_ids=[2, 3, 4, 5])
# Plotting a segment of recording
#w_ts = sw.plot_timeseries(recording4)
# NOTE(review): duplicate suptitle call on the same figure — likely leftover.
w_ts.figure.suptitle("Recording by group")
コード例 #18
0
ファイル: _ironclust.py プロジェクト: mhhennig/spikeforest2
def ironclust(recording_path,
              sorting_out,
              detect_threshold=4,
              freq_min=300,
              freq_max=0,
              detect_sign=-1,
              adjacency_radius=50,
              whiten=False,
              adjacency_radius_out=100,
              merge_thresh=0.95,
              fft_thresh=8,
              knn=30,
              min_count=30,
              delta_cut=1,
              pc_per_chan=6,
              batch_sec_drift=600,
              step_sec_drift=20,
              common_ref_type='trimmean',
              fGpu=True,
              clip_pre=0.25,
              clip_post=0.75,
              merge_thresh_cc=1):
    """Run IronClust on a recording and write the sorting to sorting_out.

    Caller-supplied tunables are forwarded to the sorter; a handful of fixed
    settings (feature type, filter types, merge/sort modes, ...) are pinned
    in set_params below.
    """
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._ironclustsorter import IronClustSorter

    recording = AutoRecordingExtractor(dict(path=recording_path),
                                       download=True)

    # Sorting
    print('Sorting...')
    sorter = IronClustSorter(
        recording=recording,
        output_folder='/tmp/tmp_ironclust_' + _random_string(8),
        delete_output_folder=True)

    sorter.set_params(
        # fixed settings
        fft_thresh_low=0,
        nSites_whiten=32,
        feature_type='gpca',
        post_merge_mode=1,
        sort_mode=1,
        prm_template_name='',
        filter_type='bandpass',
        filter_detect_type='none',
        # caller-supplied tunables
        detect_threshold=detect_threshold,
        freq_min=freq_min,
        freq_max=freq_max,
        detect_sign=detect_sign,
        adjacency_radius=adjacency_radius,
        whiten=whiten,
        adjacency_radius_out=adjacency_radius_out,
        merge_thresh=merge_thresh,
        fft_thresh=fft_thresh,
        knn=knn,
        min_count=min_count,
        delta_cut=delta_cut,
        pc_per_chan=pc_per_chan,
        batch_sec_drift=batch_sec_drift,
        step_sec_drift=step_sec_drift,
        common_ref_type=common_ref_type,
        fGpu=fGpu,
        clip_pre=clip_pre,
        clip_post=clip_post,
        merge_thresh_cc=merge_thresh_cc)
    timer = sorter.run()
    # Fix: re-enable the runtime marker. It was commented out in this wrapper
    # only, while every other sorter wrapper emits it and the pipeline parses
    # the '#SF-SORTER-RUNTIME#...#' line downstream.
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
コード例 #19
0
# Prefetch the recording's files (params, geometry, ground truth) into the
# local kachery cache.
ka.load_file(recordingZero['directory'] + '/params.json')
ka.load_file(recordingZero['directory'] + '/geom.csv')
ka.load_file(recordingZero['directory'] + '/firings_true.mda')

# Attach a container for per-sorter results
recordingZero['results'] = dict()

# Plot the recording traces
recordingInput = AutoRecordingExtractor(dict(path=recordingPath),
                                        download=True)
w_ts = sw.plot_timeseries(recordingInput)
w_ts.figure.suptitle("Recording by group")
w_ts.ax.set_ylabel("Channel_ids")

# Raster plot for the ground-truth sorting.
gtOutput = AutoSortingExtractor(sortingPath)
# NOTE(review): the original comment said the unit indices "need to change",
# but no re-indexing happens here — confirm whether remapping is required.
w_rs_gt = sw.plot_rasters(gtOutput, sampling_frequency=sampleRate)

# Spike sorting: run SpykingCircus through spikeinterface inside the default
# hither container, with kachery in read-only mode.
with ka.config(fr='default_readonly'):
    #with hither.config(cache='default_readwrite'):
    with hither.config(container='default'):
        result_spyKingCircus = sorters.spykingcircus.run(
            recording_path=recordingPath, sorting_out=hither.File())
#Mountainsort
with ka.config(fr='default_readonly'):
    #with hither.config(cache='default_readwrite'):
    with hither.config(container='default'):
コード例 #20
0
def compute_units_info(recording_path, sorting_path, json_out):
    """Compute per-unit info for a recording/sorting pair and write it as JSON."""
    recording = AutoRecordingExtractor(recording_path)
    # Give the sorting extractor the recording's sampling rate.
    sorting = AutoSortingExtractor(
        sorting_path, samplerate=recording.get_sampling_frequency())
    info = _compute_units_info(recording=recording, sorting=sorting)
    with open(json_out, 'w') as fp:
        json.dump(info, fp)