Пример #1
0
    def run(self):
        """Compute an autocorrelogram for every unit in the firings file and
        write the results to ``self.json_out`` as JSON."""
        from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor

        print('test1', self.firings_path, self.samplerate)

        sorting = SFMdaSortingExtractor(firings_file=self.firings_path)

        # Convert the window / bin sizes from milliseconds to timepoints.
        sample_rate = self.samplerate
        max_dt_tp = self.max_dt_msec * sample_rate / 1000
        bin_size_tp = self.bin_size_msec * sample_rate / 1000

        autocorrelograms = []
        for unit_id in sorting.get_unit_ids():
            print('Unit::g {}'.format(unit_id))
            bin_counts, bin_edges = compute_autocorrelogram(
                sorting.get_unit_spike_train(unit_id),
                max_dt_tp=max_dt_tp,
                bin_size_tp=bin_size_tp,
                max_samples=self.max_samples)
            autocorrelograms.append(dict(
                unit_id=unit_id,
                bin_counts=bin_counts,
                bin_edges=bin_edges,
            ))

        # serialize_np converts numpy values into JSON-serializable types.
        with open(self.json_out, 'w') as f:
            json.dump(serialize_np(dict(autocorrelograms=autocorrelograms)), f)
Пример #2
0
 def run(self):
     """Build a spike-spray object comparing a true unit against a sorted
     unit, then save it to ``self.json_out`` as JSON."""
     recording = SFMdaRecordingExtractor(
         dataset_directory=self.recording_directory,
         download=True,
         raw_fname=self.filtered_timeseries)
     truth = SFMdaSortingExtractor(firings_file=self.firings_true)
     sorted_result = SFMdaSortingExtractor(firings_file=self.firings_sorted)
     spikesprays = create_spikesprays(
         rx=recording,
         sx_true=truth,
         sx_sorted=sorted_result,
         neighborhood_size=self.neighborhood_size,
         num_spikes=self.num_spikes,
         unit_id_true=self.unit_id_true,
         unit_id_sorted=self.unit_id_sorted)
     with open(self.json_out, 'w') as f:
         json.dump(spikesprays, f)
Пример #3
0
 def run(self):
     """Write the median waveform of one unit (across sampled spikes) to
     ``self.json_out`` as JSON."""
     recording = SFMdaRecordingExtractor(
         dataset_directory=self.recording_directory, download=True)
     sorting = SFMdaSortingExtractor(firings_file=self.firings)
     waveforms = _get_random_spike_waveforms(
         recording=recording, sorting=sorting, unit=self.unit_id)
     # Median over axis 2 (the spike axis) is robust to outlier spikes.
     average_waveform = np.median(waveforms, axis=2)
     output = dict(
         channel_ids=recording.get_channel_ids(),
         average_waveform=average_waveform.tolist())
     with open(self.json_out, 'w') as f:
         json.dump(output, f)
Пример #4
0
def yass_example(download=True, set_id=1):
    """Load one of the six VisAPy MEA ground-truth datasets for YASS testing.

    Parameters
    ----------
    download : bool
        If True, download the recording data from the kbucket store.
    set_id : int
        Dataset number; must be in the range 1..6.

    Returns
    -------
    tuple
        ``(recording_extractor, ground_truth_sorting_extractor)``

    Raises
    ------
    Exception
        If ``set_id`` is not between 1 and 6.
    """
    # Guard clause: validate before touching the network / filesystem.
    if set_id not in range(1, 7):
        # Fixed typo in the error message ("betewen" -> "between").
        raise Exception(
            'Invalid ID for yass_example {} is not between 1..6'.format(
                set_id))
    dsdir = 'kbucket://15734439d8cf/groundtruth/visapy_mea/set{}'.format(
        set_id)
    IX = SFMdaRecordingExtractor(dataset_directory=dsdir,
                                 download=download)
    path1 = os.path.join(dsdir, 'firings_true.mda')
    print(path1)
    OX = SFMdaSortingExtractor(path1)
    return (IX, OX)
Пример #5
0
    def initialize(self):
        """Set up the sorting extractor for this context. Idempotent: repeat
        calls return immediately."""
        # Bail out if a previous call already did the work.
        if self._initialized:
            return
        self._initialized = True

        # self._recording_context.initialize()

        print('******** FORESTVIEW: Initializing sorting result context')

        firings = self._sorting_result_object['firings']
        if firings:
            self._sorting_extractor = SFMdaSortingExtractor(
                firings_file=firings)
        else:
            # No firings file was produced — leave the extractor unset.
            self._sorting_extractor = None

        print('******** FORESTVIEW: Done initializing sorting result context')
Пример #6
0
def load_sorting_results_info(firings_path, *, recording_path, epoch_name, ntrode_name, curated=False):
    """Summarize a sorting-results file as a dict, or return None when the
    firings file cannot be found.

    The summary includes the unit ids and the total number of spike events
    across all units.
    """
    if not mt.findFile(firings_path):
        return None
    sorting = SFMdaSortingExtractor(firings_file=firings_path)
    unit_ids = sorting.get_unit_ids()
    # Total event count = sum of per-unit spike-train lengths.
    total_num_events = sum(
        len(sorting.get_unit_spike_train(unit_id=uid)) for uid in unit_ids)
    return dict(
        type='sorting_results',
        epoch_name=epoch_name,
        ntrode_name=ntrode_name,
        curated=curated,
        firings_path=firings_path,
        recording_path=recording_path,
        unit_ids=unit_ids,
        num_events=total_num_events,
    )
Пример #7
0
#!/usr/bin/env python

from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor
from mountaintools import client as mt

# Configure to download from the public spikeforest kachery node
mt.configDownloadFrom('spikeforest.public')

# Load an example tetrode recording with its ground truth
# You can also substitute any of the other available recordings
recdir = 'sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth'

print('loading recording...')
# download=True fetches the remote data before constructing the extractor.
recording = SFMdaRecordingExtractor(dataset_directory=recdir, download=True)
sorting_true = SFMdaSortingExtractor(firings_file=recdir + '/firings_true.mda')

# import a spike sorter from the spikesorters module of spikeforest
from spikeforestsorters import MountainSort4
import os
import shutil

# In place of MountainSort4 you could use any of the following:
#
# MountainSort4, SpykingCircus, KiloSort, KiloSort2, YASS
# IronClust, HerdingSpikes2, JRClust, Tridesclous, Klusta
# although the Matlab sorters require further setup.

# clear and create an empty output directory (keep things tidy)
if os.path.exists('test_outputs'):
    shutil.rmtree('test_outputs')
os.makedirs('test_outputs', exist_ok=True)
Пример #8
0
from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor
from mountaintools import client as mt

# Configure to download from the public spikeforest kachery node
mt.configDownloadFrom('spikeforest.public')

# Load the recording with its ground truth
recdir = 'sha1dir://be6ce9f60fe1963af235862dc8197c9753b4b6f5.hybrid_janelia/drift_siprobe/rec_16c_1200s_11'

print('Loading recording...')
recording = SFMdaRecordingExtractor(dataset_directory=recdir, download=True)
sorting_true = SFMdaSortingExtractor(firings_file=recdir + '/firings_true.mda')

# Load a previously computed MountainSort4 result by its sha1 address.
sorting_ms4 = SFMdaSortingExtractor(
    firings_file=
    'sha1://f1c6fdf52a2873d6f746e44dab6bf7ccd2937d97/f1c6fdf52a2873d6f746e44dab6bf7ccd2937d97/firings.mda'
)

# import from the spikeforest package
import spikeforest_analysis as sa

# write the ground truth firings file
SFMdaSortingExtractor.write_sorting(sorting=sorting_true,
                                    save_path='test_outputs/firings_true.mda')

# run the comparison
print('Compare with truth...')
import time
timer = time.time()  # NOTE(review): start time, presumably reported further down — not visible in this chunk

## Old method
Пример #9
0
 def sorting(self):
     """Build a sorting extractor from this object's firings file."""
     firings = self._obj['firings']
     return SFMdaSortingExtractor(firings_file=firings)
Пример #10
0
 def sortingTrue(self):
     """Build a sorting extractor for the ground-truth firings stored in
     this recording's directory."""
     firings_true_path = self.directory() + '/firings_true.mda'
     return SFMdaSortingExtractor(firings_file=firings_true_path)
Пример #11
0
from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor
from mountaintools import client as mt

# Configure to download from the public spikeforest kachery node
mt.configDownloadFrom('spikeforest.public')

# Load an example tetrode recording with its ground truth
# You can also substitute any of the other available recordings
recdir = 'sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth'

print('loading recording...')
# download=True fetches the remote data before constructing the extractor.
recording = SFMdaRecordingExtractor(dataset_directory=recdir, download=True)
sorting_true = SFMdaSortingExtractor(firings_file=recdir + '/firings_true.mda')
Пример #12
0
# clear and create an empty output directory (keep things tidy)
if os.path.exists('test_outputs'):
    shutil.rmtree('test_outputs')
os.makedirs('test_outputs', exist_ok=True)

# Run spike sorting in the default singularity container
print('Spike sorting...')
# detect_sign=-1: detect negative-going spikes; adjacency_radius presumably in
# electrode-distance units — TODO confirm against MountainSort4 docs.
MountainSort4.execute(recording_dir=recdir,
                      firings_out='test_outputs/ms4_firings.mda',
                      detect_sign=-1,
                      adjacency_radius=50,
                      _container='default')

# Load the result into a sorting extractor
sorting = SFMdaSortingExtractor(firings_file='test_outputs/ms4_firings.mda')

# import from the spikeforest package
import spikeforest_analysis as sa

# write the ground truth firings file
SFMdaSortingExtractor.write_sorting(sorting=sorting_true,
                                    save_path='test_outputs/firings_true.mda')

# run the comparison
print('Compare with truth...')
sa.GenSortingComparisonTable.execute(
    firings='test_outputs/ms4_firings.mda',
    firings_true='test_outputs/firings_true.mda',
    units_true=[],  # use all units
    json_out='test_outputs/comparison.json',