def get_sorting_output(self, study_name, recording_name, sorter_name):
    """Build a sorting extractor for one sorter's output on one recording.

    Narrows the index dataframe to the unique row matching
    (study_name, recording_name, sorter_name), then wraps its firings URI
    together with the recording's sampling rate in an 'mda' sorting object.
    """
    # Filter step by step so each failure yields a precise message.
    rows = self._df.query(f"studyName == '{study_name}'")
    assert len(rows) > 0, f"Study '{study_name}' not found"
    rows = rows.query(f"recordingName == '{recording_name}'")
    assert len(rows) > 0, f"Recording '{recording_name}' not found"
    rows = rows.query(f"sorterName == '{sorter_name}'")
    assert len(rows) == 1, f"Sorting output '{sorter_name}' not found"
    firings_uri = rows["firings"].values[0]
    # Sampling rate comes from the ground-truth recording (metadata only,
    # so no download is needed).
    recording = self.get_gt_recording(study_name, recording_name, download=False)
    sorting_object = {
        'sorting_format': 'mda',
        'data': {
            'firings': firings_uri,
            'samplerate': recording.get_sampling_frequency()
        }
    }
    return le.LabboxEphysSortingExtractor(sorting_object)
def main():
    """Load the SpikeForest sorting-results index and print the first 10 entries."""
    # Load the index of SpikeForest sorting outputs into a pandas dataframe.
    x = kp.load_object(
        'sha1://21c4ad407244f18318bdbdeef2c953ad1eb61aef/sortingresults.json')
    df = pd.DataFrame(x)
    print(x[0].keys())
    # Dump the whole index for reference.
    print('***************************************************************')
    print(df)
    print('***************************************************************')
    # Inspect the first 10 results: build a sorting extractor for each and
    # report how many units it contains.
    for index in range(10):
        study_name = df['studyName'][index]
        recording_name = df['recordingName'][index]
        sorter_name = df['sorterName'][index]
        sorting_object = {
            'sorting_format': 'mda',
            'data': {
                'firings': df['firings'][index],
                'samplerate': 30000
            }
        }
        sorting: se.SortingExtractor = le.LabboxEphysSortingExtractor(
            sorting_object)
        print(f'=========================================================')
        print(f'{study_name}/{recording_name} {sorter_name}')
        print(f'Num. units: {len(sorting.get_unit_ids())}')
def old_fetch_average_waveform_plot_data(recording_object, sorting_object, unit_id):
    """Compute the average waveform of one unit on its peak channel.

    Restricts both recording and sorting to the first 30 seconds, averages the
    unit's spike snippets, and keeps the channel with the largest absolute
    deflection.

    Returns a dict with:
      channel_id       -- id of the peak channel (None if the unit fired no spikes)
      average_waveform -- list of waveform samples on that channel (None if no spikes)
    """
    import labbox_ephys as le
    R = le.LabboxEphysRecordingExtractor(recording_object)
    S = le.LabboxEphysSortingExtractor(sorting_object)
    start_frame = 0
    # 30 seconds of data; cast to int since the sampling frequency may be a float
    end_frame = int(R.get_sampling_frequency() * 30)
    R0 = se.SubRecordingExtractor(
        parent_recording=R, start_frame=start_frame, end_frame=end_frame)
    S0 = se.SubSortingExtractor(
        parent_sorting=S, start_frame=start_frame, end_frame=end_frame)
    times0 = S0.get_unit_spike_train(unit_id=unit_id)
    if len(times0) == 0:
        # no waveforms found
        return dict(channel_id=None, average_waveform=None)
    try:
        average_waveform = st.postprocessing.get_unit_templates(
            recording=R0, sorting=S0, unit_ids=[unit_id])[0]
    except Exception as e:
        # Chain the underlying error instead of discarding it; the previous
        # bare `except:` also intercepted KeyboardInterrupt/SystemExit.
        raise Exception(f'Error getting unit templates for unit {unit_id}') from e
    # Peak channel = channel with the largest absolute deflection anywhere
    # along the template (axis 1 is time).
    channel_maximums = np.max(np.abs(average_waveform), axis=1)
    maxchan_index = np.argmax(channel_maximums)
    maxchan_id = R0.get_channel_ids()[maxchan_index]
    return dict(channel_id=maxchan_id,
                average_waveform=average_waveform[maxchan_index, :].tolist())
def get_sorting_info(sorting_object, recording_object):
    """Summarize a sorting: its unit ids plus the recording's sample rate."""
    S = le.LabboxEphysSortingExtractor(sorting_object)
    # download=False: only metadata (the sampling frequency) is needed here
    R = le.LabboxEphysRecordingExtractor(recording_object, download=False)
    return dict(
        unit_ids=_to_int_list(S.get_unit_ids()),
        samplerate=R.get_sampling_frequency()
    )
def main():
    """Prepare a snippets h5 file for a synthetic Magland example dataset."""
    # Hard-coded example: synth_magland noise10/K10/C4, dataset 001.
    base = 'sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth'
    recording_object = {
        "data": {
            "geom": [[1, 0], [2, 0], [3, 0], [4, 0]],
            "params": {
                "samplerate": 30000,
                "spike_sign": -1
            },
            "raw": base + "/raw.mda"
        },
        "recording_format": "mda"
    }
    sorting_object = {
        "data": {
            "firings": base + "/firings_true.mda",
            "samplerate": 30000
        },
        "sorting_format": "mda"
    }
    # Instantiate the extractors (validates the objects).
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    # Run the snippet-extraction job and wait for its h5 output.
    h5_path = le.prepare_snippets_h5.run(
        sorting_object=sorting_object,
        recording_object=recording_object).wait()
    print(h5_path)
def preload_download_sorting_2(sorting_object):
    """Best-effort: instantiate (thereby downloading) a sorting.

    Returns {'success': True} if construction succeeds, {'success': False}
    otherwise -- this is a deliberate best-effort probe, never raising.
    """
    import kachery_p2p as kp  # noqa: F401 -- kept for parity with the original
    try:
        le.LabboxEphysSortingExtractor(sorting_object)
    except Exception:
        # except Exception (not bare except): don't swallow
        # KeyboardInterrupt/SystemExit.
        return {'success': False}
    return {'success': True}
def preload_check_sorting_downloaded_2(sorting_object):
    """Check whether a sorting's data is already available locally.

    Temporarily disables p2p so that constructing the extractor can only
    succeed from local data. Returns {'isLocal': True/False}.
    """
    import kachery_p2p as kp
    try:
        kp._experimental_config(nop2p=True)
        le.LabboxEphysSortingExtractor(sorting_object)
    except Exception:
        # except Exception (not bare except): don't swallow
        # KeyboardInterrupt/SystemExit.
        return {'isLocal': False}
    finally:
        # Always re-enable p2p, whichever way we exit.
        kp._experimental_config(nop2p=False)
    return {'isLocal': True}
def get_gt_sorting_output(self, study_name, recording_name):
    """Build a sorting extractor for a recording's ground-truth sorting."""
    # Filter the index in two steps so each failure yields a precise message.
    matches = self._df.query(f"studyName == '{study_name}'")
    assert len(matches) > 0, f"Study '{study_name}' not found"
    matches = matches.query(f"recordingName == '{recording_name}'")
    assert len(matches) > 0, f"Recording '{recording_name}' not found"
    # Take the ground-truth URI from the first matching row.
    firings_uri = matches.iloc[0]["sortingTrueUri"]
    return le.LabboxEphysSortingExtractor(firings_uri)
def main():
    """Register the loren_example1 recording/sorting pair in a labbox-ephys feed."""
    snippets_h5_uri = 'sha1://5fc6996dfed9e7fd577bc85194d982a1ba52085e/real_snippets_1.h5?manifest=741f23273c3121aada6d9bdb67009c8c2ae1ed77'
    # Recording and sorting both read from the same snippets h5 file.
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {'snippets_h5_uri': snippets_h5_uri}
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {'snippets_h5_uri': snippets_h5_uri}
    }
    # Instantiate to validate and to print some basic info.
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())
    le_recordings = [dict(
        recordingId='loren_example1',
        recordingLabel='loren_example1',
        recordingPath=ka.store_object(recording_obj, basename='loren_example1.json'),
        recordingObject=recording_obj,
        description='''Example from Loren Frank'''.strip())]
    le_sortings = [dict(
        sortingId='loren_example1:mountainsort4',
        sortingLabel='loren_example1:mountainsort4',
        sortingPath=ka.store_object(
            sorting_obj, basename='loren_example-mountainsort4.json'),
        sortingObject=sorting_obj,
        recordingId='loren_example1',
        recordingPath=ka.store_object(recording_obj, basename='loren_example1.json'),
        recordingObject=recording_obj,
        description='''Example from Loren Frank (MountainSort4)'''.strip())]
    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
def main():
    """Register the loren_example1 recording/sorting pair in a labbox-ephys feed."""
    snippets_h5_uri = 'sha1://55c0cb6a63231236b6948b0dd422e6fedc75c5b5/real_snippets.h5?manifest=b124474caccccdba135d9550ec544a88caf531aa'
    # Recording and sorting both read from the same snippets h5 file.
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {'snippets_h5_uri': snippets_h5_uri}
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {'snippets_h5_uri': snippets_h5_uri}
    }
    # Instantiate to validate and to print some basic info.
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())
    le_recordings = [dict(
        recordingId='loren_example1',
        recordingLabel='loren_example1',
        recordingPath=ka.store_object(recording_obj, basename='loren_example1.json'),
        recordingObject=recording_obj,
        description='''Example from Loren Frank'''.strip())]
    le_sortings = [dict(
        sortingId='loren_example1:mountainsort4',
        sortingLabel='loren_example1:mountainsort4',
        sortingPath=ka.store_object(
            sorting_obj, basename='loren_example-mountainsort4.json'),
        sortingObject=sorting_obj,
        recordingId='loren_example1',
        recordingPath=ka.store_object(recording_obj, basename='loren_example1.json'),
        recordingObject=recording_obj,
        description='''Example from Loren Frank (MountainSort4)'''.strip())]
    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
def main():
    """Prepare snippets for the cortexlab-single-phase-3 curated sorting."""
    # Sorting: cortexlab-single-phase-3 Curated (good units) for
    # cortexlab-single-phase-3 (full)
    recording_object = kp.load_object(
        'sha1://8b222e25bc4d9c792e4490ca322b5338e0795596/cortexlab-single-phase-3.json'
    )
    sorting_object = {
        "sorting_format": "h5_v1",
        "data": {
            "h5_path": "sha1://68029d0eded8ca7d8f95c16dea81318966ae9b55/sorting.h5?manifest=12b0d8e37c7050a6fe636d4c16ed143bbd5dab0c"
        }
    }
    # Instantiate the extractors (validates the objects).
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    # Only the first 240 seconds (at 30 kHz) are processed.
    h5_path = le.prepare_snippets_h5.run(
        sorting_object=sorting_object,
        recording_object=recording_object,
        start_frame=0,
        end_frame=30000 * 240).wait()
    print(h5_path)
def get_isi_violation_rates(sorting_object, recording_object, configuration=None):
    """Compute the ISI-violation rate for each unit of a sorting.

    configuration keys (all optional):
      isi_threshold_msec -- refractory-period threshold in milliseconds (default 2.5)
      unit_ids           -- units to evaluate (default: all units in the sorting)

    Returns {str(unit_id): violation_rate}.
    """
    import labbox_ephys as le
    import spikemetrics as sm
    if configuration is None:
        # avoid the mutable-default-argument pitfall (shared dict across calls)
        configuration = {}
    S = le.LabboxEphysSortingExtractor(sorting_object)
    R = le.LabboxEphysRecordingExtractor(recording_object)
    samplerate = R.get_sampling_frequency()
    isi_threshold_msec = configuration.get('isi_threshold_msec', 2.5)
    unit_ids = configuration.get('unit_ids', S.get_unit_ids())
    # loop-invariant; also note the original passed num-frames (not seconds)
    # as `duration`, which is preserved here
    num_frames = R.get_num_frames()
    ret = {}
    for unit_id in unit_ids:  # renamed from `id` (shadowed the builtin)
        spike_train = S.get_unit_spike_train(unit_id=unit_id)
        # second return value is the total violation count; only the rate is kept
        ret[str(unit_id)], _ = sm.metrics.isi_violations(
            spike_train=spike_train,
            duration=num_frames,
            isi_threshold=isi_threshold_msec / 1000 * samplerate
        )
    return ret
def prepare_snippets_h5(recording_object, sorting_object, start_frame=None, end_frame=None, max_events_per_unit=None, max_neighborhood_size=15):
    """Produce (or reuse) a snippets h5 file for a recording/sorting pair.

    If the recording is already in 'snippets1' format its h5 URI is returned
    directly; otherwise the snippets are extracted into a temporary file and
    stored via kachery, returning the stored-file URI.
    """
    # snippets1 recordings already point at a prepared h5 file -- reuse it.
    if recording_object['recording_format'] == 'snippets1':
        return recording_object['data']['snippets_h5_uri']
    import labbox_ephys as le
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    with hi.TemporaryDirectory() as tmpdir:
        output_h5_path = tmpdir + '/snippets.h5'
        prepare_snippets_h5_from_extractors(
            recording=recording,
            sorting=sorting,
            output_h5_path=output_h5_path,
            start_frame=start_frame,
            end_frame=end_frame,
            max_events_per_unit=max_events_per_unit,
            max_neighborhood_size=max_neighborhood_size)
        # store while the temporary directory (and thus the file) still exists
        return ka.store_file(output_h5_path)
"label": "SF/PAIRED_KAMPFF/paired_kampff/2014_11_25_Pair_3_0", "recording_uri": "sha1://a205f87cef8b7f86df7a09cddbc79a1fbe5df60f/SF/PAIRED_KAMPFF/paired_kampff/2014_11_25_Pair_3_0.json", "sorting_true_uri": "sha1://1cd517687aeca7ecdfaa9695680038d142a75031/firings_true.mda" } # To find more examples, see: https://github.com/flatironinstitute/spikeforest_recordings # However: note that some processing needs to be done to the files in this repo (to add the manifests to the raw data). This is WIP # Adjust these values ########################### X = X1 # Select example from above feed_name = 'labbox-ephys-default' workspace_name = 'default' ################################################# recording_label = X['label'] recording_uri = X['recording_uri'] sorting_true_uri = X['sorting_true_uri'] recording = le.LabboxEphysRecordingExtractor(recording_uri, download=True) sorting_true = le.LabboxEphysSortingExtractor(sorting_true_uri, samplerate=30000) sorting_label = 'true' feed = kp.load_feed(feed_name, create=True) workspace = le.load_workspace(workspace_name=workspace_name, feed=feed) print(f'Feed URI: {feed.get_uri()}') R_id = workspace.add_recording(recording=recording, label=recording_label) S_id = workspace.add_sorting(sorting=sorting_true, recording_id=R_id, label=sorting_label)
def get_structure(sorting_object, recording_object):
    """Wrap raw objects in labbox-ephys extractors; return (sorting, recording)."""
    import labbox_ephys as le
    sorting_extractor = le.LabboxEphysSortingExtractor(sorting_object)
    recording_extractor = le.LabboxEphysRecordingExtractor(recording_object)
    return sorting_extractor, recording_extractor
import spikeextractors as se
import numpy as np
import labbox_ephys as le
from labbox_ephys import sorters
import kachery_p2p as kp

if __name__ == '__main__':
    # adjust these values
    workspace_uri = '{workspaceUri}'
    recording_id = '{recordingId}'  # {recordingLabel}

    # Look up the recording object stored in the workspace.
    workspace = le.load_workspace(workspace_uri)
    le_recording = workspace.get_recording(recording_id)
    recording_object = le_recording['recordingObject']

    # Run SpykingCircus with its standard parameter set.
    sorting_object = sorters.spykingcircus(
        recording_object=recording_object,
        detect_sign=-1,
        adjacency_radius=100,
        detect_threshold=6,
        template_width_ms=3,
        filter=True,
        merge_spikes=True,
        auto_merge=0.75,
        num_workers=None,
        whitening_max_elts=1000,
        clustering_max_elts=10000)

    # Register the result back into the workspace.
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    S_id = workspace.add_sorting(
        sorting=sorting, recording_id=recording_id, label='spykingcircus')
def get_sorting_object(sorting_path, recording_object):
    """Load a sorting from a path (using the recording's samplerate) and
    return its serializable object representation."""
    # download=False: only the sampling frequency is needed from the recording
    rec = le.LabboxEphysRecordingExtractor(recording_object, download=False)
    samplerate = rec.get_sampling_frequency()
    sorting = le.LabboxEphysSortingExtractor(sorting_path, samplerate=samplerate)
    return sorting.object()
def fetch_correlogram_plot_data(sorting_object, unit_x, unit_y=None):
    """Compute correlogram plot data: autocorrelogram of unit_x, or the
    cross-correlogram of (unit_x, unit_y) when unit_y is given."""
    import labbox_ephys as le
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    return _get_correlogram_data(
        sorting=sorting,
        unit_id1=unit_x,
        unit_id2=unit_y,
        window_size_msec=50,
        bin_size_msec=1)