def __init__(self, arg, samplerate=None):
    """Wrap a SortingExtractor built from *arg*.

    *arg* is either a ready-made sorting object (a dict containing
    'sorting_format') or anything `_create_object_for_arg` understands.
    The resolved object is stored on `self._object` and the concrete
    extractor on `self._sorting`; unit properties are copied over at the end.
    """
    super().__init__()
    if (isinstance(arg, dict)) and ('sorting_format' in arg):
        obj = dict(arg)
    else:
        obj = _create_object_for_arg(arg, samplerate=samplerate)
        assert obj is not None, f'Unable to create sorting from arg: {arg}'
    self._object: dict = obj

    # Legacy shorthand: a bare 'firings' key implies the 'mda' format
    # (default samplerate 30000 when not given).
    if 'firings' in self._object:
        sorting_format = 'mda'
        data = {'firings': self._object['firings'], 'samplerate': self._object.get('samplerate', 30000)}
    else:
        sorting_format = self._object['sorting_format']
        data: dict = self._object['data']

    if sorting_format == 'mda':
        firings_path = kp.load_file(data['firings'])
        assert firings_path is not None, f'Unable to load firings file: {data["firings"]}'
        self._sorting: se.SortingExtractor = MdaSortingExtractor(firings_file=firings_path, samplerate=data['samplerate'])
    elif sorting_format == 'h5_v1':
        h5_path = kp.load_file(data['h5_path'])
        self._sorting = H5SortingExtractorV1(h5_path=h5_path)
    elif sorting_format == 'npy1':
        # Spike times and unit labels live in two separate .npy files.
        times = kp.load_npy(data['times_npy_uri'])
        labels = kp.load_npy(data['labels_npy_uri'])
        extractor = se.NumpySortingExtractor()
        extractor.set_sampling_frequency(data['samplerate'])
        extractor.set_times_labels(times.ravel(), labels.ravel())
        self._sorting = extractor
    elif sorting_format == 'snippets1':
        self._sorting = Snippets1SortingExtractor(snippets_h5_uri=data['snippets_h5_uri'], p2p=True)
    elif sorting_format == 'npy2':
        # A single .npz bundle carrying indexes, labels and the sampling rate.
        npz = kp.load_npy(data['npz_uri'])
        extractor = se.NumpySortingExtractor()
        extractor.set_sampling_frequency(float(npz['sampling_frequency']))
        extractor.set_times_labels(npz['spike_indexes'].ravel(), npz['spike_labels'].ravel())
        self._sorting = extractor
    elif sorting_format == 'nwb':
        from .nwbextractors import NwbSortingExtractor
        path0 = kp.load_file(data['path'])
        self._sorting: se.SortingExtractor = NwbSortingExtractor(path0)
    elif sorting_format == 'in_memory':
        in_mem = get_in_memory_object(data)
        if in_mem is None:
            raise Exception('Unable to find in-memory object for sorting')
        self._sorting = in_mem
    else:
        raise Exception(f'Unexpected sorting format: {sorting_format}')

    self.copy_unit_properties(sorting=self._sorting)
def __init__(self, arg, samplerate=None):
    """Wrap a SortingExtractor built from *arg*.

    *arg* is either a sorting object (dict with a 'sorting_format' key) or
    anything `_create_object_for_arg` can convert. Supported formats here:
    'mda', 'h5_v1', 'npy1' and 'npy2'.
    """
    super().__init__()
    if (isinstance(arg, dict)) and ('sorting_format' in arg):
        obj = dict(arg)
    else:
        obj = _create_object_for_arg(arg, samplerate=samplerate)
        assert obj is not None, f'Unable to create sorting from arg: {arg}'
    self._object: dict = obj

    sorting_format = self._object['sorting_format']
    data: dict = self._object['data']

    if sorting_format == 'mda':
        firings_path = kp.load_file(data['firings'])
        assert firings_path is not None, f'Unable to load firings file: {data["firings"]}'
        self._sorting: se.SortingExtractor = MdaSortingExtractor(firings_file=firings_path, samplerate=data['samplerate'])
    elif sorting_format == 'h5_v1':
        h5_path = kp.load_file(data['h5_path'])
        self._sorting = H5SortingExtractorV1(h5_path=h5_path)
    elif sorting_format == 'npy1':
        # Times and labels are stored as two separate .npy files.
        times = kp.load_npy(data['times_npy_uri'])
        labels = kp.load_npy(data['labels_npy_uri'])
        extractor = se.NumpySortingExtractor()
        extractor.set_sampling_frequency(data['samplerate'])
        extractor.set_times_labels(times.ravel(), labels.ravel())
        self._sorting = extractor
    elif sorting_format == 'npy2':
        # One .npz bundle with spike indexes, labels and sampling rate.
        npz = kp.load_npy(data['npz_uri'])
        extractor = se.NumpySortingExtractor()
        extractor.set_sampling_frequency(float(npz['sampling_frequency']))
        extractor.set_times_labels(npz['spike_indexes'].ravel(), npz['spike_labels'].ravel())
        self._sorting = extractor
    else:
        raise Exception(f'Unexpected sorting format: {sorting_format}')

    self.copy_unit_properties(sorting=self._sorting)
def prepare_recording(*, bin_uri, bin_file_size, raw_num_channels, chanmap_mat_uri, manip_timestamps_uri, manip_positions_uri, meta_uri):
    """Build a 'bin1' recording object plus manipulator traces.

    Parameters
    ----------
    bin_uri : kachery URI of the raw int16 .bin file
    bin_file_size : size of the .bin file in bytes
    raw_num_channels : number of channels in the raw file
    chanmap_mat_uri : kachery URI of the kilosort channel-map .mat file
    manip_timestamps_uri, manip_positions_uri : kachery URIs of manipulator .npy data
    meta_uri : kachery URI of the .meta text file (loaded but not yet used)

    Returns
    -------
    (recording_object_dict, manip_timestamps, manip_positions)
    """
    manip_timestamps = kp.load_npy(manip_timestamps_uri)
    manip_positions = kp.load_npy(manip_positions_uri)
    # int16 samples => 2 bytes per channel per frame
    num_frames = bin_file_size / (raw_num_channels * 2)
    print(num_frames)
    assert num_frames == int(num_frames), f'Bin file size {bin_file_size} is not a whole number of frames for {raw_num_channels} channels'
    num_frames = int(num_frames)
    samplerate = 30000
    # Bug fix: previously a hard-coded sha1 URI was used here, silently
    # ignoring the chanmap_mat_uri argument.
    chanmap, xcoords, ycoords = load_chanmap_data_from_mat(chanmap_mat_uri)
    meta_lines = kp.load_text(meta_uri).split('\n')  # perhaps use in future
    num_channels = len(chanmap)
    print(f'Number of channels: {num_channels}')
    channel_ids = [int(i) for i in range(num_channels)]
    channel_map = dict(
        zip([str(c) for c in channel_ids],
            [int(chanmap[i]) for i in range(num_channels)]))
    channel_positions = dict(
        zip([str(c) for c in channel_ids],
            [[float(xcoords[i]), float(ycoords[i])] for i in range(num_channels)]))
    ret = dict(recording_format='bin1',
               data=dict(raw=bin_uri,
                         raw_num_channels=raw_num_channels,
                         num_frames=num_frames,
                         samplerate=samplerate,
                         channel_ids=channel_ids,
                         channel_map=channel_map,
                         channel_positions=channel_positions))
    return ret, manip_timestamps, manip_positions
def sample_data_object_slices(data_uri, slices, component_indices: List[int], mode: str):
    """Sample selected components of a 4-D array along planar slices.

    For each slice, a (nx, ny) grid of points is pushed through the slice's
    4x4 `transformation` to voxel coordinates; in-bounds voxels are sampled
    from `X[component, x, y, z]` under the requested `mode`.

    Parameters
    ----------
    data_uri : kachery URI of a 4-D .npy array (component, x, y, z)
    slices : list of dicts with keys 'nx', 'ny', 'transformation'
        (the grid size is taken from the first slice and applied to all)
    component_indices : components of axis 0 to sample
    mode : 'real', 'imag' or 'abs'; any other value leaves zeros

    Returns
    -------
    list of (nx, ny, num_components) nested lists, one per slice
    """
    # Robustness fix: an empty slice list previously raised IndexError.
    if not slices:
        return []
    X = kp.load_npy(data_uri)
    nx = slices[0]['nx']
    ny = slices[0]['ny']
    num_components = len(component_indices)
    # Hoist the mode dispatch out of the triple loop (was re-tested per voxel).
    transform = {'real': np.real, 'imag': np.imag, 'abs': np.abs}.get(mode)
    ret = []
    for slc in slices:  # renamed from 'slice' to avoid shadowing the builtin
        A = np.zeros((nx, ny, num_components), dtype=np.float32)
        transformation = np.array(slc['transformation'])
        for ix in range(nx):
            for iy in range(ny):
                # Map grid point (ix, iy) into voxel space; 0.5 centers the slab.
                p = transformation @ [ix, iy, 0.5, 1]
                x_ind = int(np.floor(p[0]))
                y_ind = int(np.floor(p[1]))
                z_ind = int(np.floor(p[2]))
                if (0 <= x_ind < X.shape[1]) and (0 <= y_ind < X.shape[2]) and (0 <= z_ind < X.shape[3]):
                    if transform is not None:
                        for c in range(num_components):
                            A[ix, iy, c] = transform(X[component_indices[c], x_ind, y_ind, z_ind])
        ret.append(A.tolist())
    return ret
def prepare_mh_sortings(le_recordings_by_id):
    """Build sorting entries for the contributed M. Hennig sortings.

    Parameters
    ----------
    le_recordings_by_id : dict mapping recording_id -> recording dict with
        'recordingPath' and 'recordingObject' keys

    Returns
    -------
    list of dicts describing each sorting (ids, paths, objects, tags)
    """
    x = [
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://28efb237ea07041eb94993a316c53c1f22f59c64/hdsort.npz?manifest=32dc916a479afa8fc7932c12818750bc6b3b9956'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://ad8ecc05529ca124ba204a9110c7a50d3f0916e0/herdingspikes.npz?manifest=640cd612d3791a4dd18b0fd704716a021ac6170b'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://63f8577ee830f6e854fa37fa6dc9f300ddf5dcd2/ironclust.npz?manifest=63da49e17999ecf6082a1b7b1fcd50574a83ff57'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://b5cc1eed184a9cb544cd11f49141fe59e12d473c/kilosort2.npz?manifest=7d4ec32d692c9ed3b72aaefcf0c31aa0352ec95b'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'spykingcircus',
            'npz_uri': 'sha1://cb07e45b1b969ebfa5a29faf7156585365104349/spykingcircus.npz?manifest=1fdb0dd7642a816db185e975bf43c85fa9bb6578'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://dda1bfa8074c4a391bd941e6a341e493a0737768/hdsort.npz?manifest=6b0b78fe3508d1ddfed26b8666df1b7d94231c69'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://6136940a5e7d2beca95c35f3e000d38ce4d5e596/herdingspikes.npz?manifest=8ab6d0e2050d07f0c39e6dfb391c85513803e5ca'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://9bd3a55848d0ca9e98f899653e9554d965dbf6f1/ironclust.npz?manifest=dbf4ed27e7ce9e3fb4e6f00166423765c16bb161'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://7c5100ee4cb77969a4697b524d12727315ac8f1e/kilosort2.npz?manifest=e36011dee42181fb4dcd76764658b848060a51f1'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'tridesclous',
            'npz_uri': 'sha1://20ac56455bc10c1c42c266d1773a4a58b258786f/tridesclous.npz?manifest=400f5b9a20d0bb3575f8e98859440db38aaccca7'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://d809e0ced7b37c059ee57fbda2f988a5b8dc1a55/hdsort.npz?manifest=fce43cc1a2850e0e7805a98539f24c0816a218e3'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://6b551be075b72dfa5c8df9a43541219630821197/herdingspikes.npz?manifest=b8ece277f8520feae2056f308e3269b6bd32e7a0'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://dfd2eaa009f6bc5b5c3f7eb979d0335f412cd575/ironclust.npz?manifest=0d9cedcf83a0de06be1a620777b2a5838e3c0d12'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://3cf9943dedeb5f39344672ff701eebf12830d075/kilosort2.npz?manifest=8bbe8e6a536e63a274a3bd2e05ecc03116840855'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'spykingcircus',
            'npz_uri': 'sha1://d855d5314f36470719da17e4e5d2f48c808e65d3/spykingcircus.npz?manifest=af8e6189126b228ecde19237fb7a21807c7e2feb'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'tridesclous',
            'npz_uri': 'sha1://927721485f61cc9322536a8e9b457088b9dc16c7/tridesclous.npz?manifest=ee127bacf3d27de75b69313920af4691dd09c309'
        },
    ]
    le_sortings = []
    for a in x:
        recording_id = a['recording_id']
        sorter_name = a['sorter_name']
        # Bug fix: this print was a plain string literal missing the f prefix,
        # so it printed '{recording_id} {sorter_name}' verbatim.
        print(f'{recording_id} {sorter_name}')
        npz_uri = a['npz_uri']
        d = kp.load_npy(npz_uri)
        print(d)
        sorting_object = {
            'sorting_format': 'npy2',
            'data': {
                'npz_uri': npz_uri,
                'unit_ids': d['unit_ids'].tolist(),
                'sampling_frequency': float(d['sampling_frequency'])
            }
        }
        sorting_path = kp.store_object(sorting_object, basename=recording_id + '--' + sorter_name + '.json')
        le_recording = le_recordings_by_id[recording_id]
        print(sorting_path)
        le_sortings.append(dict(
            sortingId=recording_id + ':mh-' + sorter_name,
            sortingLabel=recording_id + ':mh-' + sorter_name,
            sortingPath=sorting_path,
            sortingObject=sorting_object,
            recordingId=recording_id,
            recordingPath=le_recording['recordingPath'],
            recordingObject=le_recording['recordingObject'],
            tags=['contributed'],
            description=f'''
{sorter_name} applied to {recording_id} (contributed by M. Hennig)
'''.strip()
        ))
    return le_sortings
import kachery_p2p as kp
import numpy as np

# Smoke test: fetch a stored .npy file through kachery-p2p and verify that its
# contents match the meshgrid constructed locally.
expected = np.meshgrid(np.arange(10000), np.arange(1002))
uri = 'sha1://1d671d3393b9f67a8f32af1b3dc6017caa96c8ee/file.npy?manifest=f72e9a974ea212b973b132e766802a21795a58db'
local_path = kp.load_file(uri)
print(local_path)
loaded = kp.load_npy(uri)
assert np.all(loaded == expected)
print('Success.')
def cortexlab_create_recording_object(
    bin_uri,
    bin_size,  # Later kachery-p2p will allow us to get this information from bin_uri
    channel_map_npy_uri,
    channel_positions_npy_uri,
    raw_num_channels,
    samplerate
):
    """Assemble a 'bin1' recording object from a raw .bin URI plus channel metadata.

    The channel map and channel positions are loaded from .npy files; the frame
    count is derived from the file size (int16 => 2 bytes/sample).
    """
    # dd = kp.read_dir(dirname)
    # bin_sha1 = dd['files'][bin_fname]['sha1']
    # bin_size = dd['files'][bin_fname]['size']
    # bin_uri = f'sha1://{bin_sha1}/raw.bin'
    X_channel_map = kp.load_npy(channel_map_npy_uri)
    X_channel_positions = kp.load_npy(channel_positions_npy_uri)
    # X_channel_map = kp.load_npy(dirname + '/channel_map.npy')
    # X_channel_positions = kp.load_npy(dirname + '/channel_positions.npy')
    channel_ids = list(range(len(X_channel_map)))
    channel_map = {str(ch): int(X_channel_map[ch]) for ch in channel_ids}
    channel_positions = {
        str(ch): _listify_ndarray(X_channel_positions[ch, :].ravel())
        for ch in channel_ids
    }
    num_frames = int(bin_size / raw_num_channels / 2)
    assert num_frames * raw_num_channels * 2 == bin_size, f'Unexpected size of bin file: {bin_size} <> {num_frames * raw_num_channels * 2}'
    return dict(
        recording_format='bin1',
        data=dict(
            raw=bin_uri,
            raw_num_channels=raw_num_channels,
            num_frames=num_frames,
            samplerate=samplerate,
            channel_ids=channel_ids,
            channel_map=channel_map,
            channel_positions=channel_positions))


# @hi.function('cortexlab_create_sorting_object', '0.1.3')
# @hi.container('docker://magland/labbox-ephys-processing:0.2.18')
# def cortexlab_create_sorting_object(
#     times_npy_uri,
#     labels_npy_uri,
#     samplerate=30000
# ):
#     with hi.TemporaryDirectory() as tmpdir:
#         import spikeextractors as se
#         import kachery as ka
#         import h5py
#         times = ka.load_npy(times_npy_uri)
#         labels = ka.load_npy(labels_npy_uri)
#         sorting = se.NumpySortingExtractor()
#         sorting.set_sampling_frequency(samplerate)
#         sorting.set_times_labels(times.ravel(), labels.ravel())
#         save_path = tmpdir + '/sorting.h5'
#         unit_ids = sorting.get_unit_ids()
#         samplerate = sorting.get_sampling_frequency()
#         with h5py.File(save_path, 'w') as f:
#             f.create_dataset('unit_ids', data=np.array(unit_ids).astype(np.int32))
#             f.create_dataset('sampling_frequency', data=np.array([samplerate]).astype(np.float64))
#             for unit_id in unit_ids:
#                 x = sorting.get_unit_spike_train(unit_id=unit_id)
#                 f.create_dataset(f'unit_spike_trains/{unit_id}', data=np.array(x).astype(np.float64))
#     return dict(
#         sorting_format='h5_v1',
#         data=dict(
#             h5_path=ka.store_file(save_path)
#         )
#     )