def compute_recording_info(recording_path, json_out):
    """Summarize a recording and dump the summary as JSON.

    Loads the recording at ``recording_path`` and writes a dict with
    ``samplerate``, ``num_channels`` and ``duration_sec`` to ``json_out``.
    """
    rec = AutoRecordingExtractor(recording_path)
    info = {
        'samplerate': rec.get_sampling_frequency(),
        'num_channels': len(rec.get_channel_ids()),
        'duration_sec': rec.get_num_frames() / rec.get_sampling_frequency(),
    }
    with open(json_out, 'w') as out_file:
        json.dump(info, out_file)
class Recording:
    """Widget backend that loads a recording and publishes its properties.

    Reacts to frontend state changes: reads the ``recording`` entry from the
    incoming state, wraps it in an ``AutoRecordingExtractor``, and pushes
    channel/timing info back through ``set_state`` (supplied by the enclosing
    widget framework -- not defined in this class).
    """

    def __init__(self):
        super().__init__()
        # Lazily created AutoRecordingExtractor; loaded on first state change.
        self._recording = None

    def javascript_state_changed(self, prev_state, state):
        """Handle a state update from the frontend.

        Loads the recording on first call, then publishes its basic
        properties (channel ids/locations, frame count, sample rate).
        """
        self._set_status('running', 'Running Recording')
        if not self._recording:
            self._set_status('running', 'Loading recording')
            recording0 = state.get('recording', None)
            if not recording0:
                self._set_error('Missing: recording')
                return
            try:
                self._recording = AutoRecordingExtractor(recording0)
            except Exception as err:
                traceback.print_exc()
                self._set_error('Problem initiating recording: {}'.format(err))
                return
        self._set_status('running', 'Loading recording data')
        # Channel locations are optional -- not every extractor provides them.
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through.
        try:
            channel_locations = self._recording.get_channel_locations()
        except Exception:
            channel_locations = None
        self.set_state(dict(
            num_channels=self._recording.get_num_channels(),
            channel_ids=self._recording.get_channel_ids(),
            channel_locations=channel_locations,
            num_timepoints=self._recording.get_num_frames(),
            samplerate=self._recording.get_sampling_frequency(),
            status_message='Loaded recording.'
        ))
        self._set_status('finished', '')

    def _set_state(self, **kwargs):
        # Forward keyword arguments as a dict to the framework's set_state.
        self.set_state(kwargs)

    def _set_error(self, error_message):
        self._set_status('error', error_message)

    def _set_status(self, status, status_message=''):
        self._set_state(status=status, status_message=status_message)
def load_spikeforest_data(recording_path: str, sorting_true_path: str, download=True):
    """Fetch a SpikeForest recording plus its ground-truth sorting.

    Prints a short summary of the recording (sample rate, channels,
    duration) and of the ground-truth units, then returns the pair
    (recording extractor, ground-truth sorting extractor).
    """
    rec = AutoRecordingExtractor(recording_path, download=download)
    gt_sorting = AutoSortingExtractor(sorting_true_path)

    # Summarize the recording.
    sample_rate = rec.get_sampling_frequency()
    ids = rec.get_channel_ids()
    locations = rec.get_channel_locations()
    n_frames = rec.get_num_frames()
    total_time = rec.frame_to_time(n_frames)
    print(f'Sampling frequency:{sample_rate}')
    print(f'Channel ids:{ids}')
    print(f'channel location:{locations}')
    print(f'frame num:{n_frames}')
    print(f'recording duration:{total_time}')

    # Summarize the ground-truth sorting.
    print(f'unit ids:{gt_sorting.get_unit_ids()}')

    return rec, gt_sorting
import numpy as np import kachery as ka from pykilosort import Bunch, add_default_handler, run from spikeextractors.extractors import bindatrecordingextractor as dat from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor dat_path = Path("test/test.bin").absolute() dir_path = dat_path.parent ka.set_config(fr="default_readonly") recording_path = "sha1dir://c0879a26f92e4c876cd608ca79192a84d4382868.manual_franklab/tetrode_600s/sorter1_1" recording = AutoRecordingExtractor(recording_path, download=True) recording.write_to_binary_dat_format(str(dat_path)) n_channels = len(recording.get_channel_ids()) probe = Bunch() probe.NchanTOT = n_channels probe.chanMap = np.array(range(0, n_channels)) probe.kcoords = np.ones(n_channels) probe.xc = recording.get_channel_locations()[:, 0] probe.yc = recording.get_channel_locations()[:, 1] add_default_handler(level="DEBUG") params = {"nfilt_factor": 8, "AUCsplit": 0.85, "nskip": 5} run( dat_path, params=params,
class TimeseriesView:
    """Widget backend that serves timeseries segments of a recording.

    Loads a recording on the first frontend state change, publishes display
    parameters (per-channel offsets, scale factor, segment size), and answers
    ``requestSegment`` messages with base64-encoded data segments, optionally
    downsampled through multiscale recordings.  ``set_state`` and
    ``send_message`` come from the enclosing widget framework.
    """

    def __init__(self):
        super().__init__()
        self._recording = None
        self._multiscale_recordings = None
        # Target total samples per segment; divided by channel count below.
        self._segment_size_times_num_channels = 1000000
        self._segment_size = None

    def javascript_state_changed(self, prev_state, state):
        """Load the recording (once) and publish display parameters."""
        self._set_status('running', 'Running TimeseriesView')
        self._create_efficient_access = state.get('create_efficient_access', False)
        if not self._recording:
            self._set_status('running', 'Loading recording')
            recording0 = state.get('recording', None)
            if not recording0:
                self._set_error('Missing: recording')
                return
            try:
                self._recording = AutoRecordingExtractor(recording0)
            except Exception as err:
                traceback.print_exc()
                self._set_error('Problem initiating recording: {}'.format(err))
                return
        self._set_status('running', 'Loading recording data')
        # Use an initial chunk (up to 25000 frames) to estimate per-channel
        # baseline offsets and an amplitude-based y scale factor.
        traces0 = self._recording.get_traces(
            channel_ids=self._recording.get_channel_ids(),
            start_frame=0,
            end_frame=min(self._recording.get_num_frames(), 25000))
        y_offsets = -np.mean(traces0, axis=1)
        for m in range(traces0.shape[0]):
            traces0[m, :] = traces0[m, :] + y_offsets[m]
        vv = np.percentile(np.abs(traces0), 90)
        y_scale_factor = 1 / (2 * vv) if vv > 0 else 1
        self._segment_size = int(
            np.ceil(self._segment_size_times_num_channels / self._recording.get_num_channels()))
        # Channel locations are optional -- not every extractor provides them.
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through.
        try:
            channel_locations = self._recording.get_channel_locations()
        except Exception:
            channel_locations = None
        self.set_state(
            dict(num_channels=self._recording.get_num_channels(),
                 channel_ids=self._recording.get_channel_ids(),
                 channel_locations=channel_locations,
                 num_timepoints=self._recording.get_num_frames(),
                 y_offsets=y_offsets,
                 y_scale_factor=y_scale_factor,
                 samplerate=self._recording.get_sampling_frequency(),
                 segment_size=self._segment_size,
                 status_message='Loaded recording.'))
        self._set_status('finished', '')

    def on_message(self, msg):
        """Answer a frontend ``requestSegment`` message with segment data."""
        if msg['command'] == 'requestSegment':
            ds = msg['ds_factor']
            ss = msg['segment_num']
            data0 = self._load_data(ds, ss)
            data0_base64 = _mda32_to_base64(data0)
            self.send_message(
                dict(command='setSegment', ds_factor=ds, segment_num=ss,
                     data=data0_base64))

    def _load_data(self, ds, ss):
        """Extract segment ``ss`` at downsample factor ``ds``.

        For ds > 1, segments come from lazily-built multiscale recordings
        (segments there are twice the base size); for ds == 1, traces are
        read directly from the recording.
        """
        if not self._recording:
            return
        logger.info('_load_data {} {}'.format(ds, ss))
        if ds > 1:
            if self._multiscale_recordings is None:
                self.set_state(
                    dict(status_message='Creating multiscale recordings...'))
                self._multiscale_recordings = _create_multiscale_recordings(
                    recording=self._recording,
                    progressive_ds_factor=3,
                    create_efficient_access=self._create_efficient_access)
                self.set_state(
                    dict(status_message='Done creating multiscale recording'))
            rx = self._multiscale_recordings[ds]
            start_time = time.time()
            X = _extract_data_segment(recording=rx, segment_num=ss,
                                      segment_size=self._segment_size * 2)
            logger.info('extracted data segment {} {} {}'.format(
                ds, ss, time.time() - start_time))
            return X
        start_time = time.time()
        traces = self._recording.get_traces(
            start_frame=ss * self._segment_size,
            end_frame=(ss + 1) * self._segment_size)
        logger.info('extracted data segment {} {} {}'.format(
            ds, ss, time.time() - start_time))
        return traces

    def iterate(self):
        pass

    def _set_state(self, **kwargs):
        # Forward keyword arguments as a dict to the framework's set_state.
        self.set_state(kwargs)

    def _set_error(self, error_message):
        self._set_status('error', error_message)

    def _set_status(self, status, status_message=''):
        self._set_state(status=status, status_message=status_message)