def from_memory(recording: se.RecordingExtractor, serialize=False, serialize_dtype=None):
    """Wrap a recording extractor as a LabboxEphysRecordingExtractor.

    If ``serialize`` is False the live object is registered in memory and
    referenced by an ``'in_memory'`` descriptor. If ``serialize`` is True the
    traces are written to a temporary .mda file, stored via kachery, and
    referenced by URI in a ``'bin1'`` descriptor (``serialize_dtype`` is then
    required).

    Raises
    ------
    Exception
        If ``serialize`` is True but ``serialize_dtype`` was not provided.
    """
    if not serialize:
        # Fast path: no serialization requested -- register the live object.
        return LabboxEphysRecordingExtractor({
            'recording_format': 'in_memory',
            'data': register_in_memory_object(recording)
        })

    if serialize_dtype is None:
        raise Exception(
            'You must specify the serialize_dtype when serializing recording extractor in from_memory()'
        )
    with hi.TemporaryDirectory() as tmpdir:
        raw_fname = tmpdir + '/' + _random_string(10) + '_recording.mda'
        se.BinDatRecordingExtractor.write_recording(
            recording=recording, save_path=raw_fname, time_axis=0, dtype=serialize_dtype)
        # Hard links avoid copying the (potentially large) raw file into the store.
        with ka.config(use_hard_links=True):
            raw_uri = ka.store_file(raw_fname, basename='raw.mda')

        n_channels = recording.get_num_channels()
        ids = [int(a) for a in recording.get_channel_ids()]
        x_pos = [recording.get_channel_property(ch, 'location')[0] for ch in ids]
        y_pos = [recording.get_channel_property(ch, 'location')[1] for ch in ids]
        # zip against range(n_channels) mirrors the original pairing semantics.
        channel_map = {str(ch): int(idx) for idx, ch in zip(range(n_channels), ids)}
        channel_positions = {
            str(ch): [float(x_pos[idx]), float(y_pos[idx])]
            for idx, ch in zip(range(n_channels), ids)
        }
        return LabboxEphysRecordingExtractor({
            'recording_format': 'bin1',
            'data': {
                'raw': raw_uri,
                'raw_num_channels': n_channels,
                'num_frames': int(recording.get_num_frames()),
                'samplerate': float(recording.get_sampling_frequency()),
                'channel_ids': ids,
                'channel_map': channel_map,
                'channel_positions': channel_positions
            }
        })
def _run(self, recording: se.RecordingExtractor, output_folder: Path):
    """Run the containerized IronClust entry point on the prepared dataset.

    Writes the sorter parameters (plus the sampling rate) to
    ``<output_folder>/ironclust_dataset/argfile.txt``, invokes ``/run_irc`` in
    a ``tmp`` working directory, verifies that ``firings.mda`` was produced,
    and records the sampling rate to ``tmp/samplerate.txt`` for the result
    extraction step.

    Parameters
    ----------
    recording : se.RecordingExtractor
        Recording being sorted (used only for metadata here; the raw data is
        assumed to have been exported to ``ironclust_dataset`` already).
    output_folder : Path
        Working directory containing ``ironclust_dataset``.

    Raises
    ------
    Exception
        If the sorter exits non-zero or does not produce ``firings.mda``.
    """
    # NOTE(review): removed unused local `source_dir = Path(__file__).parent`
    # -- it was never referenced in this variant of _run.
    dataset_dir = output_folder / 'ironclust_dataset'
    samplerate = recording.get_sampling_frequency()

    num_channels = recording.get_num_channels()
    num_timepoints = recording.get_num_frames()
    duration_minutes = num_timepoints / samplerate / 60
    if self.verbose:
        print(
            'Num. channels = {}, Num. timepoints = {}, duration = {} minutes'
            .format(num_channels, num_timepoints, duration_minutes))

    if self.verbose:
        print('Creating argfile.txt...')
    # One "key=value" line per sorter parameter; samplerate is appended last.
    txt = ''
    for key0, val0 in self.params.items():
        txt += '{}={}\n'.format(key0, val0)
    txt += 'samplerate={}\n'.format(samplerate)
    with (dataset_dir / 'argfile.txt').open('w') as f:
        f.write(txt)

    tmpdir = output_folder / 'tmp'
    os.makedirs(str(tmpdir), exist_ok=True)
    if self.verbose:
        print(
            'Running ironclust in {tmpdir}...'.format(tmpdir=str(tmpdir)))
    # /run_irc is presumably provided at the container image root -- TODO
    # confirm against the deployment environment.
    shell_cmd = '''
#!/bin/bash
cd {tmpdir}
/run_irc {dataset_dir} {tmpdir} {dataset_dir}/argfile.txt
'''.format(tmpdir=str(tmpdir), dataset_dir=str(dataset_dir))
    shell_script = ShellScript(shell_cmd)
    shell_script.start()
    retcode = shell_script.wait()
    if retcode != 0:
        raise Exception('ironclust returned a non-zero exit code')

    result_fname = str(tmpdir / 'firings.mda')
    if not os.path.exists(result_fname):
        raise Exception('Result file does not exist: ' + result_fname)
    # Persist the sampling rate so the results can be interpreted later.
    samplerate_fname = str(tmpdir / 'samplerate.txt')
    with open(samplerate_fname, 'w') as f:
        f.write('{}'.format(samplerate))
def __init__(self, *, recording: se.RecordingExtractor, freq_min, freq_max, freq_wid):
    """Bandpass-filter wrapper around a recording.

    Chooses a chunk size of the form 2**k - 2*padding so that each chunk plus
    its padding on both sides has a power-of-two length (efficient FFTs), then
    initializes the FilterRecording base class with it.

    Parameters
    ----------
    recording : se.RecordingExtractor
        Recording to filter.
    freq_min, freq_max, freq_wid
        Bandpass parameters, stored in ``self._params`` for downstream use.
    """
    self._padding = 3000
    # Working-memory budget of ~100 MB, assuming 4 bytes per sample (float32).
    target_ram = 100 * 1000 * 1000
    # Aim for at most 30 s of data per chunk, reduced if RAM would be exceeded.
    target_chunk_size = math.ceil(
        min(recording.get_sampling_frequency() * 30,
            target_ram / (recording.get_num_channels() * 4)))
    # It's important that the fft's have size 2^x. So we prepare the chunk
    # sizes to have size 2^x - 2*padding.
    # max(..., 1) guards log2 against a zero/negative target.
    pow2 = 2 ** math.ceil(np.log2(max(target_chunk_size, 1)))
    # BUGFIX: for very large channel counts target_chunk_size can fall below
    # 2*padding, which previously produced a zero or negative chunk_size.
    # Grow the power of two until a positive chunk remains; the 2^x - 2*padding
    # FFT-size invariant is preserved.
    while pow2 <= 2 * self._padding:
        pow2 *= 2
    chunk_size = int(pow2 - self._padding * 2)
    FilterRecording.__init__(self, recording=recording, chunk_size=chunk_size)
    self._params = dict(name='bandpass_filter',
                        freq_min=freq_min,
                        freq_max=freq_max,
                        freq_wid=freq_wid)
    self._recording = recording
def write_recording(recording: RecordingExtractor, save_path: PathType, dtype: DtypeType = None,
                    **write_binary_kwargs):
    """
    Convert and save the recording extractor to Neuroscope format.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be converted and saved.
    save_path: str
        Path to desired target folder. The name of the files will be the same
        as the final directory.
    dtype: dtype
        Optional. Data type to be used in writing; must be int16 or int32
        (default). Will throw a warning if stored recording type from
        get_traces() does not match.
    **write_binary_kwargs: keyword arguments for write_to_binary_dat_format
        function
        - chunk_size
        - chunk_mb
    """
    save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)

    # File-like save paths keep only the stem; directory-like paths keep the name.
    recording_name = save_path.name if save_path.suffix == "" else save_path.stem
    save_xml_filepath = save_path / f"{recording_name}.xml"
    recording_filepath = save_path / recording_name

    # Refuse to clobber an existing parameters file.
    if save_xml_filepath.is_file():
        raise FileExistsError(f"{save_xml_filepath} already exists!")

    # Build the Neuroscope parameter tree, keeping direct references to the
    # nodes instead of re-finding them later.
    xml_root = et.Element('xml')
    acquisition = et.SubElement(xml_root, 'acquisitionSystem')
    nbits_node = et.SubElement(acquisition, 'nBits')
    nchannels_node = et.SubElement(acquisition, 'nChannels')
    samplingrate_node = et.SubElement(acquisition, 'samplingRate')

    stored_dtype = str(recording.get_dtype())
    loc = stored_dtype.find('int')
    stored_bits = stored_dtype[(loc + 3):(loc + 5)]
    valid_dtype = ["16", "32"]

    if dtype is None:
        # No dtype requested: adopt the stored integer width when valid,
        # otherwise fall back to int32 with a warning.
        if loc != -1 and stored_bits in valid_dtype:
            n_bits = stored_bits
        else:
            print(
                "Warning: Recording data type must be int16 or int32! Defaulting to int32."
            )
            n_bits = "32"
        dtype = f"int{n_bits}"  # update dtype in pass to BinDatRecordingExtractor.write_recording
    else:
        dtype = str(dtype)  # if user passed numpy data type
        loc = dtype.find('int')
        assert loc != -1, "Data type must be int16 or int32! Non-integer received."
        n_bits = dtype[(loc + 3):(loc + 5)]
        assert n_bits in valid_dtype, "Data type must be int16 or int32!"

    nbits_node.text = n_bits
    nchannels_node.text = str(recording.get_num_channels())
    samplingrate_node.text = str(recording.get_sampling_frequency())

    # pretty_print implies `et` is lxml.etree here -- TODO confirm import.
    et.ElementTree(xml_root).write(str(save_xml_filepath), pretty_print=True)

    recording.write_to_binary_dat_format(recording_filepath, dtype=dtype, **write_binary_kwargs)
def _run(self, recording: se.RecordingExtractor, output_folder: Path):
    """Run the MATLAB-based IronClust sorter on `recording`.

    Generates a MATLAB driver script (run_ironclust.m) that calls
    p_ironclust(...) on the dataset previously exported to
    ``<output_folder>/ironclust_dataset`` (raw.mda / geom.csv), launches it via
    a platform-appropriate shell command, verifies that ``firings.mda`` was
    produced, and records the sampling rate for the result-extraction step.

    Raises
    ------
    Exception
        If MATLAB exits with a non-zero code or produces no firings.mda.
    """
    recording = recover_recording(recording)
    dataset_dir = output_folder / 'ironclust_dataset'
    # This file's directory is added to the MATLAB path below so the
    # p_ironclust entry point can be resolved.
    source_dir = Path(__file__).parent
    samplerate = recording.get_sampling_frequency()

    # Warn about double filtering: IronClust applies its own bandpass when
    # params['filter'] is enabled.
    if recording.is_filtered and self.params['filter']:
        print("Warning! The recording is already filtered, but Ironclust filter is enabled. You can disable "
              "filters by setting 'filter' parameter to False")

    num_channels = recording.get_num_channels()
    num_timepoints = recording.get_num_frames()
    duration_minutes = num_timepoints / samplerate / 60
    if self.verbose:
        print('Num. channels = {}, Num. timepoints = {}, duration = {} minutes'.format(
            num_channels, num_timepoints, duration_minutes))

    if self.verbose:
        print('Creating argfile.txt...')
    # One "key=value" line per sorter parameter; samplerate appended last.
    txt = ''
    for key0, val0 in self.params.items():
        txt += '{}={}\n'.format(key0, val0)
    txt += 'samplerate={}\n'.format(samplerate)
    with (dataset_dir / 'argfile.txt').open('w') as f:
        f.write(txt)

    tmpdir = output_folder / 'tmp'
    os.makedirs(str(tmpdir), exist_ok=True)
    if self.verbose:
        print('Running ironclust in {tmpdir}...'.format(tmpdir=str(tmpdir)))
    # MATLAB driver: add the sorter paths, run p_ironclust, and translate any
    # MATLAB error into a non-zero exit code via quit(1).
    cmd = '''
addpath('{source_dir}');
addpath('{ironclust_path}', '{ironclust_path}/matlab', '{ironclust_path}/matlab/mdaio');
try
    p_ironclust('{tmpdir}', '{dataset_dir}/raw.mda', '{dataset_dir}/geom.csv', '', '', '{tmpdir}/firings.mda', '{dataset_dir}/argfile.txt');
catch
    fprintf('----------------------------------------');
    fprintf(lasterr());
    quit(1);
end
quit(0);
'''
    cmd = cmd.format(ironclust_path=IronClustSorter.ironclust_path, tmpdir=str(tmpdir),
                     dataset_dir=str(dataset_dir), source_dir=str(source_dir))

    matlab_cmd = ShellScript(cmd, script_path=str(tmpdir / 'run_ironclust.m'))
    matlab_cmd.write()

    # 'darwin' contains the substring 'win', hence the explicit macOS check.
    if 'win' in sys.platform and sys.platform != 'darwin':
        shell_cmd = '''
cd {tmpdir}
matlab -nosplash -wait -log -r run_ironclust
'''.format(tmpdir=tmpdir)
    else:
        shell_cmd = '''
#!/bin/bash
cd "{tmpdir}"
matlab -nosplash -nodisplay -log -r run_ironclust
'''.format(tmpdir=tmpdir)
    shell_script = ShellScript(shell_cmd, script_path=output_folder / f'run_{self.sorter_name}',
                               log_path=output_folder / f'{self.sorter_name}.log', verbose=self.verbose)
    shell_script.start()
    retcode = shell_script.wait()
    if retcode != 0:
        raise Exception('ironclust returned a non-zero exit code')

    result_fname = str(tmpdir / 'firings.mda')
    if not os.path.exists(result_fname):
        raise Exception('Result file does not exist: ' + result_fname)
    # Persist the sampling rate so the results can be interpreted later.
    samplerate_fname = str(tmpdir / 'samplerate.txt')
    with open(samplerate_fname, 'w') as f:
        f.write('{}'.format(samplerate))
def _run(self, recording: se.RecordingExtractor, output_folder: Path):
    """Run IronClust (irc2) on the dataset previously exported to output_folder.

    Writes the sorter parameters (plus the sampling rate) to
    ``<output_folder>/ironclust_dataset/argfile.txt``, then either runs a
    compiled IronClust binary (if the IRONCLUST_BINARY_PATH environment
    variable is set) or generates and runs a MATLAB script calling irc2(...).
    Verifies that ``firings.mda`` was produced and records the sampling rate
    to ``tmp/samplerate.txt`` for the result-extraction step.

    Raises
    ------
    Exception
        If the sorter exits non-zero or does not produce ``firings.mda``.
    """
    # NOTE(review): removed unused local `source_dir = Path(__file__).parent`
    # -- it was never referenced in this variant of _run.
    dataset_dir = output_folder / 'ironclust_dataset'
    samplerate = recording.get_sampling_frequency()

    num_channels = recording.get_num_channels()
    num_timepoints = recording.get_num_frames()
    duration_minutes = num_timepoints / samplerate / 60
    if self.verbose:
        print(
            'Num. channels = {}, Num. timepoints = {}, duration = {} minutes'
            .format(num_channels, num_timepoints, duration_minutes))

    if self.verbose:
        print('Creating argfile.txt...')
    # One "key=value" line per sorter parameter; samplerate appended last.
    txt = ''
    for key0, val0 in self.params.items():
        txt += '{}={}\n'.format(key0, val0)
    txt += 'samplerate={}\n'.format(samplerate)
    with (dataset_dir / 'argfile.txt').open('w') as f:
        f.write(txt)

    tmpdir = output_folder / 'tmp'
    os.makedirs(str(tmpdir), exist_ok=True)
    if self.verbose:
        print(
            'Running ironclust in {tmpdir}...'.format(tmpdir=str(tmpdir)))

    if os.getenv('IRONCLUST_BINARY_PATH', None):
        # Compiled IronClust available: invoke the binary directly.
        shell_cmd = f'''
#!/bin/bash
cd {tmpdir}
exec $IRONCLUST_BINARY_PATH {dataset_dir} {tmpdir} {dataset_dir}/argfile.txt
'''
    else:
        # MATLAB driver: run irc2 and translate any MATLAB error into a
        # non-zero exit code via quit(1).
        matlab_script = f'''
try
    addpath(genpath('{self.ironclust_path}'));
    irc2('{dataset_dir}', '{str(tmpdir)}', '{dataset_dir}/argfile.txt')
catch
    fprintf('----------------------------------------');
    fprintf(lasterr());
    quit(1);
end
quit(0);
'''
        ShellScript(matlab_script).write(
            str(output_folder / 'ironclust_script.m'))

        # BUGFIX: sys.platform on macOS is 'darwin', which contains the
        # substring 'win', so the original `"win" in sys.platform` check sent
        # macOS down the Windows branch. Exclude 'darwin' explicitly (same
        # idiom as the other _run variant in this file).
        if "win" in sys.platform and sys.platform != 'darwin':
            shell_cmd = f'''
cd {str(output_folder)}
matlab -nosplash -wait -batch ironclust_script
'''
        else:
            shell_cmd = f'''
#!/bin/bash
cd "{str(output_folder)}"
matlab -nosplash -nodisplay -r ironclust_script
'''
    shell_script = ShellScript(shell_cmd, redirect_output_to_stdout=True)
    shell_script.start()
    retcode = shell_script.wait()
    if retcode != 0:
        raise Exception('ironclust returned a non-zero exit code')

    result_fname = str(tmpdir / 'firings.mda')
    if not os.path.exists(result_fname):
        raise Exception('Result file does not exist: ' + result_fname)
    # Persist the sampling rate so the results can be interpreted later.
    samplerate_fname = str(tmpdir / 'samplerate.txt')
    with open(samplerate_fname, 'w') as f:
        f.write('{}'.format(samplerate))
def write_recording(recording: RecordingExtractor, save_path: PathType, dtype: DtypeType = None,
                    **write_binary_kwargs):
    """
    Convert and save the recording extractor to Neuroscope format.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be converted and saved.
    save_path: str
        Path to desired target folder. The name of the files will be the same
        as the final directory.
    dtype: dtype
        Optional. Data type to be used in writing; must be int16 or int32
        (default). Will throw a warning if stored recording type from
        get_traces() does not match.
    **write_binary_kwargs: keyword arguments for write_to_binary_dat_format
        function
        - chunk_size
        - chunk_mb
    """
    save_path = Path(save_path)
    if not save_path.is_dir():
        os.makedirs(save_path)

    # File-like save paths keep only the stem; directory-like paths keep the name.
    recording_name = save_path.name if save_path.suffix == '' else save_path.stem
    save_xml_filepath = save_path / (str(recording_name) + '.xml')
    recording_filepath = save_path / recording_name

    # Refuse to clobber an existing parameters file.
    if save_xml_filepath.is_file():
        raise FileExistsError(f'{save_xml_filepath} already exists!')

    soup = BeautifulSoup("", 'xml')

    stored_dtype = str(recording.get_dtype())
    loc = stored_dtype.find('int')
    stored_bits = stored_dtype[(loc + 3):(loc + 5)]

    if dtype is None:  # user did not specify data type
        # Adopt the stored integer width when valid; otherwise fall back to
        # int32 with a warning.
        if loc != -1 and stored_bits in ['16', '32']:
            n_bits = stored_bits
        else:
            print('Warning: Recording data type must be int16 or int32! Defaulting to int32.')
            n_bits = '32'
        dtype = 'int' + n_bits  # update dtype in pass to BinDatRecordingExtractor.write_recording
    else:
        dtype = str(dtype)  # if user passed numpy data type
        loc = dtype.find('int')
        assert loc != -1, 'Data type must be int16 or int32! Non-integer received.'
        n_bits = dtype[(loc + 3):(loc + 5)]
        assert n_bits in ['16', '32'], 'Data type must be int16 or int32!'

    # Append the three Neuroscope parameter tags in order.
    for tag_name, tag_text in (('nbits', n_bits),
                               ('nchannels', str(recording.get_num_channels())),
                               ('samplingrate', str(recording.get_sampling_frequency()))):
        node = soup.new_tag(tag_name)
        node.string = tag_text
        soup.append(node)

    # Write the parameters file.
    with save_xml_filepath.open("w") as f:
        f.write(str(soup))

    recording.write_to_binary_dat_format(recording_filepath, dtype=dtype, **write_binary_kwargs)