def write_recording(recording: RecordingExtractor, save_path: PathType, time_axis: int = 0,
                    dtype: OptionalDtypeType = None, **write_binary_kwargs):
    """
    Save the traces of a recording extractor in binary .dat format.

    Thin convenience wrapper: all work is delegated to
    write_to_binary_dat_format().

    Parameters
    ----------
    recording : RecordingExtractor
        The recording extractor object to be saved in .dat format.
    save_path : str
        The path to the file.
    time_axis : int, optional
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype : dtype
        Type of the saved data. Default float32.
    **write_binary_kwargs: keyword arguments for write_to_binary_dat_format() function
    """
    # Collect everything into one kwargs dict and forward it unchanged.
    forwarded = dict(write_binary_kwargs)
    forwarded.update(time_axis=time_axis, dtype=dtype)
    write_to_binary_dat_format(recording, save_path, **forwarded)
def write_recording(recording, save_path, time_axis=0, dtype=None, chunk_size=None):
    '''Saves the traces of a recording extractor in binary .dat format.

    Pure delegation wrapper around write_to_binary_dat_format().

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor object to be saved in .dat format
    save_path: str
        The path to the file.
    time_axis: 0 (default) or 1
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype: dtype
        Type of the saved data. Default float32.
    chunk_size: None or int
        If not None then the copy done by chunk size. This avoid to much memory consumption for big files.
    '''
    # Forward every option explicitly; behavior is entirely defined by the callee.
    options = dict(time_axis=time_axis, dtype=dtype, chunk_size=chunk_size)
    write_to_binary_dat_format(recording, save_path, **options)
def write_to_binary_dat_format(self, save_path, time_axis=0, dtype=None, chunk_size=None, chunk_mb=500, verbose=False):
    '''Saves the traces of this recording extractor into binary .dat format.

    If `dtype` is None or equal to the extractor's own dtype, the raw payload
    of the source .mda file is streamed straight into `save_path` (skipping the
    Mda header); otherwise, or if that fast copy fails, the generic chunked
    writer is used instead.

    Parameters
    ----------
    save_path: str
        The path to the file.
    time_axis: 0 (default) or 1
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype: dtype
        Type of the saved data. Default float32
    chunk_size: None or int
        If not None then the file is saved in chunks. This avoid to much memory consumption for big files.
        If 'auto' the file is saved in chunks of ~ 500Mb
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    verbose: bool
        If True, output is verbose
    '''
    # Read the .mda header only to learn its size so it can be skipped below.
    X = DiskReadMda(self._timeseries_path)
    header_size = X._header.header_size
    if dtype is None or dtype == self.get_dtype():
        # Fast path: same dtype requested, so copy the sample bytes verbatim.
        # NOTE(review): this copy preserves the on-disk Mda layout and ignores
        # `time_axis` — confirm callers expect that ordering on this path.
        try:
            with open(self._timeseries_path, 'rb') as src, open(save_path, 'wb') as dst:
                src.seek(header_size)
                shutil.copyfileobj(src, dst)
        except Exception as e:
            # Best-effort fallback: report the failure, then use the generic writer.
            print('Error occurred while copying:', e)
            print('Writing to binary')
            write_to_binary_dat_format(self, save_path=save_path, time_axis=time_axis, dtype=dtype, chunk_size=chunk_size, chunk_mb=chunk_mb, verbose=verbose)
    else:
        # A dtype conversion was requested: must go through the chunked writer.
        write_to_binary_dat_format(self, save_path=save_path, time_axis=time_axis, dtype=dtype, chunk_size=chunk_size, chunk_mb=chunk_mb, verbose=verbose)
def write_to_binary_dat_format(self, save_path, time_axis=0, dtype=None, chunk_size=None, chunk_mb=500):
    '''Saves the traces of this recording extractor into binary .dat format.

    When no dtype conversion is needed, the underlying .dat file is copied
    byte-for-byte; otherwise (or if the copy fails) the generic chunked writer
    is used.

    Parameters
    ----------
    save_path: str
        The path to the file.
    time_axis: 0 (default) or 1
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype: dtype
        Type of the saved data. Default float32
    chunk_size: None or int
        If not None then the file is saved in chunks. This avoid to much memory consumption for big files.
        If 'auto' the file is saved in chunks of ~ 500Mb
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    '''
    needs_conversion = dtype is not None and dtype != self.get_dtype()
    if not needs_conversion:
        # Fast path: identical dtype, a plain file copy is enough.
        try:
            shutil.copy(self._datfile, save_path)
            return
        except Exception as e:
            print('Error occurred while copying:', e)
            print('Writing to binary')
    # Either a dtype conversion was requested or the fast copy failed:
    # fall through to the generic chunked writer.
    write_to_binary_dat_format(self, save_path=save_path, time_axis=time_axis,
                               dtype=dtype, chunk_size=chunk_size, chunk_mb=chunk_mb)
def write_to_binary_dat_format(self, save_path, time_axis=0, dtype=None, chunk_size=None,
                               chunk_mb=500, n_jobs=1, joblib_backend='loky', verbose=False):
    '''Saves the traces of this recording extractor into binary .dat format.

    When no dtype conversion is needed, the underlying .dat file is copied
    byte-for-byte; otherwise (or if the copy fails) the generic chunked writer
    is used.

    Parameters
    ----------
    save_path: str
        The path to the file.
    time_axis: 0 (default) or 1
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype: dtype
        Type of the saved data. Default float32
    chunk_size: None or int
        If not None then the file is saved in chunks. This avoid to much memory consumption for big files.
        If 'auto' the file is saved in chunks of ~ 500Mb
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    n_jobs: int
        Number of jobs to use (Default 1)
    joblib_backend: str
        Joblib backend for parallel processing ('loky', 'threading', 'multiprocessing')
    verbose: bool
        If True, output is verbose
    '''
    if dtype is None or dtype == self.get_dtype():
        # Fast path: identical dtype, a plain file copy is enough.
        try:
            shutil.copy(self._datfile, save_path)
            return
        except Exception as e:
            print('Error occurred while copying:', e)
            print('Writing to binary')
    # Either a dtype conversion was requested or the fast copy failed: use the
    # generic chunked writer. Bug fix: `verbose` is now forwarded — the original
    # accepted it but silently dropped it on both delegation calls.
    write_to_binary_dat_format(self, save_path=save_path, time_axis=time_axis, dtype=dtype,
                               chunk_size=chunk_size, chunk_mb=chunk_mb, n_jobs=n_jobs,
                               joblib_backend=joblib_backend, verbose=verbose)
def write_recording(recording, save_path, params=None, raw_fname='raw.mda', params_fname='params.json',
                    geom_fname='geom.csv', dtype=None, chunk_size=None, chunk_mb=500):
    '''Writes a recording to a folder in MountainSort Mda format.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be saved
    save_path: str or Path
        The folder in which the Mda files are saved
    params: dict or None
        Dictionary with optional parameters to save metadata. Sampling frequency is appended
        to this dictionary. If None (default), an empty dictionary is used.
    raw_fname: str
        File name of raw file (default raw.mda)
    params_fname: str
        File name of params file (default params.json)
    geom_fname: str
        File name of geom file (default geom.csv)
    dtype: dtype
        dtype to be used. If None dtype is same as recording traces.
    chunk_size: None or int
        Number of chunks to save the file in. This avoid to much memory consumption for big files.
        If None and 'chunk_mb' is given, the file is saved in chunks of 'chunk_mb' Mb (default 500Mb)
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    '''
    # Bug fix: the original used a mutable default (`params=dict()`) and then
    # mutated it below, leaking "samplerate" across calls. Copy the caller's
    # dict so their input is never modified either.
    params = {} if params is None else dict(params)
    save_path = Path(save_path)
    # Single, idempotent directory creation (replaces the original's duplicated
    # exists()/is_dir()/makedirs/mkdir logic).
    save_path.mkdir(parents=True, exist_ok=True)
    save_file_path = save_path / raw_fname
    parent_dir = save_path
    num_chan = recording.get_num_channels()
    num_frames = recording.get_num_frames()
    geom = recording.get_channel_locations()
    if dtype is None:
        dtype = recording.get_dtype()
    # Normalize generic dtype aliases to the explicit widths Mda expects.
    if dtype == 'float':
        dtype = 'float32'
    if dtype == 'int':
        dtype = 'int16'
    with save_file_path.open('wb') as f:
        header = MdaHeader(dt0=dtype, dims0=(num_chan, num_frames))
        header.write(f)
        # write_to_binary_dat_format takes care of the chunking
        write_to_binary_dat_format(recording, file_handle=f, dtype=dtype, chunk_size=chunk_size,
                                   chunk_mb=chunk_mb)
    params["samplerate"] = recording.get_sampling_frequency()
    with (parent_dir / params_fname).open('w') as f:
        json.dump(params, f)
    np.savetxt(str(parent_dir / geom_fname), geom, delimiter=',')
def write_to_binary_dat_format(self, save_path, time_axis=0, dtype=None, chunk_size=None,
                               chunk_mb=500, n_jobs=1, joblib_backend='loky', verbose=False):
    """Saves the traces of this recording extractor into binary .dat format.

    If `dtype` is None or equal to the extractor's own dtype, the raw payload
    of the source .mda file is streamed straight into `save_path` (skipping the
    Mda header); otherwise, or if that fast copy fails, the generic chunked
    writer is used.

    Parameters
    ----------
    save_path: str
        The path to the file.
    time_axis: 0 (default) or 1
        If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file.
        If 1, the traces shape (nb_channel, nb_sample) is kept in the file.
    dtype: dtype
        Type of the saved data. Default float32
    chunk_size: None or int
        Size of each chunk in number of frames.
        If None (default) and 'chunk_mb' is given, the file is saved in chunks of 'chunk_mb' Mb (default 500Mb)
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    n_jobs: int
        Number of jobs to use (Default 1)
    joblib_backend: str
        Joblib backend for parallel processing ('loky', 'threading', 'multiprocessing')
    verbose: bool
        If True, output is verbose
    """
    # Only the header size is needed here: it tells us where the samples begin.
    mda_reader = DiskReadMda(self._timeseries_path)
    payload_offset = mda_reader._header.header_size
    if dtype is None or dtype == self.get_dtype():
        # Fast path: identical dtype, so stream the sample bytes verbatim.
        try:
            with open(self._timeseries_path, 'rb') as src, open(save_path, 'wb') as dst:
                src.seek(payload_offset)
                shutil.copyfileobj(src, dst)
            return
        except Exception as e:
            print('Error occurred while copying:', e)
            print('Writing to binary')
    # Either a dtype conversion was requested or the fast copy failed:
    # fall through to the generic chunked writer.
    write_to_binary_dat_format(self, save_path=save_path, time_axis=time_axis, dtype=dtype,
                               chunk_size=chunk_size, chunk_mb=chunk_mb, n_jobs=n_jobs,
                               joblib_backend=joblib_backend, verbose=verbose)
def write_recording(recording, save_path, params=None, raw_fname='raw.mda', params_fname='params.json',
                    geom_fname='geom.csv', dtype=None, chunk_size=None, n_jobs=None, chunk_mb=500,
                    verbose=False):
    """
    Writes recording to file in MDA format.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be saved
    save_path: str or Path
        The folder in which the Mda files are saved
    params: dict or None
        Dictionary with optional parameters to save metadata. Sampling frequency is appended
        to this dictionary. If None (default), an empty dictionary is used.
    raw_fname: str
        File name of raw file (default raw.mda)
    params_fname: str
        File name of params file (default params.json)
    geom_fname: str
        File name of geom file (default geom.csv)
    dtype: dtype
        dtype to be used. If None dtype is same as recording traces.
    chunk_size: None or int
        Size of each chunk in number of frames.
        If None (default) and 'chunk_mb' is given, the file is saved in chunks of 'chunk_mb' Mb (default 500Mb)
    n_jobs: None or int
        Number of jobs to use (default None, i.e. the writer's own default)
    chunk_mb: None or int
        Chunk size in Mb (default 500Mb)
    verbose: bool
        If True, output is verbose
    """
    # Bug fix: the original used a mutable default (`params=dict()`) and then
    # mutated it below, leaking "samplerate" across calls. Copy the caller's
    # dict so their input is never modified either.
    params = {} if params is None else dict(params)
    save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)
    save_file_path = save_path / raw_fname
    parent_dir = save_path
    num_chan = recording.get_num_channels()
    num_frames = recording.get_num_frames()
    geom = recording.get_channel_locations()
    if dtype is None:
        dtype = recording.get_dtype()
    # Normalize generic dtype aliases to the explicit widths Mda expects.
    if dtype == 'float':
        dtype = 'float32'
    if dtype == 'int':
        dtype = 'int16'
    with save_file_path.open('wb') as f:
        header = MdaHeader(dt0=dtype, dims0=(num_chan, num_frames))
        header.write(f)
        # write_to_binary_dat_format takes care of the chunking
        write_to_binary_dat_format(recording, file_handle=f, dtype=dtype, n_jobs=n_jobs,
                                   chunk_size=chunk_size, chunk_mb=chunk_mb, verbose=verbose)
    params["samplerate"] = float(recording.get_sampling_frequency())
    with (parent_dir / params_fname).open('w') as f:
        json.dump(params, f)
    np.savetxt(str(parent_dir / geom_fname), geom, delimiter=',')