def __init__(self, folder_path: PathType, keep_mua_units: bool = True, exclude_shanks: Union[list, None] = None):
    """Build a multi-shank sorting extractor from a Neuroscope folder.

    The folder must contain exactly one ``.xml`` session file plus matched
    ``<name>.res.<i>`` / ``<name>.clu.<i>`` shank files; one
    ``NeuroscopeSortingExtractor`` is created per shank.

    Parameters
    ----------
    folder_path : PathType
        Path to the folder holding the .xml / .res.%i / .clu.%i files.
    keep_mua_units : bool
        Passed through to each per-shank extractor. Default True.
    exclude_shanks : list or None
        Non-negative shank indices to skip; None means use every shank.
    """
    assert HAVE_LXML, self.installation_mesg
    folder_path = Path(folder_path)

    if exclude_shanks is not None:
        # dumping checks do not like having an empty list as default
        assert all(
            isinstance(x, (int, np.integer)) and x >= 0 for x in exclude_shanks
        ), 'Optional argument "exclude_shanks" must contain positive integers only!'
        exclude_shanks_passed = True
    else:
        exclude_shanks = []
        exclude_shanks_passed = False

    # FIX: original wrote `if f.is_file` (no call) — a bound method is always
    # truthy, so non-file entries ending in .xml were not filtered out.
    xml_files = [f for f in folder_path.iterdir() if f.is_file() and f.suffix == '.xml']
    assert len(xml_files) > 0, 'No .xml file found in the folder.'
    assert len(xml_files) == 1, 'More than one .xml file found in the folder.'
    xml_filepath = xml_files[0]

    xml_root = et.parse(str(xml_filepath.absolute())).getroot()
    self._sampling_frequency = float(
        xml_root.find('acquisitionSystem').find('samplingRate').text
    )  # careful not to confuse it with the lfpsamplingrate

    def _numbered_shank_files(suffix):
        # Files of the form '<name><suffix>.<digits>' (exactly two suffixes).
        return [
            f for f in folder_path.iterdir()
            if f.is_file() and suffix in f.suffixes
            and re.search(r'\d+$', f.name) is not None and len(f.suffixes) == 2
        ]

    res_files = _numbered_shank_files('.res')
    clu_files = _numbered_shank_files('.clu')

    assert len(res_files) > 0 or len(clu_files) > 0, \
        'No .res or .clu files found in the folder_path.'
    assert len(res_files) == len(clu_files)

    res_ids = [int(x.suffix[1:]) for x in res_files]
    clu_ids = [int(x.suffix[1:]) for x in clu_files]
    assert sorted(res_ids) == sorted(clu_ids), 'Unmatched .clu.%i and .res.%i files detected!'
    if any(x not in res_ids for x in exclude_shanks):
        print(
            'Warning: Detected indices in exclude_shanks that are not in the directory. These will be ignored.'
        )

    resfile_names = [x.name[:x.name.find('.res')] for x in res_files]
    clufile_names = [x.name[:x.name.find('.clu')] for x in clu_files]
    # FIX: original used np.all(<generator>), which is truthy for ANY
    # generator object, so the assertion could never fire; builtin all()
    # actually consumes the generator.
    assert all(r == c for (r, c) in zip(resfile_names, clufile_names)), \
        'Some of the .res.%i and .clu.%i files do not share the same name!'
    sorting_name = resfile_names[0]

    all_shanks_list_se = []
    for shank_id in sorted(set(res_ids) - set(exclude_shanks)):
        resfile_path = folder_path / f'{sorting_name}.res.{shank_id}'
        clufile_path = folder_path / f'{sorting_name}.clu.{shank_id}'
        all_shanks_list_se.append(
            NeuroscopeSortingExtractor(resfile_path=resfile_path,
                                       clufile_path=clufile_path,
                                       keep_mua_units=keep_mua_units))

    MultiSortingExtractor.__init__(self, sortings=all_shanks_list_se)

    # Record None (not []) when the caller did not pass exclude_shanks, so
    # dumping/round-tripping reproduces the original call signature.
    self._kwargs = {
        'folder_path': str(folder_path.absolute()),
        'keep_mua_units': keep_mua_units,
        'exclude_shanks': exclude_shanks if exclude_shanks_passed else None
    }
def __init__(self, folder_path: PathType, keep_mua_units: bool = True,
             exclude_shanks: Optional[list] = None,
             load_waveforms: bool = False, gain: Optional[float] = None):
    """Build a multi-shank sorting extractor from a Neuroscope folder.

    Parameters
    ----------
    folder_path : PathType
        Folder containing one .xml file plus matched .res.%i / .clu.%i
        (and optionally .spk.%i) shank files.
    keep_mua_units : bool
        Passed through to each per-shank extractor. Default True.
    exclude_shanks : list, optional
        Non-negative shank indices to skip; None means use every shank.
    load_waveforms : bool
        If True, also validate and attach the per-shank .spk waveform files.
    gain : float, optional
        Waveform gain forwarded to each per-shank extractor when
        load_waveforms is True.
    """
    assert self.installed, self.installation_mesg
    folder_path = Path(folder_path)

    if exclude_shanks is not None:
        # dumping checks do not like having an empty list as default
        assert all(
            isinstance(x, (int, np.integer)) and x >= 0 for x in exclude_shanks
        ), "Optional argument 'exclude_shanks' must contain positive integers only!"
        exclude_shanks_passed = True
    else:
        exclude_shanks = []
        exclude_shanks_passed = False

    # FIX: original wrote `if f.is_file` (no call) — always truthy, so
    # non-file entries ending in .xml slipped through the filter.
    xml_files = [f for f in folder_path.iterdir() if f.is_file() and f.suffix == ".xml"]
    assert len(xml_files) > 0, "No .xml file found in the folder!"
    assert len(xml_files) == 1, "More than one .xml file found in the folder!"
    xml_filepath = xml_files[0]

    xml_root = et.parse(str(xml_filepath)).getroot()
    self._sampling_frequency = float(
        xml_root.find('acquisitionSystem').find('samplingRate').text)

    res_files = get_shank_files(folder_path=folder_path, suffix=".res")
    clu_files = get_shank_files(folder_path=folder_path, suffix=".clu")

    assert len(res_files) > 0 or len(
        clu_files) > 0, "No .res or .clu files found in the folder_path!"
    assert len(res_files) == len(clu_files)

    res_ids = [int(x.suffix[1:]) for x in res_files]
    clu_ids = [int(x.suffix[1:]) for x in clu_files]
    assert sorted(res_ids) == sorted(
        clu_ids), "Unmatched .clu.%i and .res.%i files detected!"
    if any(x not in res_ids for x in exclude_shanks):
        warnings.warn(
            "Detected indices in exclude_shanks that are not in the directory! These will be ignored."
        )

    resfile_names = [x.name[:x.name.find('.res')] for x in res_files]
    clufile_names = [x.name[:x.name.find('.clu')] for x in clu_files]
    # FIX: original used np.all(<generator>), which is truthy for ANY
    # generator object; builtin all() actually evaluates the comparisons.
    assert all(r == c for (r, c) in zip(resfile_names, clufile_names)), \
        "Some of the .res.%i and .clu.%i files do not share the same name!"
    sorting_name = resfile_names[0]

    if load_waveforms:
        # FIX: these checks are loop-invariant; the original re-ran them
        # inside the per-shank loop on every iteration.
        spk_files = get_shank_files(folder_path=folder_path, suffix=".spk")
        assert len(spk_files) > 0, \
            "No .spk files found in the folder_path, but 'write_waveforms' is True!"
        assert len(spk_files) == len(res_files), \
            "Mismatched number of .spk and .res files!"
        spk_ids = [int(x.suffix[1:]) for x in spk_files]
        assert sorted(spk_ids) == sorted(res_ids), \
            "Unmatched .spk.%i and .res.%i files detected!"
        spkfile_names = [x.name[:x.name.find('.spk')] for x in spk_files]
        assert all(s == r for (s, r) in zip(spkfile_names, resfile_names)), \
            "Some of the .spk.%i and .res.%i files do not share the same name!"

    all_shanks_list_se = []
    # sorted() (rather than list(set(...))) gives a deterministic shank order.
    for shank_id in sorted(set(res_ids) - set(exclude_shanks)):
        nse_args = dict(
            resfile_path=folder_path / f"{sorting_name}.res.{shank_id}",
            clufile_path=folder_path / f"{sorting_name}.clu.{shank_id}",
            keep_mua_units=keep_mua_units)
        if load_waveforms:
            nse_args.update(
                spkfile_path=folder_path / f"{sorting_name}.spk.{shank_id}",
                gain=gain)
        all_shanks_list_se.append(NeuroscopeSortingExtractor(**nse_args))

    MultiSortingExtractor.__init__(self, sortings=all_shanks_list_se)

    # Record None (not []) when the caller did not pass exclude_shanks, so
    # dumping/round-tripping reproduces the original call signature.
    self._kwargs = dict(folder_path=str(folder_path.absolute()),
                        keep_mua_units=keep_mua_units,
                        exclude_shanks=exclude_shanks if exclude_shanks_passed else None,
                        load_waveforms=load_waveforms,
                        gain=gain)
def __init__(self, folder_path: PathType, keep_mua_units: bool = True, exclude_shanks: Union[list, None] = None):
    """Build a multi-shank sorting extractor from a Neuroscope folder.

    Requires more than one matched ``.res.%i`` / ``.clu.%i`` pair; for a
    single pair use the NeuroscopeSortingExtractor instead.

    Parameters
    ----------
    folder_path : PathType
        Folder containing one .xml file plus matched .res.%i / .clu.%i files.
    keep_mua_units : bool
        Passed through to each per-shank extractor. Default True.
    exclude_shanks : list or None
        Non-negative shank indices to skip; None means use every shank.
    """
    assert HAVE_BS4_LXML, self.installation_mesg
    folder_path = Path(folder_path)
    if not folder_path.is_dir():
        os.makedirs(folder_path)

    if exclude_shanks is not None:
        # dumping checks do not like having an empty list as default
        assert all(
            isinstance(x, (int, np.integer)) and x >= 0 for x in exclude_shanks
        ), 'Optional argument "exclude_shanks" must contain positive integers only!'
        exclude_shanks_passed = True
    else:
        exclude_shanks = []
        exclude_shanks_passed = False

    # FIX: original wrote `if f.is_file` (no call) — always truthy, so
    # non-file entries ending in .xml slipped through the filter.
    xml_files = [f for f in folder_path.iterdir() if f.is_file() and f.suffix == '.xml']
    assert len(xml_files) > 0, 'No .xml file found in the folder.'
    assert len(xml_files) == 1, 'More than one .xml file found in the folder.'
    xml_filepath = xml_files[0]

    with xml_filepath.open('r') as xml_file:
        contents = xml_file.read()
        soup = BeautifulSoup(contents, 'lxml')
        # Normally, this would be a .xml, but there were strange issues
        # in the write_recording method that require it to be a .lxml instead
        # which also requires all capital letters to be removed from the tag names
    self._sampling_frequency = float(soup.samplingrate.string)  # careful not to confuse it with the lfpsamplingrate

    res_files = [f for f in folder_path.iterdir()
                 if f.is_file() and '.res' in f.name and '.temp.' not in f.name]
    clu_files = [f for f in folder_path.iterdir()
                 if f.is_file() and '.clu' in f.name and '.temp.' not in f.name]

    assert len(res_files) > 0 or len(clu_files) > 0, \
        'No .res or .clu files found in the folder_path.'
    assert len(res_files) > 1 and len(clu_files) > 1, \
        'Single .res and .clu pairs found in the folder_path. ' \
        'For single .res and .clu files, use the NeuroscopeSortingExtractor instead.'
    assert len(res_files) == len(clu_files)

    # FIX: original read only the final character (int(x.name[-1])), which
    # breaks for shank ids >= 10; parse the full trailing integer instead.
    # FIX: clu_ids was erroneously built from res_files, so the assertion
    # below compared the res ids against themselves and could never fail.
    res_ids = [int(x.name.split('.')[-1]) for x in res_files]
    clu_ids = [int(x.name.split('.')[-1]) for x in clu_files]
    assert sorted(res_ids) == sorted(clu_ids), 'Unmatched .clu.%i and .res.%i files detected!'
    if any(x not in res_ids for x in exclude_shanks):
        print('Warning: Detected indices in exclude_shanks that are not in the directory. These will be ignored.')

    resfile_names = [x.name[:x.name.find('.res')] for x in res_files]
    clufile_names = [x.name[:x.name.find('.clu')] for x in clu_files]
    # FIX: original used np.all(<generator>), which is truthy for ANY
    # generator object; builtin all() actually evaluates the comparisons.
    assert all(r == c for (r, c) in zip(resfile_names, clufile_names)), \
        'Some of the .res.%i and .clu.%i files do not share the same name!'
    sorting_name = resfile_names[0]

    all_shanks_list_se = []
    for shank_id in sorted(set(res_ids) - set(exclude_shanks)):
        resfile_path = folder_path / f'{sorting_name}.res.{shank_id}'
        clufile_path = folder_path / f'{sorting_name}.clu.{shank_id}'
        all_shanks_list_se.append(NeuroscopeSortingExtractor(resfile_path=resfile_path,
                                                             clufile_path=clufile_path,
                                                             keep_mua_units=keep_mua_units))

    MultiSortingExtractor.__init__(self, sortings=all_shanks_list_se)

    # Record None (not []) when the caller did not pass exclude_shanks, so
    # dumping/round-tripping reproduces the original call signature.
    self._kwargs = {'folder_path': str(folder_path.absolute()),
                    'keep_mua_units': keep_mua_units,
                    'exclude_shanks': exclude_shanks if exclude_shanks_passed else None}