import os

import numpy as np


def open_klusters_oneshank(filename):
    filenames = find_filenames(filename)
    fileindex = find_index(filename)

    # Open small Klusters files.
    data = {}
    metadata = read_xml(filenames['xml'], fileindex)
    data['clu'] = read_clusters(filenames['clu'])

    # Read .aclu data.
    if 'aclu' in filenames and os.path.exists(filenames['aclu']):
        data['aclu'] = read_clusters(filenames['aclu'])
    else:
        data['aclu'] = data['clu']

    # Read .acluinfo data.
    if 'acluinfo' in filenames and os.path.exists(filenames['acluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['acluinfo'])
    # If the ACLUINFO file does not exist, try CLUINFO (older file extension).
    elif 'cluinfo' in filenames and os.path.exists(filenames['cluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['cluinfo'])
    else:
        data['acluinfo'] = default_cluster_info(np.unique(data['aclu']))

    # Read group info.
    if 'groupinfo' in filenames and os.path.exists(filenames['groupinfo']):
        data['groupinfo'] = read_group_info(filenames['groupinfo'])
    else:
        data['groupinfo'] = default_group_info()

    # Find out the number of columns in the .fet file.
    with open(filenames['fet'], 'r') as f:
        # Skip the first line (it contains the number of features).
        f.readline()
        # Count the non-empty columns in the first data line.
        data['fetcol'] = len(
            [col for col in f.readline().split(' ') if col.strip() != ''])

    metadata['nspikes'] = len(data['clu'])
    data['fileindex'] = fileindex

    # Open big Klusters files (memory-mapped).
    data['fet'] = MemMappedText(filenames['fet'], np.int64, skiprows=1)
    if 'spk' in filenames and os.path.exists(filenames['spk'] or ''):
        data['spk'] = MemMappedBinary(
            filenames['spk'], np.int16,
            rowsize=metadata['nchannels'] * metadata['nsamples'])
    if 'uspk' in filenames and os.path.exists(filenames['uspk'] or ''):
        data['uspk'] = MemMappedBinary(
            filenames['uspk'], np.int16,
            rowsize=metadata['nchannels'] * metadata['nsamples'])
    if 'mask' in filenames and os.path.exists(filenames['mask'] or ''):
        data['mask'] = MemMappedText(filenames['mask'], np.float32, skiprows=1)

    # data['metadata'] = metadata
    data.update(metadata)
    return data
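

# Hypothetical usage sketch (not part of the original module): the file name
# below is illustrative only, and find_filenames() is assumed to accept any
# of the dataset's Klusters files (.xml, .clu.N, .fet.N, ...).
def _example_open_klusters_oneshank():
    data = open_klusters_oneshank('mydataset.fet.1')
    print(data['nspikes'])    # number of spikes (length of the .clu file)
    print(data['fetcol'])     # number of feature columns in the .fet file
    print(data['groupinfo'])  # group table read from disk, or the default one
    # 'spk', 'uspk' and 'mask' are only present when the matching files exist.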


def read_group_info(self):
    try:
        self.group_info = read_group_info(self.filename_groupinfo)
        info("Successfully loaded {0:s}".format(self.filename_groupinfo))
    except IOError:
        info("The GROUPINFO file is missing, generating a default one.")
        self.group_info = default_group_info()

    # Extract the per-group colors and names (as pandas objects).
    self.group_colors = self.group_info['color'].astype(np.int32)
    self.group_names = self.group_info['name']
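

# Hypothetical sketch of the group table shape (an assumption, not from the
# original code): read_group_info()/default_group_info() are taken to return
# a pandas DataFrame with a 'color' and a 'name' column indexed by group
# number; the example group names and colors below are illustrative only.
def _example_group_info_table():
    import pandas as pd

    group_info = pd.DataFrame({'color': [1, 2, 3],
                               'name': ['Noise', 'MUA', 'Good']},
                              index=[0, 1, 2])
    # Same extraction as in read_group_info() above.
    group_colors = group_info['color'].astype(np.int32)
    group_names = group_info['name']
    print(group_colors.loc[2], group_names.loc[2])  # -> 3 Good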