def __str__(self):
    out = [super().__str__()]
    out.append('Analysis Directory: %s' % self.analysis_dir)
    out.append('Recording Directories :')
    out.append(pt.print_dict(self.rec_labels, tabs=1))
    out.append('\nTaste Mapping :')
    out.append(pt.print_dict(self.taste_map, tabs=1))
    out.append('\nElectrode Mapping\n----------------')
    out.append(pt.print_dataframe(self.electrode_mapping))
    if hasattr(self, 'held_units'):
        out.append('\nHeld Units :')
        out.append(pt.print_dataframe(
            self.held_units.drop(columns=['J3'])))

    return '\n'.join(out)
def confirm_parameter_dict(params, prompt, shell=False):
    '''Shows user a dictionary and asks them to confirm that the values are
    correct. If not, they have an option to edit the dict.

    Parameters
    ----------
    params : dict
        values in dict can be int, float, str, bool, list, dict or None
    prompt : str
        prompt to show user
    shell : bool (optional)
        True to use command-line interface, False (default) for GUI

    Returns
    -------
    dict
        lists are returned as lists of str, so other types must be cast
        manually by the user
    '''
    prompt = ('----------\n%s\n----------\n%s\nAre these parameters good?'
              % (prompt, pt.print_dict(params)))
    q = ask_user(prompt, choices=['Yes', 'Edit', 'Cancel'], shell=shell)
    if q == 2:
        return None
    elif q == 0:
        return params
    else:
        new_params = fill_dict(params, 'Enter new values:', shell=shell)
        return new_params
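def _example_confirm_parameter_dict(shell=True):
    # Hypothetical usage sketch (not part of the original module): confirm a
    # small parameter dict via the shell prompt, then cast edited list values
    # back to int, since confirm_parameter_dict returns list entries as str.
    # The parameter names below are illustrative placeholders only.
    params = {'window_size': 250, 'window_step': 25, 'dig_ins': [0, 1]}
    params = confirm_parameter_dict(params, 'Example analysis parameters',
                                    shell=shell)
    if params is None:
        return None  # user cancelled

    params['dig_ins'] = [int(x) for x in params['dig_ins']]
    return params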
def make_unit_arrays(self):
    '''Make spike arrays for each unit and store in hdf5 store
    '''
    params = self.spike_array_params
    print('Generating unit arrays with parameters:\n----------')
    print(pt.print_dict(params, tabs=1))
    ss.make_spike_arrays(self.h5_file, params)
    self.process_status['make_unit_arrays'] = True
    self.save()
def read_rec_info(file_dir, shell=True):
    '''Reads the info.rhd file to get relevant parameters.

    Parameters
    ----------
    file_dir : str
        path to recording directory

    Returns
    -------
    dict
        necessary analysis info from info.rhd
        fields: amplifier_sampling_rate, dig_in_sampling_rate, notch_filter,
        ports (list, corresponds to channels), channels (list)

    Throws
    ------
    FileNotFoundError : if info.rhd is not in file_dir
    '''
    info_file = os.path.join(file_dir, 'info.rhd')
    if not os.path.isfile(info_file):
        raise FileNotFoundError('info.rhd file not found in %s' % file_dir)

    out = {}
    print('Reading info.rhd file...')
    try:
        info = load_intan_rhd_format.read_data(info_file)
    except Exception as e:
        # TODO: Have a way to manually input settings
        info = None
        userIO.tell_user('%s was unable to be read. May be corrupted or '
                         'recording may have been interrupted' % info_file,
                         shell=True)
        raise e

    freq_params = info['frequency_parameters']
    notch_freq = freq_params['notch_filter_frequency']
    amp_fs = freq_params['amplifier_sample_rate']
    dig_in_fs = freq_params['board_dig_in_sample_rate']
    out = {'amplifier_sampling_rate': amp_fs,
           'dig_in_sampling_rate': dig_in_fs,
           'notch_filter': notch_freq}

    amp_ch = info['amplifier_channels']
    ports = [x['port_prefix'] for x in amp_ch]
    channels = [x['native_order'] for x in amp_ch]
    out['ports'] = ports
    out['channels'] = channels
    out['num_channels'] = len(channels)

    if info.get('board_dig_in_channels'):
        dig_in = info['board_dig_in_channels']
        din = [x['native_order'] for x in dig_in]
        out['dig_in'] = din

    if info.get('board_dig_out_channels'):
        dig_out = info['board_dig_out_channels']
        dout = [x['native_order'] for x in dig_out]
        out['dig_out'] = dout

    out['file_type'] = get_recording_filetype(file_dir)

    print('\nRecording Info\n--------------\n')
    print(pt.print_dict(out))
    return out
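def _example_read_rec_info(file_dir='/path/to/recording'):
    # Hypothetical usage sketch (not part of the original module): read the
    # Intan info.rhd metadata for a recording directory and pull out the
    # amplifier sampling rate and digital input channels. 'file_dir' is a
    # placeholder path.
    rec_info = read_rec_info(file_dir, shell=True)
    fs = rec_info['amplifier_sampling_rate']
    dig_ins = rec_info.get('dig_in', [])
    print('Sampling rate: %g Hz, %i digital inputs' % (fs, len(dig_ins)))
    return rec_info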
def cluster_spikes(self, data_quality=None, multi_process=True,
                   n_cores=None, custom_params=None, umap=False):
    '''Write clustering parameters to file and run spike clustering on each
    electrode in parallel

    Parameters
    ----------
    data_quality : {'clean', 'noisy', None (default)}
        set if you want to change the data quality parameters for cutoff
        and spike detection before running clustering. These parameters are
        automatically set as "clean" during initial parameter setup
    multi_process : bool, True (default)
        set to False to cluster electrodes serially instead of in parallel
    n_cores : int (optional)
        number of cores to use for parallel clustering; defaults to one less
        than the number of available cores
    custom_params : dict (optional)
        clustering parameters to use instead of a data_quality preset
    umap : bool, False (default)
        set to True to cluster on UMAP embeddings instead of PCA
    '''
    clustering_params = None
    if custom_params:
        clustering_params = custom_params
    elif data_quality:
        tmp = dio.params.load_params('clustering_params', self.root_dir,
                                     default_keyword=data_quality)
        if tmp:
            clustering_params = tmp
        else:
            raise ValueError('%s is not a valid data_quality preset. Must '
                             'be "clean" or "noisy" or None.' % data_quality)

    # Get electrodes, throw out 'dead' electrodes
    em = self.electrode_mapping
    if 'dead' in em.columns:
        electrodes = em.Electrode[em['dead'] == False].tolist()
    else:
        electrodes = em.Electrode.tolist()

    # Setup progress bar
    pbar = tqdm(total=len(electrodes))

    def update_pbar(ans):
        pbar.update()

    # Get clustering params
    rec_dirs = list(self.rec_labels.values())
    if clustering_params is None:
        dat = load_dataset(rec_dirs[0])
        clustering_params = dat.clustering_params.copy()

    print('\nRunning Blech Clust\n-------------------')
    print('Parameters\n%s' % pt.print_dict(clustering_params))

    # Write clustering params to recording directories & check that spike
    # detection has been run
    for rd in rec_dirs:
        dat = load_dataset(rd)
        if dat.process_status['spike_detection'] == False:
            raise FileNotFoundError('Spike detection has not been run on %s'
                                    % rd)

        dat.clustering_params = clustering_params
        wt.write_params_to_json('clustering_params', rd, clustering_params)
        # dat.save()

    # Run clustering
    if not umap:
        clust_objs = [bclust.BlechClust(rec_dirs, x, params=clustering_params)
                      for x in electrodes]
    else:
        clust_objs = [bclust.BlechClust(rec_dirs, x,
                                        params=clustering_params,
                                        data_transform=bclust.UMAP_METRICS,
                                        n_pc=5)
                      for x in electrodes]

    if multi_process:
        if n_cores is None or n_cores > multiprocessing.cpu_count():
            n_cores = multiprocessing.cpu_count() - 1

        pool = multiprocessing.get_context('spawn').Pool(n_cores)
        for x in clust_objs:
            pool.apply_async(x.run, callback=update_pbar)

        pool.close()
        pool.join()
    else:
        for x in clust_objs:
            res = x.run()
            update_pbar(res)

    pbar.close()

    for rd in rec_dirs:
        dat = load_dataset(rd)
        dat.process_status['spike_clustering'] = True
        dat.process_status['cleanup_clustering'] = False
        # dat.save()

    # self.save()
    print('Clustering Complete\n------------------')
def palatability_identity_calculations(rec_dir, pal_ranks=None, params=None,
                                       shell=False):
    warnings.filterwarnings('ignore', category=UserWarning)
    warnings.filterwarnings('ignore', category=RuntimeWarning)
    dat = load_dataset(rec_dir)
    dim = dat.dig_in_mapping
    if 'palatability_rank' in dim.columns:
        pass
    elif pal_ranks is None:
        dim = get_palatability_ranks(dim, shell=shell)
    else:
        dim['palatability_rank'] = dim['name'].map(pal_ranks)

    dim = dim.dropna(subset=['palatability_rank'])
    dim = dim[dim['palatability_rank'] > 0]
    dim = dim.reset_index(drop=True)
    num_tastes = len(dim)
    taste_names = dim.name.to_list()

    trial_list = dat.dig_in_trials.copy()
    trial_list = trial_list[trial_list.name.isin(taste_names)]
    num_trials = trial_list.groupby('channel').count()['name'].unique()
    if len(num_trials) > 1:
        raise ValueError('Unequal number of trials for tastes to be used')
    else:
        num_trials = num_trials[0]

    dim['num_trials'] = num_trials

    # Get which units to use
    unit_table = h5io.get_unit_table(rec_dir)
    unit_types = ['Single', 'Multi', 'All', 'Custom']
    unit_type = params.get('unit_type') if params is not None else None
    if unit_type is None:
        q = userIO.ask_user('Which units do you want to use for taste '
                            'discrimination and palatability analysis?',
                            choices=unit_types, shell=shell)
        unit_type = unit_types[q]

    if unit_type == 'Single':
        chosen_units = unit_table.loc[unit_table['single_unit'],
                                      'unit_num'].to_list()
    elif unit_type == 'Multi':
        chosen_units = unit_table.loc[unit_table['single_unit'] == False,
                                      'unit_num'].to_list()
    elif unit_type == 'All':
        chosen_units = unit_table['unit_num'].to_list()
    else:
        selection = userIO.select_from_list('Select units to use:',
                                            unit_table['unit_num'],
                                            'Select Units',
                                            multi_select=True)
        chosen_units = list(map(int, selection))

    num_units = len(chosen_units)
    unit_table = unit_table.loc[chosen_units]

    # Enter Parameters
    if params is None or params.keys() != default_pal_id_params.keys():
        params = default_pal_id_params.copy()
        params = userIO.confirm_parameter_dict(params,
                                               ('Palatability/Identity '
                                                'Calculation Parameters'
                                                '\nTimes in ms'),
                                               shell=shell)

    win_size = params['window_size']
    win_step = params['window_step']
    print('Running palatability/identity calculations with parameters:\n%s'
          % pt.print_dict(params))

    with tables.open_file(dat.h5_file, 'r+') as hf5:
        trains_dig_in = hf5.list_nodes('/spike_trains')
        time = trains_dig_in[0].array_time[:]
        bin_times = np.arange(time[0], time[-1] - win_size + win_step,
                              win_step)
        num_bins = len(bin_times)

        palatability = np.empty((num_bins, num_units, num_tastes*num_trials),
                                dtype=int)
        identity = np.empty((num_bins, num_units, num_tastes*num_trials),
                            dtype=int)
        unscaled_response = np.empty((num_bins, num_units,
                                      num_tastes*num_trials),
                                     dtype=np.dtype('float64'))
        response = np.empty((num_bins, num_units, num_tastes*num_trials),
                            dtype=np.dtype('float64'))
        laser = np.empty((num_bins, num_units, num_tastes*num_trials, 2),
                         dtype=float)

        # Fill arrays with data
        print('Filling data arrays...')
        onesies = np.ones((num_bins, num_units, num_trials))
        for i, row in dim.iterrows():
            idx = range(num_trials*i, num_trials*(i+1))
            palatability[:, :, idx] = row.palatability_rank * onesies
            identity[:, :, idx] = row.channel * onesies
            for j, u in enumerate(chosen_units):
                for k, t in enumerate(bin_times):
                    t_idx = np.where((time >= t) & (time <= t+win_size))[0]
                    unscaled_response[k, j, idx] = \
                        np.mean(trains_dig_in[i].spike_array[:, u, t_idx],
                                axis=1)
                    try:
                        # stack (duration, lag) per trial
                        laser[k, j, idx] = \
                            np.vstack((trains_dig_in[i].laser_durations[:],
                                       trains_dig_in[i].laser_onset_lag[:])).T
                    except:
                        laser[k, j, idx] = np.zeros((num_trials, 2))

        # Scaling was not done, so:
        response = unscaled_response.copy()

        # Make ancillary_analysis node and put in arrays
        if '/ancillary_analysis' in hf5:
            hf5.remove_node('/ancillary_analysis', recursive=True)

        hf5.create_group('/', 'ancillary_analysis')
        hf5.create_array('/ancillary_analysis', 'palatability', palatability)
        hf5.create_array('/ancillary_analysis', 'identity', identity)
        hf5.create_array('/ancillary_analysis', 'laser', laser)
        hf5.create_array('/ancillary_analysis', 'scaled_neural_response',
                         response)
        hf5.create_array('/ancillary_analysis', 'window_params',
                         np.array([win_size, win_step]))
        hf5.create_array('/ancillary_analysis', 'bin_times', bin_times)
        hf5.create_array('/ancillary_analysis', 'unscaled_neural_response',
                         unscaled_response)

        # for backwards compatibility
        hf5.create_array('/ancillary_analysis', 'params',
                         np.array([win_size, win_step]))
        hf5.create_array('/ancillary_analysis', 'pre_stim', np.array(time[0]))
        hf5.flush()

        # Get unique laser (duration, lag) combinations
        print('Organizing trial data...')
        unique_lasers = np.vstack(list({tuple(row)
                                        for row in laser[0, 0, :, :]}))
        unique_lasers = unique_lasers[unique_lasers[:, 1].argsort(), :]
        num_conditions = unique_lasers.shape[0]
        trials = []
        for row in unique_lasers:
            tmp_trials = [j for j in range(num_trials * num_tastes)
                          if np.array_equal(laser[0, 0, j, :], row)]
            trials.append(tmp_trials)

        trials_per_condition = [len(x) for x in trials]
        if not all(x == trials_per_condition[0] for x in trials_per_condition):
            raise ValueError('Different number of trials for each laser '
                             'condition')

        # assumes same number of trials per taste per condition
        trials_per_condition = int(trials_per_condition[0] / num_tastes)
        print('Detected:\n %i tastes\n %i laser conditions\n'
              ' %i trials per condition per taste'
              % (num_tastes, num_conditions, trials_per_condition))
        trials = np.array(trials)

        # Store laser conditions and indices of trials per condition in
        # trial x taste space
        hf5.create_array('/ancillary_analysis', 'trials', trials)
        hf5.create_array('/ancillary_analysis', 'laser_combination_d_l',
                         unique_lasers)
        hf5.flush()

        # Taste Similarity Calculation
        neural_response_laser = np.empty((num_conditions, num_bins,
                                          num_tastes, num_units,
                                          trials_per_condition),
                                         dtype=np.dtype('float64'))
        taste_cosine_similarity = np.empty((num_conditions, num_bins,
                                            num_tastes, num_tastes),
                                           dtype=np.dtype('float64'))
        taste_euclidean_distance = np.empty((num_conditions, num_bins,
                                             num_tastes, num_tastes),
                                            dtype=np.dtype('float64'))

        # Re-format neural responses from bin x unit x (trial*taste) to
        # laser_condition x bin x taste x unit x trial
        print('Reformatting data arrays...')
        for i, trial in enumerate(trials):
            for j, _ in enumerate(bin_times):
                for k, _ in dim.iterrows():
                    idx = np.where((trial >= num_trials*k) &
                                   (trial < num_trials*(k+1)))[0]
                    neural_response_laser[i, j, k, :, :] = \
                        response[j, :, trial[idx]].T

        # Compute taste cosine similarity and euclidean distances
        print('Computing taste cosine similarity and euclidean distances...')
        for i, _ in enumerate(trials):
            for j, _ in enumerate(bin_times):
                for k, _ in dim.iterrows():
                    for l, _ in dim.iterrows():
                        taste_cosine_similarity[i, j, k, l] = \
                            np.mean(cosine_similarity(
                                neural_response_laser[i, j, k, :, :].T,
                                neural_response_laser[i, j, l, :, :].T))
                        taste_euclidean_distance[i, j, k, l] = \
                            np.mean(cdist(
                                neural_response_laser[i, j, k, :, :].T,
                                neural_response_laser[i, j, l, :, :].T,
                                metric='euclidean'))

        hf5.create_array('/ancillary_analysis', 'taste_cosine_similarity',
                         taste_cosine_similarity)
        hf5.create_array('/ancillary_analysis', 'taste_euclidean_distance',
                         taste_euclidean_distance)
        hf5.flush()

        # Taste Responsiveness calculations
        bin_params = [params['num_comparison_bins'],
                      params['comparison_bin_size']]
        discrim_p = params['discrim_p']

        responsive_neurons = []
        discriminating_neurons = []
        taste_responsiveness = np.zeros((bin_params[0], num_units, 2))
        new_bin_times = np.arange(0, np.prod(bin_params), bin_params[1])
        baseline = np.where(bin_times < 0)[0]
        print('Computing taste responsiveness and taste discrimination...')
        for i, t in enumerate(new_bin_times):
            places = np.where((bin_times >= t) &
                              (bin_times <= t+bin_params[1]))[0]
            for j, u in enumerate(chosen_units):
                # Check taste responsiveness
                f, p = f_oneway(np.mean(response[places, j, :], axis=0),
                                np.mean(response[baseline, j, :], axis=0))
                if np.isnan(f):
                    f = 0.0
                    p = 1.0

                if p <= discrim_p and u not in responsive_neurons:
                    responsive_neurons.append(u)
                    taste_responsiveness[i, j, 0] = 1

                # Check taste discrimination
                taste_idx = [np.arange(num_trials*k, num_trials*(k+1))
                             for k in range(num_tastes)]
                taste_responses = [np.mean(response[places, j, :][:, k],
                                           axis=0)
                                   for k in taste_idx]
                f, p = f_oneway(*taste_responses)
                if np.isnan(f):
                    f = 0.0
                    p = 1.0

                if p <= discrim_p and u not in discriminating_neurons:
                    discriminating_neurons.append(u)

        responsive_neurons = np.sort(responsive_neurons)
        discriminating_neurons = np.sort(discriminating_neurons)

        # Write taste responsive and taste discriminating units to text file
        save_file = os.path.join(rec_dir,
                                 'discriminative_responsive_neurons.txt')
        with open(save_file, 'w') as f:
            print('Taste discriminative neurons', file=f)
            for u in discriminating_neurons:
                print(u, file=f)

            print('Taste responsive neurons', file=f)
            for u in responsive_neurons:
                print(u, file=f)

        hf5.create_array('/ancillary_analysis',
                         'taste_disciminating_neurons',
                         discriminating_neurons)
        hf5.create_array('/ancillary_analysis', 'taste_responsive_neurons',
                         responsive_neurons)
        hf5.create_array('/ancillary_analysis', 'taste_responsiveness',
                         taste_responsiveness)
        hf5.flush()

        # Get time course of taste discriminability
        print('Getting taste discrimination time course...')
        p_discrim = np.empty((num_conditions, num_bins, num_tastes,
                              num_tastes, num_units),
                             dtype=np.dtype('float64'))
        for i in range(num_conditions):
            for j, t in enumerate(bin_times):
                for k in range(num_tastes):
                    for l in range(num_tastes):
                        for m in range(num_units):
                            _, p = ttest_ind(
                                neural_response_laser[i, j, k, m, :],
                                neural_response_laser[i, j, l, m, :],
                                equal_var=False)
                            if np.isnan(p):
                                p = 1.0

                            p_discrim[i, j, k, l, m] = p

        hf5.create_array('/ancillary_analysis', 'p_discriminability',
                         p_discrim)
        hf5.flush()

        # Palatability Rank Order calculation (if > 2 tastes)
        t_start = params['pal_deduce_start_time']
        t_end = params['pal_deduce_end_time']
        if num_tastes > 2:
            print('Deducing palatability rank order...')
            palatability_rank_order_deduction(rec_dir, neural_response_laser,
                                              unique_lasers, bin_times,
                                              [t_start, t_end])

        # Palatability calculation
        r_spearman = np.zeros((num_conditions, num_bins, num_units))
        p_spearman = np.ones((num_conditions, num_bins, num_units))
        r_pearson = np.zeros((num_conditions, num_bins, num_units))
        p_pearson = np.ones((num_conditions, num_bins, num_units))
        f_identity = np.ones((num_conditions, num_bins, num_units))
        p_identity = np.ones((num_conditions, num_bins, num_units))
        lda_palatability = np.zeros((num_conditions, num_bins))
        lda_identity = np.zeros((num_conditions, num_bins))
        r_isotonic = np.zeros((num_conditions, num_bins, num_units))
        id_pal_regress = np.zeros((num_conditions, num_bins, num_units, 2))
        pairwise_identity = np.zeros((num_conditions, num_bins, num_tastes,
                                      num_tastes))
        print('Computing palatability metrics...')
        for i, t in enumerate(trials):
            for j in range(num_bins):
                for k in range(num_units):
                    ranks = rankdata(response[j, k, t])
                    r_spearman[i, j, k], p_spearman[i, j, k] = \
                        spearmanr(ranks, palatability[j, k, t])
                    r_pearson[i, j, k], p_pearson[i, j, k] = \
                        pearsonr(response[j, k, t], palatability[j, k, t])
                    if np.isnan(r_spearman[i, j, k]):
                        r_spearman[i, j, k] = 0.0
                        p_spearman[i, j, k] = 1.0

                    if np.isnan(r_pearson[i, j, k]):
                        r_pearson[i, j, k] = 0.0
                        p_pearson[i, j, k] = 1.0

                    # Isotonic regression of firing against palatability
                    model = IsotonicRegression(increasing='auto')
                    model.fit(palatability[j, k, t], response[j, k, t])
                    r_isotonic[i, j, k] = model.score(palatability[j, k, t],
                                                      response[j, k, t])

                    # Multiple regression of firing rate against palatability
                    # and identity
                    # Regress palatability on identity
                    tmp_id = identity[j, k, t].reshape(-1, 1)
                    tmp_pal = palatability[j, k, t].reshape(-1, 1)
                    tmp_resp = response[j, k, t].reshape(-1, 1)
                    model_pi = LinearRegression()
                    model_pi.fit(tmp_id, tmp_pal)
                    pi_residuals = tmp_pal - model_pi.predict(tmp_id)

                    # Regress identity on palatability
                    model_ip = LinearRegression()
                    model_ip.fit(tmp_pal, tmp_id)
                    ip_residuals = tmp_id - model_ip.predict(tmp_pal)

                    # Regress firing on identity
                    model_fi = LinearRegression()
                    model_fi.fit(tmp_id, tmp_resp)
                    fi_residuals = tmp_resp - model_fi.predict(tmp_id)

                    # Regress firing on palatability
                    model_fp = LinearRegression()
                    model_fp.fit(tmp_pal, tmp_resp)
                    fp_residuals = tmp_resp - model_fp.predict(tmp_pal)

                    # Get partial correlation coefficient of response with
                    # identity and palatability
                    idp_reg0, p = pearsonr(fp_residuals, ip_residuals)
                    if np.isnan(idp_reg0):
                        idp_reg0 = 0.0

                    idp_reg1, p = pearsonr(fi_residuals, pi_residuals)
                    if np.isnan(idp_reg1):
                        idp_reg1 = 0.0

                    id_pal_regress[i, j, k, 0] = idp_reg0
                    id_pal_regress[i, j, k, 1] = idp_reg1

                    # Identity Calculation
                    samples = []
                    for _, row in dim.iterrows():
                        taste = row.channel
                        samples.append([trial for trial in t
                                        if identity[j, k, trial] == taste])

                    tmp_resp = [response[j, k, sample] for sample in samples]
                    f_identity[i, j, k], p_identity[i, j, k] = \
                        f_oneway(*tmp_resp)
                    if np.isnan(f_identity[i, j, k]):
                        f_identity[i, j, k] = 0.0
                        p_identity[i, j, k] = 1.0

                # Linear Discriminant analysis for palatability
                X = response[j, :, t]
                Y = palatability[j, 0, t]
                test_results = []
                c_validator = LeavePOut(1)
                for train, test in c_validator.split(X, Y):
                    model = LDA()
                    model.fit(X[train, :], Y[train])
                    tmp = np.mean(model.predict(X[test]) == Y[test])
                    test_results.append(tmp)

                lda_palatability[i, j] = np.mean(test_results)

                # Linear Discriminant analysis for identity
                Y = identity[j, 0, t]
                test_results = []
                c_validator = LeavePOut(1)
                for train, test in c_validator.split(X, Y):
                    model = LDA()
                    model.fit(X[train, :], Y[train])
                    tmp = np.mean(model.predict(X[test]) == Y[test])
                    test_results.append(tmp)

                lda_identity[i, j] = np.mean(test_results)

                # Pairwise Identity Calculation
                for ti1, r1 in dim.iterrows():
                    for ti2, r2 in dim.iterrows():
                        t1 = r1.channel
                        t2 = r2.channel
                        tmp_trials = np.where((identity[j, 0, :] == t1) |
                                              (identity[j, 0, :] == t2))[0]
                        idx = [trial for trial in t if trial in tmp_trials]
                        X = response[j, :, idx]
                        Y = identity[j, 0, idx]
                        test_results = []
                        c_validator = StratifiedShuffleSplit(n_splits=10,
                                                             test_size=0.25,
                                                             random_state=0)
                        for train, test in c_validator.split(X, Y):
                            model = GaussianNB()
                            model.fit(X[train, :], Y[train])
                            tmp_score = model.score(X[test, :], Y[test])
                            test_results.append(tmp_score)

                        pairwise_identity[i, j, ti1, ti2] = \
                            np.mean(test_results)

        hf5.create_array('/ancillary_analysis', 'r_pearson', r_pearson)
        hf5.create_array('/ancillary_analysis', 'r_spearman', r_spearman)
        hf5.create_array('/ancillary_analysis', 'p_pearson', p_pearson)
        hf5.create_array('/ancillary_analysis', 'p_spearman', p_spearman)
        hf5.create_array('/ancillary_analysis', 'lda_palatability',
                         lda_palatability)
        hf5.create_array('/ancillary_analysis', 'lda_identity', lda_identity)
        hf5.create_array('/ancillary_analysis', 'r_isotonic', r_isotonic)
        hf5.create_array('/ancillary_analysis', 'id_pal_regress',
                         id_pal_regress)
        hf5.create_array('/ancillary_analysis', 'f_identity', f_identity)
        hf5.create_array('/ancillary_analysis', 'p_identity', p_identity)
        hf5.create_array('/ancillary_analysis', 'pairwise_NB_identity',
                         pairwise_identity)
        hf5.flush()

    warnings.filterwarnings('default', category=UserWarning)
    warnings.filterwarnings('default', category=RuntimeWarning)
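def _example_palatability_identity(rec_dir='/path/to/rec_dir'):
    # Hypothetical usage sketch (not part of the original module): run the
    # palatability/identity calculations on one recording directory with
    # palatability ranks supplied up front so the taste-rank prompt is
    # skipped. The taste names and ranks below are placeholders; with
    # params=None the remaining values fall back to default_pal_id_params
    # and are confirmed through the shell prompt.
    pal_ranks = {'Water': 1, 'NaCl': 2, 'Sucrose': 3}
    palatability_identity_calculations(rec_dir, pal_ranks=pal_ranks,
                                       params=None, shell=True)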
def blech_clust_run(self, data_quality=None, n_cores=None):
    '''Write clustering parameters to file and run blech_clust_process on
    each electrode in parallel

    Parameters
    ----------
    data_quality : {'clean', 'noisy', None (default)}
        set if you want to change the data quality parameters for cutoff
        and spike detection before running clustering. These parameters are
        automatically set as "clean" during initial parameter setup
    n_cores : int (optional)
        number of cores to use for parallel processing; defaults to one less
        than the number of available cores
    '''
    if data_quality:
        tmp = dio.params.load_params('clustering_params', self.root_dir,
                                     default_keyword=data_quality)
        if tmp:
            self.clustering_params = tmp
        else:
            raise ValueError('%s is not a valid data_quality preset. Must '
                             'be "clean" or "noisy" or None.' % data_quality)

    print('\nRunning Blech Clust\n-------------------')
    print('Parameters\n%s' % pt.print_dict(self.clustering_params))

    # Create folders for saving things within recording dir
    data_dir = self.root_dir
    directories = ['spike_waveforms', 'spike_times', 'clustering_results',
                   'Plots', 'memory_monitor_clustering']
    for d in directories:
        tmp_dir = os.path.join(data_dir, d)
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)

        os.mkdir(tmp_dir)

    # Set file for clustering log
    self.clustering_log = os.path.join(data_dir, 'results.log')
    if os.path.exists(self.clustering_log):
        os.remove(self.clustering_log)

    process_path = os.path.realpath(__file__)
    process_path = os.path.join(os.path.dirname(process_path),
                                'blech_process.py')

    em = self.electrode_mapping
    if 'dead' in em.columns:
        electrodes = em.Electrode[em['dead'] == False].tolist()
    else:
        electrodes = em.Electrode.tolist()

    pbar = tqdm(total=len(electrodes))
    results = [(None, None, None)] * (max(electrodes) + 1)
    clust_errors = [(x, None) for x in electrodes]

    def update_pbar(ans):
        if isinstance(ans, tuple) and ans[0] is not None:
            results[ans[0]] = ans
        else:
            print('Unexpected error when clustering an electrode')

        pbar.update()

    if n_cores is None or n_cores > multiprocessing.cpu_count():
        n_cores = multiprocessing.cpu_count() - 1

    pool = multiprocessing.Pool(n_cores)
    for x in electrodes:
        pool.apply_async(blech_clust_process,
                         args=(x, data_dir, self.clustering_params),
                         callback=update_pbar)

    pool.close()
    pool.join()
    pbar.close()

    print('{:<13}{:<10}{}'.format('Electrode', 'Result', 'Cutoff (s)'))
    cutoffs = {}
    clust_res = {}
    clustered = []
    for x, y, z in results:
        if x is None:
            continue

        clustered.append(x)
        print('{:<13}{:<10}{}'.format(x, y, z))
        cutoffs[x] = z
        clust_res[x] = y

    print('1 - Success\n0 - No data or no spikes\n-1 - Error')

    em = self.electrode_mapping.copy()
    em['cutoff_time'] = em['Electrode'].map(cutoffs)
    em['clustering_result'] = em['Electrode'].map(clust_res)
    self.electrode_mapping = em.copy()
    self.process_status['blech_clust_run'] = True
    self.process_status['cleanup_clustering'] = False
    dio.h5io.write_electrode_map_to_h5(self.h5_file, em)
    self.save()
    print('Clustering Complete\n------------------')
    return results
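def _example_blech_clust_run(dat, quality='noisy'):
    # Hypothetical usage sketch (not part of the original module): re-run
    # clustering on a loaded dataset object with the 'noisy' data-quality
    # preset, then report which electrodes errored. 'dat' is assumed to be a
    # dataset instance exposing blech_clust_run; each result tuple is
    # (electrode, result_code, cutoff), with -1 indicating an error.
    results = dat.blech_clust_run(data_quality=quality)
    failed = [x for x, res, _ in results if res == -1]
    if failed:
        print('Clustering failed on electrodes: %s' % failed)

    return results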
def __str__(self):
    '''Put all information about dataset in string format

    Returns
    -------
    str : representation of dataset object
    '''
    out1 = super().__str__()
    out = [out1]
    out.append('\nObject creation date: '
               + self.dataset_creation_date.strftime('%m/%d/%y'))

    if hasattr(self, 'raw_h5_file'):
        out.append('Deleted Raw h5 file: ' + self.raw_h5_file)

    out.append('h5 File: ' + self.h5_file)
    out.append('')
    out.append('--------------------')
    out.append('Processing Status')
    out.append('--------------------')
    out.append(pt.print_dict(self.process_status))
    out.append('')

    if not hasattr(self, 'rec_info'):
        return '\n'.join(out)

    info = self.rec_info

    out.append('--------------------')
    out.append('Recording Info')
    out.append('--------------------')
    out.append(pt.print_dict(self.rec_info))
    out.append('')

    out.append('--------------------')
    out.append('Electrodes')
    out.append('--------------------')
    out.append(pt.print_dataframe(self.electrode_mapping))
    out.append('')

    if hasattr(self, 'CAR_electrodes'):
        out.append('--------------------')
        out.append('CAR Groups')
        out.append('--------------------')
        headers = ['Group %i' % x for x in range(len(self.CAR_electrodes))]
        out.append(pt.print_list_table(self.CAR_electrodes, headers))
        out.append('')

    if not self.emg_mapping.empty:
        out.append('--------------------')
        out.append('EMG')
        out.append('--------------------')
        out.append(pt.print_dataframe(self.emg_mapping))
        out.append('')

    if info.get('dig_in'):
        out.append('--------------------')
        out.append('Digital Input')
        out.append('--------------------')
        out.append(pt.print_dataframe(self.dig_in_mapping))
        out.append('')

    if info.get('dig_out'):
        out.append('--------------------')
        out.append('Digital Output')
        out.append('--------------------')
        out.append(pt.print_dataframe(self.dig_out_mapping))
        out.append('')

    out.append('--------------------')
    out.append('Clustering Parameters')
    out.append('--------------------')
    out.append(pt.print_dict(self.clustering_params))
    out.append('')

    out.append('--------------------')
    out.append('Spike Array Parameters')
    out.append('--------------------')
    out.append(pt.print_dict(self.spike_array_params))
    out.append('')

    out.append('--------------------')
    out.append('PSTH Parameters')
    out.append('--------------------')
    out.append(pt.print_dict(self.psth_params))
    out.append('')

    out.append('--------------------')
    out.append('Palatability/Identity Parameters')
    out.append('--------------------')
    out.append(pt.print_dict(self.pal_id_params))
    out.append('')

    return '\n'.join(out)
def validate_data_integrity(rec_dir, verbose=False):
    print('Raw Data Validation\n' + '-' * 19)
    test_names = ['file_type', 'recording_info', 'files', 'dropped_packets',
                  'data_length']
    number_names = ['sample_rate', 'dropped_packets', 'missing_files',
                    'recording_length']
    tests = dict.fromkeys(test_names, 'NOT TESTED')
    numbers = dict.fromkeys(number_names, -1)

    file_type = dio.rawIO.get_recording_filetype(rec_dir)
    if file_type is None:
        tests['file_type'] = 'UNSUPPORTED'
    else:
        tests['file_type'] = 'PASS'

    # Check info.rhd integrity
    info_file = os.path.join(rec_dir, 'info.rhd')
    try:
        rec_info = dio.rawIO.read_rec_info(rec_dir, shell=True)
        with open(info_file, 'rb') as f:
            info = dio.load_intan_rhd_format.read_header(f)

        tests['recording_info'] = 'PASS'
    except FileNotFoundError:
        tests['recording_info'] = 'MISSING'
    except Exception as e:
        info_size = os.path.getsize(os.path.join(rec_dir, 'info.rhd'))
        if info_size == 0:
            tests['recording_info'] = 'EMPTY'
        else:
            tests['recording_info'] = 'FAIL'

        print(pt.print_dict(tests, tabs=1))
        return tests, numbers

    counts = {x: info[x] for x in info.keys() if 'num' in x}
    numbers.update(counts)
    fs = info['sample_rate']

    # Check all files needed are present
    files_expected = ['time.dat']
    if file_type == 'one file per signal type':
        files_expected.append('amplifier.dat')
        if rec_info.get('dig_in') is not None:
            files_expected.append('digitalin.dat')

        if rec_info.get('dig_out') is not None:
            files_expected.append('digitalout.dat')

        if info['num_auxilary_input_channels'] > 0:
            files_expected.append('auxiliary.dat')
    elif file_type == 'one file per channel':
        for x in info['amplifier_channels']:
            files_expected.append('amp-' + x['native_channel_name'] + '.dat')

        for x in info['board_dig_in_channels']:
            files_expected.append('board-%s.dat' % x['native_channel_name'])

        for x in info['board_dig_out_channels']:
            files_expected.append('board-%s.dat' % x['native_channel_name'])

        for x in info['aux_input_channels']:
            files_expected.append('aux-%s.dat' % x['native_channel_name'])

    missing_files = []
    file_list = os.listdir(rec_dir)
    for x in files_expected:
        if x not in file_list:
            missing_files.append(x)

    if len(missing_files) == 0:
        tests['files'] = 'PASS'
    else:
        tests['files'] = 'MISSING'
        numbers['missing_files'] = missing_files

    # Check time data for dropped packets
    time = dio.rawIO.read_time_dat(rec_dir, sampling_rate=1)  # raw timestamps
    numbers['n_samples'] = len(time)
    numbers['recording_length'] = float(time[-1]) / fs
    expected_time = np.arange(time[0], time[-1] + 1, 1)
    missing_timestamps = np.setdiff1d(expected_time, time)
    missing_times = np.array([float(x) / fs for x in missing_timestamps])
    if len(missing_timestamps) == 0:
        tests['dropped_packets'] = 'PASS'
    else:
        tests['dropped_packets'] = '%i' % len(missing_timestamps)
        numbers['dropped_packets'] = missing_times

    # Check recording length of each trace
    tests['data_traces'] = 'FAIL'
    if file_type == 'one file per signal type':
        try:
            data = dio.rawIO.read_amplifier_dat(rec_dir)
            if data is None:
                tests['data_traces'] = 'UNREADABLE'
            elif data.shape[0] == numbers['n_samples']:
                tests['data_traces'] = 'PASS'
            else:
                tests['data_traces'] = 'CUTOFF'
                numbers['data_trace_length (s)'] = data.shape[0] / fs
        except:
            tests['data_traces'] = 'UNREADABLE'
    elif file_type == 'one file per channel':
        chan_info = pd.DataFrame(columns=['port', 'channel', 'n_samples'])
        lengths = []
        min_samples = numbers['n_samples']
        max_samples = numbers['n_samples']
        for x in info['amplifier_channels']:
            fn = os.path.join(rec_dir,
                              'amp-%s.dat' % x['native_channel_name'])
            if os.path.basename(fn) in missing_files:
                continue

            data = dio.rawIO.read_one_channel_file(fn)
            lengths.append((x['native_channel_name'], data.shape[0]))
            if data.shape[0] < min_samples:
                min_samples = data.shape[0]

            if data.shape[0] > max_samples:
                max_samples = data.shape[0]

        if min_samples == max_samples:
            tests['data_traces'] = 'PASS'
        else:
            tests['data_traces'] = 'CUTOFF'
            numbers['max_recording_length (s)'] = max_samples / fs
            numbers['min_recording_length (s)'] = min_samples / fs
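def _example_validate_recording(rec_dir='/path/to/recording'):
    # Hypothetical usage sketch (not part of the original module): validate a
    # raw recording directory and flag it for review if any check did not
    # pass. 'rec_dir' is a placeholder path, and this assumes the full
    # function returns the tests and numbers dicts the way its early-exit
    # path does.
    tests, numbers = validate_data_integrity(rec_dir, verbose=True)
    problems = {k: v for k, v in tests.items()
                if v not in ('PASS', 'NOT TESTED')}
    if problems:
        print('Potential data issues detected:')
        print(pt.print_dict(problems, tabs=1))

    return tests, numbers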