def mark_dead_channels(self, dead_channels=None, shell=False):
    '''Plots a small piece of the raw traces and an outlier metric to help
    identify dead channels. Once the user marks channels as dead, a new
    'dead' column is added to the electrode mapping.

    Parameters
    ----------
    dead_channels : list of int, optional
        if this is specified then nothing is plotted, those channels are
        simply marked as dead
    shell : bool, optional
        True to use command-line interface for user input
        False (default) for GUI

    Returns
    -------
    list of int
        electrode numbers marked as dead, None if selection was cancelled
    '''
    em = self.electrode_mapping.copy()
    if dead_channels is None:
        fig, ax = datplt.plot_traces_and_outliers(self.h5_file)
        save_file = os.path.join(self.data_dir, 'Electrode_Traces.png')
        fig.savefig(save_file)
        subprocess.call(['xdg-open', save_file])
        choice = userIO.select_from_list('Select dead channels:',
                                         em.Electrode.to_list(),
                                         'Dead Channel Selection',
                                         multi_select=True,
                                         shell=shell)
        plt.close('all')
        plt.ioff()
        if choice is None:
            return None

        dead_channels = list(map(int, choice))

    em['dead'] = False
    em.loc[dead_channels, 'dead'] = True
    self.electrode_mapping = em
    return dead_channels
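# Usage sketch for mark_dead_channels (hypothetical path and channel numbers;
# assumes dataset.load_dataset is importable here, as it is used elsewhere in
# this package). Passing dead_channels skips the plotting/selection step.
def _example_mark_dead_channels():
    rec_dir = '/path/to/recording_dir'  # hypothetical recording directory
    dat = dataset.load_dataset(rec_dir)
    dead = dat.mark_dead_channels(dead_channels=[3, 17])
    print('Marked dead channels:', dead)
    print(dat.electrode_mapping[dat.electrode_mapping['dead']])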
def get_h5_filename(file_dir, shell=False):
    '''Return the name of the h5 file found in file_dir.
    Asks the user to choose if multiple h5 files are found.

    Parameters
    ----------
    file_dir : str, path to recording directory
    shell : bool, optional
        True for command-line interface, False (default) for GUI

    Returns
    -------
    str
        filename of h5 file in directory (not full path), None if no file
        found
    '''
    file_list = os.listdir(file_dir)
    h5_files = [f for f in file_list if f.endswith('.h5')]
    if len(h5_files) > 1:
        choice = userIO.select_from_list('Choose which h5 file to load',
                                         h5_files,
                                         'Multiple h5 stores found',
                                         shell=shell)
        if choice is None:
            return None
        else:
            h5_files = [choice]

    elif len(h5_files) == 0:
        return None

    return h5_files[0]
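# Usage sketch for get_h5_filename (hypothetical directory). The function
# returns only the filename, so join it with the directory to get the full
# path to the h5 store.
def _example_get_h5_filename():
    rec_dir = '/path/to/recording_dir'  # hypothetical recording directory
    h5_name = get_h5_filename(rec_dir, shell=True)
    if h5_name is None:
        print('No h5 store found in %s' % rec_dir)
    else:
        h5_file = os.path.join(rec_dir, h5_name)
        print('Using h5 store: %s' % h5_file)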
def __init__(self, exp_dir=None, shell=False):
    '''Setup for analysis across recording sessions

    Parameters
    ----------
    exp_dir : str (optional)
        path to directory containing all recording directories
        if None (default) is passed then a popup to choose the directory
        will come up
    shell : bool (optional)
        True to use command-line interface for user input
        False (default) for GUI
    '''
    if exp_dir is None:
        exp_dir = eg.diropenbox('Select Experiment Directory',
                                'Experiment Directory')
        if exp_dir is None or exp_dir == '':
            return

    fd = [os.path.join(exp_dir, x) for x in os.listdir(exp_dir)]
    file_dirs = [x for x in fd if os.path.isdir(x)]

    order_dict = dict.fromkeys(file_dirs, 0)
    tmp = userIO.dictIO(order_dict, shell=shell)
    order_dict = tmp.fill_dict(prompt=('Set order of recordings (1-%i)\n'
                                       'Leave blank to delete directory'
                                       ' from list') % len(file_dirs))
    if order_dict is None:
        return

    file_dirs = [k for k, v in order_dict.items()
                 if v is not None and v != 0]
    file_dirs = sorted(file_dirs, key=order_dict.get)
    file_dirs = [x[:-1] if x.endswith('/') else x for x in file_dirs]

    self.recording_dirs = file_dirs
    self.experiment_dir = exp_dir
    self.shell = shell

    dat = dataset.load_dataset(file_dirs[0])
    em = dat.electrode_mapping.copy()
    ingc = userIO.select_from_list('Select all electrodes confirmed in GC',
                                   em['Electrode'], multi_select=True,
                                   shell=shell)
    ingc = list(map(int, ingc))
    em['Area'] = np.where(em['Electrode'].isin(ingc), 'GC', 'Other')
    self.electrode_mapping = em
    self.save_file = os.path.join(exp_dir, '%s_experiment.p' %
                                  os.path.basename(exp_dir))
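# Usage sketch for the constructor above. The class name `experiment` is an
# assumption here (substitute the actual class this __init__ belongs to);
# exp_dir is a hypothetical path containing one sub-directory per recording.
def _example_create_experiment():
    exp_dir = '/path/to/experiment_dir'  # hypothetical experiment directory
    exp = experiment(exp_dir=exp_dir, shell=True)
    print('Recordings in order:', exp.recording_dirs)
    print(exp.electrode_mapping[['Electrode', 'Area']])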
def get_recording_filetype(file_dir, shell=False):
    '''Checks an Intan recording directory to determine the type of
    recording, and thus the extraction method to use. Asks the user to
    confirm the detection and to correct it manually if it is wrong.

    Parameters
    ----------
    file_dir : str, recording directory to check
    shell : bool, optional
        True for command-line interface, False (default) for GUI

    Returns
    -------
    str : file_type of recording
    '''
    file_list = os.listdir(file_dir)
    file_type = None
    for k, v in support_rec_types.items():
        regex = re.compile(v)
        if any(regex.match(x) is not None for x in file_list):
            file_type = k

    if file_type is None:
        msg = '\n   '.join(['unsupported recording type. '
                            'Supported types are:',
                            *list(support_rec_types.keys())])
    else:
        msg = '"' + file_type + '"'

    query = 'Detected recording type is %s \nIs this correct?: ' % msg
    q = userIO.ask_user(query, choices=['Yes', 'No'], shell=shell)
    if q == 0:
        return file_type
    else:
        choice = userIO.select_from_list('Select correct recording type',
                                         list(support_rec_types.keys()),
                                         'Select Recording Type',
                                         shell=shell)
        choice = list(support_rec_types.keys())[choice]
        return choice
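# Usage sketch for get_recording_filetype (hypothetical directory). The
# detected type is one of the keys of support_rec_types and determines how
# the raw Intan data are extracted downstream.
def _example_get_recording_filetype():
    rec_dir = '/path/to/recording_dir'  # hypothetical recording directory
    file_type = get_recording_filetype(rec_dir, shell=True)
    print('Recording type: %s' % file_type)
    print('Supported types: %s' % ', '.join(support_rec_types.keys()))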
def palatability_identity_calculations(rec_dir, pal_ranks=None,
                                       unit_type=None, params=None,
                                       shell=False):
    dat = dataset.load_dataset(rec_dir)
    dim = dat.dig_in_mapping
    if pal_ranks is None:
        dim = get_palatability_ranks(dim, shell=shell)
    elif 'palatability_rank' in dim.columns:
        pass
    else:
        dim['palatability_rank'] = dim['name'].map(pal_ranks)

    dim = dim.dropna(subset=['palatability_rank'])
    dim = dim.reset_index(drop=True)
    num_tastes = len(dim)
    taste_names = dim.name.to_list()

    trial_list = dat.dig_in_trials.copy()
    trial_list = trial_list[[True if x in taste_names else False
                             for x in trial_list.name]]
    num_trials = trial_list.groupby('channel').count()['name'].unique()
    if len(num_trials) > 1:
        raise ValueError('Unequal number of trials for the tastes to be used')
    else:
        num_trials = num_trials[0]

    dim['num_trials'] = num_trials

    # Get which units to use
    unit_table = h5io.get_unit_table(rec_dir)
    unit_types = ['Single', 'Multi', 'All', 'Custom']
    if unit_type is None:
        q = userIO.ask_user('Which units do you want to use for taste '
                            'discrimination and palatability analysis?',
                            choices=unit_types, shell=shell)
        unit_type = unit_types[q]

    if unit_type == 'Single':
        chosen_units = unit_table.loc[unit_table['single_unit'],
                                      'unit_num'].to_list()
    elif unit_type == 'Multi':
        chosen_units = unit_table.loc[unit_table['single_unit'] == False,
                                      'unit_num'].to_list()
    elif unit_type == 'All':
        chosen_units = unit_table['unit_num'].to_list()
    else:
        selection = userIO.select_from_list('Select units to use:',
                                            unit_table['unit_num'],
                                            'Select Units',
                                            multi_select=True)
        chosen_units = list(map(int, selection))

    num_units = len(chosen_units)
    unit_table = unit_table.loc[chosen_units]

    # Enter Parameters
    if params is None or params.keys() != default_pal_id_params.keys():
        params = {'window_size': 250, 'window_step': 25,
                  'num_comparison_bins': 5, 'comparison_bin_size': 250,
                  'discrim_p': 0.01,
                  'pal_deduce_start_time': 700,
                  'pal_deduce_end_time': 1200}
        params = userIO.confirm_parameter_dict(params,
                                               ('Palatability/Identity '
                                                'Calculation Parameters'
                                                '\nTimes in ms'),
                                               shell=shell)

    win_size = params['window_size']
    win_step = params['window_step']
    print('Running palatability/identity calculations with parameters:\n%s'
          % dp.print_dict(params))

    with tables.open_file(dat.h5_file, 'r+') as hf5:
        trains_dig_in = hf5.list_nodes('/spike_trains')
        time = trains_dig_in[0].array_time[:]
        bin_times = np.arange(time[0], time[-1] - win_size + win_step,
                              win_step)
        num_bins = len(bin_times)

        palatability = np.empty((num_bins, num_units,
                                 num_tastes*num_trials), dtype=int)
        identity = np.empty((num_bins, num_units, num_tastes*num_trials),
                            dtype=int)
        unscaled_response = np.empty((num_bins, num_units,
                                      num_tastes*num_trials),
                                     dtype=np.dtype('float64'))
        response = np.empty((num_bins, num_units, num_tastes*num_trials),
                            dtype=np.dtype('float64'))
        laser = np.empty((num_bins, num_units, num_tastes*num_trials, 2),
                         dtype=float)

        # Fill arrays with data
        print('Filling data arrays...')
        onesies = np.ones((num_bins, num_units, num_trials))
        for i, row in dim.iterrows():
            idx = range(num_trials*i, num_trials*(i+1))
            palatability[:, :, idx] = row.palatability_rank * onesies
            identity[:, :, idx] = row.dig_in * onesies
            for j, u in enumerate(chosen_units):
                for k, t in enumerate(bin_times):
                    t_idx = np.where((time >= t) & (time <= t+win_size))[0]
                    unscaled_response[k, j, idx] = \
                        np.mean(trains_dig_in[i].spike_array[:, u, t_idx],
                                axis=1)
                    try:
                        # transposed so rows are trials and columns are
                        # (duration, lag), matching the laser array shape
                        laser[k, j, idx] = \
                            np.vstack((trains_dig_in[i].laser_durations[:],
                                       trains_dig_in[i].laser_onset_lag[:])).T
                    except Exception:
                        laser[k, j, idx] = np.zeros((num_trials, 2))

        # Scaling was not done, so:
        response = unscaled_response.copy()

        # Make ancillary_analysis node and put in arrays
        if '/ancillary_analysis' in hf5:
            hf5.remove_node('/ancillary_analysis', recursive=True)

        hf5.create_group('/', 'ancillary_analysis')
        hf5.create_array('/ancillary_analysis', 'palatability', palatability)
        hf5.create_array('/ancillary_analysis', 'identity', identity)
        hf5.create_array('/ancillary_analysis', 'laser', laser)
        hf5.create_array('/ancillary_analysis', 'scaled_neural_response',
                         response)
        hf5.create_array('/ancillary_analysis', 'window_params',
                         np.array([win_size, win_step]))
        hf5.create_array('/ancillary_analysis', 'bin_times', bin_times)
        hf5.create_array('/ancillary_analysis', 'unscaled_neural_response',
                         unscaled_response)

        # for backwards compatibility
        hf5.create_array('/ancillary_analysis', 'params',
                         np.array([win_size, win_step]))
        hf5.create_array('/ancillary_analysis', 'pre_stim',
                         np.array(time[0]))
        hf5.flush()

        # Get unique laser (duration, lag) combinations
        print('Organizing trial data...')
        unique_lasers = np.vstack(list({tuple(row)
                                        for row in laser[0, 0, :, :]}))
        unique_lasers = unique_lasers[unique_lasers[:, 1].argsort(), :]
        num_conditions = unique_lasers.shape[0]
        trials = []
        for row in unique_lasers:
            tmp_trials = [j for j in range(num_trials * num_tastes)
                          if np.array_equal(laser[0, 0, j, :], row)]
            trials.append(tmp_trials)

        trials_per_condition = [len(x) for x in trials]
        if not all(x == trials_per_condition[0]
                   for x in trials_per_condition):
            raise ValueError('Different number of trials for each laser '
                             'condition')

        # assumes same number of trials per taste per condition
        trials_per_condition = int(trials_per_condition[0] / num_tastes)
        print('Detected:\n    %i tastes\n    %i laser conditions\n'
              '    %i trials per condition per taste'
              % (num_tastes, num_conditions, trials_per_condition))
        trials = np.array(trials)

        # Store laser conditions and indices of trials per condition in
        # trial x taste space
        hf5.create_array('/ancillary_analysis', 'trials', trials)
        hf5.create_array('/ancillary_analysis', 'laser_combination_d_l',
                         unique_lasers)
        hf5.flush()

        # Taste Similarity Calculation
        neural_response_laser = np.empty((num_conditions, num_bins,
                                          num_tastes, num_units,
                                          trials_per_condition),
                                         dtype=np.dtype('float64'))
        taste_cosine_similarity = np.empty((num_conditions, num_bins,
                                            num_tastes, num_tastes),
                                           dtype=np.dtype('float64'))
        taste_euclidean_distance = np.empty((num_conditions, num_bins,
                                             num_tastes, num_tastes),
                                            dtype=np.dtype('float64'))

        # Re-format neural responses from bin x unit x (trial*taste) to
        # laser_condition x bin x taste x unit x trial
        print('Reformatting data arrays...')
        for i, trial in enumerate(trials):
            for j, _ in enumerate(bin_times):
                for k, _ in dim.iterrows():
                    idx = np.where((trial >= num_trials*k) &
                                   (trial < num_trials*(k+1)))[0]
                    neural_response_laser[i, j, k, :, :] = \
                        response[j, :, trial[idx]].T

        # Compute taste cosine similarity and euclidean distances
        print('Computing taste cosine similarity and euclidean distances...')
        for i, _ in enumerate(trials):
            for j, _ in enumerate(bin_times):
                for k, _ in dim.iterrows():
                    for l, _ in dim.iterrows():
                        taste_cosine_similarity[i, j, k, l] = \
                            np.mean(cosine_similarity(
                                neural_response_laser[i, j, k, :, :].T,
                                neural_response_laser[i, j, l, :, :].T))
                        taste_euclidean_distance[i, j, k, l] = \
                            np.mean(cdist(
                                neural_response_laser[i, j, k, :, :].T,
                                neural_response_laser[i, j, l, :, :].T,
                                metric='euclidean'))

        hf5.create_array('/ancillary_analysis', 'taste_cosine_similarity',
                         taste_cosine_similarity)
        hf5.create_array('/ancillary_analysis', 'taste_euclidean_distance',
                         taste_euclidean_distance)
        hf5.flush()

        # Taste Responsiveness calculations
        bin_params = [params['num_comparison_bins'],
                      params['comparison_bin_size']]
        discrim_p = params['discrim_p']

        responsive_neurons = []
        discriminating_neurons = []
        taste_responsiveness = np.zeros((bin_params[0], num_units, 2))
        new_bin_times = np.arange(0, np.prod(bin_params), bin_params[1])
        baseline = np.where(bin_times < 0)[0]
        print('Computing taste responsiveness and taste discrimination...')
        for i, t in enumerate(new_bin_times):
            places = np.where((bin_times >= t) &
                              (bin_times <= t+bin_params[1]))[0]
            for j, u in enumerate(chosen_units):
                # Check taste responsiveness
                f, p = f_oneway(np.mean(response[places, j, :], axis=0),
                                np.mean(response[baseline, j, :], axis=0))
                if np.isnan(f):
                    f = 0.0
                    p = 1.0

                if p <= discrim_p and u not in responsive_neurons:
                    responsive_neurons.append(u)
                    taste_responsiveness[i, j, 0] = 1

                # Check taste discrimination
                taste_idx = [np.arange(num_trials*k, num_trials*(k+1))
                             for k in range(num_tastes)]
                taste_responses = [np.mean(response[places, j, :][:, k],
                                           axis=0)
                                   for k in taste_idx]
                f, p = f_oneway(*taste_responses)
                if np.isnan(f):
                    f = 0.0
                    p = 1.0

                if p <= discrim_p and u not in discriminating_neurons:
                    discriminating_neurons.append(u)

        responsive_neurons = np.sort(responsive_neurons)
        discriminating_neurons = np.sort(discriminating_neurons)

        # Write taste responsive and taste discriminating units to text file
        save_file = os.path.join(rec_dir,
                                 'discriminative_responsive_neurons.txt')
        with open(save_file, 'w') as f:
            print('Taste discriminative neurons', file=f)
            for u in discriminating_neurons:
                print(u, file=f)

            print('Taste responsive neurons', file=f)
            for u in responsive_neurons:
                print(u, file=f)

        hf5.create_array('/ancillary_analysis',
                         'taste_disciminating_neurons',
                         discriminating_neurons)
        hf5.create_array('/ancillary_analysis', 'taste_responsive_neurons',
                         responsive_neurons)
        hf5.create_array('/ancillary_analysis', 'taste_responsiveness',
                         taste_responsiveness)
        hf5.flush()

        # Get time course of taste discriminability
        print('Getting taste discrimination time course...')
        p_discrim = np.empty((num_conditions, num_bins, num_tastes,
                              num_tastes, num_units),
                             dtype=np.dtype('float64'))
        for i in range(num_conditions):
            for j, t in enumerate(bin_times):
                for k in range(num_tastes):
                    for l in range(num_tastes):
                        for m in range(num_units):
                            _, p = ttest_ind(
                                neural_response_laser[i, j, k, m, :],
                                neural_response_laser[i, j, l, m, :],
                                equal_var=False)
                            if np.isnan(p):
                                p = 1.0

                            p_discrim[i, j, k, l, m] = p

        hf5.create_array('/ancillary_analysis', 'p_discriminability',
                         p_discrim)
        hf5.flush()

        # Palatability Rank Order calculation (if > 2 tastes)
        t_start = params['pal_deduce_start_time']
        t_end = params['pal_deduce_end_time']
        if num_tastes > 2:
            print('Deducing palatability rank order...')
            palatability_rank_order_deduction(rec_dir, neural_response_laser,
                                              unique_lasers, bin_times,
                                              [t_start, t_end])

        # Palatability calculation
        r_spearman = np.zeros((num_conditions, num_bins, num_units))
        p_spearman = np.ones((num_conditions, num_bins, num_units))
        r_pearson = np.zeros((num_conditions, num_bins, num_units))
        p_pearson = np.ones((num_conditions, num_bins, num_units))
        f_identity = np.ones((num_conditions, num_bins, num_units))
        p_identity = np.ones((num_conditions, num_bins, num_units))
        lda_palatability = np.zeros((num_conditions, num_bins))
        lda_identity = np.zeros((num_conditions, num_bins))
        r_isotonic = np.zeros((num_conditions, num_bins, num_units))
        id_pal_regress = np.zeros((num_conditions, num_bins, num_units, 2))
        pairwise_identity = np.zeros((num_conditions, num_bins, num_tastes,
                                      num_tastes))
        print('Computing palatability metrics...')
        for i, t in enumerate(trials):
            for j in range(num_bins):
                for k in range(num_units):
                    ranks = rankdata(response[j, k, t])
                    r_spearman[i, j, k], p_spearman[i, j, k] = \
                        spearmanr(ranks, palatability[j, k, t])
                    r_pearson[i, j, k], p_pearson[i, j, k] = \
                        pearsonr(response[j, k, t], palatability[j, k, t])
                    if np.isnan(r_spearman[i, j, k]):
                        r_spearman[i, j, k] = 0.0
                        p_spearman[i, j, k] = 1.0

                    if np.isnan(r_pearson[i, j, k]):
                        r_pearson[i, j, k] = 0.0
                        p_pearson[i, j, k] = 1.0

                    # Isotonic regression of firing against palatability
                    model = IsotonicRegression(increasing='auto')
                    model.fit(palatability[j, k, t], response[j, k, t])
                    r_isotonic[i, j, k] = model.score(palatability[j, k, t],
                                                      response[j, k, t])

                    # Multiple Regression of firing rate against
                    # palatability and identity
                    # Regress palatability on identity
                    tmp_id = identity[j, k, t].reshape(-1, 1)
                    tmp_pal = palatability[j, k, t].reshape(-1, 1)
                    tmp_resp = response[j, k, t].reshape(-1, 1)
                    model_pi = LinearRegression()
                    model_pi.fit(tmp_id, tmp_pal)
                    pi_residuals = tmp_pal - model_pi.predict(tmp_id)

                    # Regress identity on palatability
                    model_ip = LinearRegression()
                    model_ip.fit(tmp_pal, tmp_id)
                    ip_residuals = tmp_id - model_ip.predict(tmp_pal)

                    # Regress firing on identity
                    model_fi = LinearRegression()
                    model_fi.fit(tmp_id, tmp_resp)
                    fi_residuals = tmp_resp - model_fi.predict(tmp_id)

                    # Regress firing on palatability
                    model_fp = LinearRegression()
                    model_fp.fit(tmp_pal, tmp_resp)
                    fp_residuals = tmp_resp - model_fp.predict(tmp_pal)

                    # Get partial correlation coefficients of response with
                    # identity (controlling for palatability) and with
                    # palatability (controlling for identity); residual
                    # column vectors are flattened for pearsonr
                    idp_reg0, p = pearsonr(fp_residuals.ravel(),
                                           ip_residuals.ravel())
                    if np.isnan(idp_reg0):
                        idp_reg0 = 0.0

                    idp_reg1, p = pearsonr(fi_residuals.ravel(),
                                           pi_residuals.ravel())
                    if np.isnan(idp_reg1):
                        idp_reg1 = 0.0

                    id_pal_regress[i, j, k, 0] = idp_reg0
                    id_pal_regress[i, j, k, 1] = idp_reg1

                    # Identity Calculation
                    samples = []
                    for _, row in dim.iterrows():
                        taste = row.dig_in
                        samples.append([trial for trial in t
                                        if identity[j, k, trial] == taste])

                    tmp_resp = [response[j, k, sample] for sample in samples]
                    f_identity[i, j, k], p_identity[i, j, k] = \
                        f_oneway(*tmp_resp)
                    if np.isnan(f_identity[i, j, k]):
                        f_identity[i, j, k] = 0.0
                        p_identity[i, j, k] = 1.0

                # Linear Discriminant analysis for palatability
                X = response[j, :, t]
                Y = palatability[j, 0, t]
                test_results = []
                c_validator = LeavePOut(1)
                for train, test in c_validator.split(X, Y):
                    model = LDA()
                    model.fit(X[train, :], Y[train])
                    tmp = np.mean(model.predict(X[test]) == Y[test])
                    test_results.append(tmp)

                lda_palatability[i, j] = np.mean(test_results)

                # Linear Discriminant analysis for identity
                Y = identity[j, 0, t]
                test_results = []
                c_validator = LeavePOut(1)
                for train, test in c_validator.split(X, Y):
                    model = LDA()
                    model.fit(X[train, :], Y[train])
                    tmp = np.mean(model.predict(X[test]) == Y[test])
                    test_results.append(tmp)

                lda_identity[i, j] = np.mean(test_results)

                # Pairwise Identity Calculation
                for _, r1 in dim.iterrows():
                    for _, r2 in dim.iterrows():
                        t1 = r1.dig_in
                        t2 = r2.dig_in
                        tmp_trials = np.where((identity[j, 0, :] == t1) |
                                              (identity[j, 0, :] == t2))[0]
                        idx = [trial for trial in t if trial in tmp_trials]
                        X = response[j, :, idx]
                        Y = identity[j, 0, idx]
                        test_results = []
                        c_validator = StratifiedShuffleSplit(
                            n_splits=10, test_size=0.25, random_state=0)
                        for train, test in c_validator.split(X, Y):
                            model = GaussianNB()
                            model.fit(X[train, :], Y[train])
                            tmp_score = model.score(X[test, :], Y[test])
                            test_results.append(tmp_score)

                        pairwise_identity[i, j, t1, t2] = \
                            np.mean(test_results)

        hf5.create_array('/ancillary_analysis', 'r_pearson', r_pearson)
        hf5.create_array('/ancillary_analysis', 'r_spearman', r_spearman)
        hf5.create_array('/ancillary_analysis', 'p_pearson', p_pearson)
        hf5.create_array('/ancillary_analysis', 'p_spearman', p_spearman)
        hf5.create_array('/ancillary_analysis', 'lda_palatability',
                         lda_palatability)
        hf5.create_array('/ancillary_analysis', 'lda_identity', lda_identity)
        hf5.create_array('/ancillary_analysis', 'r_isotonic', r_isotonic)
        hf5.create_array('/ancillary_analysis', 'id_pal_regress',
                         id_pal_regress)
        hf5.create_array('/ancillary_analysis', 'f_identity', f_identity)
        hf5.create_array('/ancillary_analysis', 'p_identity', p_identity)
        hf5.create_array('/ancillary_analysis', 'pairwise_NB_identity',
                         pairwise_identity)
        hf5.flush()
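# Usage sketch for palatability_identity_calculations (hypothetical path,
# taste names, and ranks). pal_ranks maps dig_in names to palatability ranks;
# leaving params as None confirms the defaults via userIO. Results are
# written under /ancillary_analysis in the recording's h5 store.
def _example_palatability_identity():
    rec_dir = '/path/to/recording_dir'  # hypothetical recording directory
    pal_ranks = {'Water': 1, 'NaCl': 2, 'Sucrose': 3}  # hypothetical tastes
    palatability_identity_calculations(rec_dir, pal_ranks=pal_ranks,
                                       unit_type='Single', shell=True)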
def get_CAR_groups(num_groups, electrode_mapping, shell=False):
    '''Returns the number of common average reference (CAR) groups and the
    electrodes assigned to each group.

    num_groups can be set to an integer or to 'bilateral32'. Passing
    'bilateral32' automatically assigns channels to 2 groups
    ([0-7, 24-31] & [8-23], assuming both sides are on the same port);
    passing 1 puts all channels into a single group; passing a larger
    number allows choice of channels for each group.

    Parameters
    ----------
    num_groups : int or 'bilateral32'
        number of CAR groups. 'bilateral32' automatically assigns the first
        and last 8 electrodes to group 1 and the middle 16 to group 2
    electrode_mapping : pandas.DataFrame
        mapping of electrode numbers to port and channel, has columns:
        'Electrode', 'Port' and 'Channel'
    shell : bool, optional
        True for command-line interface, False (default) for GUI

    Returns
    -------
    num_groups : int, number of CAR groups
    car_electrodes : list of lists of ints, list with a list of electrodes
        for each CAR group

    Raises
    ------
    ValueError : if num_groups is not a valid int (>0) or 'bilateral32'
    '''
    if num_groups == 'bilateral32':
        num_groups = 2
        implant_type = 'bilateral32'
    elif isinstance(num_groups, int) and num_groups > 0:
        implant_type = None
    else:
        raise ValueError('num_groups must be an integer >0 or the string '
                         "'bilateral32'")

    electrodes = electrode_mapping['Electrode'].tolist()
    car_electrodes = []

    if implant_type == 'bilateral32':
        g1 = electrodes[:8]
        g1.extend(electrodes[-8:])
        g2 = electrodes[8:-8]
        car_electrodes = [g1, g2]
    elif num_groups == 1:
        car_electrodes.append(electrodes)
    else:
        select_list = []
        for idx, row in electrode_mapping.iterrows():
            select_list.append(', '.join([str(x) for x in row]))

        for i in range(num_groups):
            tmp = userIO.select_from_list('Choose CAR electrodes for group %i'
                                          ': [Electrode, Port, Channel]' % i,
                                          select_list,
                                          title='Group %i Electrodes' % i,
                                          multi_select=True, shell=shell)
            if tmp is None:
                raise ValueError('Must select electrodes for CAR groups')

            car_electrodes.append([int(x.split(',')[0]) for x in tmp])

    if 'dead' in electrode_mapping.columns:
        dead_ch = electrode_mapping['Electrode'][electrode_mapping['dead']]
        dead_ch = dead_ch.to_list()
        for group in car_electrodes:
            for dc in dead_ch:
                if dc in group:
                    group.remove(dc)

    return num_groups, car_electrodes
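# Usage sketch for get_CAR_groups. The electrode_mapping DataFrame is built
# by hand here for illustration; in practice it comes from the dataset's
# electrode_mapping attribute (see initParams below). 'bilateral32' splits a
# 32-channel implant into [0-7, 24-31] and [8-23] without any prompts.
def _example_get_car_groups():
    import pandas as pd  # local import so the example is self-contained
    em = pd.DataFrame({'Electrode': list(range(32)),
                       'Port': ['A'] * 32,
                       'Channel': list(range(32))})
    n_groups, car_electrodes = get_CAR_groups('bilateral32', em)
    print('%i CAR groups' % n_groups)
    for i, group in enumerate(car_electrodes):
        print('Group %i: %s' % (i, group))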
def initParams(self, data_quality='clean', emg_port=None,
               emg_channels=None, shell=False,
               dig_in_names=None, dig_out_names=None,
               spike_array_params=None, psth_params=None,
               confirm_all=False):
    '''Initializes basic default analysis parameters that can be customized
    before running processing methods.

    data_quality can be 'clean' or 'noisy' to preset some parameters that
    are useful for the different recording types. Best practice is to run
    as 'clean' (default) and to re-run as 'noisy' if you notice that a lot
    of electrodes are cut off early.
    '''
    # Get parameters from info.rhd
    file_dir = self.data_dir
    rec_info = dio.rawIO.read_rec_info(file_dir, shell)
    ports = rec_info.pop('ports')
    channels = rec_info.pop('channels')
    sampling_rate = rec_info['amplifier_sampling_rate']
    self.rec_info = rec_info
    self.sampling_rate = sampling_rate

    # Get default parameters for blech_clust
    clustering_params = deepcopy(dio.params.clustering_params)
    data_params = deepcopy(dio.params.data_params[data_quality])
    bandpass_params = deepcopy(dio.params.bandpass_params)
    spike_snapshot = deepcopy(dio.params.spike_snapshot)
    if spike_array_params is None:
        spike_array_params = deepcopy(dio.params.spike_array_params)

    if psth_params is None:
        psth_params = deepcopy(dio.params.psth_params)

    # Ask for emg port & channels
    if emg_port is None and not shell:
        q = eg.ynbox('Do you have an EMG?', 'EMG')
        if q:
            emg_port = userIO.select_from_list('Select EMG Port:',
                                               ports, 'EMG Port',
                                               shell=shell)
            emg_channels = userIO.select_from_list(
                'Select EMG Channels:',
                [y for x, y in zip(ports, channels) if x == emg_port],
                title='EMG Channels',
                multi_select=True, shell=shell)

    elif emg_port is None and shell:
        print('\nNo EMG port given.\n')

    electrode_mapping, emg_mapping = dio.params.flatten_channels(
        ports, channels, emg_port=emg_port, emg_channels=emg_channels)
    self.electrode_mapping = electrode_mapping
    self.emg_mapping = emg_mapping

    # Get digital input names and spike array parameters
    if rec_info.get('dig_in'):
        if dig_in_names is None:
            dig_in_names = dict.fromkeys(['dig_in_%i' % x
                                          for x in rec_info['dig_in']])
            name_filler = userIO.dictIO(dig_in_names, shell=shell)
            dig_in_names = name_filler.fill_dict('Enter names for '
                                                 'digital inputs:')
            if dig_in_names is None or \
                    any([x is None for x in dig_in_names.values()]):
                raise ValueError('Must name all dig_ins')

            dig_in_names = list(dig_in_names.values())

        if spike_array_params['laser_channels'] is None:
            laser_dict = dict.fromkeys(dig_in_names, False)
            laser_filler = userIO.dictIO(laser_dict, shell=shell)
            laser_dict = laser_filler.fill_dict('Select any lasers:')
            if laser_dict is None:
                laser_dict = {}
                laser_channels = []
            else:
                laser_channels = [i for i, v in
                                  zip(rec_info['dig_in'],
                                      laser_dict.values()) if v]

            spike_array_params['laser_channels'] = laser_channels
        else:
            # laser_channels must be defined for the dig_in selection below
            laser_channels = spike_array_params['laser_channels']
            laser_dict = dict.fromkeys(dig_in_names, False)
            for lc in laser_channels:
                laser_dict[dig_in_names[lc]] = True

        if spike_array_params['dig_ins_to_use'] is None:
            di = [x for x in rec_info['dig_in'] if x not in laser_channels]
            dn = [dig_in_names[x] for x in di]
            spike_dig_dict = dict.fromkeys(dn, True)
            filler = userIO.dictIO(spike_dig_dict, shell=shell)
            spike_dig_dict = filler.fill_dict('Select digital inputs '
                                              'to use for making spike'
                                              ' arrays:')
            if spike_dig_dict is None:
                spike_dig_ins = []
            else:
                spike_dig_ins = [x for x, y in
                                 zip(di, spike_dig_dict.values()) if y]

            spike_array_params['dig_ins_to_use'] = spike_dig_ins

        dim = pd.DataFrame([(x, y) for x, y in zip(rec_info['dig_in'],
                                                   dig_in_names)],
                           columns=['dig_in', 'name'])
        dim['laser'] = dim['name'].apply(lambda x: laser_dict.get(x))
        self.dig_in_mapping = dim.copy()

    # Get digital output names
    if rec_info.get('dig_out'):
        if dig_out_names is None:
            dig_out_names = dict.fromkeys(['dig_out_%i' % x
                                           for x in rec_info['dig_out']])
            name_filler = userIO.dictIO(dig_out_names, shell=shell)
            dig_out_names = name_filler.fill_dict('Enter names for '
                                                  'digital outputs:')
            if dig_out_names is None or \
                    any([x is None for x in dig_out_names.values()]):
                raise ValueError('Must name all dig_outs')

            dig_out_names = list(dig_out_names.values())

        self.dig_out_mapping = pd.DataFrame(
            [(x, y) for x, y in zip(rec_info['dig_out'], dig_out_names)],
            columns=['dig_out', 'name'])

    # Store clustering parameters
    self.clust_params = {'file_dir': file_dir,
                         'data_quality': data_quality,
                         'sampling_rate': sampling_rate,
                         'clustering_params': clustering_params,
                         'data_params': data_params,
                         'bandpass_params': bandpass_params,
                         'spike_snapshot': spike_snapshot}

    # Store and confirm spike array parameters
    spike_array_params['sampling_rate'] = sampling_rate
    self.spike_array_params = spike_array_params
    self.psth_params = psth_params
    if not confirm_all:
        prompt = ('\n----------\nSpike Array Parameters\n----------\n' +
                  dp.print_dict(spike_array_params) +
                  '\nAre these parameters good?')
        q_idx = userIO.ask_user(prompt, ('Yes', 'Edit'), shell=shell)
        if q_idx == 1:
            self.edit_spike_array_parameters(shell=shell)

        # Edit and store psth parameters
        prompt = ('\n----------\nPSTH Parameters\n----------\n' +
                  dp.print_dict(psth_params) +
                  '\nAre these parameters good?')
        q_idx = userIO.ask_user(prompt, ('Yes', 'Edit'), shell=shell)
        if q_idx == 1:
            self.edit_psth_parameters(shell=shell)

    self.save()
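# Usage sketch for initParams (hypothetical dig_in names). `dat` is assumed
# to be a dataset object for the recording directory, constructed or loaded
# elsewhere. confirm_all=True skips the spike array / PSTH confirmation
# prompts; other selections (e.g. lasers) may still be asked for.
def _example_init_params(dat):
    dat.initParams(data_quality='noisy', shell=True,
                   dig_in_names=['Water', 'NaCl', 'Quinine', 'Sucrose'],
                   confirm_all=True)
    print(dat.dig_in_mapping)
    print(dat.spike_array_params)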