def variance_extract(file_paths):
    """
    Calculate the response variance (VarCE) of hidden units and identify the
    units that are selective to evidence.
    """
    df_VarCE = pd.DataFrame([], columns=['label', 'choice_left', 'choice_right', 'start_all'])
    df_signN = pd.DataFrame([], columns=['label', 'evid_pos', 'evid_neg'])
    for i, file in enumerate(file_paths):
        ## load files
        paod, trial_briefs = load(file)
        trial, choice, shape, _, _ = get_bhvinfo(paod, trial_briefs)
        resp_hidden = get_hidden_resp_all(paod, trial_briefs)
        ## group the responses by choice
        resp_group = resp_grouping(resp_hidden, choice.left, shape.rt)
        # variance of each response group
        var_ce = {}
        for key, value in resp_group.items():
            var_ce[key] = variance(np.dstack(value))
        df_VarCE.loc[i] = {'label': file,
                           'choice_left': var_ce['choice_left'],
                           'choice_right': var_ce['choice_right'],
                           'start_all': var_ce['start_all']}
        # regress the hidden responses against the task factors
        results = regress_resp(resp_hidden, trial, choice, shape)
        # 128 neurons by 5 time points by 5 parameters (1 bias term + 4 factors);
        # index 1 represents evidence selectivity
        params = np.array(results['params'])[:, :, 1]
        pvalue = np.array(results['p_values'])[:, :, 1]
        p_threshold = 0.05 / pvalue.size  # Bonferroni correction
        np.warnings.filterwarnings('ignore')
        # units significant at every time point, split by the sign of the coefficient
        signNeuron = np.all(pvalue < p_threshold, axis=1)
        posNeuron = np.all(params > -1e-10, axis=1)
        negNeuron = np.all(params < 1e-10, axis=1)
        df_signN.loc[i] = {'label': file,
                           'evid_pos': np.logical_and(signNeuron, posNeuron),
                           'evid_neg': np.logical_and(signNeuron, negNeuron)}
    return df_VarCE, df_signN
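# Usage sketch (hypothetical paths; variance_extract only needs the list of
# trained-model log files, everything else is loaded internally):
#
#   file_paths = sorted(glob.glob('./log/rt_task/*.hdf5'))
#   df_VarCE, df_signN = variance_extract(file_paths)
#   n_pos = df_signN['evid_pos'].apply(np.sum)  # positively evidence-selective units per file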
def shape_prediction(path_files):
    df_pred = pd.DataFrame([], columns=['label', 'dist_neuro', 'weight', 'kl_div'])
    for i, file in enumerate(path_files):
        ## load files
        paod, trial_briefs = load(file)
        trial, _, shape, _, _ = get_bhvinfo(paod, trial_briefs)
        # number of shape epochs (reaction time in shapes) of each trial
        num_shapes = shape.rt.values
        #======================== shape prediction ========================
        dist_neuro, weight = [], []
        # note: the inner loop must not reuse the file index i,
        # otherwise df_pred.loc[i] below would be mis-indexed
        for n, rt in enumerate(num_shapes):
            _, _, rd, _ = paod.get_neuron_behavior_pair(index=trial.num.iloc[n])
            for j in range(rt):
                # the shapes in all the epochs are included:
                # cumulative weight of the shapes presented before this epoch
                weight.append(shape.tempweight.iloc[n][:j].sum())
                # response of the units indexed by shapes_pos before the shape onset
                dist_neuro.append(rd[2 + j * 5, shapes_pos])
        # the summed weight before each shape onset
        weight = np.array(weight)
        # the response of neurons before the shape onset
        dist_neuro = np.asarray(dist_neuro)
        # bin the summed weights into 10 equally spaced groups
        sega = np.linspace(weight.min(), weight.max(), num=11, endpoint=True)
        kl_div = []
        for n_group in range(len(sega) - 1):
            index = (weight >= sega[n_group]) & (weight < sega[n_group + 1])
            if n_group == len(sega) - 2:
                # the last bin also includes the right edge
                index = (weight >= sega[n_group]) & (weight <= sega[n_group + 1])
            # the normalized mean response is treated as the predicted shape distribution
            pred_dist = np.mean(dist_neuro[index], axis=0)
            pred_dist = pred_dist / np.sum(pred_dist)
            # KL divergence to the true left/right shape distributions
            kl_div.append([stats.entropy(pred_dist, qk=prob_L),
                           stats.entropy(pred_dist, qk=prob_R)])
        df_pred.loc[i] = {'label': file,
                          'weight': weight,
                          'kl_div': np.array(kl_div),
                          'dist_neuro': dist_neuro}
    return df_pred
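# Usage sketch (hypothetical paths; prob_L, prob_R and shapes_pos are module-level
# constants assumed to be defined elsewhere in this file):
#
#   df_pred = shape_prediction(sorted(glob.glob('./log/rt_task/*.hdf5')))
#   kl_all = np.dstack(df_pred['kl_div'].values)  # weight bins x [KL to left, KL to right] x files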
def psth_extract(path_file):
    psth_resp = []
    unitUrg = {'pos': [], 'neg': []}
    unitEvi = {'pos': [], 'neg': []}
    # indices of units whose coefficients are positive/negative at every time point
    findPos = lambda x, y: np.where(np.all(x > y, axis=1))[0]
    findNeg = lambda x, y: np.where(np.all(x < y, axis=1))[0]
    for file in path_file:
        # load behavioral data
        paod, trial_briefs = load(file)
        trial, choice, shapes, _, _ = get_bhvinfo(paod, trial_briefs)
        # load neural responses of the hidden layer
        resp_hidden = get_hidden_resp_all(paod, trial_briefs)
        # sanity check: no NaN in the hidden responses
        for resp in resp_hidden:
            assert not np.any(np.isnan(resp))
        # neuronal selectivity test
        results = regress_resp(resp_hidden, trial, choice, shapes)
        # find units that are selective to evidence/urgency
        # 128 neurons by 5 time points by 5 parameters (1 bias term + 4 factors)
        params = np.array(results['params'])
        p_value = np.array(results['p_values'])
        threshold = 0.05 / p_value.size  # Bonferroni correction
        np.warnings.filterwarnings('ignore')
        # units significant at every time point (index 3: urgency, index 4: evidence)
        signUrg = np.where(np.all(p_value[:, :, 3] < threshold, axis=1))[0]
        signEvi = np.where(np.all(p_value[:, :, 4] < threshold, axis=1))[0]
        # units with positive/negative selectivity to urgency and evidence
        unitUrg['pos'] = np.intersect1d(signUrg, findPos(params[:, :, 3], -1e-10))
        unitUrg['neg'] = np.intersect1d(signUrg, findNeg(params[:, :, 3], 1e-10))
        unitEvi['pos'] = np.intersect1d(signEvi, findPos(params[:, :, 4], -1e-10))
        unitEvi['neg'] = np.intersect1d(signEvi, findNeg(params[:, :, 4], 1e-10))
        # align the PSTHs of the selective units
        resp_urg, resp_evi = psth_align(resp_hidden, choice, shapes, unitUrg, unitEvi)
        psth_resp.append([resp_urg, resp_evi])
    return psth_resp
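# Usage sketch (hypothetical paths): each element of the returned list holds the
# urgency- and evidence-aligned PSTHs of one model file.
#
#   psth_resp = psth_extract(sorted(glob.glob('./log/rt_task/*.hdf5')))
#   resp_urg, resp_evi = psth_resp[0]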
def data_extract(file_paths):
    """
    Extract behavioral performance and psychometric fits in the multisensory task.
    """
    df_detail = []
    # 'prpt_Baye' denotes the fit based on the Bayesian-inference estimates
    df_summary = pd.DataFrame([], columns=['cho_prob', 'prpt', 'prpt_Baye'])
    for nth, file in enumerate(file_paths):
        paod, trial_briefs = load(file)
        trials = get_multinfo(paod, trial_briefs)
        # trial-by-trial records of this file
        files_pd = pd.DataFrame(
            [trials["choice"], trials["reward"], trials["chosen"],
             trials["modality"], trials["direction"], trials["estimates"]],
            index=['choice', 'reward', 'chosen', 'modality', 'direction', 'estimates'])
        files_pd = files_pd.T
        df_detail.append(files_pd)
        # choice probability for each direction and modality
        cho_prob = [[], [], []]
        for i in np.unique(trials["direction"]):
            trial = np.where(trials["direction"] == i)[0]
            for ii in range(3):
                temp = np.intersect1d(trial, np.where(trials["modality"] == ii))
                cho_prob[ii].append(np.mean(trials["choice"][temp] == 1))
        # choice predicted by the Bayesian estimate of each trial's own modality
        modality = trials["modality"]
        Baye_choice = np.diag(np.vstack(trials["estimates"].values)[:, modality]) + 1
        # the std of the fitted Gaussian curve is taken as the psychophysical threshold
        prpt = psych_curve(trials["direction"], trials["choice"], trials["modality"])
        prpt_Baye = psych_curve(trials["direction"], Baye_choice, trials["modality"])
        df_summary.loc[nth] = {'cho_prob': cho_prob,
                               'prpt': prpt,
                               'prpt_Baye': prpt_Baye}
    return df_detail, df_summary
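# Usage sketch (hypothetical paths): df_detail keeps the trial-by-trial records,
# df_summary keeps one row of psychometric fits per model file.
#
#   df_detail, df_summary = data_extract(sorted(glob.glob('./log/multi_task/*.hdf5')))
#   thresholds = df_summary['prpt'].values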
def bhv_extract(file_paths):
    """
    Extract summary behavioral measures (reaction time, correct rate,
    psychometric curve) from each model file.
    """
    df_basic = pd.DataFrame([], columns=['rt_mean', 'rt_sem', 'choice_prop',
                                         'cr', 'cr_log', 'fr', 'label'])
    df_logRT = pd.DataFrame([], columns=['right_rt_log', 'left_rt_log',
                                         'rt', 'choice'])
    df_psycv = pd.DataFrame([], columns=['cr', 'fr', 'psy_curve',
                                         'fitting_x0', 'fitting_k'])
    for i, file in enumerate(file_paths):
        paod, trial_briefs = load(file)
        _, choice, shape, _, finish_rate = get_bhvinfo(paod, trial_briefs)
        # reaction time (log scale) conditioned on the choice
        right_rt_log, left_rt_log = log_rt(shape, choice)
        # psychometric curve and its fitted parameters
        psy_curve, prpt = psych_curve(
            shape, choice,
            groups=np.linspace(-psycv_range, psycv_range, num=20))
        # keep summary data of each file for plotting
        df_basic.loc[i] = {'rt_mean': shape.rt.mean(),
                           'rt_sem': shape.rt.sem(),
                           'cr': choice.correct.mean(),
                           'fr': finish_rate,
                           'choice_prop': (choice.left.mean() + 1) / 2,
                           'cr_log': choice.correct_logLR.mean(),
                           'label': file}
        df_logRT.loc[i] = {'right_rt_log': right_rt_log['mean'].values,
                           'left_rt_log': left_rt_log['mean'].values,
                           'rt': shape.rt,
                           'choice': choice.left}
        df_psycv.loc[i] = {'psy_curve': psy_curve,
                           'cr': np.round(choice.correct.mean(), 3),
                           'fr': np.round(finish_rate, 3),
                           'fitting_x0': prpt[0],
                           'fitting_k': prpt[1]}
    return df_basic, df_logRT, df_psycv
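# Usage sketch (hypothetical paths; psycv_range is a module-level constant):
#
#   df_basic, df_logRT, df_psycv = bhv_extract(sorted(glob.glob('./log/rt_task/*.hdf5')))
#   print(df_basic[['label', 'cr', 'fr']])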
def shape_extract(path_files):
    # 'bais' follows the key name returned by temporal_weight/subject_weight
    # (the bias/intercept term of each regression)
    df_temporal = pd.DataFrame([], columns=['label', 'bais', 'coef'])
    df_subweight = pd.DataFrame([], columns=['label', 'bais', 'coef'])
    for i, file in enumerate(path_files):
        ## load files
        paod, trial_briefs = load(file)
        _, choice, shape, _, _ = get_bhvinfo(paod, trial_briefs)
        # regress choice on the temporal order and on the subjective weight of the shapes
        reg_temporal = temporal_weight(shape, choice)
        reg_subweight = subject_weight(shape, choice)
        df_temporal.loc[i] = {'label': file,
                              'bais': reg_temporal['bais'],
                              'coef': reg_temporal['coef']}
        df_subweight.loc[i] = {'label': file,
                               'bais': reg_subweight['bais'],
                               'coef': reg_subweight['coef']}
    return df_temporal, df_subweight
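# Usage sketch (hypothetical paths): the temporal and subjective weights can be
# averaged across model files before plotting.
#
#   df_temporal, df_subweight = shape_extract(sorted(glob.glob('./log/rt_task/*.hdf5')))
#   mean_coef = np.mean(np.vstack(df_temporal['coef'].values), axis=0)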
def data_extract(file_paths):
    """
    Extract choices and hidden-layer responses for the sure-target task.
    """
    df_detail = []
    df_summary = pd.DataFrame([], columns=['choice'])
    sign_neurons = {'pos_choice': [], 'neg_choice': []}
    n_resp_all = []
    for i, file in enumerate(file_paths):
        paod, trial_briefs = load(file)
        trials = get_sureinfo(paod, trial_briefs)
        # trial-by-trial records of this file
        files_pd = pd.DataFrame(
            [trials["choice"], trials["reward"], trials["randots_dur"],
             trials["sure_trial"], trials["coherence"]],
            index=['choice', 'reward', 'randots_dur', 'sure_trial', 'coherence'])
        files_pd = files_pd.T
        df_detail.append(files_pd)
        # count how often each choice (coded 1, 2, 3) is made at each coherence level
        choice = []
        for ii in coherence_list:
            cho_coh = trials["choice"][trials["coherence"] == ii]
            choice.append([ii,
                           np.sum(cho_coh == 1),
                           np.sum(cho_coh == 2),
                           np.sum(cho_coh == 3)])
        choice = np.array(choice)
        df_summary.loc[i] = {'choice': choice}
        # response of neurons in the hidden layer
        n_resp = get_hidden_resp_sure(paod, trial_briefs)
        n_resp_all.append(n_resp)
        # test the choice selectivity of each neuron
        choice_sel = selectivity_test(n_resp, trials)
        sign_neurons['pos_choice'].append(np.array(choice_sel['pos']))
        sign_neurons['neg_choice'].append(np.array(choice_sel['neg']))
    return df_detail, df_summary, n_resp_all, sign_neurons
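# Usage sketch (hypothetical paths; coherence_list is a module-level constant):
#
#   df_detail, df_summary, n_resp_all, sign_neurons = data_extract(
#       sorted(glob.glob('./log/sure_task/*.hdf5')))
#   choice_counts = df_summary['choice'].iloc[0]  # rows: coherence; columns: [coh, counts of choices 1/2/3]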
def data_extract(file_paths):
    """
    Extract behavior, model fits, and unit responses for the two-step task.
    """
    df_details = pd.DataFrame([], columns=['state', 'choice', 'reward', 'block',
                                           'Q_diff', 'R_diff', 'Q_diff_pred'])
    df_summary = pd.DataFrame([], columns=['stay_num', 'type_num', 'cr', 'history',
                                           'factors', 'fit_RL', 'fit_lm', 'fit_sm'])
    for i, file in enumerate(file_paths):
        paod, trial_briefs = load(file)
        # load the choice information, including incomplete trials
        trials = get_stinfo(paod, trial_briefs, completedOnly=False)
        # the correct rate (per block)
        cr = cr_block(trials)
        # load the choice information, only complete trials are included
        trials = get_stinfo(paod, trial_briefs)
        # the stay probability after each trial type
        stay_num, type_num, transition = stayprob(trials["choice"].values - 1,
                                                  trials["state"].values,
                                                  trials["reward"])
        # how the choice history affects the current choice
        coef_hist = hist_effect(transition, trials["reward"], trials["choice"],
                                hist_len=5)
        # how several task factors affect choice
        coef_factors = factors_eff(transition, trials["reward"],
                                   trials["choice"], trials["block"])
        ## fit the behavior with a reinforcement learning model
        bounds = [[0, 1], [0, 1], [0, 1], [0, 1], [0, 3], [0, 3]]
        params = np.array([0.7, 0.5, 0.8, 0.5, 0.1, 0.1])
        cons = []  # express the bounds as inequality constraints
        for factor in range(len(bounds)):
            # bind the current value of factor via a default argument; a plain
            # closure would make every constraint use the last value of factor
            l = {'type': 'ineq',
                 'fun': lambda x, f=factor: x[f] - bounds[f][0]}
            u = {'type': 'ineq',
                 'fun': lambda x, f=factor: bounds[f][1] - x[f]}
            cons.append(l)
            cons.append(u)
        # negative log-likelihood of the model-free/model-based hybrid model (cost_mfmb)
        nll_wrapper = lambda parameters: cost_mfmb(parameters,
                                                   trials["choice"] - 1,
                                                   trials["block"],
                                                   trials["state"] - 1,
                                                   trials["reward"])
        res = minimize(nll_wrapper, x0=params, method='SLSQP',
                       bounds=bounds, constraints=cons)
        # estimate the Q values based on the fitted parameters
        _, Q_value, _ = cost_mfmb(res.x, trials["choice"].values - 1,
                                  trials["block"].values,
                                  trials["state"].values - 1,
                                  trials["reward"].values, ReturnQ=True)
        # the responses of units in the hidden layer and the output layer
        resp_hidden, resp_output = get_resp_ts(paod, trial_briefs)
        # the difference in response of the two action units before the choice
        b4_cho, left_u, right_u = 4, 6, 7
        Q_diff = Q_value[:, 0, 0] - Q_value[:, 0, 1]
        R_diff = resp_output[1:, b4_cho, left_u] - resp_output[1:, b4_cho, right_u]
        # fit with a linear model
        lmodel = Model(lin)
        result_l = lmodel.fit(Q_diff, x=R_diff, k=1, b=0)
        # fit with a sigmoid model
        smodel = Model(sigmoid)
        result_s = smodel.fit(Q_diff, x=R_diff, a=0, b=1, c=1, d=0)
        # predict the Q-value difference from the hidden-layer responses
        reg = LinearRegression().fit(resp_hidden[1:, b4_cho, :], Q_diff)
        Q_diff_pred = reg.predict(resp_hidden[1:, b4_cho, :])
        df_summary.loc[i] = {'stay_num': stay_num,
                             'type_num': type_num,
                             'history': coef_hist,
                             'factors': coef_factors,
                             'fit_RL': res.x,  # fitted RL parameters
                             'fit_lm': result_l,
                             'fit_sm': result_s,
                             'cr': cr}
        df_details.loc[i] = {'block': trials["block"].values,
                             'state': trials["state"].values,
                             'choice': trials["choice"].values,
                             'reward': trials["reward"].values,
                             'R_diff': R_diff,
                             'Q_diff': Q_diff,
                             'Q_diff_pred': Q_diff_pred}
    return df_details, df_summary
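# Usage sketch (hypothetical paths): df_summary holds one row of model fits per
# file, df_details the trial-by-trial variables used for those fits.
#
#   df_details, df_summary = data_extract(sorted(glob.glob('./log/twostep_task/*.hdf5')))
#   fitted_params = np.vstack(df_summary['fit_RL'].values)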