def comparison(df_, results, experiment):
    """Compare the all-feature decoder against each single-feature decoder
    within every time window, accumulating permutation p values in `results`."""
    for window, df_sub in df_.groupby('window'):
        df_sub_all = df_sub[df_sub['feature'] == 'all']
        df_sub_one = df_sub[df_sub['feature'] != 'all']
        for name in pd.unique(df_sub_one['feature']):
            a = df_sub_all['score'].values
            temp = df_sub_one[df_sub_one['feature'] == name]
            b = temp['score'].values
            ps = resample_ttest_2sample(a, b,
                                        n_ps=500,
                                        n_permutation=int(1e4),
                                        one_tail=True)
            results['feature'].append(name)
            results['ps_mean'].append(ps.mean())
            results['ps_std'].append(ps.std())
            results['window'].append(window)
            results['experiment'].append(experiment)
    return results
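# Illustrative usage only: a minimal driver for `comparison` on synthetic data.
# It assumes a long-format score table with 'experiment', 'window', 'feature'
# and 'score' columns; the frame, feature names, and experiment label below are
# placeholders, not the study's data. Only the result keys follow from the
# function body above.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(12345)
    df_scores = pd.DataFrame(dict(
        experiment=['e1'] * 40,
        window=np.repeat([1, 2], 20),
        feature=np.tile(['all', 'awareness', 'confidence', 'correct'], 10),
        score=rng.uniform(0.4, 0.8, size=40),
    ))
    results = dict(feature=[], ps_mean=[], ps_std=[], window=[], experiment=[])
    for experiment, df_exp in df_scores.groupby('experiment'):
        results = comparison(df_exp, results, experiment)
    print(pd.DataFrame(results))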
results = dict(
    greater = [],
    lesser = [],
    ps_mean = [],
    ps_std = [],
    model = [],)
df = pos[(pos['window'] > 0) & (pos['window'] < 4)]
for model, df_sub in df.groupby('model'):
    pairs = [['awareness', 'confidence'],
             ['awareness', 'correct'],
             ['confidence', 'correct']]
    for pair in pairs:
        a = df_sub[pair[0]].values
        b = df_sub[pair[1]].values
        # orient each pair so that `a` is the member with the larger mean
        if a.mean() < b.mean():
            pair = [pair[1], pair[0]]
            a = df_sub[pair[0]].values
            b = df_sub[pair[1]].values
        ps = resample_ttest_2sample(a, b, n_ps=500, n_permutation=10000)
        results['greater'].append(pair[0])
        results['lesser'].append(pair[1])
        results['ps_mean'].append(ps.mean())
        results['ps_std'].append(ps.std())
        results['model'].append(model)
results = pd.DataFrame(results)
temp = []
for model, df_sub in results.groupby('model'):
    # Bonferroni-correct the p values within each model
    idx_sort = np.argsort(df_sub['ps_mean'].values)
    df_sub = df_sub.iloc[idx_sort, :]
    converter = MCPConverter(df_sub['ps_mean'].values)
    d = converter.adjust_many()
    df_sub['p_corrected'] = d['bonferroni'].values
    temp.append(df_sub)
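# Assumed continuation (cut off in the excerpt above): the per-model corrected
# frames collected in `temp` are stacked back into a single table, mirroring how
# the `temp` lists are used in the other comparison blocks of these scripts.
results = pd.concat(temp).reset_index(drop=True)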
'metadprime':"Meta-d'"}) results = dict( dprime_type = [], experiment = [], ps_mean = [], ps_std = [], high_aware = [], low_aware = [], ) for (dpr,exp),df_sub in df_plot.groupby(['dprime_type','Experiment']): df_sub df_high_awe = df_sub[df_sub['awareness'] == 'Aware'].sort_values(['sub','variable','Rating',]) df_low_awe = df_sub[df_sub['awareness'] == 'Unaware'].sort_values(['sub','variable','Rating',]) ps = utils.resample_ttest_2sample(df_high_awe['value'].values, df_low_awe['value'].values, n_ps = 100, n_permutation = 5000) results['dprime_type'].append(dpr) results['experiment'].append(exp) results['ps_mean'].append(ps.mean()) results['ps_std'].append(ps.std()) results['high_aware'].append(df_high_awe['value'].values.mean()) results['low_aware'].append(df_low_awe['value'].values.mean()) results = pd.DataFrame(results) results = results.sort_values('ps_mean') converter = utils.MCPConverter(pvals = results['ps_mean'].values) d = converter.adjust_many() results['p_corrected'] = d['bonferroni'].values results['stars'] = results['p_corrected'].apply(utils.stars)
# ... ]).mean().reset_index()  # tail of a groupby-average call truncated above this excerpt
######################### compared against chance level ###############
results = dict(
    model=[],
    window=[],
    ps_mean=[],
    ps_std=[],
)
for ((model, window), df_sub), ((_, _), df_sub_chance) in zip(
        df.groupby(['model', 'window']),
        df_chance.groupby(['model', 'window'])):
    # align the decoding scores with their empirical chance scores by subject
    df_sub = df_sub.sort_values('sub')
    df_sub_chance = df_sub_chance.sort_values('sub')
    ps = resample_ttest_2sample(df_sub['score'].values,
                                df_sub_chance['score'].values,
                                one_tail=False,
                                n_ps=1000,
                                n_permutation=10000)
    results['model'].append(model)
    results['window'].append(window)
    results['ps_mean'].append(ps.mean())
    results['ps_std'].append(ps.std())
results = pd.DataFrame(results)
temp = []
for model, df_sub in results.groupby('model'):
    # Bonferroni-correct the p values within each model
    idx_sort = np.argsort(df_sub['ps_mean'].values)
    df_sub = df_sub.iloc[idx_sort, :]
    convert = MCPConverter(pvals=df_sub['ps_mean'].values)
    df_pvals = convert.adjust_many()
    df_sub['ps_corrected'] = df_pvals['bonferroni'].values
# 3-2-1 post hoc
classes = pd.unique(df_plot['Transitions'])
pairs = combinations(classes, 2)
results = dict(
    pair1=[],
    pair2=[],
    ps_mean=[],
    ps_std=[],
    difference=[],
)
for (a, b) in list(pairs):
    c1 = df_plot[df_plot['Transitions'] == a]
    c2 = df_plot[df_plot['Transitions'] == b]
    ps = utils.resample_ttest_2sample(c1['Transition_Probability'].values,
                                      c2['Transition_Probability'].values,
                                      n_ps=200,
                                      n_permutation=5000,
                                      one_tail=False,
                                      match_sample_size=True)
    results['pair1'].append(a)
    results['pair2'].append(b)
    results['ps_mean'].append(ps.mean())
    results['ps_std'].append(ps.std())
    results['difference'].append(
        np.mean(
            np.abs(c1['Transition_Probability'].values -
                   c2['Transition_Probability'].values)))
results = pd.DataFrame(results)
temp = []
for ii, row in df_plot.iterrows():
    a, b = row['Transitions'].split('-')
# ... bbox_inches='tight')  # tail of a figure-saving call truncated above this excerpt
results = dict(
    experiment=[],
    model=[],
    window=[],
    ps_mean=[],
    ps_std=[],
)
for attri, df_sub in df.groupby(['experiment', 'model', 'window']):
    # decoding scores vs. their permutation ("chance") scores
    df_sub_decode = df_sub[df_sub['chance'] == False]
    df_sub_chance = df_sub[df_sub['chance'] == True]
    ps = utils.resample_ttest_2sample(
        df_sub_decode['score'].values,
        df_sub_chance['score'].values,
        n_ps=100,
        n_permutation=5000,
        one_tail=True,
    )
    results['experiment'].append(attri[0])
    results['model'].append(attri[1])
    results['window'].append(attri[2])
    results['ps_mean'].append(np.mean(ps))
    results['ps_std'].append(np.std(ps))
results = pd.DataFrame(results)
corrected_results = []
for attri, df_sub in results.groupby(['experiment', 'model']):
    pvals = df_sub['ps_mean']
    idx_sort = np.argsort(pvals)
    df_sub = df_sub.iloc[idx_sort, :]
df_condition = dict(
    roi = [],
    condition = [],
    ps_mean = [],
    ps_std = [],
    diff_mean = [],
    diff_std = [],)
for (roi, condition), df_sub in df.groupby(['roi', 'condition']):
    df_sub_img = df_sub[df_sub['Model'] == 'Image2vec'].groupby(['sub']).mean().reset_index()
    df_sub_word = df_sub[df_sub['Model'] == 'Word2vec'].groupby(['sub']).mean().reset_index()
    a = df_sub_img['mean_variance'].values
    b = df_sub_word['mean_variance'].values
    ps = utils.resample_ttest_2sample(a, b,
                                      n_ps = 100,
                                      n_permutation = int(5e4),
                                      one_tail = False,
                                      match_sample_size = True)
    df_condition['roi'].append(roi)
    df_condition['condition'].append(condition)
    df_condition['ps_mean'].append(ps.mean())
    df_condition['ps_std'].append(ps.std())
    df_condition['diff_mean'].append(np.mean(a - b))
    df_condition['diff_std'].append(np.std(a - b))
df_condition = pd.DataFrame(df_condition)
temp = []
for condition, df_sub in df_condition.groupby(['condition']):
    df_sub = df_sub.sort_values(['ps_mean'])
    converter = utils.MCPConverter(pvals = df_sub['ps_mean'].values)
    d = converter.adjust_many()
current_trial = df_[['awareness', 'correctness']].shift(1).dropna().values
previous_trial = df_['awareness'].shift(-1).dropna().values
awareness_trials = np.vstack([current_trial[:, 0], previous_trial]).T
correctness = current_trial[:, 1]
for trial, correct in zip(awareness_trials, correctness):
    # label each trial by its awareness transition, e.g. "2-1"
    trial_type = '{}-{}'.format(int(trial[0]), int(trial[1]))
    results_sub[trial_type]['awareness'].append(trial)
    results_sub[trial_type]['correctness'].append(correct)
for trial_type in results.keys():
    # proportion correct within each transition type
    results[trial_type].append(
        float(np.sum(results_sub[trial_type]['correctness'])) /
        len(results_sub[trial_type]['correctness']))
results = pd.DataFrame(results)
ax = axes[0][0]
ps_1 = utils.resample_ttest_2sample(results['2-1'].values,
                                    results['1-1'].values,
                                    n_ps=100,
                                    n_permutation=10000,
                                    one_tail=False,
                                    match_sample_size=True)
resample_21 = utils.bootstrap_resample(results['2-1'].values, n=None)
resample_11 = utils.bootstrap_resample(results['1-1'].values, n=None)
ax.hist(results['2-1'].values, label='2-1', color='red', density=True, alpha=alpha)
ax.hist(results['1-1'].values, label='1-1', color='blue', density=True, alpha=alpha)
ax.legend()
title = 'experiment {}, p = {:.5}'.format(experiment, ps_1.mean())
ax.set(ylabel='density', xlabel='Accuracy', title=title)
ax = axes[0][1]
ps_2 = utils.resample_ttest_2sample(results['2-2'].values,
                                    results['1-2'].values,
                                    n_ps=100,
                                    n_permutation=10000,
                                    one_tail=False,
                                    match_sample_size=True)  # assumed closing arguments, mirroring ps_1; the excerpt was cut off here
results = dict(
    model=[],
    experiment=[],
    window=[],
    ps21=[],
    ps32=[],
    ps42=[],
)
for attri, df_sub in df1.groupby(['model', 'experiment', 'window']):
    m1 = df_sub[df_sub['variable'] == 'model1']
    m2 = df_sub[df_sub['variable'] == 'model2']
    m3 = df_sub[df_sub['variable'] == 'model3']
    m4 = df_sub[df_sub['variable'] == 'model4']
    ps21 = utils.resample_ttest_2sample(m2['value'].values,
                                        m1['value'].values,
                                        n_ps=200,
                                        n_permutation=5000,
                                        one_tail=True)
    ps32 = utils.resample_ttest_2sample(m3['value'].values,
                                        m2['value'].values,
                                        n_ps=200,
                                        n_permutation=5000,
                                        one_tail=True)
    ps42 = utils.resample_ttest_2sample(m4['value'].values,
                                        m2['value'].values,
                                        n_ps=200,
                                        n_permutation=5000)
    for var_name, name in zip(['model', 'experiment', 'window'], attri):
        results[var_name].append(name)
    results['ps21'].append(ps21.mean())
    results['ps32'].append(ps32.mean())
results = dict(
    experiment=[],
    window=[],
    feature_name=[],
    target_name=[],
    ps_mean=[],
    ps_std=[],
)
for (window, formula, exp), df_sub in df.groupby(['window', 'formula', 'experiment']):
    exp = exp.lower()
    # decoding scores vs. their permutation ("chance") scores
    scores = df_sub[df_sub['chance'] == False]
    chance = df_sub[df_sub['chance'] == True]
    pvals = utils.resample_ttest_2sample(scores['score'].values,
                                         chance['score'].values,
                                         n_ps=100,
                                         n_permutation=int(1e4),
                                         one_tail=False)
    results['experiment'].append(exp)
    results['window'].append(window)
    results['feature_name'].append(formula.split('_')[0])
    results['target_name'].append(formula.split('_')[1])
    results['ps_mean'].append(pvals.mean())
    results['ps_std'].append(pvals.std())
results = pd.DataFrame(results)
corrected = []
for exp, df_sub in results.groupby('experiment'):
    df_sub = df_sub.sort_values(['ps_mean'])
    converter = utils.MCPConverter(pvals=df_sub['ps_mean'].values)