# Build one permutation-test record per combination of option values.
# `options` maps option name -> iterable of candidate values; the Cartesian
# product enumerates every condition to be tested.
keys, values = options.keys(), options.values()
opts = [dict(zip(keys, items)) for items in product(*values)]

# Permutation dataframe
p_values = []
for item in opts:
    cond_dict = dict(item)                    # plain copy kept for the output record
    item = {k: [v] for k, v in item.items()}  # filter_dataframe expects list-valued selectors

    df_ = dataframe.copy()
    data_ = table.copy()

    # Null distribution: every permutation matching this condition.
    data_ = filter_dataframe(data_, item)

    # True (unpermuted) scores: restrict to permutation == 0.
    item.update({'permutation': [0]})
    df_ = filter_dataframe(df_, item)

    df_avg = np.mean(df_['accuracy'].values)

    # One-sided permutation p-value with the +1 correction.
    # NOTE(review): the denominator hard-codes 100 permutations — consider
    # (len(data_) + 1) so it tracks the actual permutation count; confirm
    # against the analysis configuration before changing the numbers.
    p = (np.count_nonzero(data_['accuracy'].values > df_avg) + 1) / 100.

    cond_dict['accuracy_perm'] = np.mean(data_['accuracy'].values)
    cond_dict['accuracy_true'] = np.mean(df_['accuracy'].values)
    cond_dict['p_value'] = p

    p_values.append(cond_dict)

p_df = pd.DataFrame(p_values)
# Map each subject label to its workstation folder, write that subject's
# event rows into the folder, then relabel the event table with folder names.
subjects = np.unique(event_file['name'].values)
path_wk = "/home/robbis/mount/meg_workstation/Carlo_MDM/%s/RESIDUALS_MVPA/beta_attributes_full.txt"
folders = os.listdir("/home/robbis/mount/meg_workstation/Carlo_MDM/")

name_dict = {}
for subj in subjects:
    # First folder whose name contains the subject label.
    subdir = folders[np.nonzero([s.find(subj) != -1 for s in folders])[0][0]]
    name_dict[subj] = subdir

    # Space-separated attribute file expected by the MVPA pipeline.
    df = filter_dataframe(event_file, name=[subj])
    df.to_csv(path_wk % (subdir), index=False, sep=" ")

# Relabel events with folder names. (The original rebuilt name_dict a second
# time with an identical loop; that redundant recomputation is removed and the
# mapping built above is reused directly.)
event_file['name'] = [name_dict[name] for name in event_file['name'].values]
# Load MVPA across-subject results and average lateral-IPS accuracies.
from pyitab.analysis.results import get_results, df_fx_over_keys, \
    get_permutation_values, filter_dataframe
import statsmodels.api as sm

# Collect every result stored under the "across" analysis directory,
# retaining the sample_slicer field of each configuration.
dataframe = get_results(
    '/media/robbis/DATA/fmri/carlo_ofp/0_results/review/',
    dir_id="across",
    field_list=['sample_slicer'],
)

# Unpermuted scores for the lateral IPS ROI only.
df_lateral_ips = filter_dataframe(dataframe, permutation=[0], roi=['lateral_ips'])

# Grouping keys: across-subject averages collapse over folds, within-subject
# averages collapse over subjects.
key_across = ['roi_value', 'evidence', 'fold']
key_within = ['roi_value', 'evidence', 'subject']

# Mean accuracy per (roi_value, evidence, fold) cell.
df_lips_avg = df_fx_over_keys(df_lateral_ips,
                              keys=key_across,
                              attr=['score_accuracy'],
                              fx=np.mean)

key_value = 'accuracy'
results = []
dataframe_test = df_lips_avg
#dataframe_test = df_old

# One statistical test per ROI sub-region (loop body continues past this chunk).
for roi in np.unique(dataframe_test['roi_value'].values):
# Plot time-resolved decoding (temporal generalization) matrices for every
# (condition, session) pair found in average_df.
mat = h5py.File(fname, 'r')   # explicit read-only: pre-h5py-3.0 default was append mode
t = mat['timevec'][:].T[0]    # time axis, stored as a column vector in the .mat file

# Tick positions on the time axis. (A stale candidate list,
# [0, 50, 100, 150, 200], was immediately overwritten and has been removed.)
idx = [0, 20, 40, 60, 80, 100]

tt = t >= 0                   # mask for post-stimulus samples
xticklabels = np.array(["{:5.1f}".format(i) for i in t])

conditions = np.unique(average_df['targets'])
sessions = np.unique(average_df['ses'])
limits = (0.4, .95)           # shared color scale across all panels

occurences = list(itertools.product(conditions, sessions))
for occurence in occurences:
    df = filter_dataframe(average_df, targets=[occurence[0]], ses=[occurence[1]])
    fig, axes = pl.subplots(2, 4, sharex=True)

    for r, (_, row) in enumerate(df.iterrows()):
        # Generalization matrix (train time x test time) for this estimator.
        im = axes[0, r].imshow(row['score_score'],
                               origin='lower',
                               cmap=pl.cm.magma,
                               vmin=limits[0],
                               vmax=limits[1]
                               )
        # Build a short panel label from the estimator repr; the kernel
        # substring is only extracted for the later (SVM) columns.
        clf = row['estimator__clf']
        kernel = ""
        if r > 2:
            kernel = clf[clf.find("kernel='"):clf.find(", p")]
        clf = clf[:clf.find("(")] + " " + kernel
        axes[0, r].set_title("%s | %s" % (clf, occurence[0].replace("+", "-")))
# Label each result row with a short connectivity "type" derived from its
# (space, atlas) pair.
mapping_type = {
    'source-subject': 'seed',
    'source-aal': 'aal',
    'sensor-subject': 'sensor'
}
dataframe['type'] = [mapping_type["%s-%s" % (d[1]['space'], d[1]['atlas'])] \
                     for d in dataframe.iterrows()]

# Stack per-fold score maps, average ignoring NaNs, then flatten for plotting.
fx_mean = lambda x: np.nanmean(np.dstack(x), axis=2).flatten()

i = 0
# One facet grid per (type, target attribute) combination, restricted to the
# four classifiers of interest.
for t in np.unique(dataframe['type']):
    for y in np.unique(dataframe['target_transformer__attr']):
        df = filter_dataframe(dataframe,
                              **{'type': [t],
                                 'target_transformer__attr': [y],
                                 'clf': ['Lasso', '6-Layer-NN', 'NL-SVR-10', 'L-SVR-10']
                                 })
        for score in ['r2']:
            # Average the chosen score over repetitions per
            # (sub, band, k, clf, attr) cell.
            average_df = apply_function(df,
                                        ['sub', 'band', 'k', 'clf', 'target_transformer__attr'],
                                        attr='score_%s' % (score),
                                        fx=fx_mean)
            # NOTE(review): this FacetGrid call is truncated here — its
            # remaining arguments continue beyond this chunk.
            grid = sns.FacetGrid(average_df, col="band", row="clf", hue="sub",
                                 palette="tab20c",