best_k = []
metrics = []
for options in combinations:
    df_ = filter_dataframe(df, **options)
    options = {k: v[0] for k, v in options.items()}
    df_metric = calculate_metrics(df_, fixed_variables=options)
    df_metric = df_metric.sort_values('k')
    df_k = find_best_k(df_metric)
    metrics.append(df_metric)
    best_k.append(df_k)

df_metrics = pd.concat(metrics)
df_guess = pd.concat(best_k)

# 'hit' flags guesses equal to the reference value (6); 'abshit' is the absolute error.
df_guess['hit'] = np.int_(df_guess['guess'].values == 6)
df_guess['abshit'] = np.abs(df_guess['guess'].values - 6)

# Mean absolute error and mean hit rate per (name, algorithm).
df_abshit_mean = apply_function(df_guess, keys=['name', 'algorithm'], attr='abshit', fx=np.mean)
df_great_mean = apply_function(df_guess, keys=['name', 'algorithm'], attr='hit', fx=np.mean)

# Plot of metrics
df_mean = apply_function(df_guess, keys=['name'], attr='hit', fx=np.mean)
arg_sort = np.argsort(df_mean['hit'].values)[::-1]

for alg in np.unique(df_great_mean['algorithm']):
    df_a = filter_dataframe(df_great_mean, algorithm=[alg])
    values = df_a['hit'].values[arg_sort]
    pl.plot(values, '-o')
    pl.xticks(np.arange(len(values)), df_a['name'].values[arg_sort])
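# Illustrative sketch (assumption): apply_function is treated here as a
# groupby-style aggregator over the given keys. The toy frame below reproduces
# the 'hit' / 'abshit' scoring with plain pandas so it can be checked
# independently of pyitab; names and values are invented.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'name': ['m1', 'm1', 'm2', 'm2'],
    'algorithm': ['kmeans', 'kmeans', 'gmm', 'gmm'],
    'guess': [6, 5, 6, 6],
})
toy['hit'] = np.int_(toy['guess'].values == 6)     # 1 when the guessed k equals the reference (6)
toy['abshit'] = np.abs(toy['guess'].values - 6)    # absolute error of the guess
toy_mean = toy.groupby(['name', 'algorithm'], as_index=False)[['hit', 'abshit']].mean()
print(toy_mean)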
df['phasez'] = scaler(df['phases'], b)
sns.scatterplot(data=df, x='phasez', y='mep', edgecolor=None, ax=ax3,
                color='teal', alpha=0.5)

ax4 = axes[1, 1]
for b in range(4, 11):
    # Bin the phases, use each bin's midpoint as x, and plot the median MEP per bin.
    bins = pd.cut(df['phases'], bins=b)
    df['bin'] = [v.mid for v in bins]
    dfm = apply_function(df, keys=['bin'], attr='mep', fx=lambda x: np.median(x))

    c = (b - 4) / 11
    ax4.plot(dfm['bin'], dfm['mep'], '-o', color=cmap(c))
    if b == 8:
        ax4.plot(dfm['bin'], dfm['mep'], '-o', lw=5, color='red')
        ax3.plot(scaler(dfm['bin'], b), dfm['mep'], '-o', lw=5, color='red')
    if b == 10:
        ax4.plot(dfm['bin'], dfm['mep'], '-o', lw=5, color='black')

ax4.set_ylabel('mep')
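# Small sketch of the binning step above: pd.cut yields Interval objects and
# .mid gives the bin midpoint used as the x coordinate for the binned medians.
# Values are invented, purely illustrative.
import numpy as np
import pandas as pd

phases = pd.Series(np.linspace(0, 2 * np.pi, 50))
intervals = pd.cut(phases, bins=8)
midpoints = np.array([iv.mid for iv in intervals])
print(np.unique(np.round(midpoints, 3)))   # the 8 bin centres covering [0, 2*pi]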
                              'ds.a.prepro',
                              'ds.a.img_pattern',
                              'sample_slicer'])
dataframe = filter_dataframe(dataframe, **{'ds.a.task': ['CONN']})

fig, axes = pl.subplots(1, 1, figsize=(5, 5))
ax = axes
k = 0

target = "0back+2back"
df = filter_dataframe(dataframe, targets=[target])
df = filter_dataframe(df, **{"ds.a.task": ['CONN']})

df_avg = apply_function(df, attr='score_score', keys=['k'], fx=np.mean)
df_std = apply_function(df, attr='score_score', keys=['k'], fx=np.std)

# Keep every 8th grouped row starting at index 10 (presumably one configuration
# out of eight interleaved ones); std / sqrt(25) is the standard error of the
# mean, assuming 25 subjects.
avg = df_avg['score_score'].values[10::8]
std = (df_std['score_score'].values / np.sqrt(25))[10::8]
kk = df_avg['k'].values[10::8]

ax.plot(kk, avg, color='steelblue')
ax.fill_between(kk, avg + std, avg - std, color='steelblue', alpha=0.3)
ax.set_ylim(.45, .75)
ax.set_ylabel('Classification accuracy', fontsize=14)
ax.set_xlabel('k', fontsize=14)
ax.set_title('Multiband classification accuracy', fontsize=14)
ax.hlines(0.5, -2, np.max(df['k'].values) + 2,
          colors='darkgray', linestyles='dashed')
fig.savefig("/home/robbis/Dropbox/PhD/experiments/jaakko/Submission_2020/multiband.svg",
path = '/media/robbis/Seagate_Pt1/data/Viviana2018/meg/derivatives/'
dataframe = get_results_bids(path=path,
                             pipeline="movie+revenge",
                             field_list=['sample_slicer'],
                             result_keys=['features'])

tasks = np.unique(dataframe['targets'].values)
bands = np.unique(dataframe['band'].values)

# Average the stacked score matrices within each group and keep the diagonal
# (one value per frame).
df_diagonal = apply_function(dataframe,
                             keys=['value', 'band', 'targets'],
                             attr='score_score',
                             fx=lambda x: np.diagonal(np.mean(np.dstack(x), axis=2)))

df_exploded = df_diagonal.explode('score_score')

# From here on these names hold counts rather than the unique-value arrays above.
targets = len(np.unique(df_diagonal['targets']))
bands = len(np.unique(df_diagonal['band']))
tasks = targets * bands

n_frames = len(df_exploded) / tasks
frames = np.hstack([np.arange(int(n_frames)) + 1 for _ in range(tasks)])

df_exploded['value'] = np.int_(df_exploded['value'])
df_exploded['frame'] = frames

grid = sns.FacetGrid(df_exploded, row="targets", hue="band", aspect=3, height=2)
grid.map(pl.axhline, y=0.5, ls=":", c=".5")
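# Sketch of the fx used above: stack the per-group score matrices along a third
# axis, average them, and keep the diagonal (the frame-by-frame score). Shapes
# are invented, purely illustrative.
import numpy as np

matrices = [np.random.rand(10, 10) for _ in range(5)]   # e.g. 5 score matrices in a group
stacked = np.dstack(matrices)                           # shape (10, 10, 5)
mean_matrix = np.mean(stacked, axis=2)                  # element-wise average
diagonal = np.diagonal(mean_matrix)                     # one value per frame
print(diagonal.shape)                                   # (10,)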
from pyitab.utils.matrix import copy_matrix, array_to_matrix
from pyitab.results.bids import filter_dataframe, get_results_bids
from pyitab.results.dataframe import apply_function, query_rows
from pyitab.plot.connectivity import plot_connectivity_circle_edited, plot_connectivity_lines

path = "/scratch/work/guidotr1/data/derivatives"
path = "/media/robbis/Seagate_Pt1/data/working_memory/derivatives/aalto/derivatives/"

full_df = get_results_bids(path,
                           pipeline="triton+old",
                           field_list=['estimator__fsel',
                                       'ds.a.task',
                                       'ds.a.prepro',
                                       'ds.a.img_pattern',
                                       'sample_slicer'])

dataframe_accuracy = apply_function(full_df,
                                    keys=['targets', 'band', 'ds.a.task', 'k'],
                                    attr='score_score',
                                    fx=lambda x: np.mean(x))
dataframe_std = apply_function(full_df,
                               keys=['targets', 'band', 'ds.a.task', 'k'],
                               attr='score_score',
                               fx=lambda x: np.std(x))

max_k = query_rows(dataframe_accuracy,
                   keys=['targets', 'band', 'ds.a.task'],
                   attr='score_score',
                   fx=np.max)

#########################################################################

from pyitab.utils.atlas import get_atlas_info
from sklearn.preprocessing import minmax_scale
from pyitab.plot.connectivity import plot_connectivity_lines
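# Assumed behaviour of query_rows above: for each (targets, band, ds.a.task)
# group it keeps the row where the mean accuracy is maximal across k. A plain
# pandas equivalent on an invented frame:
import pandas as pd

acc = pd.DataFrame({
    'targets': ['0back+2back'] * 4,
    'band': ['alpha', 'alpha', 'beta', 'beta'],
    'k': [10, 20, 10, 20],
    'score_score': [0.62, 0.68, 0.55, 0.59],
})
best_k_rows = acc.loc[acc.groupby(['targets', 'band'])['score_score'].idxmax()]
print(best_k_rows)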
path = "/home/robbis/mount/permut1/sherlock/bids/derivatives/analysis-rsa/" #dataframe = get_results_bids(path, get_function=get_values_rsa, analysis=['rsa'], sub=['marcer']) info = get_aal_coords( "/media/robbis/DATA/fmri/templates_AAL/atlas90_mniafni_3mm.nii.gz") subjects = ['marcer', 'matsim', 'simpas'] for s in subjects: dataframe = get_results_bids(path, get_function=get_values_rsa, analysis=['rsa'], subjects=[s]) df = filter_dataframe(dataframe, sub=[s]) df = apply_function(df, keys=['sub'], attr='score_score', fx=lambda x: np.vstack(x)) X = df['score_score'].values[0] dist = 1 - pdist(X, metric='correlation') threshold = dist.mean() + 1.5 * dist.std() plotting.plot_connectome(squareform(dist), info[1:], node_size=squareform(dist).sum(0), edge_threshold=0.6, edge_cmap=pl.cm.magma_r) del dataframe, df, X, dist
ax1.set_yticks(np.arange(1, 1 + np.max(d['Subject'])))
ax1.set_yticklabels(np.unique(d['Subject']))
ax1.set_ylabel("Subject")
ax1.set_title("Click distribution")

ax2 = pl.subplot(grid[3:4, 0], sharex=ax)
sns.distplot(d['VAS sec'], ax=ax2, bins=100, color='#205d89')  # distplot is deprecated in recent seaborn (use histplot/displot)
ax2.set_xlim(-200, 200 + np.max(d['VAS_Corr sec']))
ax1.set_xlim(-200, 200 + np.max(d['VAS_Corr sec']))
#pl.savefig(os.path.join(path, experiment+"_clickdistribution.%s" % (filetype)), dpi=250)
#pl.close()

### Distribution of errors ###
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)

color_rel = '#205d89'
color_abs = '#cf784b'

# Scatter of the raw errors plus their mean per VAS_Corr time point.
ax3 = pl.subplot(grid[:4, 1])
ax3.scatter(d['VAS_Corr sec'], d['DIST sec'], alpha=0.2, marker='.', color=color_rel)
ax3.plot(drel_mean['VAS_Corr sec'], drel_mean["DIST sec"], '-o', c=color_rel, label="Relative")
ax3.scatter(d['VAS_Corr sec'], d['DIST(ABS) sec'], alpha=0.2, marker='.', color=color_abs)
ax3.plot(dabs_mean['VAS_Corr sec'], dabs_mean["DIST(ABS) sec"], '-o', c=color_abs, label="Absolute")
ax3.hlines(0, 0, np.max(d['VAS_Corr sec']), color='black', linestyles="dashed")
legend = pl.legend(loc=3)
                                 pipeline=[pipeline])
    dataframe['y_attr'] = [ast.literal_eval(x)['task'][0]
                           for x in dataframe['kwargs__y_attr'].values]

    # Cast permutation ids to int and keep permutations 1..500, when present.
    if 'perm' in dataframe.keys():
        dataframe['perm'] = np.int_(dataframe['perm'])
        dataframe = filter_dataframe(dataframe, perm=np.arange(500) + 1)

    dataframes.append(dataframe)

kwargs = dict()

for key in ['mse', 'neg_mean_squared_error', 'corr', 'r2']:
    # For each (nodes_1, nodes_2) pair, stack the permutation scores and average along axis 1.
    perm = apply_function(dataframes[1],
                          keys=['nodes_1', 'nodes_2'],
                          attr=key,
                          fx=lambda x: np.vstack(x).mean(1))

    # One-tailed cut-offs from the sorted null values: 25/500 = 0.05, 5/500 = 0.01.
    perm['p095'] = [np.sort(null_dist)[-25] for null_dist in perm[key].values]
    perm['p099'] = [np.sort(null_dist)[-5] for null_dist in perm[key].values]
    perm['p005'] = [np.sort(null_dist)[25] for null_dist in perm[key].values]
    perm['p001'] = [np.sort(null_dist)[5] for null_dist in perm[key].values]

    data = apply_function(dataframes[0],
                          keys=['nodes_1', 'nodes_2', 'y_attr', 'band'],
                          attr=key,
                          fx=lambda x: np.mean(x))
    data[key] = [np.mean(v) for v in data[key].values]

    distribution = np.sort(data[key].values)
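#########################################################################
# Sketch of the permutation cut-offs above: with 500 sorted null values,
# indices -25 / -5 approximate the 95th / 99th percentiles (one-tailed
# p < .05 and p < .01), and indices 25 / 5 give the corresponding lower-tail
# cut-offs. Synthetic null distribution, purely illustrative.
import numpy as np

rng = np.random.default_rng(0)
null_dist = rng.standard_normal(500)     # one null value per permutation
null_sorted = np.sort(null_dist)
upper_p05, upper_p01 = null_sorted[-25], null_sorted[-5]
lower_p05, lower_p01 = null_sorted[25], null_sorted[5]
print(upper_p05, upper_p01, lower_p05, lower_p01)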