# Collect regression results for the dexterity-prediction pipeline and, for
# every (score, classifier, task) combination, render a node-by-node heatmap
# of the averaged score, one FacetGrid column per frequency band.
# NOTE: `loaded_keywords`, `scores`, `plot_heatmap` and the pyitab helpers are
# defined elsewhere in this file.
pipeline = 'dexterity+prediction+network'
path = f"/media/robbis/DATA/meg/viviana-hcp/derivatives/pipeline-{pipeline}/"
dataframe = get_results_bids(path,
                             field_list=loaded_keywords,
                             pipeline=[pipeline],
                             scores=scores)

for score in scores:
    score_key = 'score_' + score

    for clf in np.unique(dataframe['estimator__clf']):
        for t in np.unique(dataframe['task']):
            # Restrict to one classifier / score / task combination.
            selection = filter_dataframe(dataframe,
                                         estimator__clf=[clf],
                                         fx=[score],
                                         task=[t])
            # Average the per-fold score arrays within each node-pair cell.
            selection = apply_function(selection,
                                       keys=['nodes_1', 'nodes_2', 'band',
                                             'estimator__clf', 'fx'],
                                       attr=score_key,
                                       fx=lambda x: np.vstack(x).mean())

            grid = sns.FacetGrid(selection, col="band")
            grid.map(plot_heatmap, "nodes_1", "nodes_2", score_key)

            # NOTE(review): the filename only encodes the task, so successive
            # score/clf iterations overwrite the same figure — confirm intended.
            fname = ("/home/robbis/Dropbox/PhD/experiments/blp-hcp-viviana/"
                     f"figure-regression-{t}.png")
            grid.savefig(fname, dpi=150)
# Load trial-wise decoding accuracies for the blp-dexterity pipeline, plot a
# per-band/per-task barplot of accuracy by network pair, and define the
# heatmap helper used by the FacetGrid sections of this script.
from pyitab.results import get_results_bids, filter_dataframe, apply_function

# BUGFIX: np and pd are used throughout this file but were never imported.
import numpy as np
import pandas as pd
import seaborn as sns
import h5py

path = "/media/robbis/DATA/meg/viviana-hcp/derivatives/pipeline-blp-dexterity-trial"
dataframe = get_results_bids(path,
                             field_list=['sample_slicer',
                                         'estimator__clf',
                                         'feature_slicer'],
                             pipeline=['raw'],
                             )

# Combined label for the node pair, e.g. "DMN+FPN" — used as the barplot x-axis.
dataframe['networks'] = dataframe['nodes_1'] + '+' + dataframe['nodes_2']

# Mean accuracy per (node pair, band, task, classifier) cell.
average_df = apply_function(dataframe,
                            keys=['nodes_1', 'nodes_2', 'band',
                                  'task', 'estimator__clf'],
                            attr='score_accuracy',
                            fx=lambda x: np.vstack(x).mean())

grid = sns.FacetGrid(dataframe, col="band", row="task",
                     palette="tab20c", height=1.5)
grid.map(sns.barplot, "networks", 'score_accuracy')


def plot_heatmap(nodes1, nodes2, accuracy, **kwargs):
    """Draw a symmetric node-by-node heatmap of scores.

    Parameters
    ----------
    nodes1, nodes2 : sequences of labels
        Row/column node names for each observation.
    accuracy : sequence of float
        Score value for each (nodes1, nodes2) pair.
    **kwargs
        Accepted for FacetGrid.map compatibility; not used.
    """
    df = pd.DataFrame(dict(n1=nodes1, n2=nodes2, a=accuracy))
    # BUGFIX: positional arguments to DataFrame.pivot were deprecated in
    # pandas 1.1 and removed in pandas 2.0; the keyword form works on both.
    pdf = df.pivot(index="n1", columns="n2", values="a")
    # Only one triangle of the matrix is populated: mirror each NaN cell
    # from its transposed position to make the heatmap symmetric.
    nz = np.nonzero(np.isnan(pdf.values))
    pdf.values[nz] = pdf.values[nz[::-1]]
    sns.heatmap(pdf, annot=True, cmap="RdBu", fmt=".2f", center=.5, vmax=.8)
# NOTE(review): this chunk starts mid-call — the line below closes a plotting
# call (presumably a seaborn call taking marker/palette kwargs) that begins
# outside the visible portion of the file.
markersize=10, markeredgecolor=None, palette='Dark2')
axes = grid.axes
# `score`, `dataframe` and `dataframe_perm` are defined earlier in the file
# (outside this view); `score_key` selects the per-score result column.
score_key = 'score_' + score
# Compare true scores against permutation scores for the Lasso model only.
for clf in ['Lasso(alpha=0.5)']:
    # True (unpermuted) results for this score/classifier.
    score_df = filter_dataframe(dataframe, fx=[score], estimator__clf=[clf])
    # Mean score per (band, task, target attribute), ignoring NaN folds.
    average_df_score = apply_function(
        score_df,
        keys=['band', 'task', 'target_transformer__attr'],
        attr=score_key,
        fx=lambda x: np.nanmean(np.vstack(x)))
    # Permutation (null-distribution) results for the same selection.
    perm_df = filter_dataframe(dataframe_perm, fx=[score], estimator__clf=[clf])
    # Mean score per permutation as well, so the null distribution is kept
    # per-permutation rather than collapsed.
    average_df = apply_function(perm_df, keys=[
        'band', 'task', 'perm', 'estimator__clf', 'target_transformer__attr'
    ],
        attr=score_key,
        fx=lambda x: np.nanmean(np.vstack(x)))