Beispiel #1
0
def avg_plot(**kwargs):
    """Plot the per-frame mean of 'score_score'.

    Intended for seaborn's FacetGrid.map_dataframe, which passes the
    facet's data as kwargs['data'].  Also runs a one-sample t-test
    against chance (0.5) per frame; the resulting significance mask is
    computed but not used in the visible code (snippet looks truncated).
    """
    from scipy.stats import ttest_1samp
    data = kwargs['data']

    # Frames are assumed to be numbered 1..n_frames — TODO confirm.
    n_frames = np.max(data['frame'].values)
    frame_list = np.arange(n_frames) + 1

    # Mean score per frame (df_fx_over_keys is a project helper).
    df_avg = df_fx_over_keys(dataframe=data,
                             keys=['frame'],
                             attr='score_score',
                             fx=np.mean)
    test_values = []
    for i in frame_list:
        df_frame = filter_dataframe(data, frame=[i])
        # One-sample t-test of the frame's scores against chance = 0.5.
        t, p = ttest_1samp(df_frame['score_score'].values, 0.5)
        test_values.append([t, p])

    test_values = np.array(test_values)

    # Significant AND above chance.  NOTE(review): 0.01/49 looks like a
    # Bonferroni correction hard-coded for 49 frames — should probably
    # be 0.01 / n_frames; confirm.
    sign_values = np.logical_and(test_values[:, 1] < 0.01 / 49.,
                                 test_values[:, 0] > 0)

    pl.plot(frame_list, df_avg['score_score'], 'o-', color='k')
    """
Beispiel #2
0
from pyitab.results.bids import get_searchlight_results_bids

dataframe = get_searchlight_results_bids(
    '/media/robbis/DATA/fmri/EGG/derivatives/', pipeline=['egg+delay'])

# Plain task: average the per-subject searchlight mean maps.
filtered = filter_dataframe(dataframe, task=['plain'], filetype=['mean'])

command = "3dttest++ -singletonA 0.5 -setB task %s -prefix %s"
setB = ""
avg = []
for i, df in filtered.iterrows():

    #setB += "sub%02d %s'[0]' " % (i+1, df['filename'].values[0])
    img = ni.load(df['filename'])
    # Fix: nibabel images have no `.data` attribute — the array is
    # retrieved with get_data(), as done in the other snippets here.
    avg.append(img.get_data())

# `img` is the last image loaded; its affine is reused for the average.
save_map(path + "average-plain.nii.gz", np.mean(avg, axis=0), img.affine)

######################################################################

from pyitab.results.base import filter_dataframe
from pyitab.utils.image import save_map
import nibabel as ni

# NOTE(review): the second call immediately overwrites the first
# `dataframe`; only the 'egg+below+chance' results survive — confirm
# this is intended.
dataframe = get_searchlight_results_bids(
    '/media/robbis/DATA/fmri/EGG/derivatives/', pipeline=['egg+delay'])
dataframe = get_searchlight_results_bids(
    '/media/robbis/DATA/fmri/EGG/derivatives/', pipeline=['egg+below+chance'])

path = '/media/robbis/DATA/fmri/EGG/derivatives/'
# NOTE(review): `pipeline` is not defined in this snippet — it must be
# set before this line runs.
data = get_results(path,  
                   pipeline=pipeline, 
                   field_list=['sample_slicer', 
                               'n_clusters', 
                               'algorithm'],
                               
                    #filter={'algorithm':['KMeans']}
                    )

# Grid of analysis conditions to evaluate.
conditions = {
    'runs': ['1', '2', '3'],
    'algorithm': ['KMeans',
                  'AgglomerativeClustering']
}

combinations = make_dict_product(**conditions)
df = data
metrics = []
best_k = []
for combo in combinations:
    # Restrict the dataframe to the current condition combination.
    selection = filter_dataframe(df, **combo)
    # Unwrap the single-item lists so they can be stored as scalars.
    fixed = {key: values[0] for key, values in combo.items()}
    metric_table = calculate_metrics(selection, fixed_variables=fixed)
    metric_table = metric_table.sort_values('k')
    metrics.append(metric_table)
    best_k.append(find_best_k(metric_table))
df_metrics = pd.concat(metrics)
df_guess = pd.concat(best_k)
Beispiel #4
0
    'time': [0.5, 1, 1.5, 2.],
    #'num': [str(j) for j in np.arange(1, 480)],
    'snr': [10, 100, 1000, 10000],
    'algorithm': [
        'GaussianMixture', 'KMeans', 'AgglomerativeClustering',
        'SpectralClustering', 'MiniBatchKMeans'
    ],
    'subject': [str(i) for i in np.arange(1, 21)]
}

combinations = make_dict_product(**conditions)

metrics = []
best_k = []
for option_set in combinations:
    # Filter to the current combination, then collapse the one-element
    # option lists to plain scalars for bookkeeping.
    subset = filter_dataframe(df, **option_set)
    scalar_options = {name: choice[0] for name, choice in option_set.items()}
    metric_df = calculate_metrics(subset, fixed_variables=scalar_options)
    metric_df = metric_df.sort_values('k')
    metrics.append(metric_df)
    best_k.append(find_best_k(metric_df))

df_metrics = pd.concat(metrics)
df_guess = pd.concat(best_k)

# Score each guess against the expected number of clusters (6):
# exact hits and absolute distance from 6.
df_guess['hit'] = np.int_(df_guess['guess'].values == 6)
df_guess['abshit'] = np.abs(df_guess['guess'].values - 6)
df_great_mean = apply_function(df_guess,
                               keys=['name', 'algorithm'],
                               attr='abshit',
                               fx=np.mean)
        for field in ['design_info', 'MSE', 'theta', 'r_square']:
            fields[field] = mat[field]


        if 'stats_contrasts' in mat.keys():
            for contrast in mat['stats_contrasts'].dtype.names:
                p_value = mat['stats_contrasts'][contrast][()]['p_values'][()]
                fields[contrast] = p_value

        results.append(fields.copy())
        
    return results


import matplotlib.pyplot as pl
from scipy.spatial.distance import squareform

path = '/media/robbis/DATA/meg/viviana-hcp/derivatives/derivatives/'
dataframe = get_results_bids(path, get_function=get_values_lm, analysis=['linear+model'])


# Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` is the documented replacement (same float64 dtype).
matrix = np.zeros_like(nanmask, dtype=float)
for band in np.unique(dataframe['band']):
    df = filter_dataframe(dataframe, band=[band])

    # Fitted linear-model coefficients and their design-column labels
    # (the snippet is truncated here; theta/columns are used further on).
    theta = df['theta'].values
    columns = df['design_info'].values



Beispiel #6
0
        a.save()
    except Exception as err:
        errs.append(err)

from pyitab.results.base import get_searchlight_results, filter_dataframe

path = "/media/robbis/DATA/fmri/ds105/derivatives/mvpa"
name = "ds105_balancer"
df = get_searchlight_results(path, name, field_list=['sample_slicer'])

# For every balancer and every sampling ratio except the last one
# (the 1.0 baseline), assemble a paired 3dttest++ command that
# contrasts each subject's map with that subject's baseline map.
for balancer in np.unique(df['balancer']):
    for ratio in np.unique(df['sampling_strategy'])[:-1]:
        command = "3dttest++ -setA %s_%s " % (balancer, str(ratio))
        prefix = " -paired -prefix %s/%s_%s" % (path, balancer, str(ratio))
        df_filtered = filter_dataframe(df,
                                       balancer=[balancer],
                                       sampling_strategy=[ratio])

        # Collect the per-subject pieces, then join once at the end.
        a_chunks = [" "]
        b_chunks = [" -setB base "]
        for index, sub in enumerate(np.unique(df_filtered['name'])):
            df_base = filter_dataframe(df,
                                       balancer=[balancer],
                                       sampling_strategy=[1.0],
                                       name=[sub])

            a_chunks.append("sub%s %s'[0]' " % (sub, df_filtered['map'].values[index]))
            b_chunks.append("sub%s %s'[0]' " % (sub, df_base['map'].values[0]))

        set_a = "".join(a_chunks)
        set_b = "".join(b_chunks)
        sys_command = command + set_a + set_b + prefix
        print(sys_command)
Beispiel #7
0
# Facet the dataframe by attribute (rows) and mask (columns), one
# colored line per 'value'; each panel plots score vs. evidence.
grid = sns.FacetGrid(dataframe,
                     row="attr",
                     col="mask",
                     hue="value",
                     height=3.5)
grid.map(pl.plot, "evidence", "score_score", marker="o")

# Overlay the per-facet average curve (avg_plot defined elsewhere).
grid.map_dataframe(avg_plot)

# Wes Anderson "Rushmore" hex palette.
rushmore = ["#E1BD6D", "#EABE94", "#0B775E", "#35274A", "#F2300F"]
palette = sns.color_palette(rushmore)

fig = pl.figure(figsize=(15, 15))

for i, task in enumerate(tasks):
    df = filter_dataframe(dataframe, attr=[task], mask=[task])
    ax = fig.add_subplot(2, 2, i + 1)

    data = df['score_score'].values
    evidences = df['evidence'].values
    colors = df['value'].values
    for j, mask in enumerate(np.unique(colors)):
        df_roi = filter_dataframe(df, value=[mask])
        df_avg = df_fx_over_keys(df_roi,
                                 attr="score_score",
                                 keys=['evidence'],
                                 fx=np.mean)
        #ax.scatter(evidences+(0.02*j), data, alpha=0.8, c=np.array([palette[j]]))
        ax.plot(df_avg['evidence'].values,
                df_avg['score_score'].values,
                marker='o',
Beispiel #8
0
    try:
        a = AnalysisPipeline(conf, name="fingerprint").fit(ds, **kwargs)
        a.save(path="/home/carlos/fmri/carlo_mdm/0_results/")
    except Exception as err:
        errs.append([conf._default_options, err])
        capture_exception(err)


##### Results #####
from pyitab.results.base import filter_dataframe
from pyitab.results.bids import get_searchlight_results_bids
from scipy.stats import zscore


# Load searchlight results and keep the 'full' maps of a single run id.
dataframe = get_searchlight_results_bids('/media/robbis/DATA/fmri/carlo_mdm/0_results/derivatives/')
df = filter_dataframe(dataframe, id=['uv5oyc6s'], filetype=['full'])

# Intersection mask shared by all subjects.
# NOTE(review): get_data() is deprecated in recent nibabel versions;
# get_fdata() is the replacement.
mask = ni.load("/media/robbis/DATA/fmri/carlo_mdm/1_single_ROIs/mask_intersection.nii.gz").get_data()





# For each attribute map, extract the in-mask voxels and z-score them.
# The snippet is truncated right after zdata is computed.
image_dict = {}
image_list = []
for i, attr in enumerate(df['attr'].values):
    img = ni.load(df['filename'].values[i])
    data = img.get_data()[np.bool_(mask)]

    zdata = zscore(data, axis=0)
Beispiel #9
0
# Save the facet grid produced above.
grid.savefig(figname, dpi=250)


###### Matrix
import itertools
# 2x4 axes: presumably row 0 for diagonal profiles, row 1 for the full
# matrices (see the truncated loop below) — TODO confirm layout.
fig1, ax1 = pl.subplots(2, 4, sharex=True)

tasks = np.unique(dataframe['targets'].values)
bands = np.unique(dataframe['band'].values)

# One column of axes per (target, band) combination.
combinations = itertools.product(tasks, bands)
cmap = pl.cm.Paired

for j, (target, band) in enumerate(combinations):

    filtered_df = filter_dataframe(dataframe, targets=[target], band=[band])
    df_average = apply_function(filtered_df, 
                                keys=['value', 'band', 'targets'], 
                                attr='score_score', 
                                fx=lambda x: np.mean(np.dstack(x), axis=2))
    
    matrix = df_average['score_score'].values[0]

    p = ax1[0, j].plot(np.diagonal(matrix), 'o-', c=cmap.colors[j])

    palette_within = sns.light_palette(cmap.colors[j], 
                                       #reverse=True,
                                       n_colors=256,
                                       as_cmap=True)

    im = ax1[1, j].imshow(matrix, 
}

# Per-experiment [vline x-position, upper subject bound] — used below as
# vlines(limits[exp][0], 0.5, limits[exp][1] + 0.5).  Presumably the
# first value is a time limit and the second the subject count — TODO
# confirm units.
limits = {
    "VAS_INTERO": [5250, 20],
    "VAS60old_boundaries": [3600, 20],
    "VAS_W&G": [3600, 15]
}


for experiment in experiment_list[:3]:
    fig = pl.figure()
    grid = pl.GridSpec(8, 3, figure=fig)

    data = pd.read_excel(os.path.join(path, experiment+".xlsx"))

    d = filter_dataframe(data, corresp=[1], **{'IR.ACC':[1]})
    d = d.dropna()

    #### Click distribution ###
    value_click = np.int_(np.sign(d['DIST sec']) == 1)

    #grid = pl.GridSpec(4, 1, top=0.88, bottom=0.11, left=0.15,
    #                        right=0.85, hspace=0.2, wspace=0.2)
    
    ax1 = pl.subplot(grid[:3, 0])
    scatter = ax1.scatter(d['VAS sec'], d['Subject'], 
                        marker='|', 
                        c=value_click, 
                        cmap=palette_scatter)
    l = ax1.vlines(limits[experiment][0], 
                   0.5, limits[experiment][1]+0.5, 
Beispiel #11
0
import pandas as pd

# sheet_name=None makes read_excel return a dict of DataFrames,
# one entry per sheet.
data = pd.read_excel(
    '/home/robbis/Dropbox/PhD/experiments/memory_movie/VAS_Roberto.xlsx',
    sheet_name=None)

########## Statistical tests ###############
from pyitab.results.base import filter_dataframe
from scipy.stats import ttest_1samp

# Keep only correct-correspondence trials.
# NOTE(review): `data` is a dict of sheets at this point; confirm
# filter_dataframe handles that (or that a single sheet was intended).
df = filter_dataframe(data, corresp=[1])

subjects = np.unique(df['Subject'])
# NOTE(review): movie_clips is computed but never used in the visible
# code — possibly left over or used in the truncated part.
movie_clips = np.unique(df['VAS_Corr']) / 6.

timewise_tests = []
for clip in np.unique(df['VAS_Corr']):
    df_ = filter_dataframe(df, VAS_Corr=[clip])

    values = df_['VAS sec'].values
    values = values[np.logical_not(np.isnan(values))]

    popmean = np.mean(df_['VAS_Corr sec'])

    t, p = ttest_1samp(values, popmean)
    timewise_tests.append({
        'clip': clip,
        't': t,
        'p': p,
        'mean': values.mean(),
        'attendend': popmean
# Load results for the real task-prediction pipeline and its
# permutation counterpart (index 0 = real, index 1 = permutations).
dataframes = []
for pipeline in ['fingerprint+tavor+taskprediction', 'fingerprint+tavor+perm']:
    path = '/media/robbis/DATA/meg/viviana-hcp/derivatives/pipeline-' + pipeline
    dataframe = get_results_bids(path,
                                 field_list=loaded_keywords,
                                 get_function=get_values_tavor,
                                 pipeline=[pipeline])
    # kwargs__y_attr holds a stringified dict; recover the task label
    # with a safe literal parse.
    dataframe['y_attr'] = [
        ast.literal_eval(x)['task'][0]
        for x in dataframe['kwargs__y_attr'].values
    ]

    # Permutation pipeline only: keep permutations 1..500.
    if 'perm' in dataframe.keys():
        dataframe['perm'] = np.int_(dataframe['perm'])
        dataframe = filter_dataframe(dataframe, perm=np.arange(500) + 1)

    dataframes.append(dataframe)

kwargs = dict()
# Per node pair, build the permutation null distribution for each metric
# and extract fixed-rank thresholds.
for key in ['mse', 'neg_mean_squared_error', 'corr', 'r2']:
    perm = apply_function(dataframes[1],
                          keys=['nodes_1', 'nodes_2'],
                          attr=key,
                          fx=lambda x: np.vstack(x).mean(1))

    # With 500 permutations (see the filter above), sorted-array index
    # -25 is the 95th percentile, -5 the 99th, 25 the 5th, 5 the 1st.
    # NOTE(review): these ranks are hard-coded to n_perm == 500.
    perm['p095'] = [np.sort(null_dist)[-25] for null_dist in perm[key].values]
    perm['p099'] = [np.sort(null_dist)[-5] for null_dist in perm[key].values]
    perm['p005'] = [np.sort(null_dist)[25] for null_dist in perm[key].values]
    perm['p001'] = [np.sort(null_dist)[5] for null_dist in perm[key].values]