コード例 #1
0
def pchange_net():
    """Raincloud plots of task-vs-rest percentage change per network, for both templates."""
    sns.set(style="whitegrid", font_scale=2)
    ratios = {'width_ratios': [7 / 19, 12 / 19]}
    fig, axes = plt.subplots(2, 2, figsize=(36, 20), sharey="row", sharex="col",
                             gridspec_kw=ratios)
    for col, tpt in enumerate([tpt_sh, tpt_cole]):
        for row, (lib, label, name) in enumerate(lib_details):
            # subject-level means -> percentage change vs rest -> task/network averages
            data = lib.gen_long_data(tpt)
            data = data.and_filter(subject=lib.find_shared_subjects(tpt, task_order()))
            data = data.groupby(["task", "subject", "network", "region"]).mean().reset_index()
            data = data.groupby(["subject", "network", "region"]).apply(calc_percentage_change).reset_index()
            data = data.groupby(["task", "network", "region"]).mean().reset_index()

            axis = axes[row, col]
            pt.RainCloud(data=data, hue="task", y="pchange", x="network", alpha=.65,
                         hue_order=task_order(False), order=tpt.net_order, ax=axis,
                         offset=0.1, dodge=True, bw=.2, width_viol=.7,
                         pointplot=True, palette=task_colors())
            # only the leftmost column carries a y-axis label
            axis.set(xlabel="", ylabel=f"{label} Change From Rest (%)" if col == 0 else "")
            axis.set_xticklabels(tpt.net_labels, rotation=90)
            axis.get_legend().remove()
    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    legend_handles = [Patch(facecolor=color, edgecolor=color, label=task)
                      for task, color in zip(task_order(False), task_colors())]
    lgn = fig.legend(handles=legend_handles, loc=2, ncol=3, mode="expand",
                     bbox_to_anchor=(0.12, -0.08, 0.785, 1))
    print(savefig(fig, "pchange.net", low=False, extra_artists=(lgn,)))
コード例 #2
0
def pchange_cp():
    """Two-way ANOVA (task x net_meta) plus within-task Tukey tests on percentage change.

    Prints, for every template/hierarchy and measure combination: the OLS summary,
    a type-II ANOVA table, and the pairwise Tukey results restricted to
    comparisons within the same task.
    """
    for col, (tpt, h_name) in enumerate(template_meta_combination):
        for row, (lib, label, name) in enumerate(lib_details):
            # subject-level means -> %change vs rest -> per-region meta-network means
            df = lib.gen_long_data(tpt) \
                .and_filter(subject=lib.find_shared_subjects(tpt, task_order())) \
                .groupby(["task", "subject", "network", "region"]).mean().reset_index() \
                .groupby(["subject", "network", "region"]).apply(calc_percentage_change).reset_index() \
                .add_net_meta(tpt.net_hierarchy(h_name)) \
                .groupby(["task", "region", "net_meta"]).mean().reset_index()

            model = ols(
                'pchange ~ C(task) + C(net_meta) + C(task):C(net_meta)',
                data=df).fit()
            print(f"\n\n####### {tpt}-{h_name}-{name} #######")
            # FIX: summary() returns a table object; it was previously discarded.
            print(model.summary())
            # use HC3 robust errors when residuals fail the omnibus normality test
            robust = None if model.diagn["omnipv"] > 0.05 else "hc3"
            aov_table = anova_table(
                sm.stats.anova_lm(model, typ=2, robust=robust))
            print(aov_table.to_string())
            # combined task/net_meta factor so Tukey covers all cell pairs
            df["comb"] = pd.Series(df.task + "/" + df.net_meta, df.index, str)
            result = df.pairwise_tukey(dv="pchange",
                                       between="comb",
                                       effsize="cohen")
            left = result.A.str.split("/", expand=True)
            right = result.B.str.split("/", expand=True)
            # report only comparisons between net_meta levels within the same task
            for task in task_order(False):
                print(result[(left[0] == task)
                             & (right[0] == task)].to_string())
コード例 #3
0
def pchange_cp():
    """Raincloud plots of percentage change from rest, split by hierarchy meta-network."""
    sns.set(style="whitegrid", font_scale=2)
    legend_handles = [Patch(facecolor=color, edgecolor=color, label=task)
                      for task, color in zip(task_order(False), task_colors())]
    fig, axes = plt.subplots(2, 3, figsize=(36, 24), sharex="col", sharey="row")
    for col, (tpt, h_name) in enumerate(template_meta_combination):
        for row, (lib, label, name) in enumerate(lib_details):
            # subject-level means -> %change vs rest -> per-region meta-network means
            data = lib.gen_long_data(tpt)
            data = data.and_filter(subject=lib.find_shared_subjects(tpt, task_order()))
            data = data.groupby(["task", "subject", "network", "region"]).mean().reset_index()
            data = data.groupby(["subject", "network", "region"]).apply(calc_percentage_change).reset_index()
            data = data.add_net_meta(tpt.net_hierarchy(h_name))
            data = data.groupby(["task", "region", "net_meta"]).mean().reset_index()
            axis = axes[row, col]
            pt.RainCloud(data=data, hue="task", y="pchange", x="net_meta", alpha=.65,
                         hue_order=task_order(False), order=h_name.keys, ax=axis,
                         offset=0.1, dodge=True, bw=.2, width_viol=.7,
                         pointplot=True, palette=task_colors())
            axis.set(xlabel="", ylabel=f"{label} Change From Rest (%)" if col == 0 else "")
            axis.get_legend().remove()
            # tick labels only on the bottom row; legend only on the top row
            axis.set_xticklabels(h_name.labels if row == 1 else [])
            if row == 0:
                axis.legend(handles=legend_handles, loc=2)
    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    print(savefig(fig, "pchange.cp", low=False))
コード例 #4
0
def alpha():
    """Raincloud plots of band-passed ACW-50/ACW-0 per meta-network.

    One column per template/hierarchy combination; rows are the two measures.
    Metric values are scaled to milliseconds and outliers removed per
    (task, net_meta) cell before plotting.
    """
    sns.set(style="whitegrid", font_scale=2)
    legend_handles = []
    for task, color in zip(task_order(), task_colors(True)):
        legend_handles.append(Patch(facecolor=color, edgecolor=color, label=task))
    fig, axs = plt.subplots(2, 3, figsize=(36, 24), sharex="col", sharey="row")
    for col, (tpt, h_name) in enumerate(template_meta_combination):
        for row, (lib, label) in enumerate(zip([acwb, aczb], ["ACW-50", "ACW-0"])):
            # region means -> ms units -> meta-network labels -> outlier removal
            # FIX: .drop("network", 1) used the positional axis argument,
            # which was removed in pandas 2.0; use columns= instead.
            df = lib.gen_long_data(tpt) \
                .groupby(["task", "region", "network"]).mean().reset_index() \
                .convert_column(metric=lambda x: x * 1000) \
                .add_net_meta(tpt.net_hierarchy(h_name)).drop(columns="network") \
                .groupby(["task", "net_meta"]).apply(remove_outliers, of="metric").reset_index(drop=True)

            ax = axs[row, col]
            pt.RainCloud(data=df, hue="task", y="metric", x="net_meta", alpha=.65, hue_order=task_order(),
                         order=h_name.keys, ax=ax, offset=0.1, dodge=True, bw=.2, width_viol=.7,
                         pointplot=True, palette=task_colors(True))
            # only the leftmost column carries a y-axis label
            ax.set(xlabel="", ylabel=f"{label} (ms)" if col == 0 else "")
            ax.get_legend().remove()
            ax.set_xticklabels(h_name.labels if row == 1 else [])
            if row == 0:
                ax.legend(handles=legend_handles)

    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    print(savefig(fig, "alpha.cp", low=False))
コード例 #5
0
def rest_task_regression():
    """Scatter + linear fit of each task's regional metric against rest, per template.

    For every template and measure, merges rest vs. task region means, plots the
    scatter colored by network, overlays the least-squares line, and annotates
    the r^2 value.
    """
    for tpt in [tpt_cole, tpt_sh]:
        fig, axs = plt.subplots(2, 3, figsize=(16, 10), sharex="row", sharey="row")
        txt = None
        for li, (lib, name, lbl) in enumerate(lib_details):
            df = lib.gen_long_data(tpt) \
                .groupby(["task", "region", "network"]).mean().reset_index() \
                .convert_column(metric=lambda x: x * 1000)
            df_rest = df.and_filter(task="Rest")
            txt = []
            for ti, task in enumerate(task_order(False)):
                # merge produces metric_x (rest) / metric_y (task) columns
                dft = pd.merge(df_rest, df.and_filter(task=task), on=["region", "network"])
                ax = axs[li, ti]
                # FIX: f"metric_y" was an f-string with no placeholders
                sns.scatterplot(data=dft, x="metric_x", y="metric_y", hue="network", hue_order=tpt.net_order,
                                ax=ax, palette=tpt.net_colors)
                slope, intercept, r_value, _, _ = stats.linregress(dft.metric_x, dft.metric_y)
                # FIX: seaborn removed positional data arguments; pass x/y as keywords
                sns.lineplot(x=dft.metric_x, y=slope * dft.metric_x + intercept, ax=ax, color='black')
                # NOTE(review): the "***" significance marker is hard-coded, not
                # derived from the p-value — confirm this is intended.
                ax.text(0.3, 0.8, f"$r^2$={r_value ** 2:.2f}***", ha='center', va='center', transform=ax.transAxes)
                ax.set(xlabel=f"Rest {lbl}", ylabel="")
                ax.get_legend().remove()
                txt.append(ax.text(-0.15 if ti == 0 else -0.05, 0.5, f"{task} {lbl}",
                                   transform=ax.transAxes, rotation=90, va='center', ha='center'))
        legend_handles = []
        for net, color, label in zip(tpt.net_order, tpt.net_colors, tpt.net_labels(break_space=False)):
            legend_handles.append(Line2D([], [], color=color, marker='o', linestyle='None', markersize=5, label=label))
        n_col = 6 if len(tpt.net_order) == 12 else 7
        lgn = fig.legend(handles=legend_handles, loc=2, ncol=n_col, handletextpad=0.1, mode="expand",
                         bbox_to_anchor=(0.12, -0.04, 0.785, 1))
        print(savefig(fig, f"regression.{tpt}", extra_artists=txt + [lgn, ], low=False))
コード例 #6
0
def task_cp_reg():
    """Raincloud plots of metric residuals (after regressing out coord_y) per meta-network."""
    sns.set(style="whitegrid", font_scale=2)
    legend_handles = [Patch(facecolor=color, edgecolor=color, label=task)
                      for task, color in zip(task_order(False), task_colors())]
    fig, axes = plt.subplots(2, 3, figsize=(36, 24), sharex="col", sharey="row")

    def _append_resid(group):
        # residual of metric regressed on the y coordinate, joined back by index
        resid = pd.Series(sm.OLS(group.metric, group.coord_y).fit().resid,
                          group.index, float, "resid")
        return pd.merge(group, resid, left_index=True, right_index=True)

    for col, (tpt, h_name) in enumerate(template_meta_combination):
        for row, (lib, label, name) in enumerate(lib_details):
            df = lib.gen_long_data(tpt).groupby(["task", "region", "network"]).mean().reset_index() \
                .and_filter(NOTtask="Rest") \
                .convert_column(metric=lambda x: x * 1000) \
                .add_topo(topo_at[tpt.key]) \
                .add_net_meta(tpt.net_hierarchy(h_name)) \
                .groupby("task").apply(_append_resid).reset_index(drop=True) \
                .groupby(["task", "net_meta"]).apply(remove_outliers, of="metric").reset_index(drop=True)

            axis = axes[row, col]
            pt.RainCloud(data=df, hue="task", y="resid", x="net_meta", alpha=.65,
                         hue_order=task_order(False), order=h_name.keys, ax=axis,
                         offset=0.1, dodge=True, bw=.2, width_viol=.7,
                         pointplot=True, palette=task_colors())
            axis.set(xlabel="", ylabel=f"{name} Residual" if col == 0 else "")
            axis.get_legend().remove()
            # tick labels only on the bottom row; legend only on the top row
            axis.set_xticklabels(h_name.labels if row == 1 else [])
            if row == 0:
                axis.legend(handles=legend_handles, loc=2)
    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    print(savefig(fig, "task.cp.res", low=False))
コード例 #7
0
def task_cp():
    """Two-way ANOVA (task x net_meta) and within-task Tukey tests on task metrics.

    Prints, for every template/hierarchy and measure combination: the OLS summary,
    a type-II ANOVA table, and pairwise Tukey results restricted to comparisons
    within the same task.
    """
    for col, (tpt, h_name) in enumerate(template_meta_combination):
        for row, (lib, label, name) in enumerate(lib_details):
            # region means (rest excluded) -> ms units -> meta-network labels
            # FIX: .drop("network", 1) used the positional axis argument,
            # which was removed in pandas 2.0; use columns= instead.
            df = lib.gen_long_data(tpt).groupby(["task", "region", "network"]).mean().reset_index() \
                .and_filter(NOTtask="Rest") \
                .convert_column(metric=lambda x: x * 1000) \
                .add_net_meta(tpt.net_hierarchy(h_name)) \
                .drop(columns="network")

            model = ols('metric ~ C(task) + C(net_meta) + C(task):C(net_meta)',
                        data=df).fit()
            print(f"\n\n####### {tpt}-{h_name}-{name} #######")
            # FIX: summary() was computed but discarded; print it like the other tables
            print(model.summary())
            # use HC3 robust errors when residuals fail the omnibus normality test
            robust = None if model.diagn["omnipv"] > 0.05 else "hc3"
            aov_table = anova_table(
                sm.stats.anova_lm(model, typ=2, robust=robust))
            print(aov_table.to_string())
            # combined task/net_meta factor so Tukey covers all cell pairs
            df["comb"] = pd.Series(df.task + "/" + df.net_meta, df.index, str)
            result = df.pairwise_tukey(dv="metric",
                                       between="comb",
                                       effsize="cohen")
            left = result.A.str.split("/", expand=True)
            right = result.B.str.split("/", expand=True)
            # report only comparisons between net_meta levels within the same task
            for task in task_order(False):
                print(result[(left[0] == task)
                             & (right[0] == task)].to_string())
コード例 #8
0
def task_net():
    """Two-way ANOVA (task x network) plus per-task one-way network ANOVAs.

    Prints OLS summaries and type-II ANOVA tables for every template/measure
    combination, then again per task.
    """
    for col, tpt in enumerate(["sh2007", "cole"]):
        for row, (lib, label, name) in enumerate(lib_details):
            # region means with rest excluded, metric scaled to ms
            df = lib.gen_long_data(tpt) \
                .groupby(["task", "region", "network"]).mean().reset_index() \
                .and_filter(NOTtask="Rest") \
                .convert_column(metric=lambda x: x * 1000)

            model = ols('metric ~ C(task) + C(network) + C(task):C(network)',
                        data=df).fit()
            print(f"\n\n####### {tpt}-{name} #######")
            # FIX: summary() was computed but discarded; print it
            print(model.summary())
            # use HC3 robust errors when residuals fail the omnibus normality test
            robust = None if model.diagn["omnipv"] > 0.05 else "hc3"
            aov_table = anova_table(
                sm.stats.anova_lm(model, typ=2, robust=robust))
            print(aov_table.to_string())

            for task in task_order(False):
                dft = df.and_filter(task=task)
                print(f"\n\n####### {tpt}-{name}-{task} #######")
                # FIX: removed stray unary '+' from the formula ('~ + C(network)');
                # the fitted model is identical. Also print the discarded summary.
                model = ols('metric ~ C(network)', data=dft).fit()
                print(model.summary())
                robust = None if model.diagn["omnipv"] > 0.05 else "hc3"
                aov_table = anova_table(
                    sm.stats.anova_lm(model, typ=2, robust=robust))
                print(aov_table.to_string())
コード例 #9
0
def run_script(tpt: TemplateMap):
    """Process every scan of every task in parallel and save results as .npy files."""
    from config import RAW_DATA_ROOT_DIR
    for task in task_order():
        storage = MEGLocalStorage(RAW_DATA_ROOT_DIR, tpt.name, task, ANYTHING)
        for scan_id, file_infos in storage.get_all_by_scan().items():
            out_path = out_of(f"megs-hcp-alpha-{task}.acz.rois-{tpt.name}.scan-{scan_id}.npy", False)
            subj_ids, files = list(zip(*file_infos))
            # fan the per-file work out over 30 worker processes
            results = np.asarray(Parallel(n_jobs=30)(delayed(do_a_file)(f) for f in files))
            np.save(out_path, (task, scan_id, subj_ids, results))
コード例 #10
0
def rest_task_regional_corr():
    """Region-wise Pearson correlation between rest and each task across subjects.

    For every template and measure, computes per-region rest-vs-task correlations
    over subjects, FDR-corrects them, and saves both the raw and the
    significance-masked correlation topographies.
    """
    for tpt in [tpt_cole, tpt_sh]:
        for lib, name, lbl in lib_details:
            df = lib.gen_long_data(tpt) \
                .groupby(["task", "subject", "region"]).mean().reset_index()
            df_rest = df.and_filter(task="Rest")
            maps = []
            for task in task_order(False):
                # per region: Pearson r ("a") and p-value over subjects
                # FIX: .drop("level_1", 1) used the positional axis argument,
                # which was removed in pandas 2.0; use columns= instead.
                corr = pd.merge(df_rest, df.and_filter(task=task), on=["subject", "region"]) \
                    .sort_values("subject").reset_index(drop=True) \
                    .groupby("region").apply(
                    lambda x: pd.DataFrame(
                        np.asarray(stats.pearsonr(x.metric_x, x.metric_y)).reshape(1, -1), columns=["a", "p"])) \
                    .reset_index().drop(columns="level_1")
                # FDR (Benjamini-Hochberg) across regions; zero out non-significant r
                rejected, _, _, _ = multipletests(corr.p, method="fdr_bh")
                corr["a_sig"] = corr.a.copy()
                corr.loc[~rejected, "a_sig"] = 0
                maps.append(corr[["region", "a"]].build_single_topo_map(tpt))
                maps.append(corr[["region", "a_sig"]].build_single_topo_map(tpt))

            topo, brain, series = combine_topo_map(maps)
            savemap(f"regcorr.{tpt}.{name}", topo, brain, series)
コード例 #11
0
def kruskal():
    """One-way ANOVA of metric by grouping column, per measure and task.

    NOTE(review): despite the name, this runs OLS/ANOVA (plus Tukey HSD for the
    two-level meta grouping), not a Kruskal-Wallis test — confirm naming intent.
    """
    for mes, mes_name in zip([acw, acz], ["acw", "acz"]):
        for task in task_order(True):
            for meta in ["net_meta", "network"]:
                # subject-level means per grouping, metric scaled to ms
                df = mes.gen_long_data(tpt_name)\
                    .and_filter(task=task)\
                    .add_net_meta(get_net("pmc", tpt_name))\
                    .groupby(["subject", meta]).mean().reset_index()\
                    .convert_column(metric=lambda x: x * 1000)

                # feather.write_feather(df, f"r/{mes_name}.{task}.{meta}.feather")

                model = ols(f'metric ~ C({meta})', data=df)
                result = model.fit()
                # FIX: summary() was computed but discarded; print it
                print(result.summary())
                # use HC3 robust errors when residuals fail the omnibus normality test
                robust = None if result.diagn["omnipv"] > 0.05 else "hc3"
                aov_table = anova_table(
                    sm.stats.anova_lm(result, typ=2, robust=robust))
                print(aov_table.to_string())
                if meta == "net_meta":
                    # pairwise Tukey HSD only for the meta-network grouping
                    mc = MultiComparison(df.metric, df[meta])
                    mc_results = mc.tukeyhsd()
                    print(mc_results)
コード例 #12
0
def gen_long_data(tpt: TemplateMap):
    """Assemble the long-format dataframe for *tpt* over all tasks (rest included)."""
    content_reader = prepare_file_content(True)
    all_tasks = task_order(True)
    return generate_long_data(find_files, content_reader, tpt, all_tasks)
コード例 #13
0
import hcp_acf_zero as acz
import hcp_acf_window_bp as acwb
import hcp_acf_zero_bp as aczb
import matplotlib.pyplot as plt
import ptitprince as pt
import seaborn as sns
import pandas as pd
from neuro_helper.dataframe import remove_outliers, get_outlier_bounds, normalize, calc_percentage_change
from neuro_helper.hcp.meg.generic import task_order
from neuro_helper.generic import combine_topo_map
import numpy as np
from neuro_helper.plot import *
from config import *


# Module-level configuration shared by the plotting/statistics functions below.
tasks = task_order()
# (library module, short name, display label) triples; functions below unpack
# these as (lib, label, name) or (lib, name, lbl).
# NOTE(review): `acw` is not among the imports visible in this chunk — confirm
# it is imported elsewhere in the file.
lib_details = [
    (acw, "acw", "ACW-50"),
    (acz, "acz", "ACW-0")
]
font_scale = 1.1
sns.set(font_scale=font_scale, style="whitegrid")
# paired palette, presumably for periphery/core (PC) figures — TODO confirm
PC_colors_tuple = paired_colors(True)


def map_regions_pc_sh2007():
    tpt = tpt_sh
    img, (lbl, brain) = cifti.read(tpt.file_full_path)
    regions = lbl.label.item()
    hierarch = tpt.net_hierarchy(HierarchyName.PERIPHERY_CORE)
    cp_out = {}