Example #1
def main(arglist):

    # Process cmdline args
    parser = tools.parser
    parser.description = help
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    args = tools.parser.parse_args(arglist)
    plugin, plugin_args = lyman.determine_engine(args)

    # Load up the lyman info
    subject_list = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()

    # Create the workflow object
    method = project["normalization"]
    wf_func = getattr(anatwarp, "create_{}_workflow".format(method))
    normalize = wf_func(project["data_dir"], subject_list)
    normalize.base_dir = os.path.join(project["working_dir"], "warp")
    
    # Put crashdumps somewhere out of the way
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Execute the workflow
    lyman.run_workflow(normalize, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(normalize.base_dir)
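
The main(arglist) functions in these examples take the argument list explicitly rather than reading sys.argv themselves, which keeps them importable and testable. A minimal sketch of how such a script is typically wired up (the guard below is an assumption, not part of the excerpt):

import sys

if __name__ == "__main__":
    main(sys.argv[1:])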
Example #2
def get_subject_order(exp):

    subjects = lyman.determine_subjects([exp + "_subjects"])
    accs = pd.Series(index=subjects, dtype=float)
    for subj in subjects:
        fname = "decoding_analysis/{}_{}_ifs.pkz".format(subj, exp)
        accs.loc[subj] = moss.load_pkl(fname).acc
    return list(accs.sort_values(ascending=False).index)
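
A hypothetical call, assuming the lyman subject file and the pickled decoding results exist on disk:

subj_order = get_subject_order("dots")
print(subj_order[:3])  # the three subjects with the highest accuracy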
Example #3
def main(arglist):

    # Parse the command line
    args = parse_args(arglist)

    # Load the lyman data
    subjects = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
    contrasts = exp["contrast_names"]
    z_thresh = exp["cluster_zthresh"]

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Group-level
    # ===========

    if args.level == "group":
        temp_base = op.join(project["analysis_dir"], exp_name, args.output,
                            args.regspace, "{contrast}")
        if args.regspace == "fsaverage":
            sig_thresh = -np.log10(stats.norm.sf(z_thresh))
            sig_thresh = np.round(sig_thresh) * 10
            corr_sign = exp["surf_corr_sign"]
            sig_name = "cache.th%d.%s.sig.masked.mgh" % (sig_thresh, corr_sign)
            stat_temp = op.join(temp_base, "{hemi}/osgm", sig_name)
            mask_temp = op.join(temp_base, "{hemi}/mask.mgh")
            png_temp = op.join(temp_base, "{hemi}/osgm/zstat_threshold.png")
        else:
            stat_temp = op.join(temp_base, "{hemi}.zstat1_threshold.mgz")
            mask_temp = op.join(temp_base, "{hemi}.group_mask.mgz")
            png_temp = op.join(temp_base, "zstat1_threshold_surf.png")
            corr_sign = "pos"

        contrast_loop("fsaverage", contrasts, stat_temp, mask_temp, png_temp,
                      args, z_thresh, corr_sign)

    # Subject-level
    # =============

    elif args.level == "subject":
        temp_base = op.join(project["analysis_dir"], exp_name, "{subj}", "ffx",
                            args.regspace, "smoothed/{contrast}")
        mask_temp = op.join(temp_base, "{hemi}.mask.mgz")
        stat_temp = op.join(temp_base, "{hemi}.zstat1.mgz")
        png_temp = op.join(temp_base, "zstat1_surf.png")

        for subj in subjects:
            contrast_loop(subj, contrasts, stat_temp, mask_temp, png_temp,
                          args, 1.96, "abs")
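
The fsaverage branch above converts the cluster-forming z threshold into the -log10(p) units that FreeSurfer encodes in its Monte Carlo cache filenames. A worked instance of that arithmetic for a typical threshold:

from scipy import stats
import numpy as np

z_thresh = 2.3
sig_thresh = -np.log10(stats.norm.sf(z_thresh))  # about 1.97
sig_thresh = np.round(sig_thresh) * 10           # 2.0 * 10 -> 20
# with corr_sign = "pos", sig_name becomes "cache.th20.pos.sig.masked.mgh"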
Example #4
def main(arglist):

    # Parse the command line
    args = parse_args(arglist)

    # Load the lyman data
    subjects = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
    contrasts = exp["contrast_names"]
    z_thresh = exp["cluster_zthresh"]

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Group-level
    # ===========

    if args.level == "group":
        temp_base = op.join(project["analysis_dir"], exp_name, args.output,
                            args.regspace, "{contrast}")
        if args.regspace == "fsaverage":
            sig_thresh = -np.log10(stats.norm.sf(z_thresh))
            sig_thresh = np.round(sig_thresh) * 10
            corr_sign = exp["surf_corr_sign"]
            sig_name = "cache.th%d.%s.sig.masked.mgh" % (sig_thresh, corr_sign)
            stat_temp = op.join(temp_base, "{hemi}/osgm", sig_name)
            mask_temp = op.join(temp_base, "{hemi}/mask.mgh")
            png_temp = op.join(temp_base, "{hemi}/osgm/zstat_threshold.png")
        else:
            stat_temp = op.join(temp_base, "{hemi}.zstat1_threshold.mgz")
            mask_temp = op.join(temp_base, "{hemi}.group_mask.mgz")
            png_temp = op.join(temp_base, "zstat1_threshold_surf.png")
            corr_sign = "pos"

        contrast_loop("fsaverage", contrasts, stat_temp, mask_temp, png_temp,
                      args, z_thresh, corr_sign)

    # Subject-level
    # =============

    elif args.level == "subject":
        temp_base = op.join(project["analysis_dir"], exp_name, "{subj}",
                            "ffx", args.regspace, "smoothed/{contrast}")
        mask_temp = op.join(temp_base, "{hemi}.mask.mgz")
        stat_temp = op.join(temp_base, "{hemi}.zstat1.mgz")
        png_temp = op.join(temp_base, "zstat1_surf.png")

        for subj in subjects:
            contrast_loop(subj, contrasts, stat_temp, mask_temp, png_temp,
                          args, 1.96, "abs")
Example #5
def main(arglist):

    args = parse_args(arglist)

    if args.subjects is None:
        args.subjects = lyman.determine_subjects()

    for subj in args.subjects:

        print "Running subject", subj

        searchlight_dir = op.join(analysis_dir, "dksort", subj,
                                  "mvpa/searchlight")

        if not op.exists(searchlight_dir):
            os.mkdir(searchlight_dir)

        vol_fname = op.join(searchlight_dir, "dimension_dksort_pfc.nii.gz")

        if "fit" in args.do and (not op.exists(vol_fname) or args.overwrite):

            print " Doing searchlight"

            mask_img, X, y, runs = load_data(subj)

            s = SearchLight(mask_img,
                            radius=10,
                            n_jobs=10,
                            estimator=LogisticRegression(),
                            cv=LeaveOneLabelOut(runs))
            s.fit(X, y)
            out_img = nib.Nifti1Image(s.scores_, s.mask_img.affine)
            out_img.to_filename(vol_fname)

        surf_fnames = [
            op.join(searchlight_dir, "lh.dimension_dksort_pfc.mgz"),
            op.join(searchlight_dir, "rh.dimension_dksort_pfc.mgz")
        ]

        if "surf" in args.do and (not all(map(op.exists, surf_fnames))
                                  or args.overwrite):

            print " Doing surfreg"

            reg_fname = op.join(analysis_dir, "dksort", subj,
                                "preproc/run_1/func2anat_tkreg.dat")

            for i, hemi in enumerate(["lh", "rh"]):

                cmdline = [
                    "mri_vol2surf", "--mov", vol_fname, "--reg", reg_fname,
                    "--trgsubject", "fsaverage", "--projfrac-avg", "0", "1",
                    ".1", "--surf-fwhm", "5", "--hemi", hemi, "--o",
                    surf_fnames[i]
                ]
                sp.check_output(" ".join(cmdline), shell=True)
Example #6
def main(arglist):

    args = parse_args(arglist)

    if args.subjects is None:
        args.subjects = lyman.determine_subjects()

    for subj in args.subjects:

        print "Running subject", subj

        searchlight_dir = op.join(analysis_dir, "dksort", subj,
                                  "mvpa/searchlight")

        if not op.exists(searchlight_dir):
            os.mkdir(searchlight_dir)

        vol_fname = op.join(searchlight_dir, "dimension_dksort_pfc.nii.gz")

        if "fit" in args.do and (not op.exists(vol_fname) or args.overwrite):

            print " Doing searchlight" 

            mask_img, X, y, runs = load_data(subj)

            s = SearchLight(mask_img, radius=10, n_jobs=10,
                            estimator=LogisticRegression(),
                            cv=LeaveOneLabelOut(runs))
            s.fit(X, y)
            out_img = nib.Nifti1Image(s.scores_, s.mask_img.affine)
            out_img.to_filename(vol_fname)

        surf_fnames = [op.join(searchlight_dir, "lh.dimension_dksort_pfc.mgz"),
                       op.join(searchlight_dir, "rh.dimension_dksort_pfc.mgz")]

        if "surf" in args.do and (not all(map(op.exists, surf_fnames))
                                  or args.overwrite):

            print " Doing surfreg"

            reg_fname = op.join(analysis_dir, "dksort", subj,
                                "preproc/run_1/func2anat_tkreg.dat")

            for i, hemi in enumerate(["lh", "rh"]):

                cmdline = ["mri_vol2surf",
                           "--mov", vol_fname,
                           "--reg", reg_fname,
                           "--trgsubject", "fsaverage",
                           "--projfrac-avg", "0", "1", ".1",
                           "--surf-fwhm", "5",
                           "--hemi", hemi,
                           "--o", surf_fnames[i]]
                sp.check_output(" ".join(cmdline), shell=True)
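
LeaveOneLabelOut comes from scikit-learn's old cross_validation module, which was removed in later releases. A sketch of the modern equivalent, assuming runs holds one run label per sample; recent nilearn versions take the labels through the groups argument at fit time:

from sklearn.model_selection import LeaveOneGroupOut

cv = LeaveOneGroupOut()
s = SearchLight(mask_img, radius=10, n_jobs=10,
                estimator=LogisticRegression(), cv=cv)
s.fit(X, y, groups=runs)  # groups replace the old per-sample labels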
Example #7
def main(arglist):

    # Find the subjects for this execution
    args = parse_args(arglist)
    subjects = lyman.determine_subjects(args.subjects)

    # Find the project details
    proj = lyman.gather_project_info()
    data_dir = proj["data_dir"]

    # Make images for each subject
    for subj in subjects:

        # Make sure the output directory exists
        out_dir = op.join(data_dir, subj, "snapshots")
        if not op.exists(out_dir):
            os.mkdir(out_dir)

        # Do each chunk of reporting
        surface_images(out_dir, subj)
        curvature_normalization(data_dir, subj)
        volume_images(data_dir, subj)
Example #8
def main(arglist):

    # Process cmdline args
    args = tools.parser.parse_args(arglist)
    plugin, plugin_args = lyman.determine_engine(args)

    # Load up the lyman info
    subject_list = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()

    # Create the workflow object
    normalize = anatwarp.create_anatwarp_workflow(
                    project["data_dir"], subject_list)
    normalize.base_dir = project["working_dir"]
    normalize.config["execution"]["crashdump_dir"] = "/tmp"

    # Execute the workflow
    lyman.run_workflow(normalize, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(normalize.base_dir)
Example #9
def main(arglist):

    # Find the subjects for this execution
    args = parse_args(arglist)
    subjects = lyman.determine_subjects(args.subjects)

    # Find the project details
    proj = lyman.gather_project_info()
    data_dir = proj["data_dir"]

    # Make images for each subject
    for subj in subjects:

        # Make sure the output directory exists
        out_dir = op.join(data_dir, subj, "snapshots")
        if not op.exists(out_dir):
            os.mkdir(out_dir)

        # Do each chunk of reporting
        inflated_surfaces(out_dir, subj, args.close)
        curvature_normalization(data_dir, subj, args.close)
        volume_images(data_dir, subj)
Example #10
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)
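                # e.g. with a hypothetical exp_base "dots" and altmodel "alt",
                # subject s01 gets the relative link:
                # <analysis_dir>/dots-alt/s01/preproc -> ../../dots/s01/preproc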

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
        exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain_template"] = exp["whole_brain_template"]

    preproc_source = Node(SelectFiles(preproc_templates,
                                      base_directory=project["data_dir"]),
                          "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source,
                                        preproc_source, preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source,
                                          preproc_sink, preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
        )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source,
                                      model_source, model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source,
                                        model_sink, model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(flow_name,
                                                        space,
                                                        regtype,
                                                        warp_method,
                                                        args.residual)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
    )

    if regtype == "model":
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(dict(
            copes=op.join(reg_base, "cope*.nii.gz"),
            varcopes=op.join(reg_base, "varcope*.nii.gz"),
            sumsquares=op.join(reg_base, "ss*.nii.gz"),
        ))
    else:
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = list(reg_templates.keys())

    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)

    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Define the registration data source node
    reg_source = Node(SelectFiles(reg_templates,
                                  force_lists=reg_lists,
                                  base_directory=analysis_dir),
                      "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source,
                                    reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source,
                                      reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"])

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
    )
    ffx_lists = list(ffx_templates.keys())

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        bg = "{subject_id}/preproc/run_1/mean_func.nii.gz"
        reg = "{subject_id}/preproc/run_1/func2anat_tkreg.dat"
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(SelectFiles(ffx_templates,
                                  force_lists=ffx_lists,
                                  base_directory=analysis_dir),
                      "ffx_source")

    # Fixed effects input node
    ffx_inwrap = tools.InputWrapper(ffx, subj_source,
                                    ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source,
                                      ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([
        ("_smoothing_", ""), ("flamestats", "")
    ])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #11
import os.path as op
import numpy as np
import pandas as pd
from scipy import stats, signal
import statsmodels.api as sm
import nibabel as nib
import seaborn as sns
import matplotlib.pyplot as plt

from surfer import Brain

import moss
from moss import glm
from moss.nipy import VolumeImg
import lyman

subjects = lyman.determine_subjects()
project = lyman.gather_project_info()
data_dir = project["data_dir"]
analysis_dir = project["analysis_dir"]
exp = project["default_exp"]
design_temp = op.join(data_dir, "{}/design/{}.csv")


class SlicePlotter(object):
    """Object to generate single slice images or mosaics of volume results."""
    def __init__(self,
                 model,
                 contrast,
                 corrected=True,
                 stat_thresh=2.3,
                 stat_range=(2, 5),
Example #12
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
        exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain"] = exp["whole_brain_template"]
    if exp["fieldmap_template"]:
        preproc_templates["fieldmap"] = exp["fieldmap_template"]

    preproc_source = Node(
        SelectFiles(preproc_templates, base_directory=project["data_dir"]),
        "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source, preproc_source,
                                        preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source, preproc_sink,
                                          preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        nuisance_file=op.join(model_base, "nuisance_variables.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
    )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                 "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source, model_source,
                                      model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source, model_sink,
                                        model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Are we registering across experiments?
    cross_exp = args.regexp is not None

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(
        flow_name, space, regtype, warp_method, args.residual, cross_exp)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
    )

    if regtype == "model":
        # First-level model summary statistic images
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(
            dict(
                copes=op.join(reg_base, "cope*.nii.gz"),
                varcopes=op.join(reg_base, "varcope*.nii.gz"),
                sumsquares=op.join(reg_base, "ss*.nii.gz"),
            ))
    else:
        # Timeseries images
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = list(reg_templates.keys())

    # Native anatomy to group anatomy affine matrix and warpfield
    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)
    else:
        if args.regexp is None:
            tkreg_base = analysis_dir
        else:
            tkreg_base = op.join(project["analysis_dir"], args.regexp)
        reg_templates["tkreg_rigid"] = op.join(tkreg_base, "{subject_id}",
                                               "preproc", "run_1",
                                               "func2anat_tkreg.dat")

    # Rigid (6dof) functional-to-anatomical matrices
    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Rigid matrix from anatomy to target experiment space
    if args.regexp is not None:
        targ_analysis_dir = op.join(project["analysis_dir"], args.regexp)
        reg_templates["first_rigid"] = op.join(targ_analysis_dir,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_flirt.mat")

    # Define the registration data source node
    reg_source = Node(
        SelectFiles(reg_templates,
                    force_lists=reg_lists,
                    base_directory=analysis_dir), "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source, reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source, reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"],
                                                        exp_info=exp)

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
    )
    ffx_lists = list(ffx_templates.keys())

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        reg_dir = "{subject_id}/reg/epi/{smoothing}/run_1"
        bg = op.join(reg_dir, "mean_func_xfm.nii.gz")
        reg = op.join(reg_dir, "func2anat_tkreg.dat")
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(
        SelectFiles(ffx_templates,
                    force_lists=ffx_lists,
                    base_directory=analysis_dir), "ffx_source")

    # Fixed effects input node
    ffx_inwrap = tools.InputWrapper(ffx, subj_source, ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source, ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([("_smoothing_", ""),
                                          ("flamestats", "")])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(working_dir)
Example #13
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [group_info[group_info.subid == x].reset_index().at[0, "group"]
              for x in subject_list]
    # 1 for group1, 2 for group2
    group_vector = [1 if sub == "group1" else 2 for sub in groups]

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
    contrasts = [[contrast_name, "T",
                  ["group1_mean", "group2_mean"], contrast_vals]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)
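
    # Hypothetical values for a group1 > group2 comparison (the real
    # contrast_name and contrast_vals are defined elsewhere in the script):
    # contrast_name = "group1-group2"
    # contrast_vals = [1, -1]  # weights for [group1_mean, group2_mean]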

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #14
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
#     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
#     cov_col = 'mean_acc'

    # load in covariate (subids and value for each subject (in cov_col))
    cov = pd.read_csv(cov_filepath)
    # Prune to the subjects in this analysis, then z-score the covariate
    cov = cov.loc[cov.subid.isin(subject_list)]
    cov[cov_col] = (cov[cov_col] - cov[cov_col].mean()) / cov[cov_col].std()
    print(cov.describe())

    cov_reg = [cov[cov.subid == x].reset_index().at[0, cov_col]
               for x in subject_list]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list),
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]


    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print('run mni!')

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #15
import numpy as np
import pandas as pd

import moss
import lyman

if __name__ == "__main__":

    subjects = dict(dots=lyman.determine_subjects(["dots_subjects"]),
                    sticks=lyman.determine_subjects(["sticks_subjects"]))

    # ----- Combine decoding results

    rois = ["ifs", "mfc"]

    decoding_data = {}
    for exp, subj_list in subjects.items():

        # Set up the dataframe for this experiment
        idx = pd.MultiIndex.from_product([subj_list, rois],
                                         names=["subj", "roi"])
        exp_df = pd.DataFrame(index=idx,
                              columns=["acc", "chance", "pctile"],
                              dtype=float)

        # Load the data for each subject/roi
        for subj, roi in idx:
            fname = "decoding_analysis/{}_{}_{}.pkz".format(subj, exp, roi)
            res = moss.load_pkl(fname)
            exp_df.loc[(subj, roi)] = [res.acc, res.chance, res.acc_pctile]
        decoding_data[exp] = exp_df
Example #16
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
    #     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
    #     cov_col = 'mean_acc'

    # load in covariate for cort analysis
    cov = pd.read_csv(
        '/Volumes/group/awagner/sgagnon/AP/data/cortisol/cort_percentchange_testbaseline_controlassay.csv'
    )
    cov_col = 'cort_controlassay'

    # Prune to the subjects in this analysis, then z-score the covariate
    cov = cov.loc[cov.subid.isin(subject_list)]
    cov[cov_col] = (cov[cov_col] - cov[cov_col].mean()) / cov[cov_col].std()
    print(cov.describe())

    cov_reg = [
        cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list
    ]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list),
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print('run mni!')

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #17
    def __init__(self,
                 subject_list,
                 experiment,
                 roi_name,
                 orig_type,
                 force_serial=False,
                 debug=False):

        # Set up basic info
        self.subject_list = lyman.determine_subjects(subject_list)
        project = lyman.gather_project_info()
        self.experiment = experiment
        self.roi_name = roi_name
        self.orig_type = orig_type
        self.debug = debug
        if debug:
            print "Setting up for %d subjects" % len(subject_list)
            print "Experiment name:", experiment
            print "ROI name:", roi_name

        # Set up directories
        if project["default_exp"] is not None and experiment is None:
            experiment = project["default_exp"]
        self.experiment = experiment
        self.data_dir = project["data_dir"]
        self.anal_dir = project["analysis_dir"]

        # Set up temporary output
        self.temp_dir = mkdtemp()

        # Set the SUBJECTS_DIR variable for Freesurfer
        os.environ["SUBJECTS_DIR"] = self.data_dir

        # Set up parallel execution
        self.parallel = False
        if force_serial:
            self.map = map
        else:
            try:
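                # Client() raises TimeoutError/IOError when no IPython
                # cluster is reachable; the except clause below then
                # falls back to the builtin serial map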
                rc = Client()
                self.dv = rc[:]
                self.map = self.dv.map_async
                # Push SUBJECTS_DIR to engines
                self.dv.execute("import os")
                self.dv["data_dir"] = self.data_dir
                self.dv.execute("os.environ['SUBJECTS_DIR'] = data_dir")
                self.parallel = True

            except (TimeoutError, IOError):
                self.map = map
        if debug:
            print "Set to run in %s" % ("parallel"
                                        if self.parallel else "serial")

        # Set up some persistent templates
        self.epi_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/mean_func.nii.gz")
        self.fov_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/functional_mask.nii.gz")
        self.reg_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/func2anat_tkreg.dat")
        self.out_template = op.join(self.data_dir, "%(subj)s",
                                    "masks/%s.nii.gz" % self.roi_name)
        if debug:
            print "EPI template: %s" % self.epi_template
            print "Reg template: %s" % self.reg_template
            print "Output template: %s" % self.out_template

        # Ensure the output directory will exist
        for subj in self.subject_list:
            mask_dir = op.join(self.data_dir, subj, "masks")
            if not op.exists(mask_dir):
                os.mkdir(mask_dir)
Exemple #18
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base,
                             "{subject_id}/reg/epi/", ffxsmooth,
                             "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
        ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
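    # The wrapper connects each workflow output to the DataSink; the
    # regexp substitutions strip iterable-derived folder names
    # (e.g. "_l1_contrast_<name>/") from the sunk paths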
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Exemple #19
import os.path as op

import numpy as np
import pandas as pd
from scipy import stats, signal
import statsmodels.api as sm
import nibabel as nib
import seaborn as sns
import matplotlib.pyplot as plt

from surfer import Brain

import moss
from moss import glm
from moss.nipy import VolumeImg
import lyman

subjects = lyman.determine_subjects()
project = lyman.gather_project_info()
data_dir = project["data_dir"]
analysis_dir = project["analysis_dir"]
exp = project["default_exp"]
design_temp = op.join(data_dir, "{}/design/{}.csv")
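# The two fields are filled per use, e.g. (hypothetical names):
#     design_temp.format("subj01", "my_design")
#     -> <data_dir>/subj01/design/my_design.csv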


class SlicePlotter(object):
    """Object to generate single slice images or mosaics of volume results."""
    def __init__(self, model, contrast, corrected=True,
                 stat_thresh=2.3, stat_range=(2, 5),
                 stat_cmap="OrRd_r", stat_alpha=.85,
                 sharp=False, label_slices=True):
        """Initialize the object but do not plot anything yet."""
        anat_img = nib.load(op.join(data_dir, "average_anat.nii.gz"))
Exemple #20
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/reg/epi/",
                                  ffxsmooth, "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Exemple #21
    def __init__(self, subject_list, experiment, roi_name, orig_type,
                 force_serial=False, debug=False):

        # Set up basic info
        self.subject_list = lyman.determine_subjects(subject_list)
        project = lyman.gather_project_info()
        self.experiment = experiment
        self.roi_name = roi_name
        self.orig_type = orig_type
        self.debug = debug
        if debug:
            print "Setting up for %d subjects" % len(subject_list)
            print "Experiment name:", experiment
            print "ROI name:", roi_name

        # Set up directories
        if project["default_exp"] is not None and experiment is None:
            experiment = project["default_exp"]
        self.experiment = experiment
        self.data_dir = project["data_dir"]
        self.anal_dir = project["analysis_dir"]

        # Set up temporary output
        self.temp_dir = mkdtemp()

        # Set the SUBJECTS_DIR variable for Freesurfer
        os.environ["SUBJECTS_DIR"] = self.data_dir

        # Set up parallel execution
        self.parallel = False
        if force_serial:
            self.map = map
        else:
            try:
                rc = Client()
                self.dv = rc[:]
                self.map = self.dv.map_async
                # Push SUBJECTS_DIR to engines
                self.dv.execute("import os")
                self.dv["data_dir"] = self.data_dir
                self.dv.execute("os.environ['SUBJECTS_DIR'] = data_dir")
                self.parallel = True

            except (TimeoutError, IOError):
                self.map = map
        if debug:
            print "Set to run in %s" % ("parallel" if self.parallel else "serial")

        # Set up some persistent templates
        self.epi_template = op.join(self.anal_dir, self.experiment,
                                    "%(subj)s",
                                    "preproc/run_1/mean_func.nii.gz")
        self.fov_template = op.join(self.anal_dir, self.experiment,
                                    "%(subj)s",
                                    "preproc/run_1/functional_mask.nii.gz")
        self.reg_template = op.join(self.anal_dir, self.experiment,
                                    "%(subj)s",
                                    "preproc/run_1/func2anat_tkreg.dat")
        self.out_template = op.join(self.data_dir, "%(subj)s",
                                    "masks/%s.nii.gz" % self.roi_name)
        if debug:
            print "EPI template: %s" % self.epi_template
            print "Reg template: %s" % self.reg_template
            print "Output template: %s" % self.out_template

        # Ensure the output directory will exist
        for subj in self.subject_list:
            mask_dir = op.join(self.data_dir, subj, "masks")
            if not op.exists(mask_dir):
                os.mkdir(mask_dir)
Exemple #22
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    group_info = pd.read_csv(group_filepath)
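    # group_filepath is assumed to be defined elsewhere in this script;
    # the CSV needs at least "subid" and "group" columns, e.g.:
    #     subid,group
    #     subj01,group1
    #     subj02,group2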

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (a deletion here was caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [
        group_info[group_info.subid == x].reset_index().at[0, 'group']
        for x in subject_list
    ]
    group_vector = [1 if sub == "group1" else 2
                    for sub in groups]  # 1 for group1, 2 for group2
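    # group_vector encodes per-subject group membership for the grouped
    # mixed-effects workflow (presumably used as FLAME variance groups)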

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
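    # One indicator regressor per group gives a cell-means design; each
    # subject contributes to exactly one column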
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE
    # (contrast_name and contrast_vals must be defined before this point)
    contrasts = [[
        contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals
    ]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Exemple #23
import sys
import argparse

from flask import (Flask,
                   redirect, send_file)

import lyman

project = lyman.gather_project_info()
default_exp = project["default_exp"]

parser = argparse.ArgumentParser()
parser.add_argument("-subjects", nargs="*")
parser.add_argument("-experiment", default=default_exp)
parser.add_argument("-port", type=int, default=5000)
parser.add_argument("-external", action="store_true")
parser.add_argument("-debug", action="store_true")
args = parser.parse_args(sys.argv[1:])
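# Example invocation (hypothetical script name):
#     python report_app.py -subjects subj01 subj02 -port 5050 -external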

subjects = lyman.determine_subjects(args.subjects)
exp_base = args.experiment

app = Flask(__name__)


def basic_info(experiment=exp_base):
    """Basic information needed before any report options are set."""
    experiments = list_experiments()
    exp = experiment_info(experiment)

    subjects_size = min(len(subjects), 10)

    runs = range(1, exp["n_runs"] + 1)

    contrasts = exp["contrast_names"]