Example #1
0
def main(arglist):

    # Parse the command line
    args = parse_args(arglist)

    # Load the lyman data
    subjects = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
    contrasts = exp["contrast_names"]
    z_thresh = exp["cluster_zthresh"]

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Group-level
    # ===========

    if args.level == "group":
        temp_base = op.join(project["analysis_dir"], exp_name, args.output,
                            args.regspace, "{contrast}")
        if args.regspace == "fsaverage":
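            # Convert the z threshold to FreeSurfer's -log10(p) units; the
            # cached correction outputs encode that value times 10 in their
            # filenames (e.g. z = 2.3 -> -log10(p) ~ 2 -> "th20").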
            sig_thresh = -np.log10(stats.norm.sf(z_thresh))
            sig_thresh = np.round(sig_thresh) * 10
            corr_sign = exp["surf_corr_sign"]
            sig_name = "cache.th%d.%s.sig.masked.mgh" % (sig_thresh, corr_sign)
            stat_temp = op.join(temp_base, "{hemi}/osgm", sig_name)
            mask_temp = op.join(temp_base, "{hemi}/mask.mgh")
            png_temp = op.join(temp_base, "{hemi}/osgm/zstat_threshold.png")
        else:
            stat_temp = op.join(temp_base, "{hemi}.zstat1_threshold.mgz")
            mask_temp = op.join(temp_base, "{hemi}.group_mask.mgz")
            png_temp = op.join(temp_base, "zstat1_threshold_surf.png")
            corr_sign = "pos"

        contrast_loop("fsaverage", contrasts, stat_temp, mask_temp, png_temp,
                      args, z_thresh, corr_sign)

    # Subject-level
    # =============

    elif args.level == "subject":
        temp_base = op.join(project["analysis_dir"], exp_name, "{subj}",
                            "ffx", args.regspace, "smoothed/{contrast}")
        mask_temp = op.join(temp_base, "{hemi}.mask.mgz")
        stat_temp = op.join(temp_base, "{hemi}.zstat1.mgz")
        png_temp = op.join(temp_base, "zstat1_surf.png")

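        # Subject-level maps are thresholded at |z| > 1.96 (two-tailed
        # p < .05, uncorrected), using the "abs" sign convention.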
        for subj in subjects:
            contrast_loop(subj, contrasts, stat_temp, mask_temp, png_temp,
                          args, 1.96, "abs")
Example #2
0
def main(arglist):

    # Parse the command line
    args = parse_args(arglist)

    # Load the lyman data
    subjects = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
    contrasts = exp["contrast_names"]
    z_thresh = exp["cluster_zthresh"]

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Group-level
    # ===========

    if args.level == "group":
        temp_base = op.join(project["analysis_dir"], exp_name, args.output,
                            args.regspace, "{contrast}")
        if args.regspace == "fsaverage":
            sig_thresh = -np.log10(stats.norm.sf(z_thresh))
            sig_thresh = np.round(sig_thresh) * 10
            corr_sign = exp["surf_corr_sign"]
            sig_name = "cache.th%d.%s.sig.masked.mgh" % (sig_thresh, corr_sign)
            stat_temp = op.join(temp_base, "{hemi}/osgm", sig_name)
            mask_temp = op.join(temp_base, "{hemi}/mask.mgh")
            png_temp = op.join(temp_base, "{hemi}/osgm/zstat_threshold.png")
        else:
            stat_temp = op.join(temp_base, "{hemi}.zstat1_threshold.mgz")
            mask_temp = op.join(temp_base, "{hemi}.group_mask.mgz")
            png_temp = op.join(temp_base, "zstat1_threshold_surf.png")
            corr_sign = "pos"

        contrast_loop("fsaverage", contrasts, stat_temp, mask_temp, png_temp,
                      args, z_thresh, corr_sign)

    # Subject-level
    # =============

    elif args.level == "subject":
        temp_base = op.join(project["analysis_dir"], exp_name, "{subj}", "ffx",
                            args.regspace, "smoothed/{contrast}")
        mask_temp = op.join(temp_base, "{hemi}.mask.mgz")
        stat_temp = op.join(temp_base, "{hemi}.zstat1.mgz")
        png_temp = op.join(temp_base, "zstat1_surf.png")

        for subj in subjects:
            contrast_loop(subj, contrasts, stat_temp, mask_temp, png_temp,
                          args, 1.96, "abs")
Example #3
0
def experiment_info(experiment):

    parts = experiment.split("-")
    try:
        exp_base, altmodel = parts
    except ValueError:
        exp_base, = parts
        altmodel = None

    info = lyman.gather_experiment_info(exp_base, altmodel)
    return info
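
# For example (hypothetical names), experiment_info("nback-rtmodel") would load
# the "rtmodel" alternate model of the "nback" experiment, while
# experiment_info("nback") loads the default model (altmodel=None).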
Example #4
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
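    # group_filepath is assumed to be defined elsewhere in the script; the CSV
    # it points to needs "subid" and "group" columns, which are used below to
    # build the group assignment vector.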
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [
        group_info[group_info.subid == x].reset_index().at[0, 'group']
        for x in subject_list
    ]
    group_vector = [1 if sub == "group1" else 2
                    for sub in groups]  # 1 for group1, 2 for group2

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
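    # (hypothetical example: contrast_name = "group1-group2" with
    #  contrast_vals = [1, -1] would test the group difference)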
    contrasts = [[
        contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals
    ]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
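    # The %s fields are filled in now; the braced fields ({subject_id},
    # {l1_contrast}) are SelectFiles template placeholders filled at runtime.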
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #5
0
def extract_subject(subj, problem, roi_name, mask_name=None, frames=None,
                    collapse=None, confounds=None, upsample=None,
                    smoothed=False, exp_name=None, event_names=None):
    """Build decoding dataset from predictable lyman outputs.

    This function will make use of the LYMAN_DIR environment variable
    to access information about where the relevant data live, so that
    must be set properly.

    This function caches its results and, on repeated calls,
    hashes the arguments and checks those against the hash value
    associated with the stored data. The hashing process considers
    the timestamp on the relevant data files, but not the data itself.

    Parameters
    ----------
    subj : string
        subject id
    problem : string
        problem name corresponding to design file name
    roi_name : string
        ROI name associated with data
    mask_name : string, optional
        name of ROI mask that can be found in data hierarchy,
        uses roi_name if absent
    frames : int or sequence of ints, optional
        extract frames relative to event onsets or at onsets if None
    collapse : int, slice, or (subj x frames | frames) array
        if int, return that element in the first dimension;
        if slice, take the mean over the slice (both relative to
        frames, not to the actual onsets);
        if array, take a weighted average of each frame (possibly
        with different weights by subject); otherwise return each frame
    confounds : string or list of strings
        column name(s) in schedule dataframe to be regressed out of the
        data matrix during extraction
    upsample : int
        upsample the raw timeseries by this factor using cubic spline
        interpolation
    smoothed : bool
        whether to use the spatially smoothed timeseries data
    exp_name : string, optional
        lyman experiment name where timecourse data can be found
        in analysis hierarchy (uses default if None)
    event_names : list of strings
        list of condition names to use, otherwise uses sorted unique
        values in the condition field of the event schedule

    Returns
    -------
    data : dictionary
        dictionary with X, y, and runs entries, along with metadata

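    Examples
    --------
    A hypothetical call, assuming a "memory" design file and a
    "hippocampus" ROI mask exist for subject "subj01"::

        ds = extract_subject("subj01", "memory", "hippocampus",
                             frames=[0, 1, 2, 3])
        X, y, runs = ds["X"], ds["y"], ds["runs"]
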
    """
    project = gather_project_info()
    exp = gather_experiment_info(exp_name)
    if exp_name is None:
        exp_name = project["default_exp"]

    if mask_name is None:
        mask_name = roi_name

    if smoothed:
        roi_name += "_smoothed"

    # Find the relevant disk location for the dataset file
    ds_file = op.join(project["analysis_dir"],
                      exp_name, subj, "mvpa",
                      problem, roi_name, "dataset.npz")

    # Make sure the target location exists
    try:
        os.makedirs(op.dirname(ds_file))
    except OSError:
        pass

    # Get paths to the relevant files
    mask_file = op.join(project["data_dir"], subj, "masks",
                        "%s.nii.gz" % mask_name)
    design_file = op.join(project["data_dir"], subj, "design",
                          "%s.csv" % problem)
    smoothing = "smoothed" if smoothed else "unsmoothed"
    ts_dir = op.join(project["analysis_dir"], exp_name, subj,
                     "reg", "epi", smoothing)
    n_runs = len(glob(op.join(ts_dir, "run_*")))
    ts_files = [op.join(ts_dir, "run_%d/timeseries_xfm.nii.gz" % r_i)
                for r_i in range(1, n_runs + 1)]

    # Get the hash value for this dataset
    ds_hash = hashlib.sha1()
    ds_hash.update(mask_name)
    ds_hash.update(str(op.getmtime(mask_file)))
    ds_hash.update(str(op.getmtime(design_file)))
    for ts_file in ts_files:
        ds_hash.update(str(op.getmtime(ts_file)))
    ds_hash.update(np.asarray(frames).data)
    ds_hash.update(str(confounds))
    ds_hash.update(str(upsample))
    ds_hash.update(str(event_names))
    ds_hash = ds_hash.hexdigest()

    # If the file exists and the hash matches, convert to a dict and return
    if op.exists(ds_file):
        with np.load(ds_file) as ds_obj:
            if ds_hash == str(ds_obj["hash"]):
                dataset = dict(ds_obj.items())
                for k, v in dataset.items():
                    if v.dtype.kind == "S":
                        dataset[k] = str(v)
                # Possibly perform temporal compression
                _temporal_compression(collapse, dataset)
                return dataset

    # Otherwise, initialize outputs
    X, y, runs, use = [], [], [], []

    # Load mask file
    mask_data = nib.load(mask_file).get_data().astype(bool)

    # Load the event information
    sched = pd.read_csv(design_file)

    # Get a list of event names
    if event_names is None:
        event_names = sorted(sched.condition.unique())
    else:
        sched = sched[sched.condition.isin(event_names)]

    # Make each run's dataset
    for r_i, sched_r in sched.groupby("run"):
        ts_data = nib.load(ts_files[int(r_i - 1)]).get_data()

        # Use the basic extractor function
        X_i, y_i, use_i = extract_dataset(sched_r, ts_data,
                                          mask_data, exp["TR"],
                                          frames, upsample, event_names)

        # Just add to list
        X.append(X_i)
        y.append(y_i)
        use.append(use_i)

    # Find the voxels that are good in every run and make a final mask
    good_features = np.all(use, axis=0)
    mask_data[mask_data] = good_features

    # Stick the list items together for final dataset
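    # When several frames were extracted, each per-run X_i is a 3D array
    # (frames x samples x voxels), so runs stack along the sample axis;
    # otherwise each X_i is 2D and runs stack along the first axis.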
    if frames is not None and len(frames) > 1:
        X = np.concatenate(X, axis=1)
    else:
        X = np.concatenate(X, axis=0)
    y = np.concatenate(y)
    runs = sched.run

    # Apply the feature mask
    X = np.atleast_3d(X)[:, :, good_features].squeeze()

    # Regress the confound vector out from the data matrix
    if confounds is not None:
        X = np.atleast_3d(X)
        confounds = np.asarray(sched[confounds])
        confounds = stats.zscore(confounds.reshape(X.shape[1], -1))
        denom = confounds / np.dot(confounds.T, confounds)
        for X_i in X:
            X_i -= np.dot(X_i.T, confounds).T * denom
        X = X.squeeze()

    # Save to disk and return
    dataset = dict(X=X, y=y, runs=runs, roi_name=roi_name, subj=subj,
                   event_names=event_names, problem=problem, frames=frames,
                   confounds=confounds, upsample=upsample, smoothed=smoothed,
                   hash=ds_hash, mask=mask_data, mask_name=mask_name)
    np.savez(ds_file, **dataset)

    # Possibly perform temporal compression
    _temporal_compression(collapse, dataset)

    return dataset
Example #6
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
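    # One-sample group mean design: a single column of ones and a t contrast
    # in the [name, stat, [regressor names], [weights]] form nipype/FSL expect.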
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/reg/epi/",
                                  ffxsmooth, "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #7
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
                                                exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain_template"] = exp["whole_brain_template"]

    preproc_source = Node(SelectFiles(preproc_templates,
                                      base_directory=project["data_dir"]),
                          "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source,
                                        preproc_source, preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source,
                                          preproc_sink, preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
        )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source,
                                      model_source, model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source,
                                        model_sink, model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(flow_name,
                                                        space,
                                                        regtype,
                                                        warp_method,
                                                        args.residual)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
                         )

    if regtype == "model":
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(dict(
            copes=op.join(reg_base, "cope*.nii.gz"),
            varcopes=op.join(reg_base, "varcope*.nii.gz"),
            sumsquares=op.join(reg_base, "ss*.nii.gz"),
                                  ))
    else:
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = reg_templates.keys()

    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)

    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Define the registration data source node
    reg_source = Node(SelectFiles(reg_templates,
                                  force_lists=reg_lists,
                                  base_directory=analysis_dir),
                      "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source,
                                    reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source,
                                    reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"])

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
                         )
    ffx_lists = ffx_templates.keys()

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        bg = "{subject_id}/preproc/run_1/mean_func.nii.gz"
        reg = "{subject_id}/preproc/run_1/func2anat_tkreg.dat"
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(SelectFiles(ffx_templates,
                                  force_lists=ffx_lists,
                                  base_directory=analysis_dir),
                      "ffx_source")

    # Fixed effects input node
    ffx_inwrap = tools.InputWrapper(ffx, subj_source,
                                    ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source,
                                      ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([
        ("_smoothing_", ""), ("flamestats", "")
                                          ])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #8
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base,
                             "{subject_id}/reg/epi/", ffxsmooth,
                             "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #9
0
import matplotlib as mpl
mpl.use("Agg")

import nipype
from nipype import Node, SelectFiles, DataSink, IdentityInterface

import lyman
import lyman.workflows as wf
from lyman import tools

project = lyman.gather_project_info()

# Set roots of output storage
data_dir = project["data_dir"]
exp_name = 'ser_8mm'
exp = lyman.gather_experiment_info('ser_8mm', None)
subj_source = tools.make_subject_source(['fd_104'])
analysis_dir = op.join(project["analysis_dir"], exp_name)
working_dir = op.join(project["working_dir"], exp_name)
nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

# Is this a model or timeseries registration?
regtype = "model"
space = 'mni'
smoothing = 'unsmoothed'

# Are we registering across experiments?
cross_exp = False

subject_id = 'fd_104'
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
    #     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
    #     cov_col = 'mean_acc'

    # load in covariate for cort analysis
    cov = pd.read_csv(
        '/Volumes/group/awagner/sgagnon/AP/data/cortisol/cort_percentchange_testbaseline_controlassay.csv'
    )
    cov_col = 'cort_controlassay'

    cov = cov.loc[cov.subid.isin(
        subject_list)]  # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] -
                    cov[cov_col].mean()) / cov[cov_col].std()  # zscore
    print(cov.describe())

    cov_reg = [
        cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list
    ]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[int(1) for sub in subject_list],
                      z_covariate=cov_reg)
    print(regressors)

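    # The [0, 1] weights test the effect of the (z-scored) covariate while the
    # group mean is included in the model but not tested.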
    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print('run mni!')

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #11
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
        exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain"] = exp["whole_brain_template"]
    if exp["fieldmap_template"]:
        preproc_templates["fieldmap"] = exp["fieldmap_template"]

    preproc_source = Node(
        SelectFiles(preproc_templates, base_directory=project["data_dir"]),
        "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source, preproc_source,
                                        preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source, preproc_sink,
                                          preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        nuisance_file=op.join(model_base, "nuisance_variables.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
    )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                 "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source, model_source,
                                      model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source, model_sink,
                                        model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Are we registering across experiments?
    cross_exp = args.regexp is not None

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(
        flow_name, space, regtype, warp_method, args.residual, cross_exp)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
    )

    if regtype == "model":
        # First-level model summary statistic images
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(
            dict(
                copes=op.join(reg_base, "cope*.nii.gz"),
                varcopes=op.join(reg_base, "varcope*.nii.gz"),
                sumsquares=op.join(reg_base, "ss*.nii.gz"),
            ))
    else:
        # Timeseries images
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = list(reg_templates.keys())

    # Native anatomy to group anatomy affine matrix and warpfield
    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)
    else:
        if args.regexp is None:
            tkreg_base = analysis_dir
        else:
            tkreg_base = op.join(project["analysis_dir"], args.regexp)
        reg_templates["tkreg_rigid"] = op.join(tkreg_base, "{subject_id}",
                                               "preproc", "run_1",
                                               "func2anat_tkreg.dat")

    # Rigid (6dof) functional-to-anatomical matrices
    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Rigid matrix from anatomy to target experiment space
    if args.regexp is not None:
        targ_analysis_dir = op.join(project["analysis_dir"], args.regexp)
        reg_templates["first_rigid"] = op.join(targ_analysis_dir,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_flirt.mat")

    # Define the registration data source node
    reg_source = Node(
        SelectFiles(reg_templates,
                    force_lists=reg_lists,
                    base_directory=analysis_dir), "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source, reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source, reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"],
                                                        exp_info=exp)

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
    )
    ffx_lists = list(ffx_templates.keys())

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        reg_dir = "{subject_id}/reg/epi/{smoothing}/run_1"
        bg = op.join(reg_dir, "mean_func_xfm.nii.gz")
        reg = op.join(reg_dir, "func2anat_tkreg.dat")
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(
        SelectFiles(ffx_templates,
                    force_lists=ffx_lists,
                    base_directory=analysis_dir), "ffx_source")

    # Fixed effects inputnode
    ffx_inwrap = tools.InputWrapper(ffx, subj_source, ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source, ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([("_smoothing_", ""),
                                          ("flamestats", "")])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(working_dir)
Example #12
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
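    # group_filepath is assumed to be defined earlier in this script; it should
    # point to a CSV with (at least) "subid" and "group" columns, as used below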
    group_info = pd.read_csv(group_filepath)
    
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (accidental deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [group_info[group_info.subid == x].reset_index().at[0, "group"]
              for x in subject_list]
    # 1 for group1, 2 for group2
    group_vector = [1 if g == "group1" else 2 for g in groups]
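    # group_vector is presumably used by the group-aware mixed effects workflow
    # below so that each subject's variance is estimated within its own group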

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # Define the group contrast to test here. contrast_name and contrast_vals
    # are not defined in this snippet and must be set before this point.
    contrasts = [[contrast_name, "T",
                  ["group1_mean", "group2_mean"], contrast_vals]]
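    # For illustration only (hypothetical values), a simple group-difference
    # contrast could be specified as:
    #     contrast_name = "group1-group2"
    #     contrast_vals = [1, -1]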

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         iterfield="subject_id",
                         name="mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #13
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
#     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
#     cov_col = 'mean_acc'

    # load in covariate (subids and value for each subject (in cov_col))
    cov = pd.read_csv(cov_filepath)
    cov = cov.loc[cov.subid.isin(subject_list)] # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] - cov[cov_col].mean()) / cov[cov_col].std() # zscore
    print(cov.describe())

    cov_reg = [cov[cov.subid == x].reset_index().at[0, cov_col]
               for x in subject_list]
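    # cov_reg is built by iterating subject_list, so the regressor rows stay
    # aligned with the per-subject files gathered below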

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list),
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]
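    # The [0, 1] weights over [group_mean, z_covariate] test the covariate
    # effect, with the group mean modeled as a regressor of no interest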


    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print("Only the mni regspace is supported here; "
              "rerun with space set to 'mni'.")
        return

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         iterfield="subject_id",
                         name="mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])