Example #1
def main(arglist):

    # Process cmdline args
    parser = tools.parser
    parser.description = help
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    args = tools.parser.parse_args(arglist)
    plugin, plugin_args = lyman.determine_engine(args)

    # Load up the lyman info
    subject_list = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()

    # Create the workflow object
    method = project["normalization"]
    wf_func = getattr(anatwarp, "create_{}_workflow".format(method))
    normalize = wf_func(project["data_dir"], subject_list)
    normalize.base_dir = os.path.join(project["working_dir"], "warp")
    
    # Put crashdumps somewhere out of the way
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Execute the workflow
    lyman.run_workflow(normalize, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(normalize.base_dir)
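Example #1 dispatches to a workflow factory purely by name: `getattr(anatwarp, "create_{}_workflow".format(method))` turns the project's `normalization` setting into a function lookup. A minimal, self-contained sketch of that idiom (the namespace and function names here are invented, not lyman's):

# Sketch of name-based dispatch; the namespace and functions are hypothetical.
class fakewarp:
    @staticmethod
    def create_fsl_workflow(data_dir, subject_list):
        return "fsl workflow for %d subjects" % len(subject_list)

    @staticmethod
    def create_ants_workflow(data_dir, subject_list):
        return "ants workflow for %d subjects" % len(subject_list)

method = "ants"  # would come from project["normalization"]
wf_func = getattr(fakewarp, "create_{}_workflow".format(method))
print(wf_func("/data", ["s01", "s02"]))  # ants workflow for 2 subjects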
Example #3
def main(arglist):

    # Parse the command line
    args = parse_args(arglist)

    # Load the lyman data
    subjects = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
    contrasts = exp["contrast_names"]
    z_thresh = exp["cluster_zthresh"]

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Group-level
    # ===========

    if args.level == "group":
        temp_base = op.join(project["analysis_dir"], exp_name, args.output,
                            args.regspace, "{contrast}")
        if args.regspace == "fsaverage":
            sig_thresh = -np.log10(stats.norm.sf(z_thresh))
            sig_thresh = np.round(sig_thresh) * 10
            corr_sign = exp["surf_corr_sign"]
            sig_name = "cache.th%d.%s.sig.masked.mgh" % (sig_thresh, corr_sign)
            stat_temp = op.join(temp_base, "{hemi}/osgm", sig_name)
            mask_temp = op.join(temp_base, "{hemi}/mask.mgh")
            png_temp = op.join(temp_base, "{hemi}/osgm/zstat_threshold.png")
        else:
            stat_temp = op.join(temp_base, "{hemi}.zstat1_threshold.mgz")
            mask_temp = op.join(temp_base, "{hemi}.group_mask.mgz")
            png_temp = op.join(temp_base, "zstat1_threshold_surf.png")
            corr_sign = "pos"

        contrast_loop("fsaverage", contrasts, stat_temp, mask_temp, png_temp,
                      args, z_thresh, corr_sign)

    # Subject-level
    # =============

    elif args.level == "subject":
        temp_base = op.join(project["analysis_dir"], exp_name, "{subj}", "ffx",
                            args.regspace, "smoothed/{contrast}")
        mask_temp = op.join(temp_base, "{hemi}.mask.mgz")
        stat_temp = op.join(temp_base, "{hemi}.zstat1.mgz")
        png_temp = op.join(temp_base, "zstat1_surf.png")

        for subj in subjects:
            contrast_loop(subj, contrasts, stat_temp, mask_temp, png_temp,
                          args, 1.96, "abs")
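The templates above deliberately leave `{contrast}` and `{hemi}` fields unexpanded; `contrast_loop` (defined elsewhere in the script) presumably fills them per iteration with `str.format`. A toy illustration of that expansion pattern (paths and contrast names invented):

# Illustrative only: how the {contrast}/{hemi} path templates expand.
stat_temp = "/analysis/exp/group/fsaverage/{contrast}/{hemi}.zstat1_threshold.mgz"
for contrast in ["task-vs-rest", "faces-vs-houses"]:
    for hemi in ["lh", "rh"]:
        print(stat_temp.format(contrast=contrast, hemi=hemi))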
Example #4
def _results_fname(dataset, model, split_pred, trialwise, logits, shuffle,
                   exp_name):
    """Get a path to where files storing decoding results will live."""
    project = gather_project_info()
    if exp_name is None:
        exp_name = project["default_exp"]

    roi_name = dataset["roi_name"]
    collapse = dataset["collapse"]
    problem = dataset["problem"]
    subj = dataset["subj"]

    res_path = op.join(project["analysis_dir"],
                       exp_name, subj, "mvpa",
                       problem, roi_name)

    try:
        model_str = "_".join([i[0] for i in model.steps])
    except AttributeError:
        model_str = model.__class__.__name__

    collapse_str, split_str, trial_str, logit_str, shuffle_str = ("", "", "",
                                                                  "", "")
    if collapse is not None:
        if isinstance(collapse, slice):
            collapse_str = "%s-%s" % (collapse.start, collapse.stop)
        elif isinstance(collapse, int):
            collapse_str = str(collapse)
        else:
            collapse_str = "weighted"
    if split_pred is not None:
        split_str = "split"
        if hasattr(split_pred, "name"):
            if split_pred.name is not None:
                split_str = split_pred.name
    if trialwise:
        trial_str = "trialwise"
    if logits:
        logit_str = "logits"
    if shuffle:
        shuffle_str = "shuffle"

    res_fname = "_".join([model_str, collapse_str, split_str,
                          trial_str, logit_str, shuffle_str])
    res_fname = re.sub("_{2,}", "_", res_fname)
    res_fname = res_fname.strip("_") + ".npz"
    res_fname = op.join(res_path, res_fname)

    return res_fname
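The last few lines are a small naming idiom worth noting: join every optional component with underscores, then collapse the repeated underscores left behind by empty components. In isolation:

import re

# Some components are empty strings when the corresponding option is off
parts = ["logreg", "", "split", "", "", "shuffle"]
fname = "_".join(parts)               # "logreg__split___shuffle"
fname = re.sub("_{2,}", "_", fname)   # "logreg_split_shuffle"
fname = fname.strip("_") + ".npz"     # "logreg_split_shuffle.npz"
print(fname)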
Example #5
def main(arglist):

    args = parse_args(arglist)

    project = lyman.gather_project_info()
    data_dir = Path(project["data_dir"])
    anal_dir = Path(project["analysis_dir"])

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    # Start building the command line
    cmdline = ["freeview"]

    # Background hires anatomy
    anat_vol = data_dir / args.subject / "mri/brain.mgz"
    cmdline.extend(["-v", str(anat_vol)])

    # Load the mean functional volume with its registration matrix
    preproc_dir = (anal_dir / exp_name / args.subject / "preproc" /
                   ("run_" + args.run))
    epi_vol = preproc_dir / "mean_func.nii.gz"
    reg_mat = preproc_dir / "func2anat_tkreg.dat"
    epi_arg = (str(epi_vol) + ":reg=" + str(reg_mat) + ":sample=cubic")
    cmdline.append(epi_arg)

    # Load the white and pial surfaces
    cmdline.append("-f")
    meshes = ["white", "pial"]
    colors = ["#fac205", "#c44240"]
    for mesh, color in zip(meshes, colors):
        for hemi in ["lh", "rh"]:
            surf = data_dir / args.subject / "surf" / (hemi + "." + mesh)
            surf_arg = (str(surf) + ":edgecolor=" + color + ":hide_in_3d=true")
            cmdline.append(surf_arg)

    # Show the coronal view by default
    cmdline.extend(["-viewport", "coronal"])

    # Freeview spews a lot of garbage to the terminal; typically silence that
    if not args.debug:
        cmdline.append("> /dev/null 2>&1")

    # Call out to freeview
    os.system(" ".join(cmdline))
Example #6
def main(arglist):

    args = parse_args(arglist)

    project = lyman.gather_project_info()
    data_dir = Path(project["data_dir"])
    anal_dir = Path(project["analysis_dir"])

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Start building the command line
    cmdline = ["freeview"]

    # Background hires anatomy
    anat_vol = data_dir / args.subject / "mri/brain.mgz"
    cmdline.extend(["-v", str(anat_vol)])

    # Statistical overlay
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"
    stat_vol = (anal_dir / exp_name / args.subject / "ffx" / "epi" /
                smoothing / args.contrast / "zstat1.nii.gz")
    reg_file = (anal_dir / exp_name / args.subject / "reg" / "epi" /
                smoothing / "run_1" / "func2anat_tkreg.dat")
    stat_arg = (str(stat_vol) + ":reg=" + str(reg_file) + ":colormap=heat" +
                ":heatscale=1.64,2.3,4.2" + ":sample=trilinear")
    cmdline.extend(["-v", stat_arg])

    # Mesh overlay
    if args.mesh is not None:
        for hemi in ["lh", "rh"]:
            surf = data_dir / args.subject / "surf" / (hemi + "." + args.mesh)
            surf_arg = str(surf) + ":edgecolor=limegreen"
            cmdline.extend(["-f", surf_arg])

    # Call out to freeview
    os.system(" ".join(cmdline))
Example #7
def main(arglist):

    # Find the subjects for this execution
    args = parse_args(arglist)
    subjects = lyman.determine_subjects(args.subjects)

    # Find the project details
    proj = lyman.gather_project_info()
    data_dir = proj["data_dir"]

    # Make images for each subject
    for subj in subjects:

        # Make sure the output directory exists
        out_dir = op.join(data_dir, subj, "snapshots")
        if not op.exists(out_dir):
            os.mkdir(out_dir)

        # Do each chunk of reporting
        surface_images(out_dir, subj)
        curvature_normalization(data_dir, subj)
        volume_images(data_dir, subj)
Example #8
def main(arglist):

    # Find the subjects for this execution
    args = parse_args(arglist)
    subjects = lyman.determine_subjects(args.subjects)

    # Find the project details
    proj = lyman.gather_project_info()
    data_dir = proj["data_dir"]

    # Make images for each subject
    for subj in subjects:

        # Make sure the output directory exists
        out_dir = op.join(data_dir, subj, "snapshots")
        if not op.exists(out_dir):
            os.mkdir(out_dir)

        # Do each chunk of reporting
        inflated_surfaces(out_dir, subj, args.close)
        curvature_normalization(data_dir, subj, args.close)
        volume_images(data_dir, subj)
Example #9
def main(arglist):

    # Process cmdline args
    args = tools.parser.parse_args(arglist)
    plugin, plugin_args = lyman.determine_engine(args)

    # Load up the lyman info
    subject_list = lyman.determine_subjects(args.subjects)
    project = lyman.gather_project_info()

    # Create the workflow object
    normalize = anatwarp.create_anatwarp_workflow(
                    project["data_dir"], subject_list)
    normalize.base_dir = project["working_dir"]
    normalize.config["execution"]["crashdump_dir"] = "/tmp"

    # Execute the workflow
    lyman.run_workflow(normalize, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(normalize.base_dir)
Example #10
def calculate_evoked(data,
                     n_bins,
                     problem=None,
                     events=None,
                     tr=2,
                     calc_method="FIR",
                     offset=0,
                     upsample=1,
                     percent_change=True,
                     correct_baseline=True,
                     event_names=None):
    """Calcuate an evoked response for a list of datapoints.

    Parameters
    ----------
    data : sequence of n_run x n_tp arrays
        timeseries data
    n_bins : int
        number of bins for the peristimulus trace
    problem : string
        problem name for event file in data hierarchy
        overrides `events` if both are passed
    events : dataframe or list of dataframes
        one dataframe describing event information for each subj.
        must contain `onset`, `run`, and `condition` columns
        caution: `run` should be 1-based
    tr : int
        original time resolution of the data
    upsample : int
        factor to upsample the data with using cubic splines
    calc_method : string
        name of method on nitime EventRelatedAnalyzer object to
        calculate the evoked response.
    offset : float
        value to adjust onset times by
    percent_change : boolean
        if True, convert signal to percent change by run
    correct_baseline : boolean
        if True, adjust evoked trace to be 0 in first bin
    event_names : list of strings
        names of conditions, otherwise uses sorted unique
        values for the condition field in the event dataframe

    Returns
    -------

    evoked : squeezed n_obs x n_class x n_bins array
        evoked response, by observation and event type

    """
    project = gather_project_info()
    design_template = op.join(project["data_dir"], "%s",
                              "design/%s.csv" % problem)

    evoked = []
    for i, data_i in enumerate(data):

        # Can get event information in one of two ways
        if problem is not None:
            subj = data_i["subj"]
            events_i = pd.read_csv(design_template % subj)
        else:
            events_i = events[i]

        # Map from event names to integer index values
        if event_names is None:
            event_names = sorted(events_i.condition.unique())
        event_map = pd.Series(range(1,
                                    len(event_names) + 1),
                              index=event_names)

        # Create the timeseries of event occurrences
        calc_tr = float(tr) / upsample

        event_list = []
        data_list = []
        for run, run_data in enumerate(data_i["data"], 1):

            # Possibly upsample the data
            if upsample != 1:
                time_points = len(run_data)
                x = np.linspace(0, time_points - 1, time_points)
                xx = np.linspace(0, time_points,
                                 time_points * upsample + 1)[:-upsample]
                interpolator = interp1d(x, run_data, "cubic", axis=0)
                run_data = interpolator(xx)

            run_events = events_i[events_i.run == run]
            run_events.onset += offset

            event_id = np.zeros(len(run_data), int)
            event_index = np.array(run_events.onset / calc_tr).astype(int)
            event_id[event_index] = run_events.condition.map(event_map)
            event_list.append(event_id)

            if percent_change:
                run_data = nit.utils.percent_change(run_data, ax=0)
            data_list.append(run_data)

        # Set up the Nitime objects
        event_info = np.concatenate(event_list)
        data = np.concatenate(data_list, axis=0)

        # Do the calculations
        calc_bins = n_bins * upsample
        if data.ndim == 1:
            evoked_data = _evoked_1d(data, event_info, calc_bins, calc_tr,
                                     calc_method, correct_baseline)
        elif data.ndim == 2:
            evoked_data = _evoked_2d(data, event_info, n_bins, calc_tr,
                                     calc_method, correct_baseline)
        evoked.append(evoked_data)

    return np.array(evoked).squeeze()
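The inner loop encodes each run's events as an integer timeseries: zero everywhere except at onset TRs, where the value indexes the condition. A self-contained toy version of that encoding:

import numpy as np
import pandas as pd

tr = 2.0
run_events = pd.DataFrame({"onset": [4.0, 10.0, 16.0],
                           "condition": ["face", "house", "face"]})
event_map = pd.Series([1, 2], index=["face", "house"])

event_id = np.zeros(12, int)                         # 12 TRs in this toy run
idx = np.asarray(run_events.onset / tr).astype(int)  # onset time -> TR index
event_id[idx] = run_events.condition.map(event_map)
print(event_id)  # [0 0 1 0 0 2 0 0 1 0 0 0]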
Example #11
def extract_subject(subj, problem, roi_name, mask_name=None, frames=None,
                    collapse=None, confounds=None, upsample=None,
                    smoothed=False, exp_name=None, event_names=None):
    """Build decoding dataset from predictable lyman outputs.

    This function will make use of the LYMAN_DIR environment variable
    to access information about where the relevant data live, so that
    must be set properly.

    This function caches its results and, on repeated calls,
    hashes the arguments and checks those against the hash value
    associated with the stored data. The hashing process considers
    the timestamp on the relevant data files, but not the data itself.

    Parameters
    ----------
    subj : string
        subject id
    problem : string
        problem name corresponding to design file name
    roi_name : string
        ROI name associated with data
    mask_name : string, optional
        name of ROI mask that can be found in data hierarchy,
        uses roi_name if absent
    frames : int or sequence of ints, optional
        extract frames relative to event onsets or at onsets if None
    collapse : int, slice, or (subj x frames | frames) array
        if int, returns that element in first dimension
        if slice, take mean over the slice (both relative to
        frames, not to the actual onsets)
        if array, take weighted average of each frame (possibly
        with different weights by subject) otherwise return each frame
    confounds : string or list of strings
        column name(s) in schedule dataframe to be regressed out of the
        data matrix during extraction
    upsample : int
        upsample the raw timeseries by this factor using cubic spline
        interpolation
    smoothed : bool
        whether to use the spatially smoothed timeseries data
    exp_name : string, optional
        lyman experiment name where timecourse data can be found
        in analysis hierarchy (uses default if None)
    event_names : list of strings
        list of condition names to use, otherwise uses sorted unique
        values in the condition field of the event schedule

    Returns
    -------
    data : dictionary
        dictionary with X, y, and runs entries, along with metadata

    """
    project = gather_project_info()
    exp = gather_experiment_info(exp_name)
    if exp_name is None:
        exp_name = project["default_exp"]

    if mask_name is None:
        mask_name = roi_name

    if smoothed:
        roi_name += "_smoothed"

    # Find the relevant disk location for the dataset file
    ds_file = op.join(project["analysis_dir"],
                      exp_name, subj, "mvpa",
                      problem, roi_name, "dataset.npz")

    # Make sure the target location exists
    try:
        os.makedirs(op.dirname(ds_file))
    except OSError:
        pass

    # Get paths to the relevant files
    mask_file = op.join(project["data_dir"], subj, "masks",
                        "%s.nii.gz" % mask_name)
    design_file = op.join(project["data_dir"], subj, "design",
                          "%s.csv" % problem)
    smoothing = "smoothed" if smoothed else "unsmoothed"
    ts_dir = op.join(project["analysis_dir"], exp_name, subj,
                     "reg", "epi", smoothing)
    n_runs = len(glob(op.join(ts_dir, "run_*")))
    ts_files = [op.join(ts_dir, "run_%d/timeseries_xfm.nii.gz" % r_i)
                for r_i in range(1, n_runs + 1)]

    # Get the hash value for this dataset
    ds_hash = hashlib.sha1()
    ds_hash.update(mask_name)
    ds_hash.update(str(op.getmtime(mask_file)))
    ds_hash.update(str(op.getmtime(design_file)))
    for ts_file in ts_files:
        ds_hash.update(str(op.getmtime(ts_file)))
    ds_hash.update(np.asarray(frames).data)
    ds_hash.update(str(confounds))
    ds_hash.update(str(upsample))
    ds_hash.update(str(event_names))
    ds_hash = ds_hash.hexdigest()

    # If the file exists and the hash matches, convert to a dict and return
    if op.exists(ds_file):
        with np.load(ds_file) as ds_obj:
            if ds_hash == str(ds_obj["hash"]):
                dataset = dict(ds_obj.items())
                for k, v in dataset.items():
                    if v.dtype.kind == "S":
                        dataset[k] = str(v)
                # Possibly perform temporal compression
                _temporal_compression(collapse, dataset)
                return dataset

    # Otherwise, initialize outputs
    X, y, runs, use = [], [], [], []

    # Load mask file
    mask_data = nib.load(mask_file).get_data().astype(bool)

    # Load the event information
    sched = pd.read_csv(design_file)

    # Get a list of event names
    if event_names is None:
        event_names = sorted(sched.condition.unique())
    else:
        sched = sched[sched.condition.isin(event_names)]

    # Make each runs' dataset
    for r_i, sched_r in sched.groupby("run"):
        ts_data = nib.load(ts_files[int(r_i - 1)]).get_data()

        # Use the basic extractor function
        X_i, y_i, use_i = extract_dataset(sched_r, ts_data,
                                          mask_data, exp["TR"],
                                          frames, upsample, event_names)

        # Just add to list
        X.append(X_i)
        y.append(y_i)
        use.append(use_i)

    # Find the voxels that are good in every run and make a final mask
    good_features = np.all(use, axis=0)
    mask_data[mask_data] = good_features

    # Stick the list items together for final dataset
    if frames is not None and len(frames) > 1:
        X = np.concatenate(X, axis=1)
    else:
        X = np.concatenate(X, axis=0)
    y = np.concatenate(y)
    runs = sched.run

    # Apply the feature mask
    X = np.atleast_3d(X)[:, :, good_features].squeeze()

    # Regress the confound vector out from the data matrix
    if confounds is not None:
        X = np.atleast_3d(X)
        confounds = np.asarray(sched[confounds])
        confounds = stats.zscore(confounds.reshape(X.shape[1], -1))
        denom = confounds / np.dot(confounds.T, confounds)
        for X_i in X:
            X_i -= np.dot(X_i.T, confounds).T * denom
        X = X.squeeze()

    # Save to disk and return
    dataset = dict(X=X, y=y, runs=runs, roi_name=roi_name, subj=subj,
                   event_names=event_names, problem=problem, frames=frames,
                   confounds=confounds, upsample=upsample, smoothed=smoothed,
                   hash=ds_hash, mask=mask_data, mask_name=mask_name)
    np.savez(ds_file, **dataset)

    # Possibly perform temporal compression
    _temporal_compression(collapse, dataset)

    return dataset
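Note the caching scheme here: the hash covers file modification times and the extraction parameters, not file contents, so staleness is detected cheaply on repeated calls. The same idiom reduced to a standalone helper (names are illustrative):

import hashlib
import os.path as op

def cache_key(files, **params):
    """Hash input-file mtimes plus keyword parameters."""
    key = hashlib.sha1()
    for fname in files:
        key.update(str(op.getmtime(fname)).encode())
    for name in sorted(params):
        key.update(("%s=%r" % (name, params[name])).encode())
    return key.hexdigest()

# e.g. cache_key(["mask.nii.gz", "design.csv"], frames=(0, 1), upsample=2)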
Example #12
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/reg/epi/",
                                  ffxsmooth, "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #13
    def __init__(self,
                 subject_list,
                 experiment,
                 roi_name,
                 orig_type,
                 force_serial=False,
                 debug=False):

        # Set up basic info
        self.subject_list = lyman.determine_subjects(subject_list)
        project = lyman.gather_project_info()
        self.experiment = experiment
        self.roi_name = roi_name
        self.orig_type = orig_type
        self.debug = debug
        if debug:
            print "Setting up for %d subjects" % len(subject_list)
            print "Experiment name:", experiment
            print "ROI name:", roi_name

        # Set up directories
        if project["default_exp"] is not None and experiment is None:
            experiment = project["default_exp"]
        self.experiment = experiment
        self.data_dir = project["data_dir"]
        self.anal_dir = project["analysis_dir"]

        # Set up temporary output
        self.temp_dir = mkdtemp()

        # Set the SUBJECTS_DIR variable for Freesurfer
        os.environ["SUBJECTS_DIR"] = self.data_dir

        # Set up parallel execution
        self.parallel = False
        if force_serial:
            self.map = map
        else:
            try:
                rc = Client()
                self.dv = rc[:]
                self.map = self.dv.map_async
                # Push SUBJECTS_DIR to engines
                self.dv.execute("import os")
                self.dv["data_dir"] = self.data_dir
                self.dv.execute("os.environ['SUBJECTS_DIR'] = data_dir")
                self.parallel = True

            except (TimeoutError, IOError):
                self.map = map
        if debug:
            print "Set to run in %s" % ("parallel"
                                        if self.parallel else "serial")

        # Set up some persistent templates
        self.epi_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/mean_func.nii.gz")
        self.fov_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/functional_mask.nii.gz")
        self.reg_template = op.join(self.anal_dir, self.experiment, "%(subj)s",
                                    "preproc/run_1/func2anat_tkreg.dat")
        self.out_template = op.join(self.data_dir, "%(subj)s",
                                    "masks/%s.nii.gz" % self.roi_name)
        if debug:
            print "EPI template: %s" % self.epi_template
            print "Reg template: %s" % self.reg_template
            print "Output template: %s" % self.out_template

        # Ensure the output directory will exist
        for subj in self.subject_list:
            mask_dir = op.join(self.data_dir, subj, "masks")
            if not op.exists(mask_dir):
                os.mkdir(mask_dir)
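The parallel setup in this constructor is a graceful-degradation pattern: try to attach to an IPython cluster and fall back to the builtin `map` when no engines answer. Compressed to its core (the import path varies by IPython version; treat this as a sketch):

def get_mapper(force_serial=False):
    """Return a cluster map if engines are available, else builtin map."""
    if force_serial:
        return map
    try:
        from ipyparallel import Client  # older IPython: IPython.parallel
        return Client()[:].map_async
    except Exception:  # no cluster running, import error, timeout, ...
        return map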
Example #14
def extract_subject(subj, mask_name, summary_func=np.mean,
                    exp_name=None):
    """Extract timeseries from within a mask, summarizing flexibly.

    Parameters
    ----------
    subj : string
        subject name
    mask_name : string
        name of mask in data hierarchy
    summary_func : callable or None
        callable to reduce data over voxel dimensions. can take an
        ``axis`` argument to operate over each frame, if this
        argument does not exist the function will be called on the
        n_tr x n_voxel array. if None, simply returns all voxels.
    exp_name : string
        experiment name, if not using the default experiment

    Returns
    -------
    data : dict with ndarray
        data array is n_runs x n_timepoint x n_dimension,
        data are not otherwise altered

    """
    project = gather_project_info()
    if exp_name is None:
        exp_name = project["default_exp"]

    # Get a path to the directory where the cache file will live
    cache_dir = op.join(project["analysis_dir"],
                        exp_name, subj, "evoked")

    try:
        os.makedirs(cache_dir)
    except OSError:
        pass

    if summary_func is None:
        func_name = ""
    else:
        func_name = summary_func.__name__
    cache_fname = mask_name + "_" + func_name
    cache_fname = cache_fname.strip("_") + ".npz"
    cache_file = op.join(cache_dir, cache_fname)

    # Get paths to the relevant files
    mask_file = op.join(project["data_dir"], subj, "masks",
                        "%s.nii.gz" % mask_name)
    ts_dir = op.join(project["analysis_dir"], exp_name, subj,
                     "reg", "epi", "unsmoothed")
    n_runs = len(glob(op.join(ts_dir, "run_*")))
    ts_files = [op.join(ts_dir, "run_%d" % (r_i + 1),
                        "timeseries_xfm.nii.gz") for r_i in range(n_runs)]

    # Get the hash value for this extraction
    cache_hash = hashlib.sha1()
    cache_hash.update(mask_name)
    cache_hash.update(str(op.getmtime(mask_file)))
    for ts_file in ts_files:
        cache_hash.update(str(op.getmtime(ts_file)))
    cache_hash = cache_hash.hexdigest()

    # If the file exists and the hash matches, return the data
    if op.exists(cache_file):
        with np.load(cache_file) as cache_obj:
            if cache_hash == str(cache_obj["hash"]):
                return dict(cache_obj.items())

    # Otherwise, do the extraction
    data = []
    mask = nib.load(mask_file).get_data().astype(bool)
    for run, ts_file in enumerate(ts_files):
        ts_data = nib.load(ts_file).get_data()
        roi_data = ts_data[mask].T

        if summary_func is None:
            data.append(roi_data)
            continue

        # Try to use the axis argument to summarize over voxels
        try:
            roi_data = summary_func(roi_data, axis=1)
        # Catch a TypeError and just call the function
        # This lets us do e.g. a PCA
        except TypeError:
            roi_data = summary_func(roi_data)

        data.append(roi_data)

    data = list(map(np.squeeze, data))

    # Save the results and return them
    data_dict = dict(data=data, subj=subj, hash=cache_hash)
    np.savez(cache_file, **data_dict)

    return data_dict
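The try/except around `summary_func` is a duck-typing trick: reducers like `np.mean` accept an `axis` keyword, while something like a fitted PCA transform does not, so the `TypeError` branch calls it on the full n_tr x n_voxel array. For instance:

import numpy as np

def summarize(roi_data, summary_func):
    try:
        return summary_func(roi_data, axis=1)  # e.g. np.mean over voxels
    except TypeError:
        return summary_func(roi_data)          # e.g. a PCA .transform

ts = np.random.randn(100, 50)          # 100 timepoints x 50 voxels
print(summarize(ts, np.mean).shape)    # (100,)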
Example #15
def calculate_evoked(data, n_bins, problem=None, events=None, tr=2,
                     calc_method="FIR", offset=0, upsample=1,
                     percent_change=True, correct_baseline=True,
                     event_names=None):
    """Calcuate an evoked response for a list of datapoints.

    Parameters
    ----------
    data : sequence of n_run x n_tp arrays
        timeseries data
    n_bins : int
        number of bins for the peristimulus trace
    problem : string
        problem name for event file in data hierarchy
        overrides `events` if both are passed
    events : dataframe or list of dataframes
        one dataframe describing event information for each subj.
        must contain `onset`, `run`, and `condition` columns
        caution: `run` should be 0-based
    tr : int
        original time resolution of the data
    upsample : int
        factor to upsample the data with using cubic splines
    calc_method : string
        name of method on nitime EventRelatedAnalyzer object to
        calculate the evoked response.
    offset : float
        value to adjust onset times by
    percent_change : boolean
        if True, convert signal to percent change by run
    correct_baseline : boolean
        if True, adjust evoked trace to be 0 in first bin
    event_names : list of strings
        names of conditions, otherwise uses sorted unique
        values for the condition field in the event dataframe

    Returns
    -------

    evoked : squeezed n_obs x n_class x n_bins array
        evoked response, by observation and event type

    """
    project = gather_project_info()
    event_template = op.join(project["data_dir"], "%s",
                             "events/%s.csv" % problem)

    evoked = []
    for i, data_i in enumerate(data):

        # Can get event information in one of two ways
        if problem is not None:
            subj = data_i["subj"]
            events_i = pd.read_csv(event_template % subj)
        else:
            events_i = events[i]

        # Map from event names to integer index values
        if event_names is None:
            event_names = sorted(events_i.condition.unique())
        event_map = pd.Series(range(1, len(event_names) + 1),
                              index=event_names)

        # Create the timeseries of event occurrences
        event_list = []
        data_list = []
        for run, run_data in enumerate(data_i["data"]):

            # Possibly upsample the data
            if upsample != 1:
                time_points = len(run_data)
                x = np.linspace(0, time_points - 1, time_points)
                xx = np.linspace(0, time_points - 1,
                                 time_points * upsample + 1)
                interpolator = interp1d(x, run_data, "cubic", axis=0)
                run_data = interpolator(xx)

            run_events = events_i[events_i.run == run]
            run_events.onset += offset

            event_id = np.zeros(len(run_data), int)
            event_index = np.array(run_events.onset / tr).astype(int)
            event_index *= upsample
            event_id[event_index] = run_events.condition.map(event_map)
            event_list.append(event_id)

            if percent_change:
                run_data = nit.utils.percent_change(run_data, ax=0)
            data_list.append(run_data)

        # Set up the Nitime objects
        event_info = np.concatenate(event_list)
        data = np.concatenate(data_list, axis=0)

        # Do the calculations
        calc_tr = float(tr) / upsample
        calc_bins = n_bins * upsample
        if data.ndim == 1:
            evoked_data = _evoked_1d(data, event_info, calc_bins, calc_tr,
                                     calc_method, correct_baseline)
        elif data.ndim == 2:
            evoked_data = _evoked_2d(data, event_info, n_bins, calc_tr,
                                     calc_method, correct_baseline)
        evoked.append(evoked_data)

    return np.array(evoked).squeeze()
Example #16
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
    #     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
    #     cov_col = 'mean_acc'

    # load in covariate for cort analysis
    cov = pd.read_csv(
        '/Volumes/group/awagner/sgagnon/AP/data/cortisol/cort_percentchange_testbaseline_controlassay.csv'
    )
    cov_col = 'cort_controlassay'

    cov = cov.loc[cov.subid.isin(
        subject_list)]  # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] -
                    cov[cov_col].mean()) / cov[cov_col].std()  # zscore
    print(cov.describe())

    cov_reg = [
        cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list
    ]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[int(1) for sub in subject_list],
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print("run mni!")

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Example #17
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
        exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain"] = exp["whole_brain_template"]
    if exp["fieldmap_template"]:
        preproc_templates["fieldmap"] = exp["fieldmap_template"]

    preproc_source = Node(
        SelectFiles(preproc_templates, base_directory=project["data_dir"]),
        "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source, preproc_source,
                                        preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs in a persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source, preproc_sink,
                                          preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        nuisance_file=op.join(model_base, "nuisance_variables.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
    )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                 "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source, model_source,
                                      model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source, model_sink,
                                        model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Are we registering across experiments?
    cross_exp = args.regexp is not None

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(
        flow_name, space, regtype, warp_method, args.residual, cross_exp)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
    )

    if regtype == "model":
        # First-level model summary statistic images
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(
            dict(
                copes=op.join(reg_base, "cope*.nii.gz"),
                varcopes=op.join(reg_base, "varcope*.nii.gz"),
                sumsquares=op.join(reg_base, "ss*.nii.gz"),
            ))
    else:
        # Timeseries images
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = list(reg_templates.keys())

    # Native anatomy to group anatomy affine matrix and warpfield
    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)
    else:
        if args.regexp is None:
            tkreg_base = analysis_dir
        else:
            tkreg_base = op.join(project["analysis_dir"], args.regexp)
        reg_templates["tkreg_rigid"] = op.join(tkreg_base, "{subject_id}",
                                               "preproc", "run_1",
                                               "func2anat_tkreg.dat")

    # Rigid (6dof) functional-to-anatomical matrices
    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Rigid matrix from anatomy to target experiment space
    if args.regexp is not None:
        targ_analysis_dir = op.join(project["analysis_dir"], args.regexp)
        reg_templates["first_rigid"] = op.join(targ_analysis_dir,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_flirt.mat")

    # Define the registration data source node
    reg_source = Node(
        SelectFiles(reg_templates,
                    force_lists=reg_lists,
                    base_directory=analysis_dir), "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source, reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source, reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"],
                                                        exp_info=exp)

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
    )
    ffx_lists = list(ffx_templates.keys())

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        reg_dir = "{subject_id}/reg/epi/{smoothing}/run_1"
        bg = op.join(reg_dir, "mean_func_xfm.nii.gz")
        reg = op.join(reg_dir, "func2anat_tkreg.dat")
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(
        SelectFiles(ffx_templates,
                    force_lists=ffx_lists,
                    base_directory=analysis_dir), "ffx_source")

    # Fixed effects input node
    ffx_inwrap = tools.InputWrapper(ffx, subj_source, ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source, ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([("_smoothing_", ""),
                                          ("flamestats", "")])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(working_dir)
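Every stage above feeds nipype's `SelectFiles` with templates whose `{subject_id}`-style fields are filled from upstream nodes and whose `*` wildcards are globbed at runtime. A small illustration of that template style (invented paths, stated here only as a sketch):

from nipype.pipeline.engine import Node
from nipype.interfaces.io import SelectFiles

templates = dict(
    timeseries="{subject_id}/preproc/run_*/smoothed_timeseries.nii.gz",
    masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
)
source = Node(SelectFiles(templates, base_directory="/analysis/exp",
                          sort_filelist=True),
              name="example_source")
source.inputs.subject_id = "s01"
# source.run() would expand the fields, glob run_*, and return matched paths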
Example #18
def extract_subject(subj,
                    mask_name,
                    summary_func=np.mean,
                    residual=False,
                    exp_name=None):
    """Extract timeseries from within a mask, summarizing flexibly.

    Parameters
    ----------
    subj : string
        subject name
    mask_name : string
        name of mask in data hierarchy
    summary_func : callable or None
        Callable to reduce data over the voxel dimension. If it accepts
        an ``axis`` argument it is applied framewise; otherwise it is
        called on the full n_tr x n_voxel array. If None, all voxels
        are returned unreduced.
    residual : boolean
        If True, extract from the registered residual timecourse.
    exp_name : string
        experiment name, if not using the default experiment

    Returns
    -------
    data : dict with ndarray
        data array is n_runs x n_timepoints x n_dimensions;
        data are not otherwise altered

    """
    project = gather_project_info()
    if exp_name is None:
        exp_name = project["default_exp"]

    # Get a path to the cache file where the extraction will be stored
    cache_dir = op.join(project["analysis_dir"], exp_name, subj, "evoked")

    try:
        os.makedirs(cache_dir)
    except OSError:
        pass

    if summary_func is None:
        func_name = ""
    else:
        func_name = summary_func.__name__
    cache_fname = mask_name + "_" + func_name
    cache_fname = cache_fname.strip("_") + ".npz"
    cache_file = op.join(cache_dir, cache_fname)

    # Get paths to the relevant files
    mask_file = op.join(project["data_dir"], subj, "masks",
                        "%s.nii.gz" % mask_name)
    ts_dir = op.join(project["analysis_dir"], exp_name, subj, "reg", "epi",
                     "unsmoothed")
    n_runs = len(glob(op.join(ts_dir, "run_*")))

    ftemp = op.join(ts_dir, "run_{:d}/{}_xfm.nii.gz")
    fstem = "res4d" if residual else "timeseries"
    # Run directories are one-based (run_1, run_2, ...)
    ts_files = [ftemp.format(r_i, fstem) for r_i in range(1, n_runs + 1)]

    # Get the hash value for this extraction
    cache_hash = hashlib.sha1()
    cache_hash.update(mask_name)
    cache_hash.update(str(op.getmtime(mask_file)))
    for ts_file in ts_files:
        cache_hash.update(str(op.getmtime(ts_file)))
    cache_hash = cache_hash.hexdigest()

    # If the file exists and the hash matches, return the data
    if op.exists(cache_file):
        with np.load(cache_file) as cache_obj:
            if cache_hash == str(cache_obj["hash"]):
                return dict(cache_obj.items())

    # Otherwise, do the extraction
    data = []
    mask = nib.load(mask_file).get_data().astype(bool)
    for run, ts_file in enumerate(ts_files):
        ts_data = nib.load(ts_file).get_data()
        roi_data = ts_data[mask].T

        if summary_func is None:
            data.append(roi_data)
            continue

        # Try to use the axis argument to summarize over voxels
        try:
            roi_data = summary_func(roi_data, axis=1)
        # Catch a TypeError and just call the function
        # This lets us do e.g. a PCA
        except TypeError:
            roi_data = summary_func(roi_data)

        data.append(roi_data)

    data = np.array(list(map(np.squeeze, data)))

    # Save the results and return them
    data_dict = dict(data=data, subj=subj, hash=cache_hash)
    np.savez(cache_file, **data_dict)

    return data_dict
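
# Usage sketch for extract_subject, assuming a lyman project with a
# hypothetical subject "subj01" and a mask named "hippocampus". A reducer
# that accepts ``axis`` is applied framewise; one that does not (like the
# PCA-style reducer below) falls through the TypeError branch and receives
# the full n_tr x n_voxel array.
import numpy as np


def first_pc(X):
    """Project each frame onto the first principal component of the ROI."""
    X = X - X.mean(axis=0)
    u, s, vt = np.linalg.svd(X, full_matrices=False)
    return X.dot(vt[0])

# mean_ts = extract_subject("subj01", "hippocampus")          # framewise mean
# pc_ts = extract_subject("subj01", "hippocampus", first_pc)  # PCA timecourse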
Beispiel #19
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base,
                             "{subject_id}/reg/epi/", ffxsmooth,
                             "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
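
# Sketch of the FSL-style contrast specification used above: each contrast
# is [name, test, [regressor names], [weights]]. For a one-sample group
# mean over, say, 12 subjects:
n_subjects = 12  # assumed
regressors = dict(group_mean=[1] * n_subjects)
contrasts = [["group_mean", "T", ["group_mean"], [1]]]
# A mean-centered covariate column would extend this to e.g.
# [["covariate", "T", ["group_mean", "covariate"], [0, 1]]]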
Beispiel #20
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
                                                exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain_template"] = exp["whole_brain_template"]

    preproc_source = Node(SelectFiles(preproc_templates,
                                      base_directory=project["data_dir"]),
                          "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source,
                                        preproc_source, preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs to persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source,
                                          preproc_sink, preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
        )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source,
                                      model_source, model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source,
                                        model_sink, model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(flow_name,
                                                        space,
                                                        regtype,
                                                        warp_method,
                                                        args.residual)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
                         )

    if regtype == "model":
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(dict(
            copes=op.join(reg_base, "cope*.nii.gz"),
            varcopes=op.join(reg_base, "varcope*.nii.gz"),
            sumsquares=op.join(reg_base, "ss*.nii.gz"),
                                  ))
    else:
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = reg_templates.keys()

    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)

    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Define the registration data source node
    reg_source = Node(SelectFiles(reg_templates,
                                  force_lists=reg_lists,
                                  base_directory=analysis_dir),
                      "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source,
                                    reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source,
                                    reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"])

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
                         )
    ffx_lists = ffx_templates.keys()

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        bg = "{subject_id}/preproc/run_1/mean_func.nii.gz"
        reg = "{subject_id}/preproc/run_1/func2anat_tkreg.dat"
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(SelectFiles(ffx_templates,
                                  force_lists=ffx_lists,
                                  base_directory=analysis_dir),
                      "ffx_source")

    # Fixed effects inputnode
    ffx_inwrap = tools.InputWrapper(ffx, subj_source,
                                    ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source,
                                      ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([
        ("_smoothing_", ""), ("flamestats", "")
                                          ])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
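
# Self-contained sketch of the relative preproc symlink created above for
# altmodels; the directory names are throwaway temp paths, not lyman
# defaults. "../.." climbs from <exp>-<altmodel>/<subj> back to the
# analysis root, so the link keeps working if the whole tree is moved.
import os
import os.path as op
import tempfile

root = tempfile.mkdtemp()
os.makedirs(op.join(root, "exp_base", "subj01", "preproc"))
os.makedirs(op.join(root, "exp_base-alt", "subj01"))
link = op.join(root, "exp_base-alt", "subj01", "preproc")
os.symlink(op.join("../..", "exp_base", "subj01", "preproc"), link)
print(op.realpath(link))  # resolves into exp_base/subj01/preproc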
Beispiel #21
0
def model_coefs(datasets, model, flat=True, exp_name=None):
    """Fit a model on all data and save the learned model weights.

    This does not work for datasets with more than one frame.

    Parameters
    ----------
    datasets : list of dicts
        group mvpa datasets
    model : scikit-learn estimator
        decoding model
    flat : bool
        if False, return in the original data space (with voxels outside
        the mask represented as NaN); otherwise return straight from the
        model
    exp_name : string or None
        experiment name, otherwise uses project default

    Returns
    -------
    out_coefs : list of arrays
        model coefficients; form is determined by `flat` parameter

    """
    project = gather_project_info()
    if exp_name is None:
        exp_name = project["default_exp"]

    out_coefs = []

    # Iterate through the datasets
    for dset in datasets:
        subj = dset["subj"]

        # Get a header and affine matrix for the EPI space
        smoothed = "smoothed" if dset["smoothed"] else "unsmoothed"
        epi_file = op.join(project["analysis_dir"], exp_name, subj,
                           "preproc/run_1/example_func.nii.gz")
        epi_img = nib.load(epi_file)
        epi_header, epi_affine = epi_img.get_header(), epi_img.get_affine()

        # Check if we need to do anything
        decoder_hash = _hash_decoder(dset, model, None)
        coef_file = _results_fname(dset, model, None, False,
                                   False, False, exp_name)
        # str.strip removes a set of characters, not a suffix, so slice
        coef_file = coef_file[:-len(".npz")] + "_coef.npz"
        coef_nifti = coef_file[:-len(".npz")] + ".nii.gz"
        if op.exists(coef_file) and op.exists(coef_nifti):
            with np.load(coef_file) as res_obj:
                if decoder_hash == str(res_obj["hash"]):
                    if flat:
                        data = res_obj["data"]
                    else:
                        data = nib.load(coef_nifti).get_data()
                    out_coefs.append(data)
                    continue

        # Determine the mask
        mask = dset["mask"]

        # Get the mask dimensions
        x, y, z = mask.shape

        # Fit the model and extract the learned model weights
        model = model.fit(dset["X"], dset["y"])
        if hasattr(model, "estimators_"):
            coef = np.array([e.coef_.ravel() for e in model.estimators_])
        else:
            coef = model.coef_
        coef_data = np.zeros((x, y, z, len(coef))) * np.nan
        coef_data[mask] = coef.T

        # Save the data both as an npz and a nifti
        coef_dict = dict(data=coef, hash=decoder_hash)
        np.savez(coef_file, **coef_dict)
        coef_img = nib.Nifti1Image(coef_data, epi_affine, epi_header)
        nib.save(coef_img, coef_nifti)

        out_coefs.append(coef if flat else coef_data)

    return out_coefs
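
# Numpy-only sketch of the coefficient scatter in model_coefs: weights for
# in-mask voxels go back into a 4D volume, NaN everywhere outside the mask.
# All shapes below are made up for illustration.
import numpy as np

x, y, z, n_classes, n_in_mask = 4, 4, 3, 2, 10
rs = np.random.RandomState(0)
mask = np.zeros((x, y, z), bool)
mask.ravel()[:n_in_mask] = True         # arbitrary in-mask voxels
coef = rs.randn(n_classes, n_in_mask)   # as from model.coef_

coef_data = np.full((x, y, z, n_classes), np.nan)
coef_data[mask] = coef.T                # scatter weights into the volume
assert np.isnan(coef_data[~mask]).all()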
import numpy as np
import pandas as pd
from scipy import interpolate, linalg
from scipy.spatial.distance import cdist, pdist, squareform

import nibabel as nib
import nibabel.freesurfer as fs
from nibabel.affines import apply_affine

from moss.external.mvpasurf import Surface

import lyman
from surfutils import epi_to_surf_xfm

PROJECT = lyman.gather_project_info()


def backproject_label(label_file, subj, hemi):
    """Return label indices on individual subject surface."""
    label_verts = fs.read_label(label_file)

    # Define map of label on fsaverage surface
    label = np.zeros(163842, np.int)
    label[label_verts] = 1

    # Reverse normalize and convert to vertex indices
    subj_label = surface_transform(label, subj, hemi)
    subj_label_verts = np.argwhere(subj_label).squeeze()
    return subj_label_verts
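
# Round-trip sketch of the indicator encoding used in backproject_label:
# label vertex indices <-> a binary map over fsaverage's 163842 vertices
# (the standard FreeSurfer fsaverage vertex count).
import numpy as np

label_verts = np.array([10, 500, 163841])
label = np.zeros(163842, int)
label[label_verts] = 1
recovered = np.argwhere(label).squeeze()
assert (recovered == label_verts).all()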
Beispiel #23
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print subject_list

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
#     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
#     cov_col = 'mean_acc'

    # load in covariate (subids and value for each subject (in cov_col))
    cov = pd.read_csv(cov_filepath)
    cov = cov.loc[cov.subid.isin(subject_list)] # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] - cov[cov_col].mean()) / cov[cov_col].std() # zscore
    print cov.describe()

    cov_reg = [cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[int(1) for sub in subject_list], z_covariate=cov_reg)
    print regressors

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]


    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print 'run mni!'

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
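
# Standalone pandas sketch of the covariate handling in the snippet above:
# prune to the analyzed subjects, z-score the covariate, and order values
# by subject_list. Column names mirror the snippet; the data are made up.
import pandas as pd

subject_list = ["s01", "s02", "s03"]
cov = pd.DataFrame(dict(subid=["s01", "s02", "s03", "s99"],
                        mean_acc=[0.9, 0.7, 0.8, 0.5]))
cov = cov.loc[cov.subid.isin(subject_list)].copy()
cov["mean_acc"] = (cov.mean_acc - cov.mean_acc.mean()) / cov.mean_acc.std()
cov_reg = [cov[cov.subid == s].reset_index().at[0, "mean_acc"]
           for s in subject_list]
regressors = dict(group_mean=[1] * len(subject_list), z_covariate=cov_reg)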
Beispiel #24
0
import sys
import os.path as op
import argparse
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.stats import zscore
import nibabel as nib
import subprocess as sp
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import LeaveOneLabelOut
from nilearn.decoding import SearchLight

import lyman

project = lyman.gather_project_info()
data_dir = project["data_dir"]
analysis_dir = project["analysis_dir"]


def main(arglist):

    args = parse_args(arglist)

    if args.subjects is None:
        args.subjects = lyman.determine_subjects()

    for subj in args.subjects:

        print "Running subject", subj
Beispiel #26
0
def main(arglist):

    args = parse_args(arglist)

    project = lyman.gather_project_info()
    data_dir = Path(project["data_dir"])
    anal_dir = Path(project["analysis_dir"])

    # Work out elements of the path to the data
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Start building the command line
    cmdline = ["freeview"]

    # Background hires anatomy
    anat_vol = data_dir / args.subject / "mri/brain.mgz"
    cmdline.extend(["-v", str(anat_vol)])

    # Get a reference to the functional-to-anatomical registration
    reg_file = (anal_dir / exp_name / args.subject / "reg" / "epi" /
                smoothing / "run_1" / "func2anat_tkreg.dat")

    # Load the mean functional volume in the background
    mean_vol = (anal_dir / exp_name / args.subject / "ffx" / "epi" /
                smoothing / "mean_func.nii.gz")

    mean_arg = (str(mean_vol) + ":reg=" + str(reg_file) + ":colormap=gecolor" +
                ":colorscale=0,30000" + ":visible=0" + ":sample=trilinear")
    cmdline.extend(["-v", mean_arg])

    # Find the statistic volume to compute the colormap parameters
    stat_vol = (anal_dir / exp_name / args.subject / "ffx" / "epi" /
                smoothing / args.contrast / "zstat1.nii.gz")

    # Determine limits for the statistical colormap
    stat = np.abs(nib.load(str(stat_vol)).get_data())
    if args.vlims is None:
        cmap_max = max(4.2, np.percentile(np.abs(stat[stat > 2.3]), 98))
        cmap_arg = "1.64,2.3,{:.1f}".format(cmap_max)
    else:
        cmap_arg = "{},{},{}".format(*args.vlims)

    # Load the statistical overlay
    stat_arg = (str(stat_vol) + ":reg=" + str(reg_file) + ":colormap=heat" +
                ":heatscale=" + cmap_arg + ":sample=trilinear")
    cmdline.extend(["-v", stat_arg])

    # Mesh overlay
    for mesh in ["smoothwm", "inflated"]:
        for hemi in ["lh", "rh"]:
            surf = data_dir / args.subject / "surf" / (hemi + "." + mesh)
            stat = (anal_dir / exp_name / args.subject / "ffx" / "epi" /
                    smoothing / args.contrast / (hemi + ".zstat1.mgz"))
            surf_arg = (str(surf) + ":edgecolor=limegreen" + ":overlay=" +
                        str(stat) + ":overlay_color=heat" +
                        ":overlay_method=piecewise" + ":overlay_threshold=" +
                        cmap_arg)
            if mesh == "smoothwm":
                surf_arg += ":hide_in_3d=true"
            cmdline.extend(["-f", surf_arg])

        cmdline.append("--hide-3d-slices")

    # Freeview spews a lot of garbage to the terminal; typically silence that
    if not args.debug:
        cmdline.append("> /dev/null 2>&1")

    # Call out to freeview
    os.system(" ".join(cmdline))
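
# Numpy-only sketch of the heatscale logic above: without explicit limits,
# the colormap saturates at the 98th percentile of suprathreshold z values
# (never below 4.2), with the lower breakpoints fixed at 1.64 and 2.3.
# The stat array here is synthetic.
import numpy as np

rs = np.random.RandomState(0)
stat = np.abs(rs.randn(10000) * 2)  # stand-in for the zstat volume
cmap_max = max(4.2, np.percentile(stat[stat > 2.3], 98))
cmap_arg = "1.64,2.3,{:.1f}".format(cmap_max)
print(cmap_arg)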
Beispiel #27
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])


    ### Set up group info
    ## Regular design
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [group_info[group_info.subid == x].reset_index().at[0,'group'] for x in subject_list]
    group_vector = [1 if sub == "group1" else 2 for sub in groups] # 1 for group1, 2 for group2

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
    contrasts = [[contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
                                                                 ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(dict(
            varcopes=op.join(mfx_base, "varcope1.nii.gz"),
            dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(dict(
            reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                             "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(SelectFiles(templates,
                                     base_directory=anal_dir_base,
                                     sort_filelist=True),
                         "subject_id",
                         "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source,
            [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input,
            [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source,
            [("subject_id", "subject_id")]),
        (mfx_source, mfx_input,
            [("copes", "copes")])
                 ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input,
                [("varcopes", "varcopes"),
                 ("dofs", "dofs")]),
                     ])
    else:
        mfx.connect([
            (mfx_source, mfx_input,
                [("reg_file", "reg_file")]),
            (subj_source, mfx_input,
                [("subject_id", "subject_id")])
                     ])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
                                                      args.output,
                                                      space]),
                             substitutions=[("/stats", "/"),
                                            ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
                                      mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([
        (r"_l1_contrast_[-\w]*/", "/"),
        (r"_mni_hemi_[lr]h", "")
        ])
    mfx.connect(contrast_source, "l1_contrast",
                mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
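
# Minimal sketch of the two-group design constructed above: one indicator
# regressor per group, a FLAME group vector, and a between-group difference
# contrast. The group labels and weights are illustrative stand-ins for the
# snippet's contrast_name/contrast_vals.
groups = ["group1", "group2", "group1", "group2"]
group_vector = [1 if sub == "group1" else 2 for sub in groups]
regressors = dict(group1_mean=[int(sub == "group1") for sub in groups],
                  group2_mean=[int(sub == "group2") for sub in groups])
contrasts = [["group1-group2", "T",
              ["group1_mean", "group2_mean"], [1, -1]]]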
Beispiel #28
0
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [
        group_info[group_info.subid == x].reset_index().at[0, 'group']
        for x in subject_list
    ]
    group_vector = [1 if sub == "group1" else 2
                    for sub in groups]  # 1 for group1, 2 for group2

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
    contrasts = [[
        contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals
    ]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Beispiel #29
0
    def __init__(self, subject_list, experiment, roi_name, orig_type, force_serial=False, debug=False):

        # Set up basic info
        self.subject_list = lyman.determine_subjects(subject_list)
        project = lyman.gather_project_info()
        self.experiment = experiment
        self.roi_name = roi_name
        self.orig_type = orig_type
        self.debug = debug
        if debug:
            print "Setting up for %d subjects" % len(subject_list)
            print "Experiment name:", experiment
            print "ROI name:", roi_name

        # Set up directories
        if project["default_exp"] is not None and experiment is None:
            experiment = project["default_exp"]
        self.experiment = experiment
        self.data_dir = project["data_dir"]
        self.anal_dir = project["analysis_dir"]

        # Set up temporary output
        self.temp_dir = mkdtemp()

        # Set the SUBJECTS_DIR variable for Freesurfer
        os.environ["SUBJECTS_DIR"] = self.data_dir

        # Set up parallel execution
        self.parallel = False
        if force_serial:
            self.map = map
        else:
            try:
                rc = Client()
                self.dv = rc[:]
                self.map = self.dv.map_async
                # Push SUBJECTS_DIR to engines
                self.dv.execute("import os")
                self.dv["data_dir"] = self.data_dir
                self.dv.execute("os.environ['SUBJECTS_DIR'] = data_dir")
                self.parallel = True

            except (TimeoutError, IOError):
                self.map = map
        if debug:
            print "Set to run in %s" % ("parallel" if self.parallel else "serial")

        # Set up some persistent templates
        self.epi_template = op.join(self.anal_dir, self.experiment, "%(subj)s", "preproc/run_1/mean_func.nii.gz")
        self.fov_template = op.join(self.anal_dir, self.experiment, "%(subj)s", "preproc/run_1/functional_mask.nii.gz")
        self.reg_template = op.join(self.anal_dir, self.experiment, "%(subj)s", "preproc/run_1/func2anat_tkreg.dat")
        self.out_template = op.join(self.data_dir, "%(subj)s", "masks/%s.nii.gz" % self.roi_name)
        if debug:
            print "EPI template: %s" % self.epi_template
            print "Reg template: %s" % self.reg_template
            print "Output template: %s" % self.out_template

        # Ensure the output directory will exist
        for subj in self.subject_list:
            mask_dir = op.join(self.data_dir, subj, "masks")
            if not op.exists(mask_dir):
                os.mkdir(mask_dir)
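
# Sketch of how the "%(subj)s" templates above are meant to be filled:
# old-style mapping interpolation substitutes each subject per iteration.
# The analysis path here is an assumption, not a lyman default.
import os.path as op

epi_template = op.join("/analysis/exp", "%(subj)s",
                       "preproc/run_1/mean_func.nii.gz")
for subj in ["subj01", "subj02"]:
    print(epi_template % dict(subj=subj))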