Example #1
def create_confound_extraction_workflow(name="confounds", wm_components=6):
    """Extract nuisance variables from anatomical sources."""
    inputnode = Node(IdentityInterface(["timeseries", "brain_mask", "reg_file", "subject_id"]), "inputs")

    # Grab the subject's Freesurfer segmentation (aseg.mgz) as the anatomical source
    getaseg = Node(
        io.SelectFiles({"aseg": "{subject_id}/mri/aseg.mgz"}, base_directory=os.environ["SUBJECTS_DIR"]), "getaseg"
    )

    # Select and erode the white matter to get deep voxels
    selectwm = Node(fs.Binarize(erode=3, wm=True), "selectwm")

    # Transform the mask into functional space
    transform = MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"), ["reg_file", "source_file"], "transform")

    # Extract eigenvariates of the timeseries from WM and whole brain
    extract = MapNode(ExtractConfounds(n_components=wm_components), ["timeseries", "brain_mask", "wm_mask"], "extract")

    outputnode = Node(IdentityInterface(["confound_file"]), "outputs")

    confounds = Workflow(name)
    confounds.connect(
        [
            (inputnode, getaseg, [("subject_id", "subject_id")]),
            (getaseg, selectwm, [("aseg", "in_file")]),
            (selectwm, transform, [("binary_file", "target_file")]),
            (inputnode, transform, [("reg_file", "reg_file"), ("timeseries", "source_file")]),
            (transform, extract, [("transformed_file", "wm_mask")]),
            (inputnode, extract, [("timeseries", "timeseries"), ("brain_mask", "brain_mask")]),
            (extract, outputnode, [("out_file", "confound_file")]),
        ]
    )

    return confounds
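A minimal usage sketch (hypothetical file names; assumes the lyman imports used above and a valid $SUBJECTS_DIR):

wf = create_confound_extraction_workflow(wm_components=6)
wf.inputs.inputs.subject_id = "subj01"           # the inputnode is named "inputs"
wf.inputs.inputs.timeseries = ["run1.nii.gz"]    # MapNode fields take lists
wf.inputs.inputs.brain_mask = ["mask1.nii.gz"]
wf.inputs.inputs.reg_file = ["func2anat_tkreg.dat"]
wf.run()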
Example #2
File: registration.py Project: toddt/lyman
def create_reg_workflow(name="reg", space="mni", regtype="model"):
    """Flexibly register files into one of several common spaces."""
    if regtype == "model":
        fields = ["copes", "varcopes", "ss_files"]
    elif regtype == "timeseries":
        fields = ["timeseries"]
    fields.extend(["masks", "affines"])

    if space == "mni":
        fields.append("warpfield")

    inputnode = Node(IdentityInterface(fields), "inputnode")

    func = globals()["%s_%s_transform" % (space, regtype)]

    transform = Node(Function(fields, ["out_files"],
                              func, imports),
                     "transform")

    regflow = Workflow(name=name)

    outputnode = Node(IdentityInterface(["out_files"]), "outputnode")
    for field in fields:
        regflow.connect(inputnode, field, transform, field)
    regflow.connect(transform, "out_files", outputnode, "out_files")

    return regflow, inputnode, outputnode
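The globals() lookup above resolves a module-level transform function by naming convention ("<space>_<regtype>_transform"). A sketch of the contract it assumes, for space="mni" and regtype="model" (hypothetical signature, matching the Function node's input fields and single output):

def mni_model_transform(copes, varcopes, ss_files, masks, affines, warpfield):
    """Called when space='mni' and regtype='model'; must return out_files."""
    out_files = []
    # ... apply the affines and warpfield to each input, collect output paths ...
    return out_files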
Example #3
File: preproc.py Project: toddt/lyman
def create_bbregister_workflow(name="bbregister",
                               contrast_type="t2",
                               partial_brain=False):
    """Find a linear transformation to align the EPI file with the anatomy."""
    in_fields = ["subject_id", "source_file"]
    if partial_brain:
        in_fields.append("whole_brain_template")
    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Estimate the registration to Freesurfer conformed space
    func2anat = MapNode(fs.BBRegister(contrast_type=contrast_type,
                                      init="fsl",
                                      epi_mask=True,
                                      registered_file=True,
                                      out_reg_file="func2anat_tkreg.dat",
                                      out_fsl_file="func2anat_flirt.mat"),
                        "source_file",
                        "func2anat")

    # Make an image for quality control on the registration
    report = MapNode(Function(["subject_id", "in_file"],
                              ["out_file"],
                              write_coreg_plot,
                              imports),
                     "in_file",
                     "coreg_report")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]),
                      "outputs")

    bbregister = Workflow(name=name)

    # Connect the registration
    bbregister.connect([
        (inputnode, func2anat,
            [("subject_id", "subject_id"),
             ("source_file", "source_file")]),
        (inputnode, report,
            [("subject_id", "subject_id")]),
        (func2anat, report,
            [("registered_file", "in_file")]),
        (func2anat, outputnode,
            [("out_reg_file", "tkreg_mat")]),
        (func2anat, outputnode,
            [("out_fsl_file", "flirt_mat")]),
        (report, outputnode,
            [("out_file", "report")]),
        ])

    # Possibly connect the full_fov image
    if partial_brain:
        bbregister.connect([
            (inputnode, func2anat,
                [("whole_brain_template", "intermediate_file")]),
                ])

    return bbregister
Example #4
def create_surface_projection_workflow(name="surfproj", exp_info=None):
    """Project the group mask and thresholded zstat file onto the surface."""
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    inputnode = Node(IdentityInterface(["zstat_file", "mask_file"]), "inputs")

    # Sample the zstat image to the surface
    hemisource = Node(IdentityInterface(["mni_hemi"]), "hemisource")
    hemisource.iterables = ("mni_hemi", ["lh", "rh"])

    zstatproj = Node(freesurfer.SampleToSurface(
        sampling_method=exp_info["sampling_method"],
        sampling_range=exp_info["sampling_range"],
        sampling_units=exp_info["sampling_units"],
        smooth_surf=exp_info["surf_smooth"],
        subject_id="fsaverage",
        mni152reg=True,
        target_subject="fsaverage"),
        "zstatproj")

    # Sample the mask to the surface
    maskproj = Node(freesurfer.SampleToSurface(
        sampling_range=exp_info["sampling_range"],
        sampling_units=exp_info["sampling_units"],
        subject_id="fsaverage",
        mni152reg=True,
        target_subject="fsaverage"),
        "maskproj")
    if exp_info["sampling_method"] == "point":
        maskproj.inputs.sampling_method = "point"
    else:
        maskproj.inputs.sampling_method = "max"

    outputnode = Node(IdentityInterface(["surf_zstat",
                                         "surf_mask"]), "outputs")

    # Define and connect the workflow
    proj = Workflow(name)
    proj.connect([
        (inputnode, zstatproj,
            [("zstat_file", "source_file")]),
        (inputnode, maskproj,
            [("mask_file", "source_file")]),
        (hemisource, zstatproj,
            [("mni_hemi", "hemi")]),
        (hemisource, maskproj,
            [("mni_hemi", "hemi")]),
        (zstatproj, outputnode,
            [("out_file", "surf_zstat")]),
        (maskproj, outputnode,
            [("out_file", "surf_mask")]),
        ])

    return proj
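The hemisource node above uses Nipype iterables to fan the projections out over both hemispheres; a minimal standalone illustration of the mechanism:

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

src = Node(IdentityInterface(["hemi"]), "src")
src.iterables = ("hemi", ["lh", "rh"])
# When the parent workflow runs, every node downstream of src is duplicated
# once per value, so zstatproj and maskproj each run for lh and for rh.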
Example #5
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
Example #6
def create_bbregister_workflow(name="bbregister", contrast_type="t2", partial_brain=False, init_with="fsl"):
    """Find a linear transformation to align the EPI file with the anatomy."""
    in_fields = ["subject_id", "timeseries"]
    if partial_brain:
        in_fields.append("whole_brain_template")
    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Take the mean over time to get a target volume
    meanvol = MapNode(fsl.MeanImage(), "in_file", "meanvol")

    # Do a rough skullstrip using BET
    skullstrip = MapNode(fsl.BET(), "in_file", "bet")

    # Estimate the registration to Freesurfer conformed space
    func2anat = MapNode(
        fs.BBRegister(
            contrast_type=contrast_type,
            init=init_with,
            epi_mask=True,
            registered_file=True,
            out_reg_file="func2anat_tkreg.dat",
            out_fsl_file="func2anat_flirt.mat",
        ),
        "source_file",
        "func2anat",
    )

    # Make an image for quality control on the registration
    report = MapNode(CoregReport(), "in_file", "coreg_report")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]), "outputs")

    bbregister = Workflow(name=name)

    # Connect the registration
    bbregister.connect(
        [
            (inputnode, func2anat, [("subject_id", "subject_id")]),
            (inputnode, report, [("subject_id", "subject_id")]),
            (inputnode, meanvol, [("timeseries", "in_file")]),
            (meanvol, skullstrip, [("out_file", "in_file")]),
            (skullstrip, func2anat, [("out_file", "source_file")]),
            (func2anat, report, [("registered_file", "in_file")]),
            (func2anat, outputnode, [("out_reg_file", "tkreg_mat")]),
            (func2anat, outputnode, [("out_fsl_file", "flirt_mat")]),
            (report, outputnode, [("out_file", "report")]),
        ]
    )

    # Possibly connect the full_fov image
    if partial_brain:
        bbregister.connect([(inputnode, func2anat, [("whole_brain_template", "intermediate_file")])])

    return bbregister
Example #7
def create_filtering_workflow(name="filter",
                              hpf_cutoff=128,
                              TR=2,
                              output_name="timeseries"):
    """Scale and high-pass filter the timeseries."""
    inputnode = Node(IdentityInterface(["timeseries", "mask_file"]),
                     "inputs")

    # Grand-median scale within the brain mask
    scale = MapNode(ScaleTimeseries(statistic="median", target=10000),
                    ["in_file", "mask_file"],
                    "scale")

    # Gaussian running-line filter
    hpf_sigma = (hpf_cutoff / 2.0) / TR
    filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma),
                     "in_file",
                     "filter")

    # Possibly replace the mean
    # (In later versions of FSL, the highpass filter removes the
    # mean component. Put it back, but be flexible so this isn't
    # broken on older versions of FSL).
    replacemean = MapNode(ReplaceMean(output_name=output_name),
                          ["orig_file", "filtered_file"],
                          "replacemean")

    # Compute a final mean functional volume
    meanfunc = MapNode(fsl.MeanImage(out_file="mean_func.nii.gz"),
                       "in_file", "meanfunc")

    outputnode = Node(IdentityInterface(["timeseries",
                                         "mean_file"]), "outputs")

    filtering = Workflow(name)
    filtering.connect([
        (inputnode, scale,
            [("timeseries", "in_file"),
             ("mask_file", "mask_file")]),
        (scale, filter,
            [("out_file", "in_file")]),
        (scale, replacemean,
            [("out_file", "orig_file")]),
        (filter, replacemean,
            [("out_file", "filtered_file")]),
        (replacemean, meanfunc,
            [("out_file", "in_file")]),
        (replacemean, outputnode,
            [("out_file", "timeseries")]),
        (meanfunc, outputnode,
            [("out_file", "mean_file")]),
        ])

    return filtering
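ReplaceMean is a lyman interface; to make the mean-replacement comment above concrete, here is a minimal sketch of the underlying idea with nibabel/numpy (a hypothetical standalone helper, not lyman's actual implementation):

import nibabel as nib

def replace_mean(orig_file, filtered_file, out_file):
    orig = nib.load(orig_file)
    filt = nib.load(filtered_file)
    # Add back the voxelwise temporal mean that FSL's highpass filter removed
    data = filt.get_fdata() + orig.get_fdata().mean(axis=-1, keepdims=True)
    nib.save(nib.Nifti1Image(data, filt.affine, filt.header), out_file)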
Example #8
def workflow_spec(name="{workflow_name}", exp_info=None):
    """Return a Nipype workflow for MR processing.

    Parameters
    ----------
    name : string
        workflow object name
    exp_info : dict
        dictionary with experimental information
    """
    workflow = Workflow(name)

    if exp_info is None:
        exp_info = fitz.default_experiment_parameters()

    # Define the inputs for the preprocessing workflow
    in_fields = [""]  # "timeseries"]

    inputnode = Node(IdentityInterface(in_fields), "inputs")

    """
    # Define Actual Nipype Nodes, Workflows, etc.
    # e.g. The start of an example SPM preproc workflow
    # --------------------------------------------------

    slicetiming = pe.Node(interface=spm.SliceTiming(), name="slicetiming")
    slicetiming.inputs.ref_slice = 1
    realign = pe.Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True
    """
    # Placeholder connections: uncomment and adapt once the nodes above
    # are defined (passing a raw string to connect() would raise an error).
    # workflow.connect([
    #     (inputnode, slicetiming,
    #         [('timeseries', 'in_files')]),
    #     (slicetiming, realign,
    #         [('timecorrected_files', 'in_files')]),
    # ])

    output_fields = [""]  # realigned_files", "realignment_parameters"]

    outputnode = Node(IdentityInterface(output_fields), "outputs")

    # workflow.connect([
    #     (realign, outputnode,
    #         [("realigned_files", "realigned_files"),
    #          ("realignment_parameters", "realignment_parameters")]),
    # ])

    # Return the workflow itself and input and output nodes.
    return workflow, inputnode, outputnode
Example #9
    def test_execute(self, lyman_dir, execdir):

        info = frontend.info(lyman_dir=lyman_dir)

        def f(x):
            return x ** 2
        assert f(2) == 4

        n1 = Node(Function("x", "y", f), "n1")
        n2 = Node(Function("x", "y", f), "n2")

        wf = Workflow("test", base_dir=info.cache_dir)
        wf.connect(n1, "y", n2, "x")
        wf.inputs.n1.x = 2

        cache_dir = execdir.join("cache").join("test")

        class args(object):
            graph = False
            n_procs = 1
            debug = False
            clear_cache = True
            execute = True

        frontend.execute(wf, args, info)
        assert not cache_dir.exists()

        args.debug = True
        frontend.execute(wf, args, info)
        assert cache_dir.exists()

        args.debug = False
        info.remove_cache = False
        frontend.execute(wf, args, info)
        assert cache_dir.exists()

        args.execute = False
        res = frontend.execute(wf, args, info)
        assert res is None

        args.execute = True
        fname = str(execdir.join("graph").join("workflow.dot"))
        args.graph = fname
        res = frontend.execute(wf, args, info)
        assert res == fname[:-4] + ".svg"

        args.graph = True
        args.stage = "preproc"
        res = frontend.execute(wf, args, info)
        assert res == cache_dir.join("preproc.svg")
Example #10
def create_unwarp_workflow(name="unwarp", fieldmap_pe=("y", "y-")):
    """Unwarp functional timeseries using reverse phase-blipped images."""
    inputnode = Node(IdentityInterface(["timeseries", "fieldmap"]), "inputs")

    # Calculate the shift field
    # Note that setting readout_times to 1 will give a fine
    # map of the field, but the units will be off
    # Since we don't write out the map of the field itself, it does
    # not seem worth it to add another parameter for the readout times.
    # (It does require that they are the same, but when wouldn't they be?)
    topup = MapNode(
        fsl.TOPUP(encoding_direction=fieldmap_pe, readout_times=[1] * len(fieldmap_pe)), ["in_file"], "topup"
    )

    # Unwarp the timeseries
    applytopup = MapNode(
        fsl.ApplyTOPUP(method="jac", in_index=[1]),
        ["in_files", "in_topup_fieldcoef", "in_topup_movpar", "encoding_file"],
        "applytopup",
    )

    # Make a figure summarizing the unwarping
    report = MapNode(UnwarpReport(), ["orig_file", "corrected_file"], "unwarp_report")

    # Define the outputs
    outputnode = Node(IdentityInterface(["timeseries", "report"]), "outputs")

    # Define and connect the workflow
    unwarp = Workflow(name)
    unwarp.connect(
        [
            (inputnode, topup, [("fieldmap", "in_file")]),
            (inputnode, applytopup, [("timeseries", "in_files")]),
            (
                topup,
                applytopup,
                [
                    ("out_fieldcoef", "in_topup_fieldcoef"),
                    ("out_movpar", "in_topup_movpar"),
                    ("out_enc_file", "encoding_file"),
                ],
            ),
            (inputnode, report, [("fieldmap", "orig_file")]),
            (topup, report, [("out_corrected", "corrected_file")]),
            (applytopup, outputnode, [("out_corrected", "timeseries")]),
            (report, outputnode, [("out_file", "report")]),
        ]
    )

    return unwarp
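For reference, with fieldmap_pe=("y", "y-") and the unit readout times used above, the encoding TOPUP receives is equivalent to the standard FSL acqparams layout, one "x y z readout" row per fieldmap volume:

0  1 0 1
0 -1 0 1

(As the comment in the code notes, a unit readout time only rescales the fieldmap itself; the same encoding file is passed through to ApplyTOPUP, so the unwarped timeseries is unaffected.)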
Example #11
def create_reg_workflow(name="reg", space="mni",
                        regtype="model", method="fsl",
                        residual=False, cross_exp=False):
    """Flexibly register files into one of several common spaces."""

    # Define the input fields flexibly
    if regtype == "model":
        fields = ["copes", "varcopes", "sumsquares"]
    elif regtype == "timeseries":
        fields = ["timeseries"]

    if cross_exp:
        fields.extend(["first_rigid"])

    fields.extend(["means", "masks", "rigids"])

    if space == "mni":
        fields.extend(["affine", "warpfield"])
    else:
        fields.extend(["tkreg_rigid"])

    inputnode = Node(IdentityInterface(fields), "inputnode")

    # Grab the correct interface class dynamically
    interface_name = "{}{}Registration".format(space.upper(),
                                               regtype.capitalize())
    reg_interface = globals()[interface_name]
    transform = Node(reg_interface(method=method), "transform")

    # Sanity check on inputs
    if regtype == "model" and residual:
        raise ValueError("residual and regtype=model does not make sense")

    # Set the kind of timeseries
    if residual:
        transform.inputs.residual = True

    outputnode = Node(IdentityInterface(["out_files"]), "outputnode")

    # Define the workflow
    regflow = Workflow(name=name)

    # Connect the inputs programmatically
    for field in fields:
        regflow.connect(inputnode, field, transform, field)

    # The transform node only ever has one output
    regflow.connect(transform, "out_files", outputnode, "out_files")

    return regflow, inputnode, outputnode
Example #12
def make_simple_workflow():

    wf = Workflow(name="test")

    node1 = Node(IdentityInterface(fields=["foo"]), name="node1")
    node2 = MapNode(IdentityInterface(fields=["foo"]),
                    name="node2", iterfield=["foo"])
    node3 = Node(IdentityInterface(fields=["foo"]), name="node3")

    wf.connect([
        (node1, node2, [("foo", "foo")]),
        (node2, node3, [("foo", "foo")]),
        ])

    return wf, node1, node2, node3
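A usage sketch for the helper above (it uses only IdentityInterface, so no external tools are needed):

wf, node1, node2, node3 = make_simple_workflow()
node1.inputs.foo = ["a", "b", "c"]   # node2 maps over the list element-wise
wf.run()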
Example #13
def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    yield assert_equal, n1.num_subnodes(), 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
Example #14
def create_realignment_workflow(name="realignment", temporal_interp=True, TR=2, slice_order="up", interleaved=True):
    """Motion and slice-time correct the timeseries and summarize."""
    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    # Get the middle volume of each run for motion correction
    extractref = MapNode(ExtractRealignmentTarget(), "in_file", "extractref")

    # Motion correct to middle volume of each run
    mcflirt = MapNode(
        fsl.MCFLIRT(cost="normcorr", interpolation="spline", save_mats=True, save_rms=True, save_plots=True),
        ["in_file", "ref_file"],
        "mcflirt",
    )

    # Optionally temporally interpolate to correct for slice time differences
    if temporal_interp:
        slicetime = MapNode(fsl.SliceTimer(time_repetition=TR), "in_file", "slicetime")

        if slice_order == "down":
            slicetime.inputs.index_dir = True
        elif slice_order != "up":
            raise ValueError("slice_order must be 'up' or 'down'")

        if interleaved:
            slicetime.inputs.interleaved = True

    # Generate a report on the motion correction
    mcreport = MapNode(RealignmentReport(), ["target_file", "realign_params", "displace_params"], "mcreport")

    # Define the outputs
    outputnode = Node(IdentityInterface(["timeseries", "example_func", "report", "motion_file"]), "outputs")

    # Define and connect the sub workflow
    realignment = Workflow(name)

    realignment.connect(
        [
            (inputnode, extractref, [("timeseries", "in_file")]),
            (inputnode, mcflirt, [("timeseries", "in_file")]),
            (extractref, mcflirt, [("out_file", "ref_file")]),
            (extractref, mcreport, [("out_file", "target_file")]),
            (mcflirt, mcreport, [("par_file", "realign_params"), ("rms_files", "displace_params")]),
            (extractref, outputnode, [("out_file", "example_func")]),
            (mcreport, outputnode, [("realign_report", "report"), ("motion_file", "motion_file")]),
        ]
    )

    if temporal_interp:
        realignment.connect(
            [
                (mcflirt, slicetime, [("out_file", "in_file")]),
                (slicetime, outputnode, [("slice_time_corrected_file", "timeseries")]),
            ]
        )
    else:
        realignment.connect([(mcflirt, outputnode, [("out_file", "timeseries")])])

    return realignment
Example #15
File: preproc.py Project: toddt/lyman
def create_filtering_workflow(name="filter",
                              hpf_cutoff=128,
                              TR=2,
                              output_name="timeseries"):
    """Scale and high-pass filter the timeseries."""
    inputnode = Node(IdentityInterface(["timeseries", "mask_file"]),
                     "inputs")

    # Grand-median scale within the brain mask
    scale = MapNode(Function(["in_file",
                              "mask_file"],
                             ["out_file"],
                             scale_timeseries,
                             imports),
                    ["in_file", "mask_file"],
                    "scale")

    # Gaussian running-line filter
    hpf_sigma = (hpf_cutoff / 2.0) / TR
    filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma,
                                        out_file=output_name + ".nii.gz"),
                     "in_file",
                     "filter")

    outputnode = Node(IdentityInterface(["timeseries"]), "outputs")

    filtering = Workflow(name)
    filtering.connect([
        (inputnode, scale,
            [("timeseries", "in_file"),
             ("mask_file", "mask_file")]),
        (scale, filter,
            [("out_file", "in_file")]),
        (filter, outputnode,
            [("out_file", "timeseries")]),
        ])

    return filtering
Example #16
def create_workflow_to_resample_baw_files(name="ResampleBAWOutputs"):
    """
    This function...

    :param name:
    :return:
    """
    workflow = Workflow(name)
    inputs_to_resample = ["t1_file", "t2_file", "hncma_file", "abc_file"]
    other_inputs = ["reference_file", "acpc_transform"]
    label_maps = ["hncma_file", "abc_file"]
    input_spec = Node(
        IdentityInterface(inputs_to_resample + other_inputs), name="input_spec"
    )
    output_spec = Node(IdentityInterface(inputs_to_resample), name="output_spec")
    for input in inputs_to_resample:
        node = Node(BRAINSResample(), "Resample_{0}".format(input))
        node.inputs.pixelType = "short"
        node.inputs.inverseTransform = True
        node.inputs.outputVolume = input + ".nii.gz"
        if input in label_maps:
            node.inputs.interpolationMode = "NearestNeighbor"
        workflow.connect(
            [
                (
                    input_spec,
                    node,
                    [
                        ("reference_file", "referenceVolume"),
                        ("acpc_transform", "warpTransform"),
                        ("{0}".format(input), "inputVolume"),
                    ],
                ),
                (node, output_spec, [("outputVolume", "{0}".format(input))]),
            ]
        )
    return workflow
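A hypothetical usage sketch (file names are placeholders; assumes BRAINSResample and the Nipype imports used above are available):

wf = create_workflow_to_resample_baw_files()
wf.inputs.input_spec.t1_file = "t1.nii.gz"
wf.inputs.input_spec.t2_file = "t2.nii.gz"
wf.inputs.input_spec.hncma_file = "hncma.nii.gz"
wf.inputs.input_spec.abc_file = "abc.nii.gz"
wf.inputs.input_spec.reference_file = "reference.nii.gz"
wf.inputs.input_spec.acpc_transform = "acpc_transform.h5"
wf.run()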
Example #17
def workflow_spec(name="onset", exp_info=None):
    # Default experiment parameters
    if exp_info is None:
        exp_info = fitz.default_experiment_parameters()  # call it, don't assign the function

    # Define constant inputs
    inputs = ["design_file"]

    # Define the workflow inputs
    inputnode = Node(IdentityInterface(inputs), "inputs")

    onsetsetup = Node(OnsetSetup(), "onsetsetup")
    onsetsetup.inputs.exp_info = exp_info
    onsetsetup.inputs.conditions = exp_info['conditions']
    onsetsetup.inputs.condition_col = exp_info['condition_col']
    onsetsetup.inputs.duration_col = exp_info['duration_col']
    onsetsetup.inputs.onset_col = exp_info['onset_col']
    onsetsetup.inputs.run_col = exp_info['run_col']
    onsetsetup.inputs.pmod_cols = exp_info['pmod_cols']
    onsetsetup.inputs.pmod_conditions = exp_info['pmod_conditions']
    onsetsetup.inputs.concatenate_runs = exp_info['concatenate_runs']

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["design_mats"]),
                      "outputs")

    # Define the workflow and connect the nodes
    onsetFlow = Workflow(name=name)
    onsetFlow.connect([
        (inputnode, onsetsetup,
            [("design_file", "design_file")]),
        (onsetsetup, outputnode,
            [("design_mats", "design_mats")])
    ])

    return onsetFlow, inputnode, outputnode
Example #18
def create_workflow_to_mask_white_matter(name):
    """
    This function...

    :param name:
    :return:
    """
    workflow = Workflow(name)

    input_spec = create_identity_interface_node(["t1_file", "white"], "input_spec")

    mask_white_matter = Node(Mesh2Mask(), "MaskWhiteMatter")
    mask_white_matter.inputs.output_image = "white.nii.gz"

    workflow.connect(input_spec, "white", mask_white_matter, "input_mesh")
    workflow.connect(input_spec, "t1_file", mask_white_matter, "input_image")

    output_spec = create_identity_interface_node(["white_mask"], "output_spec")
    workflow.connect(mask_white_matter, "output_image", output_spec, "white_mask")

    return workflow
Example #19
    if delete_tmp_files is None:
        os.remove(tck_file)
        
    return output_file
        
convertNode = Node(Function(input_names = ['tck_file', 'image_file', 'output_file', 'delete_tmp_files'],
                            output_names = ['output_file'],
                           function = convert2trk),
                   name = 'tck2trk')


# ### Define the workflow

# In[16]:

wf = Workflow('MRTRIX_tracking')

wf.connect([
        (inputNode, trackingNode, [('spherical_harmonics_image', 'in_file'),
                                  ('seedmask', 'seed_file'),
                                  ('targetmask', 'include_file'),
                                  ('wmmask_1mm', 'mask_file'),
                                  ('seed_count', 'desired_number_of_tracks'),
                                  (('tracks_dir', fileNameBuild, 'seedmask'), 'out_file')]),
        (trackingNode, convertNode, [('tracked', 'tck_file')]),
        (inputNode, convertNode, [('seedmask', 'image_file'),
                                 (('tracks_dir', fileNameBuildTRK, 'seedmask'), 'output_file')]),
        (convertNode, outputNode, [('output_file', 'tck_file')])
    ])

Example #20
def create_fs_reg_workflow(name="registration"):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------

    ::

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space

    Example
    -------

    """

    register = Workflow(name=name)

    inputnode = Node(
        interface=IdentityInterface(
            fields=["source_files", "mean_image", "subject_id", "subjects_dir", "target_image"]
        ),
        name="inputspec",
    )

    outputnode = Node(
        interface=IdentityInterface(
            fields=[
                "func2anat_transform",
                "out_reg_file",
                "anat2target_transform",
                "transforms",
                "transformed_mean",
                "transformed_files",
                "min_cost_file",
                "anat2target",
                "aparc",
                "mean2anat_mask",
            ]
        ),
        name="outputspec",
    )

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name="fssource")
    fssource.run_without_submitting = True
    register.connect(inputnode, "subject_id", fssource, "subject_id")
    register.connect(inputnode, "subjects_dir", fssource, "subjects_dir")

    convert = Node(freesurfer.MRIConvert(out_type="nii"), name="convert")
    register.connect(fssource, "T1", convert, "in_file")

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(registered_file=True), name="bbregister")
    bbregister.inputs.init = "fsl"
    bbregister.inputs.contrast_type = "t2"
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, "subject_id", bbregister, "subject_id")
    register.connect(inputnode, "mean_image", bbregister, "source_file")
    register.connect(inputnode, "subjects_dir", bbregister, "subjects_dir")

    # Create a mask of the median coregistered to the anatomical image
    mean2anat_mask = Node(fsl.BET(mask=True), name="mean2anat_mask")
    register.connect(bbregister, "registered_file", mean2anat_mask, "in_file")

    """
    use aparc+aseg's brain mask
    """

    binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), name="binarize_aparc")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, "in_file")

    stripper = Node(fsl.ApplyMask(), name="stripper")
    register.connect(binarize, "binary_file", stripper, "mask_file")
    register.connect(convert, "out_file", stripper, "in_file")

    """
    Apply inverse transform to aparc file
    """
    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True, interp="nearest"), name="aparc_inverse_transform")
    register.connect(inputnode, "subjects_dir", aparcxfm, "subjects_dir")
    register.connect(bbregister, "out_reg_file", aparcxfm, "reg_file")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), aparcxfm, "target_file")
    register.connect(inputnode, "mean_image", aparcxfm, "source_file")

    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name="convert2itk")
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, "out_fsl_file", convert2itk, "transform_file")
    register.connect(inputnode, "mean_image", convert2itk, "source_file")
    register.connect(stripper, "out_file", convert2itk, "reference_file")

    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name="antsRegister")
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ["Rigid", "Affine", "SyN"]
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ["Mattes"] * 2 + [["Mattes", "CC"]]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ["Regular"] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.0e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ["vox"] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.args = "--float"
    reg.inputs.output_warped_image = "output_warped_image.nii.gz"
    reg.inputs.num_threads = 4
    reg.plugin_args = {"qsub_args": "-pe orte 4", "sbatch_args": "--mem=6G -c 4"}
    register.connect(stripper, "out_file", reg, "moving_image")
    register.connect(inputnode, "target_image", reg, "fixed_image")

    """
    Concatenate the affine and ants transforms into a list
    """

    pickfirst = lambda x: x[0]

    merge = Node(Merge(2), iterfield=["in2"], name="mergexfm")
    register.connect(convert2itk, "itk_transform", merge, "in2")
    register.connect(reg, ("composite_transform", pickfirst), merge, "in1")

    """
    Transform the mean image. First to anatomical and then to target
    """
    warpmean = Node(ants.ApplyTransforms(), name="warpmean")
    warpmean.inputs.input_image_type = 0
    warpmean.inputs.interpolation = "Linear"
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = "file"
    warpmean.inputs.args = "--float"
    # warpmean.inputs.num_threads = 4
    # warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'}

    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = pe.MapNode(ants.ApplyTransforms(), iterfield=["input_image"], name="warpall")
    warpall.inputs.input_image_type = 0
    warpall.inputs.interpolation = "Linear"
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = "file"
    warpall.inputs.args = "--float"
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {"sbatch_args": "--mem=6G -c 2"}

    """
    Assign all the output files
    """

    register.connect(warpmean, "output_image", outputnode, "transformed_mean")
    register.connect(warpall, "output_image", outputnode, "transformed_files")

    register.connect(inputnode, "target_image", warpmean, "reference_image")
    register.connect(inputnode, "mean_image", warpmean, "input_image")
    register.connect(merge, "out", warpmean, "transforms")
    register.connect(inputnode, "target_image", warpall, "reference_image")
    register.connect(inputnode, "source_files", warpall, "input_image")
    register.connect(merge, "out", warpall, "transforms")

    """
    Assign all the output files
    """

    register.connect(reg, "warped_image", outputnode, "anat2target")
    register.connect(aparcxfm, "transformed_file", outputnode, "aparc")
    register.connect(bbregister, "out_fsl_file", outputnode, "func2anat_transform")
    register.connect(bbregister, "out_reg_file", outputnode, "out_reg_file")
    register.connect(bbregister, "min_cost_file", outputnode, "min_cost_file")
    register.connect(mean2anat_mask, "mask_file", outputnode, "mean2anat_mask")
    register.connect(reg, "composite_transform", outputnode, "anat2target_transform")
    register.connect(merge, "out", outputnode, "transforms")

    return register
Example #21
def prep(zfrqs, dummies=0, pca_retain=0, name='CEST_prep'):
    inputnode = Node(IdentityInterface(fields=['zspec_file', 'ref_file']),
                     name='inputnode')
    outputnode = Node(IdentityInterface(
        fields=['zspec_file', 'f0_map', 'mask_file', 'ref_file', 'DS', 'MT']),
                      name='outputnode')

    moco = Node(MCFLIRT(cost='mutualinfo', mean_vol=True), name='moco')
    mask = Node(BET(mask=True, no_output=True), name='mask')

    if (dummies > 0):
        ref_index = dummies - 1
        zspec_select = Node(Select(volumes=list(range(dummies, len(zfrqs))),
                                   out_file='zspec.nii.gz'),
                            name='zspec_select')
        zfrqs = np.array(zfrqs[dummies:])
    else:
        ref_index = 0
        zfrqs = np.array(zfrqs)

    zspec_ref = Node(Select(volumes=[ref_index], out_file='reference.nii.gz'),
                     name='zspec_ref')
    zspec_norm = Node(ImageMaths(op_string='-div', out_file='zspec.nii.gz'),
                      name='zspec_norm')

    f0_indices = (np.abs(zfrqs) > 7) | (np.abs(zfrqs) < 1.1)
    sat_frqs = zfrqs[f0_indices]
    # one angle per *selected* frequency (len(f0_indices) counts all of them)
    sat_angles = np.repeat(180.0, len(sat_frqs))
    f0_select = Node(Select(volumes=np.where(f0_indices)[0].tolist(),
                            out_file='background_zspec.nii.gz'),
                     name='f0_select')
    sequence = {
        'MTSat': {
            'pulse': {
                'p1': 0.4,
                'p2': 0.3,
                'bandwidth': 0.39
            },
            'Trf': 0.02,
            'TR': 4,
            'FA': 5,
            'sat_f0': sat_frqs.tolist(),
            'sat_angle': sat_angles.tolist()
        }
    }
    two_pools = [{
        'name': 'DS',
        'df0': [0, -2.5, 2.5],
        'fwhm': [1.0, 1.e-6, 3.0],
        'A': [0.2, 1.e-3, 1.0],
        'use_bandwidth': True
    }, {
        'name': 'MT',
        'df0': [-2.5, -5.0, -0.5],
        'fwhm': [50.0, 35.0, 200.0],
        'A': [0.3, 1.e-3, 1.0]
    }]
    f0_fit = Node(Lorentzian(sequence=sequence, pools=two_pools, verbose=True),
                  name='f0_fit')

    out_frqs = np.sort(zfrqs)
    f0_correct = Node(ZSpec(in_freqs=zfrqs.tolist(),
                            out_freqs=out_frqs.tolist(),
                            verbose=True),
                      name='f0_correct')

    prep = Workflow(name=name)
    prep.connect([(inputnode, moco, [('zspec_file', 'in_file'),
                                     ('ref_file', 'ref_file')]),
                  (moco, zspec_ref, [('out_file', 'in_file')]),
                  (moco, mask, [('mean_img', 'in_file')]),
                  (zspec_ref, zspec_norm, [('out_file', 'in_file2')]),
                  (zspec_norm, f0_select, [('out_file', 'in_file')]),
                  (f0_select, f0_fit, [('out_file', 'in_file')]),
                  (mask, f0_fit, [('mask_file', 'mask_file')]),
                  (zspec_norm, f0_correct, [('out_file', 'in_file')]),
                  (f0_fit, f0_correct, [('DS_f0', 'f0_map')]),
                  (mask, f0_correct, [('mask_file', 'mask_file')]),
                  (moco, outputnode, [('mean_img', 'ref_file')]),
                  (mask, outputnode, [('out_file', 'mask_file')]),
                  (f0_fit, outputnode, [('DS_f0', 'f0_map'), ('DS_A', 'DS'),
                                        ('MT_A', 'MT')])])
    if (dummies > 0):
        prep.connect([(moco, zspec_select, [('out_file', 'in_file')]),
                      (zspec_select, zspec_norm, [('out_file', 'in_file')])])
    else:
        prep.connect([(moco, zspec_norm, [('out_file', 'in_file')])])

    if pca_retain > 0:
        f0_pca = Node(PCA(retain=pca_retain, projections_file='proj.nii.gz'),
                      name='f0_pca')
        prep.connect([(f0_correct, f0_pca, [('out_file', 'in_file')]),
                      (f0_pca, outputnode, [('out_file', 'zspec_file')])])
    else:
        prep.connect([(f0_correct, outputnode, [('out_file', 'zspec_file')])])

    return (prep, out_frqs)
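A hypothetical usage sketch (the QUIT-based interfaces used above must be importable; the frequency list is a placeholder):

import numpy as np
zfrqs = np.linspace(-10, 10, 41).tolist()
prep_wf, out_frqs = prep(zfrqs, dummies=2, pca_retain=0, name='CEST_prep')
prep_wf.inputs.inputnode.zspec_file = "zspec.nii.gz"
prep_wf.inputs.inputnode.ref_file = "ref.nii.gz"
prep_wf.run()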
Example #22
def run_bianca_wf(masterfile,
                  out_dir,
                  wd_dir,
                  crash_dir,
                  df,
                  training_subject_idx,
                  query_subject_idx,
                  name="bianca",
                  n_cpu=4,
                  save_classifier=False,
                  trained_classifier_file=None):
    """

    :param masterfile: str
    :param out_dir:
    :param wd_dir:
    :param crash_dir:
    :param df: df
    :param training_subject_idx: training_subject_idx: list of ints, python-style 0-based; training subjects in df
    :param query_subject_idx: list of ints, python-style 0-based; query subjects in df
    :param name:
    :param n_cpu:
    :param save_classifier: bool
    :param trained_classifier_file: file previously saved with save_classifier; if given, training subjects
    are ignored and classifier file is used in prediction
    :return: None
    """

    if save_classifier and trained_classifier_file:
        raise RuntimeError(
            "save_classifier and trained_classifier_file cannot be set at the same time"
        )
    if trained_classifier_file:
        trained_classifier_file = str(trained_classifier_file)
    #####
    # masterfile information
    expected_header = [
        'flair', 't1w', 'manual_mask', 'mat', 'subject', 'session'
    ]
    assert df.columns.tolist() == expected_header, \
        f"masterfile columns are off. columns should be {expected_header} " \
        f"but are {df.columns}"

    featuresubset = "1,2"
    brainmaskfeaturenum = "2"
    labelfeaturenum = "3"
    matfeaturenum = "4"

    ######
    # workflow
    wf = Workflow(name=name)

    ######
    # subject info
    inputnode = Node(niu.IdentityInterface(fields=['query_subject_idx']),
                     name='inputnode')
    inputnode.iterables = [("query_subject_idx", query_subject_idx)]
    inputnode.synchronize = True

    def get_query_info_fnc(df, query_subject_idx):
        def get_subjects_info(df, idx):
            rows = df.iloc[idx]
            return (rows.subject.tolist()[0], rows.session.tolist()[0],
                    rows.flair.tolist()[0])

        query_subject, query_session, query_flair = get_subjects_info(
            df, [query_subject_idx])
        query_subject_num = query_subject_idx + 1
        return query_subject, query_session, query_flair, query_subject_num

    query_info = Node(niu.Function(input_names=["df", "query_subject_idx"],
                                   output_names=[
                                       'query_subject', 'query_session',
                                       'query_flair', 'query_subject_num'
                                   ],
                                   function=get_query_info_fnc),
                      name="query_info")
    query_info.inputs.df = df
    wf.connect(inputnode, "query_subject_idx", query_info, "query_subject_idx")

    def get_training_info_fnc(df, query_subject_idx, training_subject_idx):
        import numpy as np
        training_subject_idx_clean = training_subject_idx.tolist()
        if query_subject_idx in training_subject_idx_clean:
            training_subject_idx_clean.remove(query_subject_idx)
        training_subjects = df.iloc[training_subject_idx_clean].subject.tolist()
        training_sessions = df.iloc[training_subject_idx_clean].session.tolist()
        training_subject_nums_str = ",".join(
            (np.array(training_subject_idx_clean) + 1).astype(str).tolist())
        return training_subject_idx_clean, training_subject_nums_str, training_subjects, training_sessions

    training_info = Node(niu.Function(
        input_names=["df", "query_subject_idx", "training_subject_idx"],
        output_names=[
            "training_subject_idx", "training_subject_nums_str",
            "training_subjects", "training_sessions"
        ],
        function=get_training_info_fnc),
                         name="training_info")
    training_info.inputs.df = df
    training_info.inputs.training_subject_idx = training_subject_idx
    wf.connect(inputnode, "query_subject_idx", training_info,
               "query_subject_idx")

    bianca = Node(BIANCA(), name="bianca")
    bianca.inputs.masterfile = str(masterfile)
    bianca.inputs.featuresubset = featuresubset
    bianca.inputs.brainmaskfeaturenum = brainmaskfeaturenum
    bianca.inputs.matfeaturenum = matfeaturenum
    bianca.inputs.save_classifier = save_classifier
    wf.connect(query_info, "query_subject_num", bianca, "querysubjectnum")

    if trained_classifier_file:
        bianca.inputs.trained_classifier_file = trained_classifier_file
    else:
        bianca.inputs.labelfeaturenum = labelfeaturenum
        wf.connect(training_info, "training_subject_nums_str", bianca,
                   "trainingnums")

    def classifier_info_fct(masterfile,
                            query_subject,
                            query_session,
                            query_flair,
                            training_subjects=None,
                            training_sessions=None,
                            classifier_file=None):
        d = {
            "masterfile": str(masterfile),
            "query_subject_session": [query_subject, query_session],
            "query_flair": query_flair,
        }
        if training_subjects:
            d["training_subjects_sessions"] = list(
                zip(training_subjects, training_sessions))
        else:
            d["classifier_file"] = classifier_file
        return d

    classifier_info = Node(niu.Function(input_names=[
        "masterfile", "query_subject", "query_session", "query_flair",
        "training_subjects", "training_sessions", "classifier_file"
    ],
                                        output_names=["meta_dict"],
                                        function=classifier_info_fct),
                           name="classifier_info")
    classifier_info.inputs.masterfile = masterfile
    wf.connect(query_info, "query_subject", classifier_info, "query_subject")
    wf.connect(query_info, "query_session", classifier_info, "query_session")
    wf.connect(query_info, "query_flair", classifier_info, "query_flair")
    if trained_classifier_file:
        classifier_info.inputs.classifier_file = trained_classifier_file
    else:
        wf.connect(training_info, "training_subjects", classifier_info,
                   "training_subjects")
        wf.connect(training_info, "training_sessions", classifier_info,
                   "training_sessions")

    ds = Node(DerivativesDataSink(base_directory=str(out_dir.parent),
                                  out_path_base=str(out_dir.name)),
              name="ds")
    ds.inputs.suffix = "LPM"
    wf.connect(bianca, "out_file", ds, "in_file")
    wf.connect(query_info, "query_flair", ds, "source_file")
    wf.connect(classifier_info, "meta_dict", ds, "meta_dict")

    if save_classifier:
        ds_clf = Node(DerivativesDataSink(base_directory=str(out_dir.parent),
                                          out_path_base=str(out_dir.name)),
                      name="ds_clf")
        ds_clf.inputs.suffix = "classifier"
        wf.connect(bianca, "classifier_file", ds_clf, "in_file")
        wf.connect(query_info, "query_flair", ds_clf, "source_file")

        ds_clf_labels = Node(DerivativesDataSink(
            base_directory=str(out_dir.parent),
            out_path_base=str(out_dir.name)),
                             name="ds_clf_labels")
        ds_clf_labels.inputs.suffix = "classifier_labels"
        wf.connect(bianca, "classifier_labels_file", ds_clf_labels, "in_file")
        wf.connect(query_info, "query_flair", ds_clf_labels, "source_file")

    wf.base_dir = wd_dir
    wf.config.remove_unnecessary_outputs = False
    wf.config["execution"]["crashdump_dir"] = crash_dir
    wf.config["monitoring"]["enabled"] = "true"
    # wf.write_graph("workflow_graph.png", graph2use="exec")
    # wf.write_graph("workflow_graph_c.png", graph2use="colored")
    wf.run(plugin='MultiProc', plugin_args={'n_procs': n_cpu})
Example #23
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example #24
def amide_noe(zfrqs, name='Amide_NOE'):
    inputnode = Node(IdentityInterface(fields=['zspec_file', 'mask_file']),
                     name='inputnode')
    outputnode = Node(
        IdentityInterface(fields=['diff_file', 'DS', 'MT', 'Amide', 'NOE']),
        name='outputnode')
    # Repeat 2-pool fit
    f0_indices = (np.abs(zfrqs) > 9.9) | (np.abs(zfrqs) < 1.1)
    sequence = {
        'MTSat': {
            'pulse': {
                'p1': 0.4,
                'p2': 0.3,
                'bandwidth': 0.39
            },
            'Trf': 0.02,
            'TR': 4,
            'FA': 5,
            'sat_f0': zfrqs[f0_indices].tolist(),
            # match the number of selected frequencies, not the full array
            'sat_angle': np.repeat(180.0, f0_indices.sum()).tolist()
        }
    }
    two_pools = [{
        'name': 'DS',
        'df0': [0, -2.5, 2.5],
        'fwhm': [1.0, 1.e-6, 3.0],
        'A': [0.2, 1.e-3, 1.0],
        'use_bandwidth': True
    }, {
        'name': 'MT',
        'df0': [-2.5, -5.0, -0.5],
        'fwhm': [50.0, 35.0, 200.0],
        'A': [0.3, 1.e-3, 1.0]
    }]
    backg_select = Node(Select(volumes=np.where(f0_indices)[0].tolist(),
                               out_file='bg_zspec.nii.gz'),
                        name='backg_select')
    backg_fit = Node(Lorentzian(sequence=sequence,
                                pools=two_pools,
                                verbose=True),
                     name='backg_fit')
    # Simulate data for all frequencies
    sequence['MTSat']['sat_f0'] = zfrqs.tolist()
    sequence['MTSat']['sat_angle'] = np.repeat(180.0, len(zfrqs)).tolist()
    backg_sim = Node(LorentzianSim(sequence=sequence,
                                   pools=two_pools,
                                   noise=0,
                                   in_file='backg_sim.nii.gz',
                                   verbose=True),
                     name='backg_sim')
    backg_sub = Node(ImageMaths(op_string='-sub',
                                out_file='no_backg_sub.nii.gz'),
                     name='backg_sub')

    an_pools = [{
        'name': 'Amide',
        'df0': [3.5, 2.0, 6.0],
        'fwhm': [2.0, 0.4, 4.0],
        'A': [0.2, 1.e-3, 0.2],
        'use_bandwidth': True
    }, {
        'name': 'NOE',
        'df0': [-4.0, -6.0, -2.0],
        'fwhm': [2.0, 0.4, 4.0],
        'A': [0.2, 1.e-3, 0.2],
        'use_bandwidth': True
    }]
    f0_indices = (np.abs(zfrqs) > 0.99) & (np.abs(zfrqs) < 10.1)
    sequence['MTSat']['sat_f0'] = zfrqs[f0_indices].tolist()
    # again, one angle per selected frequency
    sequence['MTSat']['sat_angle'] = np.repeat(180.0, f0_indices.sum()).tolist()
    an_select = Node(Select(volumes=np.where(f0_indices)[0].tolist(),
                            out_file='fg_zspec.nii.gz'),
                     name='an_select')

    an_fit = Node(Lorentzian(sequence=sequence,
                             pools=an_pools,
                             Zref=0.0,
                             additive=True,
                             verbose=True),
                  name='an_pool')

    # outputnode is already defined above; redefining it here would drop
    # the 'diff_file' field that the connections below rely on.
    wf = Workflow(name=name)
    wf.connect([
        (inputnode, backg_select, [('zspec_file', 'in_file')]),
        (backg_select, backg_fit, [('out_file', 'in_file')]),
        (inputnode, backg_fit, [('mask_file', 'mask_file')]),
        (backg_fit, backg_sim, [('DS_f0', 'DS_f0'), ('DS_fwhm', 'DS_fwhm'),
                                ('DS_A', 'DS_A'), ('MT_f0', 'MT_f0'),
                                ('MT_fwhm', 'MT_fwhm'), ('MT_A', 'MT_A')]),
        (inputnode, backg_sub, [('zspec_file', 'in_file2')]),
        (backg_sim, backg_sub, [('out_file', 'in_file')]),
        (backg_sub, an_select, [('out_file', 'in_file')]),
        (an_select, an_fit, [('out_file', 'in_file')]),
        (inputnode, an_fit, [('mask_file', 'mask_file')]),
        (backg_sub, outputnode, [('out_file', 'diff_file')]),
        (backg_fit, outputnode, [('DS_A', 'DS'), ('MT_A', 'MT')]),
        (an_fit, outputnode, [('Amide_A', 'Amide'), ('NOE_A', 'NOE')])
    ])
    return wf
Example #25
modelspec.inputs.time_repetition = 1.  # make sure it's a float (note the dot)
modelspec.inputs.high_pass_filter_cutoff = 128.

################################################
#modelspec.inputs.subject_info = subjectinfo(subject_id) # run per subject

level1design = pe.Node(interface=spm.Level1Design(),
                       name="level1design")  #, base_dir = '/media/Data/work')
level1design.inputs.timing_units = modelspec.inputs.output_units
level1design.inputs.interscan_interval = 1.
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
level1design.inputs.model_serial_correlations = 'AR(1)'

#######################################################################################################################
# Initiation of a workflow
wfSPM = Workflow(name="l1spm", base_dir=output_dir)
wfSPM.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (selectfiles, runinfo, [('events', 'events_file'),
                            ('regressors', 'regressors_file')]),
    (selectfiles, extract, [('func', 'in_file')]),
    (extract, smooth, [('roi_file', 'in_files')]),
    (smooth, runinfo, [('smoothed_files', 'in_file')]),
    (smooth, modelspec, [('smoothed_files', 'functional_runs')]),
    (runinfo, modelspec, [('info', 'subject_info'),
                          ('realign_file', 'realignment_parameters')]),
])
wfSPM.connect([(modelspec, level1design, [("session_info", "session_info")])])

###########################################################################
# calculating the minimum across time points on merged mask image
minmask = Node(fsl.MinImage(), name="minmask")

# creating datasink to collect outputs
datasink = Node(
    DataSink(base_directory=os.path.join(outDir, 'Flanker_Cope4_Level2')),
    name='datasink')

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########
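# Sketch (added): the connections below reference merge, design, and
# estimation nodes that are defined elsewhere in the original script. A hedged
# reconstruction using standard FSL interfaces might look like this, where
# N_SUBJECTS is a placeholder for the number of first-level inputs:
N_SUBJECTS = 20  # placeholder
copemerge = Node(fsl.Merge(dimension='t'), name='copemerge')
varcopemerge = Node(fsl.Merge(dimension='t'), name='varcopemerge')
maskmerge = Node(fsl.Merge(dimension='t'), name='maskmerge')
level2design = Node(fsl.L2Model(num_copes=N_SUBJECTS), name='level2design')
flameo = Node(fsl.FLAMEO(run_mode='flame1'), name='flameo')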

# creating the workflow
secondLevel = Workflow(name="secondLevel", base_dir=outDir)

# connecting nodes
secondLevel.connect(level2design, 'design_mat', flameo, 'design_file')
secondLevel.connect(level2design, 'design_con', flameo, 't_con_file')
secondLevel.connect(level2design, 'design_grp', flameo, 'cov_split_file')
secondLevel.connect(copemerge, 'merged_file', flameo, 'cope_file')
secondLevel.connect(varcopemerge, 'merged_file', flameo, 'var_cope_file')
secondLevel.connect(maskmerge, 'merged_file', minmask, 'in_file')
secondLevel.connect(minmask, 'out_file', flameo, 'mask_file')
secondLevel.connect(flameo, 'stats_dir', datasink, 'stats_dir')

# running the workflow
secondLevel.run()
Example #27
def create_dti():
    # main workflow for preprocessing diffusion data
    # fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # Initiation of a workflow
    dwi_preproc = Workflow(name="dwi_preproc")
    # inputnode
    inputnode = Node(IdentityInterface(fields=[
        'subject_id', 'freesurfer_dir', 'aseg', 'dwi', 'dwi_b0', 'dwi_mag',
        'dwi_ph', 'bvals', 'bvecs', 'dwi_dwelltime', 'te_diff'
    ]),
                     name='inputnode')
    # output node
    outputnode = Node(IdentityInterface(fields=[
        'bo_brain', "bo_brainmask", 'dwi_denoised', 'dwi_unringed',
        "mag2b0mat", "mag2b0", "fmap", "eddy_corr", "rotated_bvecs",
        "total_movement_rms", "outlier_report", "cnr_maps", "residuals",
        "shell_params", "eddy_params", 'dti_fa', 'dti_md', 'dti_l1', 'dti_l2',
        'dti_l3', 'dti_v1', 'dti_v2', 'dti_v3', 'fa2anat', 'fa2anat_mat',
        'fa2anat_dat'
    ]),
                      name='outputnode')
    '''
    workflow to run distortion correction
    -------------------------------------
    '''
    distor_corr = create_distortion_correct()
    '''
    tensor fitting
    --------------
    '''
    dti = Node(fsl.DTIFit(), name='dti')
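    # DTIFit produces per-voxel tensor maps: fractional anisotropy (FA), mean
    # diffusivity (MD), the three eigenvalues (L1-L3), and the three
    # eigenvectors (V1-V3); these are forwarded to the outputnode below.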

    # Connecting the nodes
    dwi_preproc.connect([
        (inputnode, distor_corr, [('dwi', 'inputnode.dwi'),
                                  ('dwi_b0', 'inputnode.dwi_b0'),
                                  ('dwi_mag', 'inputnode.dwi_mag'),
                                  ('dwi_ph', 'inputnode.dwi_ph'),
                                  ('bvals', 'inputnode.bvals'),
                                  ('bvecs', 'inputnode.bvecs'),
                                  ('dwi_dwelltime', 'inputnode.dwi_dwelltime'),
                                  ('te_diff', 'inputnode.te_diff')]),
        (distor_corr, outputnode, [('outputnode.bo_brain', 'bo_brain'),
                                   ('outputnode.bo_brainmask', 'bo_brainmask'),
                                   ('outputnode.dwi_denoised', 'dwi_denoised'),
                                   ('outputnode.dwi_unringed', 'dwi_unringed'),
                                   ('outputnode.fmap', 'fmap'),
                                   ('outputnode.mag2b0mat', 'mag2b0mat'),
                                   ('outputnode.mag2b0', 'mag2b0'),
                                   ('outputnode.eddy_corr', 'eddy_corr'),
                                   ('outputnode.rotated_bvecs',
                                    'rotated_bvecs'),
                                   ('outputnode.total_movement_rms',
                                    'total_movement_rms'),
                                   ('outputnode.cnr_maps', 'cnr_maps'),
                                   ('outputnode.residuals', 'residuals'),
                                   ('outputnode.shell_params', 'shell_params'),
                                   ('outputnode.eddy_params', 'eddy_params'),
                                   ('outputnode.outlier_report',
                                    'outlier_report')]),
        (distor_corr, dti, [('outputnode.rotated_bvecs', 'bvecs'),
                            ('outputnode.bo_brainmask', 'mask'),
                            ('outputnode.eddy_corr', 'dwi'),
                            ('outputnode.bvals', 'bvals')]),
        (dti, outputnode, [('FA', 'dti_fa'),
                           ('MD', 'dti_md'),
                           ('L1', 'dti_l1'),
                           ('L2', 'dti_l2'),
                           ('L3', 'dti_l3'),
                           ('V1', 'dti_v1'),
                           ('V2', 'dti_v2'),
                           ('V3', 'dti_v3')])
    ])
    '''
    coregistration of FA and T1
    ------------------------------------
    '''

    # linear registration with bbregister
    bbreg = Node(fs.BBRegister(contrast_type='t1',
                               out_fsl_file='fa2anat.mat',
                               out_reg_file='fa2anat.dat',
                               registered_file='fa2anat_bbreg.nii.gz',
                               init='fsl'),
                 name='bbregister')

    # connecting the nodes
    dwi_preproc.connect([
        (inputnode, bbreg, [('subject_id', 'subject_id')]),
        (inputnode, bbreg, [('freesurfer_dir', 'subjects_dir')]),
        (dti, bbreg, [("FA", "source_file")]),
        (bbreg, outputnode, [('out_fsl_file', 'fa2anat_mat'),
                             ('out_reg_file', 'fa2anat_dat'),
                             ('registered_file', 'fa2anat')])
    ])

    return dwi_preproc
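# Usage sketch (added): driving the DTI preprocessing workflow. File names,
# directories, and acquisition parameters below are placeholders:
dwi_wf = create_dti()
dwi_wf.base_dir = '/scratch/dwi_work'
dwi_wf.inputs.inputnode.subject_id = 'sub-01'
dwi_wf.inputs.inputnode.freesurfer_dir = '/data/freesurfer'
dwi_wf.inputs.inputnode.dwi = 'dwi.nii.gz'
dwi_wf.inputs.inputnode.dwi_b0 = 'b0.nii.gz'
dwi_wf.inputs.inputnode.dwi_mag = 'fmap_mag.nii.gz'
dwi_wf.inputs.inputnode.dwi_ph = 'fmap_phase.nii.gz'
dwi_wf.inputs.inputnode.bvals = 'dwi.bval'
dwi_wf.inputs.inputnode.bvecs = 'dwi.bvec'
dwi_wf.inputs.inputnode.dwi_dwelltime = 0.00034  # placeholder, seconds
dwi_wf.inputs.inputnode.te_diff = 2.46           # placeholder, ms
dwi_wf.run()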
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------
        name : name of workflow (default: 'registration')

    Inputs:

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs:

        outputspec.func2anat_transform : FLIRT-format functional-to-anatomical transform
        outputspec.out_reg_file : bbregister (tkreg-format) registration file
        outputspec.anat2target_transform : ANTs composite anatomical-to-target transform
        outputspec.transforms : combined transform list for functional-to-target resampling
        outputspec.anat2target : anatomical image warped to target space (for QA)
        outputspec.transformed_mean : mean functional image in target space
        outputspec.segmentation_files : FAST tissue segmentations in functional space
        outputspec.aparc : aparc+aseg labels in functional space
        outputspec.min_cost_file : bbregister registration cost

    Example
    -------
        See code below
    """

    register = Workflow(name=name)

    inputnode = Node(
        interface=IdentityInterface(fields=[
            'source_files', 'mean_image', 'subject_id', 'subjects_dir',
            'target_image'
        ]),
        name='inputspec')

    outputnode = Node(
        interface=IdentityInterface(fields=[
            'func2anat_transform', 'out_reg_file', 'anat2target_transform',
            'transforms', 'transformed_mean', 'segmentation_files',
            'anat2target', 'aparc', 'min_cost_file'
        ]),
        name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(), name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
    """
    Estimate the tissue classes from the anatomical image. But use aparc+aseg's brain mask
    """

    binarize_aparc = Node(
        fs.Binarize(min=0.5, out_type="nii.gz", dilate=1),
        name="binarize_aparc")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize_aparc,
                     "in_file")
    stripper = Node(fsl.ApplyMask(), name='stripper')
    register.connect(binarize_aparc, "binary_file", stripper, "mask_file")
    register.connect(convert, 'out_file', stripper, 'in_file')

    fast = Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """

    binarize = MapNode(
        fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
        iterfield=['in_file'],
        name='binarize')
    register.connect(fast, 'partial_volume_files', binarize, 'in_file')
    """
    Apply inverse transform to take segmentations to functional space
    """

    applyxfm = MapNode(
        freesurfer.ApplyVolTransform(inverse=True, interp='nearest'),
        iterfield=['target_file'],
        name='inverse_transform')
    register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
    register.connect(binarize, 'out_file', applyxfm, 'target_file')
    register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(
        freesurfer.ApplyVolTransform(inverse=True, interp='nearest'),
        name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm,
                     'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template

        * All parameters are set using the example from:
          #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
        * This is currently set to perform a very quick registration. However,
          the registration can be made significantly more accurate for cortical
          structures by increasing the number of iterations.
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {'sbatch_args': '-c%d' % 4}
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
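    # Note (added): per the comment above, accuracy of the final SyN stage can
    # be traded for runtime by raising its iteration schedule, e.g.
    # (illustrative values only):
    # reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 100, 70]]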

    """
    Concatenate the affine and ants transforms into a list
    """

    merge = Node(Merge(2), name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, ('composite_transform', pickfirst), merge, 'in1')

    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 3
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    warpmean.inputs.num_threads = 4
    warpmean.plugin_args = {'sbatch_args': '-c%d' % 4}

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')

    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(applyxfm, 'transformed_file', outputnode,
                     'segmentation_files')
    register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file', outputnode,
                     'func2anat_transform')
    register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')
    register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file')

    return register
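# Sketch (added): the workflow above uses two small helpers, get_aparc_aseg
# and pickfirst, that are defined elsewhere in the original file. Hedged
# reconstructions, following the pattern of the upstream nipype examples:
def get_aparc_aseg(files):
    """Return the aparc+aseg.mgz file from a FreeSurferSource file list."""
    for name in files:
        if 'aparc+aseg.mgz' in name:
            return name
    raise ValueError('aparc+aseg.mgz not found')


def pickfirst(files):
    """Return the first file if given a list, else the file itself."""
    if isinstance(files, list):
        return files[0]
    return files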
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(
        Rename(format_string='rest_%(run)02d'),
        iterfield=['in_file', 'run'],
        name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign")
    realign.inputs.slice_times = slice_times
    realign.inputs.tr = TR
    realign.inputs.slice_info = 2
    realign.plugin_args = {'sbatch_args': '-c%d' % 4}

    # Compute TSNR on realigned data regressing polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, "out_file", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(CalculateMedian(), name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')

    """
    Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file

    """Quantify TSNR in each freesurfer ROI
    """

    get_roi_tsnr = MapNode(
        fs.SegStats(default_color_table=True),
        iterfield=['in_file'],
        name='get_aparc_tsnr')
    get_roi_tsnr.inputs.avgwf_txt_file = True
    wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
    wf.connect(registration, 'outputspec.aparc', get_roi_tsnr,
               'segmentation_file')

    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'NiPy'
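    # Note (added): with use_norm=True, rapidart flags volumes whose composite
    # motion (translation plus rotation) exceeds norm_threshold, and volumes
    # whose global intensity z-score exceeds zintensity_threshold;
    # use_differences=[True, True] applies both tests to scan-to-scan
    # differences.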

    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_file')]),
        (realign, art, [('out_file', 'realigned_files')]),
        (realign, art, [('par_file', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(
        Function(
            input_names=['motion_params', 'order', 'derivatives'],
            output_names=['out_files'],
            function=motion_regressors,
            imports=imports),
        name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(
        Function(
            input_names=[
                'motion_params', 'comp_norm', 'outliers', 'detrend_poly'
            ],
            output_names=['out_files'],
            function=build_filter1,
            imports=imports),
        name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(
        fsl.GLM(
            out_f_name='F_mcart.nii.gz',
            out_pf_name='pF_mcart.nii.gz',
            demean=True),
        iterfield=['in_file', 'design', 'out_res_name'],
        name='filtermotion')

    wf.connect(realign, 'out_file', filter1, 'in_file')
    wf.connect(realign, ('out_file', rename, '_filtermotart'), filter1,
               'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(
        ACompCor(),
        iterfield=['realigned_file', 'extra_regressors'],
        name='makecompcorrfilter')
    createfilter2.inputs.components_file = 'noise_components.txt'
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(
        fsl.GLM(out_f_name='F.nii.gz', out_pf_name='pF.nii.gz', demean=True),
        iterfield=['in_file', 'design', 'out_res_name'],
        name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'components_file', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(
        Function(
            input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
            output_names=['out_files'],
            function=bandpass_filter,
            imports=imports),
        name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
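    # Note (added): fs is the sampling rate in Hz (1/TR). By the convention of
    # the bandpass_filter helper in the upstream nipype example, a cutoff of
    # -1 (the default arguments of this factory) disables filtering on that
    # side of the band; treat this as an assumption about the helper.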
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.fsl.IsotropicSmooth`.
    """

    smooth = MapNode(
        interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"])
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_file')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'out_file', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(
        ants.ApplyTransforms(), iterfield=['input_image'], name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '-c%d' % 2}

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
    sampleaparc.inputs.segment_id = (
        [8] + list(range(10, 14)) + [17, 18, 26, 47] + list(range(49, 55)) +
        [58] + list(range(1001, 1036)) + list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        import os
        out_names = []
        for filename in files:
            path, name, _ = split_filename(filename)
            out_names.append(os.path.join(path, name + suffix))
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(
        freesurfer.SampleToSurface(),
        iterfield=['source_file'],
        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
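    # sampling_range=(0.1, 0.9, 0.1) with sampling_units="frac": average
    # samples taken from 10% to 90% of the cortical thickness in steps of 10%.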
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(
        Function(
            input_names=['left', 'right'],
            output_names=['out_file'],
            function=combine_hemi,
            imports=imports),
        iterfield=['left', 'right'],
        name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(
        Function(
            input_names=['timeseries_file', 'label_file', 'indices'],
            output_names=['out_file'],
            function=extract_subrois,
            imports=imports),
        iterfield=['timeseries_file'],
        name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [
        ('_target_subject_', ''),
        ('_filtermotart_cleaned_bp_trans_masked', ''),
        ('_filtermotart_cleaned_bp', ''),
    ]
    substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filter_noise_nosmooth%d" % i, "")
                      for i in range(11)[::-1]]
    substitutions += [("_makecompcorfilter%d" % i, "")
                      for i in range(11)[::-1]]
    substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1))
                      for i in range(11)[::-1]]

    substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"),
                      ("T1_out_brain_pve_1_maths_warped",
                       "compcor_gm"), ("T1_out_brain_pve_2_maths_warped",
                                       "compcor_wm"),
                      ("output_warped_image_maths",
                       "target_brain_mask"), ("median_brain_mask",
                                              "native_brain_mask"), ("corr_",
                                                                     "")]

    regex_subs = [
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(registration, 'outputspec.min_cost_file', datasink,
               'resting.qa.mincost')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map')
    wf.connect([(get_roi_tsnr, datasink,
                 [('avgwf_txt_file', 'resting.qa.tsnr'),
                  ('summary_file', 'resting.qa.tsnr.@summary')])])

    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'components_file', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
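# Usage sketch (added): the resting-state factory takes everything it needs as
# arguments. All values below are placeholders:
slice_times = [0.0, 1.0, 0.5, 1.5]  # placeholder slice-onset times (seconds)
rest_wf = create_workflow(files=['rest_run1.nii.gz', 'rest_run2.nii.gz'],
                          target_file='mni_template.nii.gz',
                          subject_id='sub-01',
                          TR=2.0,
                          slice_times=slice_times,
                          vol_fwhm=6.0,
                          surf_fwhm=5.0,
                          lowpass_freq=0.1,
                          highpass_freq=0.01,
                          subjects_dir='/data/freesurfer',
                          sink_directory='/data/output')
rest_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})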
Example #30
File: preproc.py  Project: sgagnon/lyman
def create_skullstrip_workflow(name="skullstrip"):
    """Remove non-brain voxels from the timeseries."""

    # Define the workflow inputs
    inputnode = Node(
        IdentityInterface(["subject_id", "timeseries", "reg_file"]), "inputs")

    # Mean the timeseries across the fourth dimension
    origmean = MapNode(fsl.MeanImage(), "in_file", "origmean")

    # Grab the Freesurfer aparc+aseg file as an anatomical brain mask
    getaseg = Node(
        io.SelectFiles({"aseg": "{subject_id}/mri/aparc+aseg.mgz"},
                       base_directory=os.environ["SUBJECTS_DIR"]), "getaseg")

    # Threshold the aseg volume to get a boolean mask
    makemask = Node(fs.Binarize(dilate=4, min=0.5), "makemask")

    # Transform the brain mask into functional space
    transform = MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        ["reg_file", "source_file"], "transform")

    # Convert the mask to nifti and rename
    convertmask = MapNode(fs.MRIConvert(out_file="functional_mask.nii.gz"),
                          "in_file", "convertmask")

    # Use the mask to skullstrip the timeseries
    stripts = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripts")

    # Use the mask to skullstrip the mean image
    stripmean = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripmean")

    # Generate images summarizing the skullstrip and resulting data
    reportmask = MapNode(MaskReport(), ["mask_file", "orig_file", "mean_file"],
                         "reportmask")

    # Define the workflow outputs
    outputnode = Node(
        IdentityInterface(["timeseries", "mean_file", "mask_file", "report"]),
        "outputs")

    # Define and connect the workflow
    skullstrip = Workflow(name)

    skullstrip.connect([
        (inputnode, origmean, [("timeseries", "in_file")]),
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (origmean, transform, [("out_file", "source_file")]),
        (getaseg, makemask, [("aseg", "in_file")]),
        (makemask, transform, [("binary_file", "target_file")]),
        (inputnode, transform, [("reg_file", "reg_file")]),
        (transform, stripts, [("transformed_file", "mask_file")]),
        (transform, stripmean, [("transformed_file", "mask_file")]),
        (inputnode, stripts, [("timeseries", "in_file")]),
        (origmean, stripmean, [("out_file", "in_file")]),
        (stripmean, reportmask, [("out_file", "mean_file")]),
        (origmean, reportmask, [("out_file", "orig_file")]),
        (transform, reportmask, [("transformed_file", "mask_file")]),
        (transform, convertmask, [("transformed_file", "in_file")]),
        (stripts, outputnode, [("out_file", "timeseries")]),
        (stripmean, outputnode, [("out_file", "mean_file")]),
        (convertmask, outputnode, [("out_file", "mask_file")]),
        (reportmask, outputnode, [("out_files", "report")]),
    ])

    return skullstrip
Example #31
File: preproc.py  Project: sgagnon/lyman
def create_preprocessing_workflow(name="preproc", exp_info=None):
    """Return a Nipype workflow for fMRI preprocessing.

    This mostly follows the preprocessing in FSL, although some
    of the processing has been moved into pure Python.

    Parameters
    ----------
    name : string
        workflow object name
    exp_info : dict
        dictionary with experimental information

    """
    preproc = Workflow(name)

    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    # Define the inputs for the preprocessing workflow
    in_fields = ["timeseries", "subject_id"]

    if exp_info["whole_brain_template"]:
        in_fields.append("whole_brain")

    if exp_info["fieldmap_template"]:
        in_fields.append("fieldmap")

    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Remove equilibrium frames and convert to float
    prepare = MapNode(PrepTimeseries(), "in_file", "prep_timeseries")
    prepare.inputs.frames_to_toss = exp_info["frames_to_toss"]

    # Unwarp using fieldmap images
    if exp_info["fieldmap_template"]:
        unwarp = create_unwarp_workflow(fieldmap_pe=exp_info["fieldmap_pe"])

    # Motion and slice time correct
    realign = create_realignment_workflow(
        temporal_interp=exp_info["temporal_interp"],
        TR=exp_info["TR"],
        slice_order=exp_info["slice_order"],
        interleaved=exp_info["interleaved"])

    # Estimate a registration from functional to anatomical space
    coregister = create_bbregister_workflow(
        partial_brain=bool(exp_info["whole_brain_template"]),
        init_with=exp_info["coreg_init"])

    # Skullstrip the brain using the Freesurfer segmentation
    skullstrip = create_skullstrip_workflow()

    # Smooth intelligently in the volume
    susan = create_susan_smooth()
    susan.inputs.inputnode.fwhm = exp_info["smooth_fwhm"]

    # Scale and filter the timeseries
    filter_smooth = create_filtering_workflow("filter_smooth",
                                              exp_info["hpf_cutoff"],
                                              exp_info["TR"],
                                              "smoothed_timeseries")

    filter_rough = create_filtering_workflow("filter_rough",
                                             exp_info["hpf_cutoff"],
                                             exp_info["TR"],
                                             "unsmoothed_timeseries")

    # Automatically detect motion and intensity outliers
    artifacts = MapNode(ArtifactDetection(),
                        ["timeseries", "mask_file", "motion_file"],
                        "artifacts")
    artifacts.inputs.intensity_thresh = exp_info["intensity_threshold"]
    artifacts.inputs.motion_thresh = exp_info["motion_threshold"]
    artifacts.inputs.spike_thresh = exp_info["spike_threshold"]

    # Extract nuisance variables from anatomical sources
    confounds = create_confound_extraction_workflow("confounds",
                                                    exp_info["wm_components"])

    # Save the experiment info for this run
    saveparams = MapNode(SaveParameters(exp_info=exp_info), "in_file",
                         "saveparams")

    preproc.connect([
        (inputnode, prepare, [("timeseries", "in_file")]),
        (realign, artifacts, [("outputs.motion_file", "motion_file")]),
        (realign, coregister, [("outputs.timeseries", "inputs.timeseries")]),
        (inputnode, coregister, [("subject_id", "inputs.subject_id")]),
        (realign, skullstrip, [("outputs.timeseries", "inputs.timeseries")]),
        (inputnode, skullstrip, [("subject_id", "inputs.subject_id")]),
        (coregister, skullstrip, [("outputs.tkreg_mat", "inputs.reg_file")]),
        (skullstrip, artifacts, [("outputs.mask_file", "mask_file")]),
        (skullstrip, susan, [("outputs.mask_file", "inputnode.mask_file"),
                             ("outputs.timeseries", "inputnode.in_files")]),
        (susan, filter_smooth, [("outputnode.smoothed_files",
                                 "inputs.timeseries")]),
        (skullstrip, filter_smooth, [("outputs.mask_file",
                                      "inputs.mask_file")]),
        (skullstrip, filter_rough, [("outputs.timeseries",
                                     "inputs.timeseries"),
                                    ("outputs.mask_file",
                                     "inputs.mask_file")]),
        (filter_rough, artifacts, [("outputs.timeseries", "timeseries")]),
        (filter_rough, confounds, [("outputs.timeseries",
                                    "inputs.timeseries")]),
        (inputnode, confounds, [("subject_id", "inputs.subject_id")]),
        (skullstrip, confounds, [("outputs.mask_file", "inputs.brain_mask")]),
        (coregister, confounds, [("outputs.tkreg_mat", "inputs.reg_file")]),
        (inputnode, saveparams, [("timeseries", "in_file")]),
    ])

    # Optionally add a connection for unwarping
    if bool(exp_info["fieldmap_template"]):
        preproc.connect([
            (inputnode, unwarp, [("fieldmap", "inputs.fieldmap")]),
            (prepare, unwarp, [("out_file", "inputs.timeseries")]),
            (unwarp, realign, [("outputs.timeseries", "inputs.timeseries")])
        ])
    else:
        preproc.connect([
            (prepare, realign, [("out_file", "inputs.timeseries")]),
        ])

    # Optionally connect the whole brain template
    if bool(exp_info["whole_brain_template"]):
        preproc.connect([(inputnode, coregister, [
            ("whole_brain_template", "inputs.whole_brain_template")
        ])])

    # Define the outputs of the top-level workflow
    output_fields = [
        "smoothed_timeseries", "unsmoothed_timeseries", "example_func",
        "mean_func", "functional_mask", "realign_report", "mask_report",
        "artifact_report", "confound_file", "flirt_affine", "tkreg_affine",
        "coreg_report", "json_file"
    ]

    if bool(exp_info["fieldmap_template"]):
        output_fields.append("unwarp_report")

    outputnode = Node(IdentityInterface(output_fields), "outputs")

    preproc.connect([
        (realign, outputnode, [("outputs.example_func", "example_func"),
                               ("outputs.report", "realign_report")]),
        (skullstrip, outputnode, [("outputs.mask_file", "functional_mask"),
                                  ("outputs.report", "mask_report")]),
        (artifacts, outputnode, [("out_files", "artifact_report")]),
        (coregister, outputnode, [("outputs.tkreg_mat", "tkreg_affine"),
                                  ("outputs.flirt_mat", "flirt_affine"),
                                  ("outputs.report", "coreg_report")]),
        (filter_smooth, outputnode, [("outputs.timeseries",
                                      "smoothed_timeseries")]),
        (filter_rough, outputnode, [("outputs.timeseries",
                                     "unsmoothed_timeseries"),
                                    ("outputs.mean_file", "mean_func")]),
        (confounds, outputnode, [("outputs.confound_file", "confound_file")]),
        (saveparams, outputnode, [("json_file", "json_file")]),
    ])

    if bool(exp_info["fieldmap_template"]):
        preproc.connect([
            (unwarp, outputnode, [("outputs.report", "unwarp_report")]),
        ])

    return preproc, inputnode, outputnode
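# Usage sketch (added): the factory returns the workflow plus its input and
# output nodes so a caller can wire it into a larger graph. Paths and run
# names are placeholders:
preproc, preproc_input, preproc_output = create_preprocessing_workflow()
preproc.base_dir = '/scratch/preproc_work'
preproc_input.inputs.subject_id = 'sub-01'
preproc_input.inputs.timeseries = ['run1.nii.gz', 'run2.nii.gz']
preproc.run()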
Example #32
File: preproc.py  Project: sgagnon/lyman
def create_realignment_workflow(name="realignment",
                                temporal_interp=True,
                                TR=2,
                                slice_order="up",
                                interleaved=True):
    """Motion and slice-time correct the timeseries and summarize."""
    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    # Get the middle volume of each run for motion correction
    extractref = MapNode(ExtractRealignmentTarget(), "in_file", "extractref")

    # Motion correct to middle volume of each run
    mcflirt = MapNode(
        fsl.MCFLIRT(cost="normcorr",
                    interpolation="spline",
                    save_mats=True,
                    save_rms=True,
                    save_plots=True), ["in_file", "ref_file"], "mcflirt")

    # Optionally interpolate in time to correct for slice acquisition differences
    if temporal_interp:
        slicetime = MapNode(fsl.SliceTimer(time_repetition=TR), "in_file",
                            "slicetime")

        if slice_order == "down":
            slicetime.inputs.index_dir = True
        elif slice_order != "up":
            raise ValueError("slice_order must be 'up' or 'down'")

        if interleaved:
            slicetime.inputs.interleaved = True

    # Generate a report on the motion correction
    mcreport = MapNode(RealignmentReport(),
                       ["target_file", "realign_params", "displace_params"],
                       "mcreport")

    # Define the outputs
    outputnode = Node(
        IdentityInterface(
            ["timeseries", "example_func", "report", "motion_file"]),
        "outputs")

    # Define and connect the sub workflow
    realignment = Workflow(name)

    realignment.connect([
        (inputnode, extractref, [("timeseries", "in_file")]),
        (inputnode, mcflirt, [("timeseries", "in_file")]),
        (extractref, mcflirt, [("out_file", "ref_file")]),
        (extractref, mcreport, [("out_file", "target_file")]),
        (mcflirt, mcreport, [("par_file", "realign_params"),
                             ("rms_files", "displace_params")]),
        (extractref, outputnode, [("out_file", "example_func")]),
        (mcreport, outputnode, [("realign_report", "report"),
                                ("motion_file", "motion_file")]),
    ])

    if temporal_interp:
        realignment.connect([(mcflirt, slicetime, [("out_file", "in_file")]),
                             (slicetime, outputnode,
                              [("slice_time_corrected_file", "timeseries")])])
    else:
        realignment.connect([(mcflirt, outputnode,
                              [("out_file", "timeseries")])])

    return realignment
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign")
    realign.inputs.slice_times = slice_times
    realign.inputs.tr = TR
    realign.inputs.slice_info = 2
    realign.plugin_args = {'sbatch_args': '-c%d' % 4}

    # Compute TSNR on realigned data regressing polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, "out_file", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')

    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file

    """Quantify TSNR in each freesurfer ROI
    """

    get_roi_tsnr = MapNode(fs.SegStats(default_color_table=True),
                           iterfield=['in_file'], name='get_aparc_tsnr')
    get_roi_tsnr.inputs.avgwf_txt_file = True
    wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
    wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, 'segmentation_file')

    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'NiPy'

    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([(name_unique, realign, [('out_file', 'in_file')]),
                (realign, art, [('out_file', 'realigned_files')]),
                (realign, art, [('par_file', 'realignment_parameters')]),
                ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')
    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(input_names=['motion_params', 'order',
                                        'derivatives'],
                           output_names=['out_files'],
                           function=motion_regressors,
                           imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
                                               'outliers', 'detrend_poly'],
                                  output_names=['out_files'],
                                  function=build_filter1,
                                  imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii.gz',
                              out_pf_name='pF_mcart.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(realign, 'out_file', filter1, 'in_file')
    wf.connect(realign, ('out_file', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(ACompCor(),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.components_file = 'noise_components.txt'
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii.gz',
                              out_pf_name='pF.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'),
               filter2, 'out_res_name')
    wf.connect(createfilter2, 'components_file', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(input_names=['files', 'lowpass_freq',
                                          'highpass_freq', 'fs'],
                             output_names=['out_files'],
                             function=bandpass_filter,
                             imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')

    """Smooth the functional data using
    :class:`nipype.interfaces.fsl.IsotropicSmooth`.
    """

    smooth = MapNode(interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"])
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_file')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'out_file', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')

    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '-c%d' % 2}

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
                          iterfield=['in_file', 'summary_file',
                                     'avgwf_txt_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                                     list(range(49, 55)) + [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc',
               sampleaparc, 'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        import os
        out_names = []
        for filename in files:
            path, name, _ = split_filename(filename)
            out_names.append(os.path.join(path, name + suffix))
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'),
               sampleaparc, 'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'),
               sampleaparc, 'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
                                           'indices'],
                              output_names=['out_file'],
                              function=extract_subrois,
                              imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', ''),
                     ]
    substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filter_noise_nosmooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_makecompcorfilter%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1)) for i in range(11)[::-1]]

    substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"),
                      ("T1_out_brain_pve_1_maths_warped", "compcor_gm"),
                      ("T1_out_brain_pve_2_maths_warped", "compcor_wm"),
                      ("output_warped_image_maths", "target_brain_mask"),
                      ("median_brain_mask", "native_brain_mask"),
                      ("corr_", "")]

    regex_subs = [('_combiner.*/sar', '/smooth/'),
                  ('_combiner.*/ar', '/unsmooth/'),
                  ('_aparc_ts.*/sar', '/smooth/'),
                  ('_aparc_ts.*/ar', '/unsmooth/'),
                  ('_getsubcortts.*/sar', '/smooth/'),
                  ('_getsubcortts.*/ar', '/unsmooth/'),
                  ('series/sar', 'series/smooth/'),
                  ('series/ar', 'series/unsmooth/'),
                  ('_inverse_transform./', ''),
                  ]
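    # A minimal sketch of how DataSink applies the two lists above (assumed
    # semantics: `substitutions` are ordered str.replace calls applied first,
    # then `regexp_substitutions` as re.sub), shown on a hypothetical path:
    #
    #   import re
    #   def _apply(path, subs, regex_subs):
    #       for old, new in subs:
    #           path = path.replace(old, new)
    #       for pattern, repl in regex_subs:
    #           path = re.sub(pattern, repl, path)
    #       return path
    #
    #   _apply('resting/timeseries/sar_ts.nii.gz', substitutions, regex_subs)
    #   # -> 'resting/timeseries/smooth/_ts.nii.gz'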
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(registration, 'outputspec.min_cost_file', datasink, 'resting.qa.mincost')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map')
    wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'resting.qa.tsnr'),
                                          ('summary_file', 'resting.qa.tsnr.@summary')])])

    wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files',
               datasink, 'resting.regress.@regressors')
    wf.connect(createfilter2, 'components_file',
               datasink, 'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file',
               datasink, 'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file',
               datasink, 'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file',
               datasink, 'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file',
               datasink2, 'resting.parcellations.grayo.@surface')
    return wf
Example #34
from nipype import Node, Workflow
from nipype.interfaces import fsl
from pathlib import Path
import os

ROOT_DIR = Path(os.path.dirname(os.path.abspath(__file__))).parent

in_file = os.path.join(ROOT_DIR, 'data', '031768_t1w_deface_stx.nii.gz')
# workflow
skullstrip = Node(fsl.BET(in_file=in_file, mask=True), name="skullstrip")

smooth = Node(fsl.IsotropicSmooth(in_file=in_file, fwhm=4), name="smooth")

mask = Node(fsl.ApplyMask(), name="mask")

wf = Workflow(name="smoothflow",
              base_dir=os.path.join(ROOT_DIR, 'nipy_pipeline'))

# First the "simple", but more restricted method
wf.connect(skullstrip, "mask_file", mask, "mask_file")
# Now the more complicated method
wf.connect([(smooth, mask, [("out_file", "in_file")])])
wf.run()
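# The two styles are interchangeable; both edges could equally be declared in
# a single list-form call (equivalent to the two connects above):
#
#   wf.connect([(skullstrip, mask, [("mask_file", "mask_file")]),
#               (smooth, mask, [("out_file", "in_file")])])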
Example #35
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons"""
    import os
    from glob import glob
    from nipype import MapNode, Function, Workflow

    tmpdir.chdir()
    wd = os.getcwd()

    def func1(in1):
        return in1 + 1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1]
    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.config["execution"]["crashdump_dir"] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), "_0x*.json"))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp:
        fp.write("dummy file")
    w1.config["execution"].update(**{"stop_on_first_rerun": True})

    w1.run()
Example #36
import glob
from nipype import Node, Workflow, MapNode
from nipype.algorithms.misc import Gunzip
from nipype.interfaces import spm

img_files = glob.glob('/home/or/kpe_conn/fsl/maths/hippRight_ses2-1_*.nii.gz')

gunzip = MapNode(Gunzip(), name='gunzip',
                 iterfield=['in_file'])
gunzip.inputs.in_file = img_files

# use if comparing same session:
onesamplettestdes = Node(spm.OneSampleTTestDesign(),
                         name="onesampttestdes")


tTest = Workflow(name='oneSampleTtest_total')
tTest.base_dir = '/media/Data/work/amg_coordsseedBased'

# EstimateModel - estimates the model
level2estimate = Node(spm.EstimateModel(estimation_method={'Classical': 1}),
                      name="level2estimate")

# EstimateContrast - estimates group contrast
level2conestimate = Node(spm.EstimateContrast(group_contrast=True),
                         name="level2conestimate")
cont1 = ['Group', 'T', ['mean'], [1]]
level2conestimate.inputs.contrasts = [cont1]

# Threshold - thresholds contrasts
level2thresh = Node(spm.Threshold(contrast_index=1,
                                  use_topo_fdr=True),
                    name="level2thresh")
# (the remaining Threshold options are truncated in the source)
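# The node connections are also cut off in the source; the usual wiring for
# this SPM second-level pattern would be (a sketch, assuming the standard
# nipype field names):
tTest.connect([(gunzip, onesamplettestdes, [('out_file', 'in_files')]),
               (onesamplettestdes, level2estimate, [('spm_mat_file',
                                                     'spm_mat_file')]),
               (level2estimate, level2conestimate, [('spm_mat_file',
                                                     'spm_mat_file'),
                                                    ('beta_images',
                                                     'beta_images'),
                                                    ('residual_image',
                                                     'residual_image')]),
               (level2conestimate, level2thresh, [('spm_mat_file',
                                                   'spm_mat_file'),
                                                  ('spmT_images',
                                                   'stat_image')])])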
Example #37
def run(base_dir, TR):
    template = d.tpm
    matlab_cmd = '/home/cpulido/Documentos/programas/spm/spm12/run_spm12.sh /opt/mcr/v95/ script'
    spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)

    print('SPM version: ' + str(spm.SPMCommand().version))

    #base_dir = '/home/colciencias/test/base/'
    structural_dir = opj(base_dir, 'struc')
    experiment_dir = opj(base_dir, 'output/')
    output_dir = 'datasink'
    working_dir = 'workingdir'

    # list of subject identifiers
    subject_list = ['1']

    fwhm = 8  # smoothing width to apply (Gaussian kernel FWHM, in mm)
    #TR = 3                          # Repetition time
    init_volume = 5  # number of initial (dummy) volumes to discard
    iso_size = 2  # isotropic voxel size (in mm) for resampling the functional images

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=init_volume,
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    # MCFLIRT - motion correction
    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="motion_correction")

    # SliceTimer - correct for slice wise acquisition
    slicetimer = Node(SliceTimer(index_dir=False,
                                 interleaved=True,
                                 output_type='NIFTI',
                                 time_repetition=TR),
                      name="slice_timing_correction")

    # Smooth - image smoothing
    smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")

    n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'), name='n4bias')

    descomposition = Node(Descomposition(n_components=20,
                                         low_pass=0.1,
                                         high_pass=0.01,
                                         tr=TR),
                          name='descomposition')

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="artifact_detection")

    extract_confounds_ws_csf = Node(
        ExtractConfounds(out_file='ev_without_gs.csv'),
        name='extract_confounds_ws_csf')

    extract_confounds_gs = Node(ExtractConfounds(out_file='ev_with_gs.csv',
                                                 delimiter=','),
                                name='extract_confounds_global_signal')

    signal_extraction = Node(SignalExtraction(
        time_series_out_file='time_series.csv',
        correlation_matrix_out_file='correlation_matrix.png',
        atlas_identifier='cort-maxprob-thr25-2mm',
        tr=TR,
        plot=True),
                             name='signal_extraction')

    art_remotion = Node(ArtifacRemotion(out_file='fmri_art_removed.nii'),
                        name='artifact_remotion')

    # BET - Skullstrip anatomical and functional images
    bet_t1 = Node(BET(frac=0.5, robust=True, mask=True,
                      output_type='NIFTI_GZ'),
                  name="bet_t1")

    #     bet_fmri = Node(BET(frac=0.6, functional = True, output_type='NIFTI_GZ'), name="bet_fmri")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI'), name="segmentation")

    # Normalize - normalizes functional and structural images to the MNI template
    normalize_fmri = Node(Normalize12(jobtype='estwrite',
                                      tpm=template,
                                      write_voxel_sizes=[2, 2, 2],
                                      write_bounding_box=[[-90, -126, -72],
                                                          [90, 90, 108]]),
                          name="normalize_fmri")

    gunzip = Node(Gunzip(), name="gunzip")

    normalize_t1 = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                        name="normalize_t1")

    normalize_masks = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                           name="normalize_masks")

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="wm_mask_threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                     name="linear_warp_estimation")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="nonlinear_warp_estimation")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="registration_fmri")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="registration_mean_fmri")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    anat_file = opj(structural_dir, '{subject_id}', 't1.nii')
    func_file = opj('{subject_id}', 'fmri.nii')

    templates = {'anat': anat_file, 'func': func_file}

    selectfiles = Node(SelectFiles(templates, base_directory=base_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Create a coregistration workflow
    coregwf = Workflow(name='coreg_fmri_to_t1')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow
    coregwf.connect([
        (bet_t1, n4bias, [('out_file', 'in_file')]),
        (n4bias, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_latest),
                                    'in_file')]),
        (n4bias, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp_mean, [('out_file', 'reference')]),
    ])

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-')]
    #                 ('_fwhm_', 'fwhm-'),
    #                 ('_roi', ''),
    #                 ('_mcf', ''),
    #                 ('_st', ''),
    #                 ('_flirt', ''),
    #                 ('.nii_mean_reg', '_mean'),
    #                 ('.nii.par', '.par'),
    #                 ]
    #subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]

    #substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, mcflirt, [('roi_file', 'in_file')]),
        (mcflirt, slicetimer, [('out_file', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_t1.in_file'),
                                ('anat', 'nonlinear_warp_estimation.reference')
                                ]),
        (mcflirt, coregwf, [('mean_img', 'linear_warp_estimation.in_file'),
                            ('mean_img', 'nonlinear_warp_estimation.in_file'),
                            ('mean_img', 'registration_mean_fmri.in_file')]),
        (slicetimer, coregwf, [('slice_time_corrected_file',
                                'registration_fmri.in_file')]),
        (coregwf, art, [('registration_fmri.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (art, art_remotion, [('outlier_files', 'outlier_files')]),
        (coregwf, art_remotion, [('registration_fmri.out_file', 'in_file')]),
        (coregwf, gunzip, [('n4bias.out_file', 'in_file')]),
        (selectfiles, normalize_fmri, [('anat', 'image_to_align')]),
        (art_remotion, normalize_fmri, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_t1, [('anat', 'image_to_align')]),
        (gunzip, normalize_t1, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_masks, [('anat', 'image_to_align')]),
        (coregwf, normalize_masks, [(('segmentation.partial_volume_files',
                                      get_wm_csf), 'apply_to_files')]),
        (normalize_fmri, smooth, [('normalized_files', 'in_files')]),
        (smooth, extract_confounds_ws_csf, [('smoothed_files', 'in_file')]),
        (normalize_masks, extract_confounds_ws_csf, [('normalized_files',
                                                      'list_mask')]),
        (mcflirt, extract_confounds_ws_csf, [('par_file', 'file_concat')]),
        (smooth, extract_confounds_gs, [('smoothed_files', 'in_file')]),
        (normalize_t1, extract_confounds_gs,
         [(('normalized_files', change_to_list), 'list_mask')]),
        (extract_confounds_ws_csf, extract_confounds_gs, [('out_file',
                                                           'file_concat')]),
        (smooth, signal_extraction, [('smoothed_files', 'in_file')]),
        (extract_confounds_gs, signal_extraction, [('out_file',
                                                    'confounds_file')]),
        #(extract_confounds_ws_csf, signal_extraction, [('out_file', 'confounds_file')]),
        (smooth, descomposition, [('smoothed_files', 'in_file')]),
        (extract_confounds_ws_csf, descomposition, [('out_file',
                                                     'confounds_file')]),
        (extract_confounds_gs, datasink,
         [('out_file', 'preprocessing.@confounds_with_gs')]),
        (extract_confounds_ws_csf, datasink,
         [('out_file', 'preprocessing.@confounds_without_gs')]),
        (smooth, datasink, [('smoothed_files', 'preprocessing.@smoothed')]),
        (normalize_fmri, datasink, [('normalized_files',
                                     'preprocessing.@fmri_normalized')]),
        (normalize_t1, datasink, [('normalized_files',
                                   'preprocessing.@t1_normalized')]),
        (normalize_masks, datasink, [('normalized_files',
                                      'preprocessing.@masks_normalized')]),
        (signal_extraction, datasink, [('time_series_out_file',
                                        'preprocessing.@time_serie')]),
        (signal_extraction, datasink, [('correlation_matrix_out_file',
                                        'preprocessing.@correlation_matrix')]),
        (descomposition, datasink, [('out_file',
                                     'preprocessing.@descomposition')]),
        (descomposition, datasink,
         [('plot_files', 'preprocessing.@descomposition_plot_files')]),
        (descomposition, datasink,
         [('time_series', 'preprocessing.@descomposition_time_series')])
    ])
    #preproc.write_graph(graph2use='colored', format='png', simple_form=True)
    preproc.run()
    os.system('rm -rf %s' % opj(experiment_dir, working_dir))
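The function above relies on three small helpers (get_latest, get_wm_csf,
change_to_list) that the snippet does not include; plausible definitions,
inferred from how FAST's partial_volume_files ([CSF, GM, WM]) are consumed:

def get_latest(files):
    # last partial-volume map from FAST, i.e. the WM probability image
    return files[-1]

def get_wm_csf(files):
    # the WM and CSF probability maps, used to build the CompCor masks
    return [files[0], files[-1]]

def change_to_list(x):
    # wrap a single file in a list where a list input is expected
    return [x]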
Example #38
def get_nuisance_regressors_wf(outdir, timepoints, subject_id, global_signal=False, order=0, derivatives=1, comp=3):
    """Creates nipype workflow for nuisance correction composed by:
        Intercept + Drift (cosine transform) + Motion Correction + WhiteMatter&CSF Nuisance Regressors (CompCor)
    Parameters
    ----------
    outdir:
    subject_id:
    global_signal:
    timepoints:
    order:
    derivatives:
    comp:
        
    Returns
    -------
    wf_reg:
    """
    from nipype.algorithms import confounds
    from nipype import Workflow, Node
    from nipype.interfaces import utility, fsl
    import os
    
    if global_signal:
        gb = '_GB'
    else:
        gb = ''

    wf_reg = Workflow(name=subject_id + gb, base_dir=outdir)

    print("Setting INPUT node...")
    node_input = Node(utility.IdentityInterface(fields=[
           "realign_movpar_txt",        
           'rfmri_unwarped_imgs',
           'mask_wm',
           'mask_csf',
           'global_mask_img',
           'bold_img'
           ]),
            name='input_node'
    ) 
    
    # Merge the WM and CSF masks into one input list
    node_merge_wm_csf = Node(utility.base.Merge(2), name='Merge_wm_csf')
    
    # ACompCor (use the requested number of components)
    node_ACompCor = Node(confounds.ACompCor(
            num_components=comp,
            #save_pre_filter='high_pass_filter.txt',       
            pre_filter=False,
           # high_pass_cutoff=128,
            repetition_time=0.8,
            merge_method='none',
            #use_regress_poly=False,
            #realigned_file= fMRI_BOLD_unwarped,
           # mask_files='/institut/processed_data/BBHI_func/output2/sub-41064/GetMasksInT1Space/binarize_mask/MNI152_WM_09_warp_thresh.nii.gz',
             ),
    name="AcompCor_mask")
    #node_ACompCor.inputs.save_pre_filter=os.path.join(os.path.join(os.path.join(wf_reg.base_dir,wf_reg.name),node_ACompCor.name), 'high_pass_filter.txt')  

    #cosine_filter    
    node_cosine_filter_reg=Node(utility.Function(input_names=["timepoints", "timestep","period_cut","output_dir"],
                             output_names=["cosine_filter_txt"],
                             function=cosine_filter_txt), 
                                name="cosine_filter")    
    node_cosine_filter_reg.inputs.output_dir = os.path.join(wf_reg.base_dir, wf_reg.name, node_cosine_filter_reg.name)
    node_cosine_filter_reg.inputs.timepoints=timepoints
    node_cosine_filter_reg.inputs.timestep=0.8
    #node_cosine_filter_reg.overwrite=True
    
    #global_signal    
#    if global_signal :
#        node_global_signal=Node(utility.Function(input_names=["timeseries_file", "label_file", "filename"],
#                                 output_names=["global_signal_txt"],
#                                 function=extract_subrois), 
#                                    name="global_signal")    
#        node_global_signal.inputs.filename=os.path.join(os.path.join(os.path.join(os.path.join(wf_reg.base_dir,wf_reg.name)),node_global_signal.name),'global_signal.txt') 
#        #node_global_signal.overwrite=True

    #motion regressors
    motion_regressors_interface = utility.Function(input_names=["realign_movpar_txt", "output_dir","order","derivatives"],
                             output_names=["motion_reg_txt"],
                             function=motion_regressors)
    node_motion_regressors = Node(motion_regressors_interface, name="motion_regressors_txt")
    node_motion_regressors.inputs.output_dir = os.path.join(wf_reg.base_dir, wf_reg.name, node_motion_regressors.name)
    node_motion_regressors.inputs.order = order
    node_motion_regressors.inputs.derivatives = derivatives
    #node_motion_regressors.overwrite=True
    
    
    # merge all regressor text files
    node_merge_txts = Node(utility.base.Merge(4), name='Merge_txt_inputs')
    
    node_merge_regressors = Node(utility.Function(input_names=["nuisance_txts", "output_dir"],
                             output_names=["nuisance_txt"],
                             function=merge_nuisance_regressors),
    name="merge_nuisance_txt")
    node_merge_regressors.inputs.output_dir = os.path.join(wf_reg.base_dir, wf_reg.name, node_merge_regressors.name)
    
    node_filter_regressor = Node(fsl.FilterRegressor(
            #design_file (-d) nuissance_txt
            filter_all=True,
            #in_file (-i) bold after SPM coregistration to T1
            #out_file
            ),
    name="filter_regressors_bold")
    
    
    node_output = Node(utility.IdentityInterface(fields=[
        'nuisance_txt', 
        'bold_nuisance_filtered'
    ]),
    name='output_node') 
    
    wf_reg.connect([ (node_input, node_merge_wm_csf, [('mask_wm','in1'),
                                                      ('mask_csf', 'in2')]),
                     (node_input, node_ACompCor,[('rfmri_unwarped_imgs', 'realigned_file')]),
                     (node_merge_wm_csf, node_ACompCor, [('out', 'mask_files')]),
                     (node_input, node_motion_regressors,[('realign_movpar_txt', 'realign_movpar_txt')]),                     
                     
                     (node_motion_regressors,node_merge_txts, [('motion_reg_txt', 'in1')]),
                     (node_ACompCor,node_merge_txts, [('components_file', 'in2')]),
                     (node_cosine_filter_reg,node_merge_txts, [('cosine_filter_txt', 'in3')]),
                     (node_merge_txts, node_merge_regressors, [('out', 'nuisance_txts')]),
                     ])   
#    if global_signal:       
#         wf_reg.connect([
#                         (node_input, node_global_signal,[('rfmri_unwarped_imgs', 'timeseries_file'),
#                                                     ('global_mask_img', 'label_file')]),    
#                        (node_global_signal, node_merge_txts, [('global_signal_txt', 'in4')])                
#                         ])
    
    wf_reg.connect([    (node_merge_regressors, node_filter_regressor, [('nuisance_txt','design_file')]),
                        (node_input, node_filter_regressor, [('bold_img','in_file')]),
                        (node_filter_regressor, node_output, [('out_file','bold_nuisance_filtered')]),
                        (node_merge_regressors, node_output,[('nuisance_txt', 'nuisance_txt')])                
                         ])
    return wf_reg
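The helper functions wired into this workflow (cosine_filter_txt,
motion_regressors, merge_nuisance_regressors) are defined elsewhere in the
source; a minimal sketch of merge_nuisance_regressors, assuming each text file
holds a regressor matrix with one row per timepoint:

def merge_nuisance_regressors(nuisance_txts, output_dir):
    """Column-stack all regressor text files into a single design file."""
    import os
    import numpy as np
    mats = [np.atleast_2d(np.loadtxt(f)) for f in nuisance_txts]
    mats = [m.T if m.shape[0] == 1 else m for m in mats]  # force column vectors
    merged = np.hstack(mats)
    nuisance_txt = os.path.join(output_dir, 'nuisance_regressors.txt')
    np.savetxt(nuisance_txt, merged)
    return nuisance_txt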
Example #39
# FINDING CLUSTERS IN THE ANALYSIS RESULTS
# (imports added for this fragment; imgZStat, zThresh, statsDir and outDir are
#  assumed to be defined earlier in the source script)
import os
import pandas as pd
from nipype import Node, Workflow
from nipype.interfaces import fsl
from nipype.interfaces.io import DataSink

# cluster node
cluster = Node(fsl.Cluster(in_file=imgZStat,
                           threshold=zThresh,
                           out_index_file=True,
                           out_threshold_file=True,
                           out_localmax_txt_file=True),
               name='cluster')

# data sink node
datasink = Node(DataSink(base_directory=statsDir),
                name='datasink')

# workflow connecting clustering to the datasink
clusterWF = Workflow(name="clusterWF", base_dir=outDir)
clusterWF.connect(cluster, 'index_file', datasink, 'index_file')
clusterWF.connect(cluster, 'threshold_file', datasink, 'threshold_file')
clusterWF.connect(cluster, 'localmax_txt_file', datasink, 'localmax_txt_file')
clusterWF.run()


# LOADING CLUSTER MAXIMA TABLE
fMaxTable = os.path.join(statsDir,'localmax_txt_file/zstat1_localmax.txt')
maxData = pd.read_csv(fMaxTable, sep='\t')   # reading the maxima file as a dataframe
maxData.dropna(how='all', axis=1, inplace=True)  # removing empty columns
print(maxData)


# CALCULATING CLUSTER SIZES
fClusInd = os.path.join(statsDir,'index_file/zstat1_index.nii.gz')
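# The fragment breaks off here; a plausible continuation (assuming nibabel is
# available) would count voxels per cluster in the index image:
import nibabel as nib
import numpy as np

clusImg = nib.load(fClusInd)                   # cluster index image, 0 = background
clusData = clusImg.get_fdata().astype(int)
clusSizes = np.bincount(clusData.ravel())[1:]  # voxels per cluster, skipping 0
for iClus, nVox in enumerate(clusSizes, start=1):
    print('Cluster %d: %d voxels' % (iClus, nVox))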
Example #40
# (imports added for this snippet; init_t1w_derivatives_wf is defined elsewhere
#  in the source project)
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
from nipype import Workflow
from nipype.interfaces import ants, fsl


def init_single_ses_anat_preproc_wf(subject,
                                    session,
                                    bids_dir,
                                    smriprep_dir,
                                    out_dir,
                                    omp_nthreads=1,
                                    name='anat_preproc_wf',
                                    smriprep06=False):
    wf = Workflow(name=name)

    if smriprep06:

        def subject_info_fnc(bids_dir, smriprep_dir, subject, session):
            from pathlib import Path
            tpl_t1w = str(
                Path(
                    smriprep_dir,
                    f"sub-{subject}/anat/sub-{subject}_desc-preproc_T1w.nii.gz"
                ))
            tpl_brainmask = str(
                Path(
                    smriprep_dir,
                    f"sub-{subject}/anat/sub-{subject}_desc-brain_mask.nii.gz")
            )

            t1ws = list(
                Path(bids_dir).glob(
                    f"sub-{subject}/ses-{session}/anat/*_T1w.nii.gz"))
            assert len(
                t1ws) > 0, f"Expected at least one file, but found {t1ws}"
            t1ws.sort()

            xfms = list(
                Path(smriprep_dir).glob(
                    f"sub-{subject}/ses-{session}/anat/*_run-*_T1w_space-orig_target-T1w_affine.txt"
                ))
            xfms.sort()

            for f in t1ws:
                if not f.is_file():
                    raise FileNotFoundError(f)
            t1ws = [str(o) for o in t1ws]  # as Path is not taken everywhere
            xfms = [str(o) for o in xfms]
            return tpl_t1w, tpl_brainmask, t1ws, xfms

    else:

        def subject_info_fnc(bids_dir, smriprep_dir, subject, session):
            from pathlib import Path
            tpl_t1w = str(
                Path(smriprep_dir,
                     f"sub-{subject}/anat/sub-{subject}_T1w_preproc.nii.gz"))
            tpl_brainmask = str(
                Path(smriprep_dir,
                     f"sub-{subject}/anat/sub-{subject}_T1w_brainmask.nii.gz"))
            if not Path(tpl_t1w).is_file():
                tpl_t1w = str(
                    list(
                        Path(smriprep_dir).glob(
                            f"sub-{subject}/ses*/anat/sub-{subject}*_T1w_preproc.nii.gz"
                        ))[0])
                tpl_brainmask = str(
                    list(
                        Path(smriprep_dir).glob(
                            f"sub-{subject}/ses*/anat/sub-{subject}*_T1w_brainmask.nii.gz"
                        ))[0])
                if not Path(tpl_t1w).is_file():
                    raise FileNotFoundError(tpl_t1w)

            t1ws = list(
                Path(bids_dir).glob(
                    f"sub-{subject}/ses-{session}/anat/*_T1w.nii.gz"))
            assert len(
                t1ws) > 0, f"Expected at least one file, but found {t1ws}"
            t1ws.sort()

            xfms = list(
                Path(smriprep_dir).glob(
                    f"sub-{subject}/ses-{session}/anat/*_run-*_T1w_space-orig_target-T1w_affine.txt"
                ))
            xfms.sort()

            for f in t1ws + xfms:
                if not f.is_file():
                    raise FileNotFoundError(f)
            t1ws = [str(o) for o in t1ws]  # as Path is not taken everywhere
            xfms = [str(o) for o in xfms]
            return tpl_t1w, tpl_brainmask, t1ws, xfms

    grabber = pe.Node(niu.Function(
        input_names=["bids_dir", "smriprep_dir", "subject", "session"],
        output_names=["tpl_t1w", "tpl_brainmask", "t1ws", "xfms"],
        function=subject_info_fnc),
                      name="grabber")
    grabber.inputs.bids_dir = bids_dir
    grabber.inputs.smriprep_dir = smriprep_dir
    grabber.inputs.subject = subject
    grabber.inputs.session = session

    t1w_biascorr = pe.MapNode(ants.N4BiasFieldCorrection(
        save_bias=False, num_threads=omp_nthreads),
                              iterfield=["input_image"],
                              name="t1w_biascorr")
    wf.connect(grabber, "t1ws", t1w_biascorr, "input_image")

    t1w_tpl_space = pe.MapNode(fsl.FLIRT(dof=6),
                               iterfield=["in_file"],
                               name="t1w_tpl_space")
    wf.connect(grabber, "tpl_t1w", t1w_tpl_space, "reference")
    wf.connect(t1w_biascorr, "output_image", t1w_tpl_space, "in_file")

    merge_t1w = pe.Node(fsl.Merge(dimension="t"), name='merge_t1w')
    wf.connect(t1w_tpl_space, "out_file", merge_t1w, "in_files")

    mean_t1w = pe.Node(fsl.MeanImage(), name='mean_t1w')
    wf.connect(merge_t1w, "merged_file", mean_t1w, "in_file")

    t1w_brain_tpl_space = pe.Node(fsl.ApplyMask(), name="t1w_brain_tpl_space")
    wf.connect(mean_t1w, "out_file", t1w_brain_tpl_space, "in_file")
    wf.connect(grabber, "tpl_brainmask", t1w_brain_tpl_space, "mask_file")

    ds = init_t1w_derivatives_wf(bids_dir, out_dir)
    ds.inputs.inputnode.subject = subject
    ds.inputs.inputnode.session = session
    wf.connect(mean_t1w, "out_file", ds, "inputnode.t1w_template_space")
    wf.connect(t1w_brain_tpl_space, "out_file", ds,
               "inputnode.t1w_brain_template_space")

    return wf
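A hypothetical invocation (all paths are placeholders):

wf = init_single_ses_anat_preproc_wf(subject='01',
                                     session='01',
                                     bids_dir='/data/bids',
                                     smriprep_dir='/data/smriprep',
                                     out_dir='/data/out')
wf.base_dir = '/tmp/work'
wf.run()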
Example #41
def group_multregress_openfmri(dataset_dir, model_id=None, task_id=None, l1output_dir=None, out_dir=None, 
                               no_reversal=False, plugin=None, plugin_args=None, flamemodel='flame1',
                               nonparametric=False, use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' % (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir
            
            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']), name='grabber')
            dg.inputs.template = os.path.join(l1output_dir,
                                              'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', 'spm/',
                                                     '', 'cope_id', '']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', 'spm/',
                                                        'var', 'cope_id', '.gz']]
            else:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', '', '', 
                                                     'cope_id', '.gz']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', '',
                                                        'var', 'cope_id', '.gz']]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]
            
            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')
            
            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')
            
            mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')
            
            if nonparametric:
                palm = Node(Function(input_names=['cope_file', 'design_file', 'contrast_file', 
                                                  'group_file', 'mask_file', 'cluster_threshold'],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')
                
            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file
        
            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True
            
            wk.connect(flame, 'zstats', cluster, 'in_file')
    
            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')
            
            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]
            
            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')
                
                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
Example #42
segstatPost = Node(Function(input_names=['aparc_stats'],
                            output_names=['clearedFileName'],
                            function=segstat_shaping),
                   name='segstat_Postprocesser')


# Compute the FC
compFCNode = Node(Function(input_names=['path', 'subName', 'avgwf_txt_file', 'summary_file_cleared'],
                           output_names=['matfile_name'],
                           function=compute_functional_connectivity),
                  name='compute_FC')


# Define the Workflow
wf = Workflow('fMRI_Processing')

# wf.connect([(inputNode, rawFinderNode, [('raw_files', 'root_paths')])])

wf.connect([(inputNode, folderMaker, [('subject_folder', 'path_name')])])


# wf.connect([(rawFinderNode, convertNode, [(('out_paths', selectFromList, 0), 'in_file')]),
wf.connect([(inputNode, convertNode, [(('raw_files', selectFromList, 0), 'in_file')]),
           (folderMaker, convertNode, [(('folder_path', fileNameBuilder, fileNames['bold_file']), 'out_file')])])

wf.connect([(convertNode, featNode, [('out_file', 'bold_file')]),
           (folderMaker, featNode, [('folder_path', 'bold_folder')]),
           (inputNode, featNode, [('brainmask', 'brainmask_file')])])

wf.connect([(featNode, exfunc2anat, [(('feat_dir', featFileNameBuilder, 'mean_func.nii.gz'), 'in_file')]),
Example #44
# (the opening of this snippet is truncated in the source; the imports and the
#  start of the fsl.ExtractROI node are reconstructed here; outDir is assumed
#  to be defined earlier)
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from nipype import Node, Workflow
from nipype.interfaces import fsl
from nipype.interfaces.io import DataSink

# creating a node to drop the initial volumes
extract = Node(
    fsl.ExtractROI(
        t_min=4,  # first 4 volumes are deleted
        t_size=-1),
    name="extract")

# creating motion correction node
mcflirt = Node(
    fsl.MCFLIRT(save_rms=True,
                save_plots=True),  # saving displacement parameters
    name="mcflirt")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=os.path.join(outDir, 'Results')),
                name='datasink')

# creating a workflow
moCor = Workflow(name="MoCor", base_dir=outDir)

# connecting the nodes
moCor.connect(extract, 'roi_file', mcflirt, 'in_file')

# output to datasink
moCor.connect(mcflirt, 'out_file', datasink, 'out_file')  # corrected fMRI
moCor.connect(mcflirt, 'par_file', datasink, 'par_file')  # motion parameter
moCor.connect(mcflirt, 'rms_files', datasink, 'rms_files')  # relative motion

# writing out graph
moCor.write_graph(graph2use='orig', dotfilename='graph_orig.dot')
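# (with the default format='png', write_graph also renders graph_orig.png
#  alongside the dot file, which is what gets loaded below)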

# showing the graph
plt.figure(figsize=[4, 4])
img = mpimg.imread(os.path.join(outDir, "moCor", "graph_orig.png"))
Example #45
def create_volume_mixedfx_workflow(name="volume_group",
                                   subject_list=None,
                                   regressors=None,
                                   contrasts=None,
                                   exp_info=None):

    # Handle default arguments
    if subject_list is None:
        subject_list = []
    if regressors is None:
        regressors = dict(group_mean=[])
    if contrasts is None:
        contrasts = [["group_mean", "T", ["group_mean"], [1]]]
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    # Define workflow inputs
    inputnode = Node(IdentityInterface(["l1_contrast",
                                        "copes",
                                        "varcopes",
                                        "dofs"]),
                     "inputnode")

    # Merge the fixed effect summary images into one 4D image
    merge = Node(MergeAcrossSubjects(), "merge")

    # Make a simple design
    design = Node(fsl.MultipleRegressDesign(regressors=regressors,
                                            contrasts=contrasts),
                  "design")

    # Fit the mixed effects model
    flameo = Node(fsl.FLAMEO(run_mode=exp_info["flame_mode"]), "flameo")

    # Estimate the smoothness of the data
    smoothest = Node(fsl.SmoothEstimate(), "smoothest")

    # Correct for multiple comparisons
    cluster = Node(fsl.Cluster(threshold=exp_info["cluster_zthresh"],
                               pthreshold=exp_info["grf_pthresh"],
                               out_threshold_file=True,
                               out_index_file=True,
                               out_localmax_txt_file=True,
                               peak_distance=exp_info["peak_distance"],
                               use_mm=True),
                   "cluster")

    # Project the mask and thresholded zstat onto the surface
    surfproj = create_surface_projection_workflow(exp_info=exp_info)

    # Segment the z stat image with a watershed algorithm
    watershed = Node(Watershed(), "watershed")

    # Make static report images in the volume
    report = Node(MFXReport(), "report")
    report.inputs.subjects = subject_list

    # Save the experiment info
    saveparams = Node(SaveParameters(exp_info=exp_info), "saveparams")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["copes",
                                         "varcopes",
                                         "mask_file",
                                         "flameo_stats",
                                         "thresh_zstat",
                                         "surf_zstat",
                                         "surf_mask",
                                         "cluster_image",
                                         "seg_file",
                                         "peak_file",
                                         "lut_file",
                                         "report",
                                         "json_file"]),
                      "outputnode")

    # Define and connect up the workflow
    group = Workflow(name)
    group.connect([
        (inputnode, merge,
            [("copes", "cope_files"),
             ("varcopes", "varcope_files"),
             ("dofs", "dof_files")]),
        (inputnode, saveparams,
            [("copes", "in_file")]),
        (merge, flameo,
            [("cope_file", "cope_file"),
             ("varcope_file", "var_cope_file"),
             ("dof_file", "dof_var_cope_file"),
             ("mask_file", "mask_file")]),
        (design, flameo,
            [("design_con", "t_con_file"),
             ("design_grp", "cov_split_file"),
             ("design_mat", "design_file")]),
        (flameo, smoothest,
            [("zstats", "zstat_file")]),
        (merge, smoothest,
            [("mask_file", "mask_file")]),
        (smoothest, cluster,
            [("dlh", "dlh"),
             ("volume", "volume")]),
        (flameo, cluster,
            [("zstats", "in_file")]),
        (cluster, watershed,
            [("threshold_file", "zstat_file"),
             ("localmax_txt_file", "localmax_file")]),
        (merge, report,
            [("mask_file", "mask_file"),
             ("cope_file", "cope_file")]),
        (flameo, report,
            [("zstats", "zstat_file")]),
        (cluster, report,
            [("threshold_file", "zstat_thresh_file"),
             ("localmax_txt_file", "localmax_file")]),
        (watershed, report,
            [("seg_file", "seg_file")]),
        (merge, surfproj,
            [("mask_file", "inputs.mask_file")]),
        (cluster, surfproj,
            [("threshold_file", "inputs.zstat_file")]),
        (merge, outputnode,
            [("cope_file", "copes"),
             ("varcope_file", "varcopes"),
             ("mask_file", "mask_file")]),
        (flameo, outputnode,
            [("stats_dir", "flameo_stats")]),
        (cluster, outputnode,
            [("threshold_file", "thresh_zstat"),
             ("index_file", "cluster_image")]),
        (watershed, outputnode,
            [("seg_file", "seg_file"),
             ("peak_file", "peak_file"),
             ("lut_file", "lut_file")]),
        (surfproj, outputnode,
            [("outputs.surf_zstat", "surf_zstat"),
             ("outputs.surf_mask", "surf_mask")]),
        (report, outputnode,
            [("out_files", "report")]),
        (saveparams, outputnode,
            [("json_file", "json_file")]),
        ])

    return group, inputnode, outputnode
Example #46
File: prep.py Project: ywangc/QUIT
def COMPOSER(verbose=False, is_bruker=False):
    inputnode = Node(IdentityInterface(fields=['in_file', 'ref_file']),
                     name='input')
    outputnode = Node(IdentityInterface(fields=['out_file']), name='output')
    wf = Workflow(name='COMPOSER')

    in_mag = Node(Complex(magnitude_out_file='in_mag.nii.gz', verbose=verbose),
                  name='in_magnitude')
    ref_mag = Node(Complex(magnitude_out_file='ref_mag.nii.gz',
                           verbose=verbose),
                   name='ref_magnitude')
    if is_bruker:
        wf.connect([(inputnode, in_mag, [('in_file', 'realimag')])])
        wf.connect([(inputnode, ref_mag, [('ref_file', 'realimag')])])
    else:
        wf.connect([(inputnode, in_mag, [('in_file', 'complex')])])
        wf.connect([(inputnode, ref_mag, [('ref_file', 'complex')])])

    in_mean = Node(maths.MeanImage(), name='in_mean')
    ref_mean = Node(maths.MeanImage(), name='ref_mean')
    wf.connect([(in_mag, in_mean, [('magnitude_out_file', 'in_file')]),
                (ref_mag, ref_mean, [('magnitude_out_file', 'in_file')])])

    register = Node(Registration(dimension=3,
                                 initial_moving_transform_com=1,
                                 transforms=['Rigid'],
                                 metric=['Mattes'],
                                 metric_weight=[1],
                                 transform_parameters=[(0.1, )],
                                 number_of_iterations=[[1000, 500, 250]],
                                 collapse_output_transforms=False,
                                 initialize_transforms_per_stage=False,
                                 radius_or_number_of_bins=[32],
                                 sampling_strategy=['Regular', None],
                                 sampling_percentage=[0.25, None],
                                 convergence_threshold=[1.e-6],
                                 smoothing_sigmas=[[4, 2, 1]],
                                 shrink_factors=[[8, 4, 2]],
                                 sigma_units=['vox'],
                                 output_warped_image=True,
                                 verbose=True),
                    name='register')
    wf.connect([(in_mean, register, [('out_file', 'moving_image')]),
                (ref_mean, register, [('out_file', 'fixed_image')])])

    if is_bruker:
        resample = Node(ApplyTransforms(dimension=3, input_image_type=3),
                        name='resample_reference')
        in_x = Node(Complex(complex_out_file='in_x.nii.gz', verbose=verbose),
                    name='in_x')
        ref_x = Node(Complex(complex_out_file='ref_x.nii.gz', verbose=verbose),
                     name='ref_x')
        cc = Node(CoilCombine(), name='cc')
        wf.connect([(inputnode, resample, [('ref_file', 'input_image')]),
                    (in_mean, resample, [('out_file', 'reference_image')]),
                    (register, resample, [('reverse_transforms', 'transforms')
                                          ]),
                    (inputnode, in_x, [('in_file', 'realimag')]),
                    (resample, ref_x, [('output_image', 'realimag')]),
                    (in_x, cc, [('complex_out_file', 'in_file')]),
                    (ref_x, cc, [('complex_out_file', 'composer_file')]),
                    (cc, outputnode, [('out_file', 'out_file')])])
    else:
        raise NotImplementedError('Not Yet Supported')

    return wf
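A hypothetical invocation (file names are placeholders):

wf = COMPOSER(verbose=True, is_bruker=True)
wf.inputs.input.in_file = 'multi_coil.nii.gz'
wf.inputs.input.ref_file = 'reference_coil.nii.gz'
wf.base_dir = '/tmp/composer'
wf.run()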
Example #47
File: template.py Project: mwaskom/lyman
def define_template_workflow(info, subjects, qc=True):

    # --- Workflow parameterization

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subjects))

    # Data input
    template_input = Node(TemplateInput(data_dir=info.data_dir),
                          "template_input")

    # --- Definition of functional template space

    crop_image = Node(fs.ApplyMask(args="-bb 4"), "crop_image")

    zoom_image = Node(fs.MRIConvert(resample_type="cubic",
                                    out_type="niigz",
                                    vox_size=info.voxel_size,
                                    ),
                      "zoom_image")

    reorient_image = Node(fsl.Reorient2Std(out_file="anat.nii.gz"),
                          "reorient_image")

    generate_reg = Node(fs.Tkregister2(fsl_out="anat2func.mat",
                                       reg_file="anat2func.dat",
                                       reg_header=True),
                        "generate_reg")

    invert_reg = Node(fs.Tkregister2(reg_file="func2anat.dat",
                                     reg_header=True),
                      "invert_reg")

    # --- Identification of surface vertices

    hemi_source = Node(IdentityInterface(["hemi"]), "hemi_source",
                       iterables=("hemi", ["lh", "rh"]))

    tag_surf = Node(fs.Surface2VolTransform(surf_name="graymid",
                                            transformed_file="ribbon.nii.gz",
                                            vertexvol_file="vertices.nii.gz",
                                            mkmask=True),
                    "tag_surf")

    mask_cortex = Node(MaskWithLabel(fill_value=-1), "mask_cortex")

    combine_hemis = JoinNode(fsl.Merge(dimension="t",
                                       merged_file="surf.nii.gz"),
                             name="combine_hemis",
                             joinsource="hemi_source",
                             joinfield="in_files")

    make_ribbon = Node(MakeRibbon(), "make_ribbon")

    # --- Segmentation of anatomical tissue in functional space

    transform_wmparc = Node(fs.ApplyVolTransform(inverse=True,
                                                 interp="nearest",
                                                 args="--keep-precision"),
                            "transform_wmparc")

    anat_segment = Node(AnatomicalSegmentation(), "anat_segment")

    # --- Template QC

    template_qc = Node(TemplateReport(), "template_qc")

    # --- Workflow output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    template_output = Node(DataSink(base_directory=info.proc_dir,
                                    parameterization=False),
                           "template_output")

    # === Assemble pipeline

    workflow = Workflow(name="template", base_dir=info.cache_dir)

    processing_edges = [

        (subject_source, template_input,
            [("subject", "subject")]),
        (template_input, crop_image,
            [("norm_file", "in_file"),
             ("wmparc_file", "mask_file")]),
        (crop_image, zoom_image,
            [("out_file", "in_file")]),
        (zoom_image, reorient_image,
            [("out_file", "in_file")]),

        (subject_source, generate_reg,
            [("subject", "subject_id")]),
        (template_input, generate_reg,
            [("norm_file", "moving_image")]),
        (reorient_image, generate_reg,
            [("out_file", "target_image")]),

        (subject_source, invert_reg,
            [("subject", "subject_id")]),
        (template_input, invert_reg,
            [("norm_file", "target_image")]),
        (reorient_image, invert_reg,
            [("out_file", "moving_image")]),

        (hemi_source, tag_surf,
            [("hemi", "hemi")]),
        (invert_reg, tag_surf,
            [("reg_file", "reg_file")]),
        (reorient_image, tag_surf,
            [("out_file", "template_file")]),
        (template_input, mask_cortex,
            [("label_files", "label_files")]),
        (hemi_source, mask_cortex,
            [("hemi", "hemi")]),
        (tag_surf, mask_cortex,
            [("vertexvol_file", "in_file")]),
        (mask_cortex, combine_hemis,
            [("out_file", "in_files")]),
        (combine_hemis, make_ribbon,
            [("merged_file", "in_file")]),

        (reorient_image, transform_wmparc,
            [("out_file", "source_file")]),
        (template_input, transform_wmparc,
            [("wmparc_file", "target_file")]),
        (invert_reg, transform_wmparc,
            [("reg_file", "reg_file")]),
        (reorient_image, anat_segment,
            [("out_file", "anat_file")]),
        (transform_wmparc, anat_segment,
            [("transformed_file", "wmparc_file")]),
        (combine_hemis, anat_segment,
            [("merged_file", "surf_file")]),

        (template_input, template_output,
            [("output_path", "container")]),
        (reorient_image, template_output,
            [("out_file", "@anat")]),
        (generate_reg, template_output,
            [("fsl_file", "@anat2func")]),
        (anat_segment, template_output,
            [("seg_file", "@seg"),
             ("lut_file", "@lut"),
             ("edge_file", "@edge"),
             ("mask_file", "@mask")]),
        (combine_hemis, template_output,
            [("merged_file", "@surf")]),
        (make_ribbon, template_output,
            [("out_file", "@ribbon")]),

    ]
    workflow.connect(processing_edges)

    # Optionally connect QC nodes

    qc_edges = [

        (reorient_image, template_qc,
            [("out_file", "anat_file")]),
        (combine_hemis, template_qc,
            [("merged_file", "surf_file")]),
        (anat_segment, template_qc,
            [("lut_file", "lut_file"),
             ("seg_file", "seg_file"),
             ("edge_file", "edge_file"),
             ("mask_file", "mask_file")]),

        (subject_source, save_info,
            [("subject", "parameterization")]),
        (save_info, template_output,
            [("info_file", "qc.@info_json")]),

        (template_qc, template_output,
            [("seg_plot", "qc.@seg_plot"),
             ("mask_plot", "qc.@mask_plot"),
             ("edge_plot", "qc.@edge_plot"),
             ("surf_plot", "qc.@surf_plot"),
             ("anat_plot", "qc.@anat_plot")]),

    ]
    if qc:
        workflow.connect(qc_edges)

    return workflow
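A hedged usage sketch for the factory above (assuming the enclosing function is lyman's define_template_workflow(info, subjects, qc); subject ids and process count are hypothetical):

# Hypothetical invocation; MultiProc fans the subject iterable out locally.
wf = define_template_workflow(info, subjects=["subj01", "subj02"], qc=True)
wf.write_graph(graph2use="colored")  # optional: render the DAG for inspection
wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})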
# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=outDir), name='datasink')

## Use the following DataSink output substitutions
substitutions = [('_subject_id_', '/sub-')]

datasink.inputs.substitutions = substitutions

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########

# creating the workflow
firstLevel = Workflow(name="Level1", base_dir=outDir)

# connecting nodes
firstLevel.connect(sf, 'func', susan, 'in_file')
firstLevel.connect(sf, 'mask', applymask, 'mask_file')
firstLevel.connect(sf, 'events', taskevents, 'fileEvent')
firstLevel.connect(susan, 'smoothed_file', applymask, 'in_file')
firstLevel.connect(applymask, 'out_file', modelspec, 'functional_runs')
firstLevel.connect(taskevents, 'subject_info', modelspec, 'subject_info')
firstLevel.connect(modelspec, 'session_info', level1design, 'session_info')
firstLevel.connect(taskevents, 'contrast_list', level1design, 'contrasts')
firstLevel.connect(level1design, 'fsf_files', modelgen, 'fsf_file')
firstLevel.connect(level1design, 'ev_files', modelgen, 'ev_files')
firstLevel.connect(level1design, 'fsf_files', feat, 'fsf_file')
firstLevel.connect(feat, 'feat_dir', datasink, 'feat_dir')
firstLevel.connect(applymask, 'out_file', datasink, 'preproc_out_file')
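With the graph wired, execution is a one-liner; a minimal sketch (plugin choice and process count are arbitrary):

# Render the wiring for inspection, then execute locally.
firstLevel.write_graph(graph2use="flat")
firstLevel.run(plugin="MultiProc", plugin_args={"n_procs": 2})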
def group_onesample_openfmri(dataset_dir, model_id=None, task_id=None,
                             l1output_dir=None, out_dir=None,
                             no_reversal=False):

    wk = Workflow(name='one_sample')
    # NOTE: work_dir is not a parameter; it is assumed to be defined at
    # module scope by the calling script.
    wk.base_dir = os.path.abspath(work_dir)

    info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir']),
                name='infosource')
    info.inputs.model_id = model_id
    info.inputs.task_id = task_id
    info.inputs.dataset_dir = dataset_dir

    num_copes = contrasts_num(model_id, task_id, dataset_dir)

    dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                          outfields=['copes', 'varcopes']),
              name='grabber')
    dg.inputs.template = os.path.join(
        l1output_dir, 'model%03d/task%03d/*/%scopes/mni/%scope%02d.nii.gz')
    dg.inputs.template_args['copes'] = [['model_id', 'task_id', '', '', 'cope_id']]
    dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', 'var', 'var', 'cope_id']]
    dg.inputs.sort_filelist = True
    dg.iterables = ('cope_id', num_copes)

    wk.connect(info, 'model_id', dg, 'model_id')
    wk.connect(info, 'task_id', dg, 'task_id')

    model = Node(L2Model(), name='l2model')

    wk.connect(dg, ('copes', get_len), model, 'num_copes')

    mergecopes = Node(Merge(dimension='t'), name='merge_copes')
    wk.connect(dg, 'copes', mergecopes, 'in_files')

    mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
    wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

    mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    flame = Node(FLAMEO(), name='flameo')
    flame.inputs.mask_file = mask_file
    flame.inputs.run_mode = 'flame1'

    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    smoothest = Node(SmoothEstimate(), name='smooth_estimate')
    smoothest.inputs.mask_file = mask_file
    wk.connect(flame, 'zstats', smoothest, 'zstat_file')

    cluster = Node(Cluster(), name='cluster')
    cluster.inputs.connectivity = 26
    cluster.inputs.threshold = 2.3
    cluster.inputs.pthreshold = 0.05
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_index_file = True
    cluster.inputs.out_localmax_txt_file = True
    wk.connect(smoothest, 'dlh', cluster, 'dlh')
    wk.connect(smoothest, 'volume', cluster, 'volume')
    wk.connect(flame, 'zstats', cluster, 'in_file')
	 
    ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                   name='z2pval')
    wk.connect(flame, 'zstats', ztopval, 'in_file')

    sinker = Node(DataSink(), name='sinker')
    sinker.inputs.base_directory = os.path.abspath(out_dir)
    sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                   ('_maths__', '_reversed_')]

    wk.connect(flame, 'zstats', sinker, 'stats')
    wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
    wk.connect(cluster, 'index_file', sinker, 'stats.@index')
    wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
    
    if not no_reversal:
        zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
        zstats_reverse.inputs.operation = 'mul'
        zstats_reverse.inputs.operand_value = -1
        wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

        cluster2 = cluster.clone(name='cluster2')
        wk.connect(smoothest, 'dlh', cluster2, 'dlh')
        wk.connect(smoothest, 'volume', cluster2, 'volume')
        wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

        ztopval2 = ztopval.clone(name='ztopval2')
        wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

        wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
        wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
        wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
        wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')

    return wk
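A hedged usage sketch for group_onesample_openfmri (all paths and ids hypothetical; note that work_dir must exist at module scope):

# Hypothetical arguments following the OpenfMRI layout the templates expect.
wk = group_onesample_openfmri(dataset_dir="/data/ds000001",
                              model_id=1, task_id=1,
                              l1output_dir="/data/ds000001/l1output",
                              out_dir="/data/ds000001/group")
wk.run(plugin="MultiProc", plugin_args={"n_procs": 4})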
Example #50
0
    subject_directory = os.path.dirname(tissue_classify_directory)
    hncma_atlas = os.path.join(subject_directory, "WarpedAtlas2Subject",
                               "hncma_atlas.nii.gz")
    direction_files = dict()
    for name in ["rho", "phi", "theta"]:
        direction_files[name] = os.path.join(subject_directory,
                                             "WarpedAtlas2Subject",
                                             "{0}.nii.gz".format(name))

    lh_white_surface_file = os.path.join(subject_directory, "FreeSurfer",
                                         "surf", "lh.white")
    rh_white_surface_file = os.path.join(subject_directory, "FreeSurfer",
                                         "surf", "rh.white")

    logb_wf = create_logismosb_machine_learning_workflow()
    wf = Workflow("MachineLearning_Baseline_{0}".format(session_id))
    datasink = Node(DataSink(), name="DataSink")
    datasink.inputs.base_directory = os.path.join(results_dir, session_id)
    for hemisphere in ("lh", "rh"):
        for matter in ("gm", "wm"):
            wf.connect(
                logb_wf,
                "output_spec.{0}_{1}surface_file".format(hemisphere, matter),
                datasink, "EdgePrediction.@{0}_{1}".format(hemisphere, matter))

    logb_wf.inputs.input_spec.t1_file = t1_file
    logb_wf.inputs.input_spec.orig_t1 = t1_file
    logb_wf.inputs.input_spec.t2_file = t2_file
    logb_wf.inputs.input_spec.posteriors = posterior_files
    logb_wf.inputs.input_spec.hncma_file = hncma_atlas
    logb_wf.inputs.input_spec.abc_file = abc_file
Example #51
0
def init_t1w_derivatives_wf(bids_root, output_dir, name='t1w_derivatives_wf'):
    """Set up a battery of datasinks to store derivatives in the right location."""
    base_directory = str(output_dir.parent)
    out_path_base = str(output_dir.name)

    wf = Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject', 'session', 't1w_template_space', 't1w_brain_template_space'
    ]),
                        name='inputnode')

    def generic_bids_file_fct(bids_root, subject, session):
        from pathlib import Path
        return Path(
            bids_root
        ) / f"sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_T1w.nii.gz"

    generic_bids_file = pe.Node(niu.Function(
        input_names=["bids_root", "subject", "session"],
        output_names=["out_file"],
        function=generic_bids_file_fct),
                                name='generic_bids_file')
    generic_bids_file.inputs.bids_root = bids_root
    wf.connect(inputnode, "subject", generic_bids_file, "subject")
    wf.connect(inputnode, "session", generic_bids_file, "session")

    ds_t1w_preproc = pe.Node(DerivativesDataSink(base_directory=base_directory,
                                                 out_path_base=out_path_base,
                                                 keep_dtype=True,
                                                 compress=True,
                                                 space="tpl"),
                             name='ds_t1w_preproc',
                             run_without_submitting=True)
    wf.connect(generic_bids_file, "out_file", ds_t1w_preproc, "source_file")
    wf.connect(inputnode, "t1w_template_space", ds_t1w_preproc, "in_file")

    ds_t1w_brain = pe.Node(DerivativesDataSink(base_directory=base_directory,
                                               out_path_base=out_path_base,
                                               keep_dtype=True,
                                               compress=True,
                                               space="tpl",
                                               desc="brain"),
                           name='ds_t1w_brain',
                           run_without_submitting=True)
    wf.connect(generic_bids_file, "out_file", ds_t1w_brain, "source_file")
    wf.connect(inputnode, "t1w_brain_template_space", ds_t1w_brain, "in_file")

    return wf
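A hedged sketch of driving the datasink battery above (paths hypothetical; output_dir is a pathlib.Path, since the factory splits it into parent and name):

from pathlib import Path

# Hypothetical locations and entities.
wf = init_t1w_derivatives_wf(bids_root="/data/bids",
                             output_dir=Path("/data/derivatives/myproc"))
wf.inputs.inputnode.subject = "01"
wf.inputs.inputnode.session = "01"
wf.inputs.inputnode.t1w_template_space = "t1w_tpl.nii.gz"
wf.inputs.inputnode.t1w_brain_template_space = "t1w_brain_tpl.nii.gz"
wf.run()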
Example #52
0
def create_distortion_correct():
    # fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # initiate workflow
    distor_correct = Workflow(name='distor_correct')
    # input node
    inputnode = Node(util.IdentityInterface(
        fields=['dwi', 'dwi_ap', 'dwi_pa', 'bvals', 'bvecs']),
                     name='inputnode')
    # output node
    outputnode = Node(util.IdentityInterface(fields=[
        'bo_brain', "bo_brainmask", 'noise', 'dwi_denoised', "dwi_unringed",
        "dwi_appa", "topup_bo", "topup_corr", "topup_field", "topup_fieldcoef",
        "eddy_corr", "rotated_bvecs", "total_movement_rms", "outlier_report",
        "cnr_maps", "residuals", "shell_params", "eddy_params"
    ]),
                      name='outputnode')

    # resolve supporting config files relative to this script's directory
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # noise reduction on all images
    denoise = Node(DWIdenoise(noise='noise.nii.gz'), name="denoise")

    # artefact removal
    # run unring: remove the ringing artefacts
    unring = Node(MRdegibbs(), name="unring")

    # topup and eddy
    # merge AP PA files together

    b0_comb = Node(util.Merge(2), name='b0_comb')
    merger = Node(fsl.Merge(), name='merger')
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = 'dwi_appa.nii.gz'
    distor_correct.connect([(inputnode, b0_comb, [('dwi_ap', 'in1')]),
                            (inputnode, b0_comb, [('dwi_pa', 'in2')]),
                            (b0_comb, merger, [('out', 'in_files')])])

    # topup
    config = os.path.join(__location__, 'b02b0.cnf')
    acqparams = os.path.join(__location__, 'acqparams_dwi.txt')
    topup = Node(fsl.TOPUP(), name='topup')
    topup.inputs.config = config  # use optimised parameters
    topup.inputs.encoding_file = acqparams
    # topup.inputs.out_base = 'diff_topup'

    # skullstrip process using bet
    # mean of all b0 unwarped images
    maths = Node(fsl.ImageMaths(op_string='-Tmean'), name="maths")

    # create a brain mask from the b0 unwarped
    bet = Node(interface=fsl.BET(), name='bet')
    bet.inputs.mask = True
    bet.inputs.frac = 0.2
    bet.inputs.robust = True

    # eddy motion correction
    indx = os.path.join(__location__, 'index.txt')
    eddy = Node(fsl.epi.Eddy(), name="eddy")
    eddy.inputs.num_threads = 8  # total number of CPUs to use
    # eddy.inputs.args = '--cnr_maps --residuals'
    eddy.inputs.repol = True
    eddy.inputs.in_acqp = acqparams
    eddy.inputs.in_index = indx
    eddy.inputs.cnr_maps = True
    eddy.inputs.residuals = True

    # connect the nodes
    distor_correct.connect([
        (merger, topup, [("merged_file", "in_file")]),
        (topup, outputnode, [('out_corrected', 'topup_bo')]),
        (topup, outputnode, [('out_fieldcoef', 'topup_fieldcoef')]),
        (topup, outputnode, [('out_field', 'topup_field')]),
        (topup, maths, [('out_corrected', 'in_file')]),
        (maths, outputnode, [('out_file', 'dwi_appa')]),
        (maths, bet, [("out_file", "in_file")]),
        (bet, outputnode, [("mask_file", "bo_brainmask")]),
        (bet, outputnode, [("out_file", "bo_brain")]),
        (bet, eddy, [("mask_file", "in_mask")]),
        (inputnode, eddy, [("bvecs", "in_bvec")]),
        (inputnode, eddy, [("bvals", "in_bval")]),
        (topup, eddy, [("out_fieldcoef", "in_topup_fieldcoef")]),
        (topup, eddy, [("out_movpar", "in_topup_movpar")]),
        (inputnode, denoise, [('dwi', 'in_file')]),
        (denoise, outputnode, [('out_file', 'dwi_denoised')]),
        (denoise, unring, [('out_file', 'in_file')]),
        (unring, outputnode, [('out_file', 'dwi_unringed')]),
        (unring, eddy, [("out_file", "in_file")]),
        (eddy, outputnode, [("out_corrected", "eddy_corr")]),
        (eddy, outputnode, [("out_parameter", "eddy_params")]),
        (eddy, outputnode, [("out_rotated_bvecs", "rotated_bvecs")]),
        (eddy, outputnode, [("out_movement_rms", "total_movement_rms")]),
        (eddy, outputnode, [("out_shell_alignment_parameters", "shell_params")
                            ]),
        (eddy, outputnode, [("out_outlier_report", "outlier_report")]),
        (eddy, outputnode, [("out_cnr_maps", "cnr_maps")]),
        (eddy, outputnode, [("out_residuals", "residuals")])
    ])

    return distor_correct
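A hedged sketch of feeding subject data into the workflow above (file names hypothetical; bvals/bvecs follow the FSL convention that eddy expects):

# Hypothetical inputs wired through the inputnode defined in the factory.
dc = create_distortion_correct()
dc.base_dir = "/tmp/dwi_work"
dc.inputs.inputnode.dwi = "dwi.nii.gz"
dc.inputs.inputnode.dwi_ap = "b0_ap.nii.gz"
dc.inputs.inputnode.dwi_pa = "b0_pa.nii.gz"
dc.inputs.inputnode.bvals = "dwi.bval"
dc.inputs.inputnode.bvecs = "dwi.bvec"
dc.run(plugin="MultiProc", plugin_args={"n_procs": 8})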
Example #53
0
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except Exception:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example #54
0
File: model.py  Project: fagan2888/lyman
def define_model_results_workflow(info, subjects, qc=True):

    # TODO I am copying a lot from above ...

    # --- Workflow parameterization and data input

    # We just need two levels of iterables here: one subject-level and
    # one "flat" run-level iterable (i.e. all runs collapsing over
    # sessions). Unlike in the model fit workflow, we always want to process
    # all sessions.

    scan_info = info.scan_info
    experiment = info.experiment_name
    model = info.model_name

    iterables = generate_iterables(scan_info, experiment, subjects)
    subject_iterables, run_iterables = iterables

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subject_iterables))

    run_source = Node(IdentityInterface(["subject", "run"]),
                      name="run_source",
                      itersource=("subject_source", "subject"),
                      iterables=("run", run_iterables))

    data_input = Node(ModelResultsInput(experiment=experiment,
                                        model=model,
                                        proc_dir=info.proc_dir),
                      "data_input")

    # --- Run-level contrast estimation

    estimate_contrasts = Node(EstimateContrasts(info=info.trait_get()),
                              "estimate_contrasts")

    # --- Subject-level contrast estimation

    model_results = JoinNode(ModelResults(info=info.trait_get()),
                             name="model_results",
                             joinsource="run_source",
                             joinfield=["contrast_files",
                                        "variance_files",
                                        "name_files"])

    # --- Data output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    run_output = Node(DataSink(base_directory=info.proc_dir,
                               parameterization=False),
                      "run_output")

    results_path = Node(ModelResultsPath(proc_dir=info.proc_dir,
                                         experiment=experiment,
                                         model=model),
                        "results_path")

    subject_output = Node(DataSink(base_directory=info.proc_dir,
                                   parameterization=False),
                          "subject_output")

    # === Assemble pipeline

    cache_base = op.join(info.cache_dir, experiment)
    workflow = Workflow(name="model_results", base_dir=cache_base)

    # Connect processing nodes

    processing_edges = [

        (subject_source, run_source,
            [("subject", "subject")]),
        (subject_source, data_input,
            [("subject", "subject")]),
        (run_source, data_input,
            [("run", "run_tuple")]),

        (data_input, estimate_contrasts,
            [("mask_file", "mask_file"),
             ("beta_file", "beta_file"),
             ("error_file", "error_file"),
             ("ols_file", "ols_file"),
             ("model_file", "model_file")]),

        (subject_source, model_results,
            [("subject", "subject")]),
        (data_input, model_results,
            [("anat_file", "anat_file")]),
        (estimate_contrasts, model_results,
            [("contrast_file", "contrast_files"),
             ("variance_file", "variance_files"),
             ("name_file", "name_files")]),

        (run_source, save_info,
            [("run", "parameterization")]),
        (save_info, run_output,
            [("info_file", "qc.@info_json")]),

        (data_input, run_output,
            [("output_path", "container")]),
        (estimate_contrasts, run_output,
            [("contrast_file", "@contrast"),
             ("variance_file", "@variance"),
             ("tstat_file", "@tstat"),
             ("name_file", "@names")]),

        (subject_source, results_path,
            [("subject", "subject")]),
        (results_path, subject_output,
            [("output_path", "container")]),
        (model_results, subject_output,
            [("result_directories", "@results")]),

    ]
    workflow.connect(processing_edges)

    return workflow
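The parameterization above hinges on generate_iterables, which is not shown here; a plausible shape for its return value, inferred from how the itersource is consumed (an assumption, not lyman's documented format):

# Assumed shape only: a subject list plus per-subject (session, run) tuples
# keyed by subject, matching the itersource=("subject_source", "subject") usage.
subject_iterables = ["subj01", "subj02"]
run_iterables = {
    "subj01": [("sess01", "run01"), ("sess01", "run02")],
    "subj02": [("sess01", "run01")],
}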
Example #55
0
cleanFaNode = mrmultNode.clone('multiplyFA_Mask')

thresholdFANode = Node(mrtrix.Threshold(), name='threshold_FA')
thresholdFANode.inputs.absolute_threshold_value = absolute_threshold_value

# Response function coefficient
estResponseNode = Node(mrtrix.EstimateResponseForSH(), name='estimate_deconv_response')

# CSD computation
csdNode = Node(mrtrix.ConstrainedSphericalDeconvolution(), name='compute_CSD')


# ### Connect the Nodes in the workflow

wf = Workflow(name='MRTrix_preproc')

wf.connect([
        (inputNode, fsl2mrtrixNode, [('bval_file', 'bval_file'),
                                        ('bvec_file', 'bvec_file'),
                                        (('tracking_dir', fileNameBuilder, fileNames['gradFile']), 'out_encoding_file')]),
        (inputNode, dwi2tensorNode, [('dwi_file', 'in_file'),
                                    (('tracking_dir', fileNameBuilder, fileNames['dtFile']), 'out_filename')]),
        (fsl2mrtrixNode, dwi2tensorNode, [('encoding_file', 'encoding_file')]),
        (dwi2tensorNode, tensor2faNode, [('tensor', 'in_file')]),
        (inputNode, tensor2faNode, [(('tracking_dir', fileNameBuilder, fileNames['faFile']), 'out_filename')]),
        (tensor2faNode, mrmultNode, [('FA', 'in1')]),
        (inputNode, mrmultNode, [('wmmask', 'in2')]),
        (inputNode, mrmultNode, [(('tracking_dir', fileNameBuilder, fileNames['faFile']), 'out_file')]),
        (dwi2tensorNode, tensor2vectorNode, [('tensor', 'in_file')]),
        (inputNode, tensor2vectorNode, [(('tracking_dir', fileNameBuilder, fileNames['evFile']), 'out_filename')]),
Example #56
0
File: RunFS.py  Project: sjh26/BRAINSTools
import os
import sqlite3

from nipype import Node, Workflow
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.io import FreeSurferSource

connection = sqlite3.connect(
    "/Shared/johnsonhj/HDNI/20151001_AtrophySimulation/results.db")
cursor = connection.cursor()

num_threads = 12

base_dir = "/Shared/sinapse/CACHE/20161010_AtrophySimulation_Baseline_CACHE"
for row in cursor.execute(
        "SELECT t1_image_file, t2_image_file, session_id FROM input"):
    session_id = str(row[2])
    t1_file = str(row[0])
    t2_file = str(row[1])

    wf = Workflow(name="FreeSurfer_{0}".format(session_id))

    subject_directory = os.path.dirname(os.path.dirname(t1_file))

    recon_all = Node(ReconAll(), "ReconAll")
    recon_all.inputs.T1_files = [t1_file]
    recon_all.inputs.T2_file = t2_file
    recon_all.inputs.openmp = num_threads
    recon_all.inputs.subject_id = "FreeSurfer"
    recon_all.inputs.flags = "-no-isrunning"
    recon_all.inputs.subjects_dir = os.path.join(
        "/Shared/sinapse/CACHE/20161010_AtrophySimulation_Baseline",
        session_id)
    recon_all.plugin_args = plugin_args = {
        "qsub_args": "-q HJ,UI,all.q,COE -pe smp {0}".format(num_threads),
        "overwrite": True,
Example #57
0
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = Workflow(name=name)

    inputnode = Node(interface=IdentityInterface(fields=['source_files',
                                                         'mean_image',
                                                         'subject_id',
                                                         'subjects_dir',
                                                         'target_image']),
                     name='inputspec')

    outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
                                                          'out_reg_file',
                                                          'anat2target_transform',
                                                          'transforms',
                                                          'transformed_mean',
                                                          'segmentation_files',
                                                          'anat2target',
                                                          'aparc',
                                                          'min_cost_file'
                                                          ]),
                      name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(),
                    name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'),
                   name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(),
                      name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')

    """
    Estimate the tissue classes from the anatomical image. But use aparc+aseg's brain mask
    """

    binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), name="binarize_aparc")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, "in_file")
    stripper = Node(fsl.ApplyMask(), name='stripper')
    register.connect(binarize, "binary_file", stripper, "mask_file")
    register.connect(convert, 'out_file', stripper, 'in_file')

    fast = Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')

    """
    Binarize the segmentation
    """

    binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
                       iterfield=['in_file'],
                       name='binarize')
    register.connect(fast, 'partial_volume_files', binarize, 'in_file')

    """
    Apply inverse transform to take segmentations to functional space
    """

    applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
                                                    interp='nearest'),
                       iterfield=['target_file'],
                       name='inverse_transform')
    register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
    register.connect(binarize, 'out_file', applyxfm, 'target_file')
    register.connect(inputnode, 'mean_image', applyxfm, 'source_file')

    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
                     aparcxfm, 'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')

    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')

    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {'sbatch_args': '-c%d' % 4}
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')

    """
    Concatenate the affine and ants transforms into a list
    """

    merge = Node(Merge(2), name='mergexfm')  # iterfield dropped: it only applies to MapNode
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')

    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 3
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    warpmean.inputs.num_threads = 4
    warpmean.plugin_args = {'sbatch_args': '-c%d' % 4}

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')

    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(applyxfm, 'transformed_file',
                     outputnode, 'segmentation_files')
    register.connect(aparcxfm, 'transformed_file',
                     outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file',
                     outputnode, 'func2anat_transform')
    register.connect(bbregister, 'out_reg_file',
                     outputnode, 'out_reg_file')
    register.connect(reg, 'composite_transform',
                     outputnode, 'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')
    register.connect(bbregister, 'min_cost_file',
                     outputnode, 'min_cost_file')

    return register
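A hedged usage sketch for the registration factory above (input files hypothetical; FSL's bundled MNI brain is a convenient target):

import nipype.interfaces.fsl as fsl

# Hypothetical inputs; subjects_dir/subject_id must point at a recon-all output.
reg = create_reg_workflow()
reg.inputs.inputspec.subject_id = "subj01"
reg.inputs.inputspec.subjects_dir = "/data/freesurfer"
reg.inputs.inputspec.mean_image = "mean_func.nii.gz"
reg.inputs.inputspec.source_files = ["run1.nii.gz", "run2.nii.gz"]
reg.inputs.inputspec.target_image = fsl.Info.standard_image("MNI152_T1_2mm_brain.nii.gz")
reg.run()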
Example #59
0
        '{contrast_id}.nii')
}

selectfiles = MapNode(SelectFiles(templates,
                                  base_directory='/home/rj299/scratch60/mdm_analysis/work/',
                                  sort_filelist=True),
                      name="selectfiles",
                      iterfield=['subject_id'])

datasink = Node(nio.DataSink(
    base_directory='/home/rj299/scratch60/mdm_analysis/output/imaging/Sink_resp_sv/'),
    name="datasink")

l2analysis = Workflow(name='l2spm_sv_glm_heightp05')

l2analysis.base_dir = '/home/rj299/scratch60/mdm_analysis/work/'

l2analysis.connect([
    (infosource, selectfiles, [('contrast_id', 'contrast_id'),
                               ('subject_id', 'subject_id')]),
    (selectfiles, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')
                                         ]),
    (level2conestimate, level2thresh, [
        ('spm_mat_file', 'spm_mat_file'),
        ('spmT_images', 'stat_image'),
Example #60
0
File: model.py  Project: fagan2888/lyman
def define_model_fit_workflow(info, subjects, sessions, qc=True):

    # --- Workflow parameterization and data input

    # We just need two levels of iterables here: one subject-level and
    # one "flat" run-level iterable (i.e. all runs collapsing over
    # sessions). But we want to be able to specify sessions to process.

    scan_info = info.scan_info
    experiment = info.experiment_name
    model = info.model_name

    iterables = generate_iterables(scan_info, experiment, subjects, sessions)
    subject_iterables, run_iterables = iterables

    subject_source = Node(IdentityInterface(["subject"]),
                          name="subject_source",
                          iterables=("subject", subject_iterables))

    run_source = Node(IdentityInterface(["subject", "run"]),
                      name="run_source",
                      itersource=("subject_source", "subject"),
                      iterables=("run", run_iterables))

    data_input = Node(ModelFitInput(experiment=experiment,
                                    model=model,
                                    proc_dir=info.proc_dir),
                      "data_input")

    # --- Data filtering and model fitting

    fit_model = Node(ModelFit(data_dir=info.data_dir,
                              info=info.trait_get()),
                     "fit_model")

    # --- Data output

    save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")

    data_output = Node(DataSink(base_directory=info.proc_dir,
                                parameterization=False),
                       "data_output")

    # === Assemble pipeline

    cache_base = op.join(info.cache_dir, experiment)
    workflow = Workflow(name="model_fit", base_dir=cache_base)

    # Connect processing nodes

    processing_edges = [

        (subject_source, run_source,
            [("subject", "subject")]),
        (subject_source, data_input,
            [("subject", "subject")]),
        (run_source, data_input,
            [("run", "run_tuple")]),

        (data_input, fit_model,
            [("subject", "subject"),
             ("session", "session"),
             ("run", "run"),
             ("seg_file", "seg_file"),
             ("surf_file", "surf_file"),
             ("edge_file", "edge_file"),
             ("mask_file", "mask_file"),
             ("ts_file", "ts_file"),
             ("noise_file", "noise_file"),
             ("mc_file", "mc_file")]),

        (data_input, data_output,
            [("output_path", "container")]),
        (fit_model, data_output,
            [("mask_file", "@mask"),
             ("beta_file", "@beta"),
             ("error_file", "@error"),
             ("ols_file", "@ols"),
             ("resid_file", "@resid"),
             ("model_file", "@model")]),

    ]
    workflow.connect(processing_edges)

    qc_edges = [

        (run_source, save_info,
            [("run", "parameterization")]),
        (save_info, data_output,
            [("info_file", "qc.@info_json")]),

        (fit_model, data_output,
            [("model_plot", "qc.@model_plot"),
             ("nuisance_plot", "qc.@nuisance_plot"),
             ("resid_plot", "qc.@resid_plot"),
             ("error_plot", "qc.@error_plot")]),

    ]
    if qc:
        workflow.connect(qc_edges)

    return workflow
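As with the template workflow, the qc edges are attached only on request; a hedged invocation narrowing the fit to one session (argument values hypothetical):

# Hypothetical values; sessions filters the run iterables built above.
wf = define_model_fit_workflow(info, subjects=["subj01"],
                               sessions=["sess01"], qc=False)
wf.run(plugin="Linear")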