Example #1
def init(name, language):
    path, full_name, organization, workflow = normalize_path(name)

    click.echo(f'Initializing workflow: {path.name} in {path.parent}')
    path.mkdir(parents=True, exist_ok=True)
    sp.run(['git', '-C', str(path), 'init'], check=True)

    try:
        git_vars = git_variables(path, 'user.name', 'user.email')
    except KeyError:
        username = click.prompt("Enter package author name")
        email = click.prompt("Enter package author email")
    else:
        username = git_vars['user.name']
        email = git_vars['user.email']

    mapping = {
        'USERNAME': username,
        'USEREMAIL': email,
        'ORGANIZATION': organization,
        'WORKFLOW': workflow,
        'FULLNAME': full_name,
    }

    copytree(pkgr_fn('niflow_manager', 'data/templates/base'),
             path,
             mapping=mapping)

    if language is not None:
        language_dir = Path(
            pkgr_fn('niflow_manager', f'data/templates/{language}'))
        try:
            copytree(language_dir, path, mapping=mapping)
        except FileNotFoundError:
            raise ValueError(f"Unknown language: {language}")
Example #2
def init(name, language, bids_version):
    path, full_name, organization, workflow = normalize_path(name)

    click.echo(f"Initializing workflow: {path.name} in {path.parent}")
    path.mkdir(parents=True, exist_ok=True)
    sp.run(["git", "-C", str(path), "init"], check=True)

    try:
        git_vars = git_variables(path, "user.name", "user.email")
    except KeyError:
        username = click.prompt("Enter package author name")
        email = click.prompt("Enter package author email")
    else:
        username = git_vars["user.name"]
        email = git_vars["user.email"]

    mapping = {
        "USERNAME": username,
        "USEREMAIL": email,
        "ORGANIZATION": organization,
        "WORKFLOW": workflow,
        "FULLNAME": full_name,
    }

    copytree(pkgr_fn("niflow_manager", "data/templates/base"), path, mapping=mapping)

    if language is not None:
        language_dir = Path(pkgr_fn("niflow_manager", f"data/templates/{language}"))
        try:
            copytree(language_dir, path, mapping=mapping)
        except FileNotFoundError:
            raise ValueError(f"Unknown language: {language}")

    if bids_version is not None:
        if language is None:
            raise ValueError(
                "BIDS App templates are language-specific; please specify --language"
            )
        bids_app_dir = Path(
            pkgr_fn(
                "niflow_manager", f"data/templates/{language}-bidsapp-{bids_version}"
            )
        )
        try:
            copytree(bids_app_dir, path, policy=CopyPolicy.OVERWRITE, mapping=mapping)
        except FileNotFoundError:
            raise ValueError(f"No BIDS App template for language: {language}")
Example #3
    def _run_interface(self, runtime):

        # Gather inputs for TOPUP
        topup_prefix = op.join(runtime.cwd, "topup_")
        topup_datain_file, topup_imain_file, topup_text, b0_csv, topup0, eddy0 = \
            get_best_b0_topup_inputs_from(
                dwi_file=self.inputs.dwi_file,
                bval_file=self.inputs.bval_file,
                b0_threshold=self.inputs.b0_threshold,
                cwd=runtime.cwd,
                bids_origin_files=self.inputs.original_files,
                epi_fmaps=self.inputs.epi_fmaps,
                max_per_spec=self.inputs.topup_max_b0s_per_spec,
                topup_requested=self.inputs.topup_requested)
        self._results['topup_datain'] = topup_datain_file
        self._results['topup_imain'] = topup_imain_file
        self._results['topup_report'] = topup_text
        self._results['b0_csv'] = b0_csv
        self._results['topup_first'] = topup0
        self._results['eddy_first'] = eddy0

        # If any axis has an odd number of slices, use b02b0_1.cnf
        example_b0 = nb.load(self.inputs.dwi_file)
        self._results['topup_config'] = 'b02b0.cnf'
        if 1 in (example_b0.shape[0] % 2, example_b0.shape[1] % 2,
                 example_b0.shape[2] % 2):
            LOGGER.warning(
                "Using slower b02b0_1.cnf because an axis has an odd number of slices"
            )
            self._results['topup_config'] = pkgr_fn('qsiprep.data',
                                                    'b02b0_1.cnf')

        # For the apply topup report:
        pre_topup_image = index_img(topup_imain_file, 0)
        pre_topup_image_file = topup_prefix + "pre_image.nii.gz"
        pre_topup_image.to_filename(pre_topup_image_file)
        self._results['pre_topup_image'] = pre_topup_image_file

        # Gather inputs for eddy
        eddy_prefix = op.join(runtime.cwd, "eddy_")
        acqp_file, index_file = eddy_inputs_from_dwi_files(
            self.inputs.original_files, eddy_prefix)
        self._results['eddy_acqp'] = acqp_file
        self._results['eddy_indices'] = index_file

        # these have already had HMC, SDC applied
        self._results['forward_transforms'] = []
        self._results['forward_warps'] = []
        return runtime
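
The topup_config selection above is self-contained enough to isolate. A standalone sketch of the same check, using nibabel as the interface does:

import nibabel as nb
from pkg_resources import resource_filename as pkgr_fn

def pick_topup_config(dwi_file):
    # TOPUP's default b02b0.cnf subsamples the image and assumes even
    # matrix dimensions; fall back to the slower b02b0_1.cnf (shipped
    # with qsiprep, per the code above) when any spatial axis is odd.
    img = nb.load(dwi_file)
    if any(dim % 2 for dim in img.shape[:3]):
        return pkgr_fn('qsiprep.data', 'b02b0_1.cnf')
    return 'b02b0.cnf'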
Example #4
    def _run_interface(self, runtime):
        rpe_b0 = self.inputs.rpe_b0
        original_files = self.inputs.original_files
        b0_indices = self.inputs.b0_indices
        dwi_files = self.inputs.dwi_files

        # Should we get metadata from the rpe_b0?
        rpe_files = []
        if isdefined(rpe_b0):
            rpe_files = [rpe_b0]

        # Gather inputs for TOPUP
        topup_prefix = op.join(runtime.cwd, "topup_")
        # get original files for their bids metadata
        b0_source_images = [original_files[idx]
                            for idx in b0_indices] + rpe_files
        # this will be all the original b=0 files and the b=0 files from the rpe series
        b0_images = self.inputs.b0_images + rpe_files
        topup_datain_file, topup_imain_file = topup_inputs_from_dwi_files(
            b0_source_images, b0_images, topup_prefix, runtime.cwd)
        self._results['topup_datain'] = topup_datain_file
        self._results['topup_imain'] = topup_imain_file

        # If any axis has an odd number of slices, use b02b0_1.cnf
        example_b0 = nb.load(b0_images[0])
        self._results['topup_config'] = 'b02b0.cnf'
        if 1 in (example_b0.shape[0] % 2, example_b0.shape[1] % 2,
                 example_b0.shape[2] % 2):
            LOGGER.warning(
                "Using slower b02b0_1.cnf because an axis has an odd number of slices"
            )
            self._results['topup_config'] = pkgr_fn('qsiprep.data',
                                                    'b02b0_1.cnf')

        # For the apply topup report:
        self._results['pre_topup_image'] = b0_images[0]

        # Gather inputs for eddy
        eddy_prefix = op.join(runtime.cwd, "eddy_")
        acqp_file, index_file = eddy_inputs_from_dwi_files(
            original_files, dwi_files, eddy_prefix)
        self._results['eddy_acqp'] = acqp_file
        self._results['eddy_indices'] = index_file

        # these have already had HMC, SDC applied
        self._results['forward_transforms'] = []
        self._results['forward_warps'] = []
        return runtime
Example #5
def test_applytfms(tmpdir, ext, copy_dtype, in_dtype):
    import numpy as np
    import nibabel as nb
    from pkg_resources import resource_filename as pkgr_fn

    in_file = str(tmpdir / ("src" + ext))
    nii = nb.Nifti1Image(np.zeros((5, 5, 5), dtype=np.float32), np.eye(4))
    nii.set_data_dtype(in_dtype)
    nii.to_filename(in_file)

    in_xform = pkgr_fn("niworkflows", "data/itkIdentityTransform.txt")

    ifargs = {"copy_dtype": copy_dtype, "reference_image": in_file}
    args = (in_file, in_xform, ifargs, 0, str(tmpdir))
    out_file, cmdline = _applytfms(args)

    assert out_file == str(tmpdir / ("src_xform-%05d%s" % (0, ext)))

    out_nii = nb.load(out_file)
    assert np.allclose(nii.affine, out_nii.affine)
    assert np.allclose(nii.get_fdata(), out_nii.get_fdata())
    if copy_dtype:
        assert nii.get_data_dtype() == out_nii.get_data_dtype()
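
The fixture arguments (ext, copy_dtype, in_dtype) imply the test is parametrized. A plausible, hypothetical decorator stack (the actual parameter values in the upstream suite may differ):

import pytest

@pytest.mark.parametrize('ext', ['.nii', '.nii.gz'])
@pytest.mark.parametrize('copy_dtype', [True, False])
@pytest.mark.parametrize('in_dtype', ['i2', 'f4'])
def test_applytfms(tmpdir, ext, copy_dtype, in_dtype):
    ...  # body as in the example above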
Example #6
def init_anat_seg_wf(
    age_months=None,
    anat_modality="T1w",
    template_dir=None,
    sloppy=False,
    omp_nthreads=1,
    name="anat_seg_wf",
):
    """
    Calculate brain tissue segmentations from either:
     A) a collection of manually segmented templates
     B) FSL's FAST
    """

    if anat_modality != "T1w":
        raise NotImplementedError(
            "Only T1w images are currently accepted for the segmentation workflow."
        )

    wf = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=["anat_brain"]),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["anat_aseg", "anat_dseg", "anat_tpms"]),
        name="outputnode",
    )

    # Coerce segmentation labels to BIDS
    lut_anat_dseg = pe.Node(niu.Function(function=_apply_bids_lut),
                            name="lut_anat_dseg")

    if template_dir is None:
        # Use FSL FAST for segmentations
        anat_dseg = pe.Node(fsl.FAST(segments=True,
                                     no_bias=True,
                                     probability_maps=True),
                            name='anat_dseg',
                            mem_gb=3)
        lut_anat_dseg.inputs.lut = (0, 3, 1, 2)  # Maps: 0 -> 0, 3 -> 1, 1 -> 2, 2 -> 3.
        fast2bids = pe.Node(niu.Function(function=_probseg_fast2bids),
                            name="fast2bids",
                            run_without_submitting=True)

        wf.connect([
            (inputnode, anat_dseg, [
                ('anat_brain', 'in_files'),
            ]),
            (anat_dseg, lut_anat_dseg, [
                ('partial_volume_map', 'in_dseg'),
            ]),
            (lut_anat_dseg, outputnode, [
                ('out', 'anat_dseg'),
            ]),
            (anat_dseg, fast2bids, [
                ('partial_volume_files', 'inlist'),
            ]),
            (fast2bids, outputnode, [
                ('out', 'anat_tpms'),
            ]),
        ])
        return wf

    # Otherwise, register to templates and run ANTs JointFusion
    lut_anat_dseg.inputs.lut = _aseg_to_three()
    tmpl_anats, tmpl_segs = _parse_segmentation_atlases(
        anat_modality, template_dir)

    ants_params = "testing" if sloppy else "precise"
    # Register each template to the subject's anatomical space
    norm = pe.MapNode(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        iterfield=["moving_image"],
        n_procs=omp_nthreads,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    norm.inputs.moving_image = tmpl_anats
    norm.inputs.float = True

    apply_atlas = pe.MapNode(
        ApplyTransforms(
            dimension=3,
            interpolation="NearestNeighbor",
            float=True,
        ),
        iterfield=["transforms", "input_image"],
        name="apply_atlas",
    )
    apply_atlas.inputs.input_image = tmpl_anats

    apply_seg = pe.MapNode(
        ApplyTransforms(dimension=3,
                        interpolation="MultiLabel"),  # NearestNeighbor?
        name="apply_seg",
        iterfield=["transforms", "input_image"],
    )
    apply_seg.inputs.input_image = tmpl_segs

    jointfusion = pe.Node(
        JointFusion(
            dimension=3,
            out_label_fusion="fusion_labels.nii.gz",
            num_threads=omp_nthreads,
        ),
        name="jointfusion",
    )

    jf_label = pe.Node(niu.Function(function=_to_dtype), name="jf_label")

    # split each tissue into individual masks
    split_seg = pe.Node(niu.Function(function=_split_segments),
                        name="split_seg")
    to_list = pe.Node(niu.Function(function=_to_list), name='to_list')

    # fmt: off
    wf.connect([
        (inputnode, norm, [('anat_brain', 'fixed_image')]),
        (norm, apply_atlas, [('forward_transforms', 'transforms')]),
        (inputnode, apply_atlas, [('anat_brain', 'reference_image')]),
        (norm, apply_seg, [('forward_transforms', 'transforms')]),
        (inputnode, apply_seg, [('anat_brain', 'reference_image')]),
        (inputnode, to_list, [('anat_brain', 'in_file')]),
        (to_list, jointfusion, [('out', 'target_image')]),
        (apply_atlas, jointfusion, [('output_image', 'atlas_image')]),
        (apply_seg, jointfusion, [('output_image', 'atlas_segmentation_image')]),
        (jointfusion, jf_label, [('out_label_fusion', 'in_file')]),
        (jf_label, outputnode, [('out', 'anat_aseg')]),
        (jf_label, lut_anat_dseg, [('out', 'in_dseg')]),
        (lut_anat_dseg, outputnode, [('out', 'anat_dseg')]),
        (lut_anat_dseg, split_seg, [('out', 'in_file')]),
        (split_seg, outputnode, [('out', 'anat_tpms')]),
    ])
    # fmt: on

    return wf
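
About the LUT handed to _apply_bids_lut: assuming the helper applies it by numpy fancy indexing (new = lut[old]), the tuple (0, 3, 1, 2) turns FSL FAST's labels (1=CSF, 2=GM, 3=WM) into the BIDS convention (1=GM, 2=WM, 3=CSF). A small sketch under that assumption:

import numpy as np

def apply_lut(dseg, lut=(0, 3, 1, 2)):
    # Each voxel's old label indexes into `lut` to pick its new label:
    # lut[1]=3 sends CSF to 3, lut[2]=1 sends GM to 1, lut[3]=2 sends WM to 2.
    return np.asarray(lut, dtype=np.int16)[np.asarray(dseg, dtype=np.uint16)]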
Example #7
def add(
    template_id,
    osf_user,
    osf_password,
    osf_overwrite,
    gh_user,
    gh_token,
    path,
    nprocs,
):
    """Add a new template."""
    from .io import run_command
    from .utils import copy_template
    import shutil
    from datalad import api as dl

    gh_password = getenv("GITHUB_PASSWORD")
    if not gh_user or not gh_token:
        raise click.BadParameter("Insufficient secrets to login into GitHub")

    path = Path(path or f"tpl-{template_id}").absolute()
    cwd = Path.cwd()

    if not path.exists():
        raise click.UsageError(f"<{path}> does not exist.")

    metadata = {}

    # Check metadata
    if (path / "template_description.json").exists():
        metadata = json.loads((path / "template_description.json").read_text())
    metadata["Identifier"] = template_id

    # Check license
    license_path = path / "LICENSE"
    if not license_path.exists():
        license_path = path / "LICENCE"
    if not license_path.exists():
        license_path = path / "COPYING"

    if not license_path.exists():
        license_prompt = click.prompt(
            text="""\
A LICENSE file MUST be distributed with the template. The TemplateFlow Manager can \
set a license (either CC0 or CC-BY) for you.""",
            type=click.Choice(("CC0", "CC-BY", "Custom (abort)")),
            default="Custom (abort)",
        )
        if license_prompt == "Custom (abort)":
            raise click.UsageError(
                "Cannot proceed without a valid license. Please write a LICENSE "
                "file before uploading.")

        license_path = Path(
            pkgr_fn("tfmanager", f"data/{license_prompt}.LICENSE"))
        metadata["License"] = license_prompt

    # Check RRID
    if not metadata.get("RRID"):
        rrid = click.prompt(
            text="Has a RRID (research resource ID) already been assigned?",
            type=str,
            default='') or None

        if rrid:
            metadata["RRID"] = rrid

    # Check short description
    if not metadata.get("Name", "").strip():
        short_desc = click.prompt(
            text="""\
The "Name" metadata is not found within the <template_description.json> file. \
Please provide a short description for this resource.""",
            type=str,
        )

        if not short_desc:
            raise click.UsageError(
                "Cannot proceed without a short description.")

        metadata["Name"] = short_desc

    # Check authors
    authors_prompt = [
        a.strip() for a in metadata.get("Authors", []) if a.strip()
    ]
    if not authors_prompt:
        authors_prompt = [
            n.strip() for n in click.prompt(
                text="""\
The "Authors" metadata is not found within the <template_description.json> file. \
Please provide a list of authors separated by semicolon (;) in <Lastname Initial(s)> format.""",
                type=str,
            ).split(";") if n
        ]
        if not authors_prompt:
            click.confirm("No authors were given, do you want to continue?",
                          abort=True)

    metadata["Authors"] = authors_prompt

    # Check references
    refs_prompt = [
        f"""\
{'https://doi.org/' if not a.strip().startswith('http') else ''}\
{a.replace("doi:", "").strip()}"""
        for a in metadata.get("ReferencesAndLinks", []) if a.strip()
    ]
    if not refs_prompt:
        refs_prompt = [
            n.replace('"', "").strip() for n in click.prompt(
                text="""\
The "ReferencesAndLinks" metadata is not found within the <template_description.json> file. \
Please provide a list of links and publications within double-quotes \
(for example, "doi:10.1101/2021.02.10.430678") and separated by spaces (< >).""",
                type=str,
            ).split(" ") if n
        ]
        if not refs_prompt:
            click.confirm("No authors were given, do you want to continue?",
                          abort=True)
    metadata["ReferencesAndLinks"] = refs_prompt

    with TemporaryDirectory() as tmpdir:
        repodir = Path(tmpdir) / "templateflow"

        # Clone root <user>/templateflow project - fork if necessary
        click.echo(f"Preparing Pull-Request (wd={tmpdir}).")
        clone = run_command(
            f"git clone https://github.com/{gh_user}/templateflow.git "
            "--branch tpl-intake --single-branch",
            cwd=tmpdir,
            capture_output=False,
        )
        if clone.returncode != 0:
            run_command(
                "hub clone templateflow/templateflow",
                cwd=tmpdir,
                capture_output=False,
                env={
                    "GITHUB_USER": gh_user,
                    "GITHUB_PASSWORD": gh_password
                },
            )
            run_command(
                "hub fork --remote-name origin",
                cwd=str(repodir),
                capture_output=False,
                env={
                    "GITHUB_USER": gh_user,
                    "GITHUB_PASSWORD": gh_password
                },
            )
        else:
            run_command(
                "git remote add upstream https://github.com/templateflow/templateflow.git",
                cwd=str(repodir),
                capture_output=False,
            )

        chdir(repodir)

        # Create datalad dataset
        dl.create(
            path=f"tpl-{template_id}",
            cfg_proc="text2git",
            initopts={"initial-branch": "main"},
            description=metadata["Name"],
        )

        # Populate template
        copy_template(
            path=path,
            dest=repodir / f"tpl-{template_id}",
        )
        # Copy license
        shutil.copy(license_path, repodir / f"tpl-{template_id}" / "LICENSE")
        # (Over)write template_description.json
        (repodir / f"tpl-{template_id}" /
         "template_description.json").write_text(json.dumps(metadata,
                                                            indent=2))
        # Init/update CHANGELOG
        changelog = repodir / f"tpl-{template_id}" / "CHANGES"
        changes = [
            f"""
## {datetime.date.today().ctime()} - TemplateFlow Manager Upload
Populated contents after NIfTI sanitizing by the TF Manager.

"""
        ]
        if changelog.exists():
            changes += [changelog.read_text()]
        changelog.write_text("\n".join(changes))

        # Init OSF sibling
        rrid_str = f" (RRID: {metadata['RRID']})" if metadata.get(
            "RRID") else ""
        dl.create_sibling_osf(
            title=f"TemplateFlow resource: <{template_id}>{rrid_str}",
            name="osf",
            dataset=f"./tpl-{template_id}",
            public=True,
            category="data",
            description=metadata["Name"],
            tags=["TemplateFlow dataset", template_id])
        # Init GH sibling
        dl.create_sibling_github(reponame=f"tpl-{template_id}",
                                 dataset=str(repodir / f"tpl-{template_id}"),
                                 github_login=gh_user,
                                 publish_depends="osf-storage",
                                 existing="replace",
                                 access_protocol="ssh")

        # Save added contents
        dl.save(dataset=str(repodir / f"tpl-{template_id}"),
                message="ADD: TemplateFlow Manager initialized contents")

        # Push to siblings
        dl.push(
            dataset=str(repodir / f"tpl-{template_id}"),
            to="github",
            jobs=cpu_count(),
        )

        # Back home
        chdir(cwd)

        run_command(
            "git fetch upstream tpl-intake",
            cwd=str(repodir),
            capture_output=False,
        )
        run_command(
            f"git checkout -b pr/tpl-{template_id} upstream/tpl-intake",
            cwd=str(repodir),
            capture_output=False,
        )
        (repodir / f"{path.name}.toml").write_text(
            toml.dumps({
                "github": {
                    "user": gh_user
                },
            }))
        run_command(
            f"git add {path.name}.toml",
            cwd=str(repodir),
            capture_output=False,
        )
        run_command(
            f"git commit -m 'add(tpl-{template_id}): create intake file'",
            cwd=str(repodir),
            capture_output=False,
        )
        run_command(
            f"git push -u origin pr/tpl-{template_id}",
            cwd=str(repodir),
            capture_output=False,
            env={
                "GITHUB_USER": gh_user,
                "GITHUB_TOKEN": gh_token
            },
        )

        (repodir.parent / "message.md").write_text(f"""\
ADD: ``tpl-{template_id}``

## {metadata.get('Name', '<missing Name>')}

Identifier: {metadata.get('Identifier', '<missing Identifier>')}
Datalad: https://github.com/{gh_user}/tpl-{template_id}

### Authors
{', '.join(metadata['Authors'])}.

### License
{metadata.get('License', metadata.get('Licence', '<missing License>'))}

### Cohorts
{' '.join(('The dataset contains', str(len(metadata.get('cohort', []))), 'cohorts.'))
 if metadata.get('cohort') else 'The dataset does not contain cohorts.'}

### References and links
{', '.join(metadata.get('ReferencesAndLinks', [])) or 'N/A'}

""")
        run_command(
            "hub pull-request -b templateflow:tpl-intake "
            f"-h {gh_user}:pr/tpl-{template_id} "
            f"-F {repodir.parent / 'message.md'}",
            cwd=str(repodir),
            capture_output=False,
            env={
                "GITHUB_USER": gh_user,
                "GITHUB_TOKEN": gh_token
            },
        )
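
run_command is imported from a sibling module (.io) and not shown. A hypothetical stand-in consistent with the call sites above (a string command, cwd, capture_output, an env dict merged into the inherited environment, and a .returncode on the result):

import os
import shlex
import subprocess

def run_command(cmd, cwd=None, capture_output=True, env=None):
    # Merge extra variables into the inherited environment so git/hub
    # still see PATH, HOME, etc.
    merged_env = dict(os.environ)
    if env:
        merged_env.update({k: v for k, v in env.items() if v is not None})
    return subprocess.run(shlex.split(cmd), cwd=cwd, env=merged_env,
                          capture_output=capture_output, text=True)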
Example #8
def init_fsl_hmc_wf(scan_groups,
                    b0_threshold,
                    impute_slice_threshold,
                    fmap_demean,
                    fmap_bspline,
                    eddy_config,
                    mem_gb=3,
                    omp_nthreads=1,
                    dwi_metadata=None,
                    slice_quality='outlier_n_sqr_stdev_map',
                    name="fsl_hmc_wf"):
    """
    This workflow controls the DWI preprocessing stages using FSL tools.

    **Parameters**
        scan_groups: dict
            dictionary with fieldmaps and warp space information for the dwis
        impute_slice_threshold: float
            threshold for a slice to be replaced with imputed values. Overrides the
            parameter in ``eddy_config`` if set to a number > 0.
        do_topup: bool
            Whether TOPUP should be run before ``eddy``; requires an RPE
            series or an RPE b=0 image.
        eddy_config: str
            Path to a JSON file containing settings for the call to ``eddy``.


    **Inputs**

        dwi_files: list
            List of single-volume files across all DWI series
        b0_indices: list
            Indexes into ``dwi_files`` that correspond to b=0 volumes
        bvecs: list
            List of paths to single-line bvec files
        bvals: list
            List of paths to single-line bval files
        b0_images: list
            List of single b=0 volumes
        original_files: list
            List of the files from which each DWI volume came.

    **Outputs**


    **Subworkflows**

    """

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['dwi_files', 'b0_indices', 'bvec_files', 'bval_files', 'b0_images',
                    'original_files', 't1_brain', 't1_2_mni_reverse_transform']),
        name='inputnode')

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["b0_template", "b0_template_mask", "pre_sdc_template",
                    "hmc_optimization_data", "sdc_method", 'slice_quality', 'motion_params',
                    "cnr_map", "bvec_files_to_transform", "dwi_files_to_transform", "b0_indices",
                    "to_dwi_ref_affines", "to_dwi_ref_warps", "rpe_b0_info"]),
        name='outputnode')

    workflow = Workflow(name=name)
    gather_inputs = pe.Node(GatherEddyInputs(), name="gather_inputs")
    if eddy_config is None:
        # load from the defaults
        eddy_cfg_file = pkgr_fn('qsiprep.data', 'eddy_params.json')
    else:
        eddy_cfg_file = eddy_config

    with open(eddy_cfg_file, "r") as f:
        eddy_args = json.load(f)

    eddy = pe.Node(ExtendedEddy(**eddy_args), name="eddy")
    dwi_merge = pe.Node(MergeDWIs(), name="dwi_merge")
    spm_motion = pe.Node(Eddy2SPMMotion(), name="spm_motion")

    workflow.connect([
        (inputnode, gather_inputs, [
            ('dwi_files', 'dwi_files'),
            ('bval_files', 'bval_files'),
            ('bvec_files', 'bvec_files'),
            ('b0_indices', 'b0_indices'),
            ('b0_images', 'b0_images'),
            ('original_files', 'original_files')]),
        (inputnode, dwi_merge, [
            ('dwi_files', 'dwi_files'),
            ('bval_files', 'bval_files'),
            ('bvec_files', 'bvec_files'),
            ('original_files', 'bids_dwi_files')]),
        (gather_inputs, eddy, [
            ('eddy_indices', 'in_index'),
            ('eddy_acqp', 'in_acqp')]),
        (dwi_merge, eddy, [
            ('out_dwi', 'in_file'),
            ('out_bval', 'in_bval'),
            ('out_bvec', 'in_bvec')]),
        (gather_inputs, outputnode, [
            ('pre_topup_image', 'pre_sdc_template'),
            ('forward_warps', 'to_dwi_ref_warps'),
            ('forward_transforms', 'to_dwi_ref_affines')])
    ])

    # If an RPE b=0 reference is provided, use it for TOPUP
    rpe_b0 = None
    fieldmap_type = scan_groups['fieldmap_info']['type']
    if fieldmap_type == 'epi':
        rpe_b0 = scan_groups['fieldmap_info']['epi']
    elif fieldmap_type == 'rpe_series':
        rpe_b0 = scan_groups['fieldmap_info']['rpe_series'][0]

    if rpe_b0 is not None:
        outputnode.inputs.sdc_method = "TOPUP"
        gather_inputs.inputs.rpe_b0 = rpe_b0
        prepare_rpe_b0 = pe.Node(B0RPEFieldmap(b0_file=rpe_b0), name="prepare_rpe_b0")

        topup = pe.Node(fsl.TOPUP(), name="topup")
        unwarped_mean = pe.Node(afni.TStat(outputtype='NIFTI_GZ'), name='unwarped_mean')
        unwarped_enhance = init_enhance_and_skullstrip_dwi_wf(name='unwarped_enhance')

        workflow.connect([
            (prepare_rpe_b0, outputnode, [('fmap_info', 'inputnode.rpe_b0_info')]),
            (prepare_rpe_b0, gather_inputs, [('fmap_file', 'rpe_b0')]),
            (gather_inputs, topup, [
                ('topup_datain', 'encoding_file'),
                ('topup_imain', 'in_file'),
                ('topup_config', 'config')]),
            (topup, unwarped_mean, [('out_corrected', 'in_file')]),
            (unwarped_mean, unwarped_enhance, [('out_file', 'inputnode.in_file')]),
            (unwarped_enhance, outputnode, [
                ('outputnode.skull_stripped_file', 'b0_template')]),
            (unwarped_enhance, outputnode, [
                ('outputnode.mask_file', 'b0_template_mask')]),
            (unwarped_enhance, eddy, [
                ('outputnode.mask_file', 'in_mask')]),
            (topup, eddy, [
                ('out_movpar', 'in_topup_movpar'),
                ('out_fieldcoef', 'in_topup_fieldcoef')]),
        ])
    elif fieldmap_type in ('fieldmap', 'phasediff', 'phase'):
        outputnode.inputs.sdc_method = fieldmap_type
        b0_enhance = init_enhance_and_skullstrip_dwi_wf(name='b0_enhance')
        b0_sdc_wf = init_sdc_wf(
            scan_groups['fieldmap_info'], dwi_metadata, omp_nthreads=omp_nthreads,
            fmap_demean=fmap_demean, fmap_bspline=fmap_bspline)

        workflow.connect([
            (gather_inputs, b0_enhance, [('pre_topup_image', 'inputnode.in_file')]),
            (b0_enhance, b0_sdc_wf, [
                ('outputnode.bias_corrected_file', 'inputnode.b0_ref'),
                ('outputnode.skull_stripped_file', 'inputnode.b0_ref_brain'),
                ('outputnode.mask_file', 'inputnode.b0_mask')]),
            (inputnode, b0_sdc_wf, [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_2_mni_reverse_transform',
                 'inputnode.t1_2_mni_reverse_transform')]),
            (b0_sdc_wf, outputnode, [
                ('outputnode.method', 'sdc_method'),
                ('outputnode.b0_ref', 'b0_template'),
                ('outputnode.b0_mask', 'b0_template_mask')]),
            (b0_sdc_wf, eddy, [
                ('outputnode.fieldmap_hz', 'field'),
                ('outputnode.b0_mask', 'in_mask')])

        ])
    else:
        outputnode.inputs.sdc_method = "None"
        b0_enhance = init_enhance_and_skullstrip_dwi_wf(name='b0_enhance')
        workflow.connect([
            (gather_inputs, b0_enhance, [('pre_topup_image', 'inputnode.in_file')]),
            (b0_enhance, outputnode, [
                ('outputnode.skull_stripped_file', 'b0_template')]),
            (b0_enhance, outputnode, [
                ('outputnode.mask_file', 'b0_template_mask')]),
            (b0_enhance, eddy, [
                ('outputnode.mask_file', 'in_mask')]),
        ])

    # Organize outputs for the rest of the pipeline
    split_eddy = pe.Node(SplitDWIs(b0_threshold=b0_threshold), name="split_eddy")
    workflow.connect([
        (eddy, split_eddy, [
            ('out_rotated_bvecs', 'bvec_file'),
            ('out_corrected', 'dwi_file')]),
        (dwi_merge, split_eddy, [('out_bval', 'bval_file')]),
        (dwi_merge, outputnode, [
            ('original_images', 'original_files')]),
        (split_eddy, outputnode, [
            ('dwi_files', 'dwi_files_to_transform'),
            ('bvec_files', 'bvec_files_to_transform')]),
        (eddy, outputnode, [
            (slice_quality, 'slice_quality'),
            ('out_cnr_maps', 'cnr_map'),
            (slice_quality, 'hmc_optimization_data')]),
        (eddy, spm_motion, [('out_parameter', 'eddy_motion')]),
        (spm_motion, outputnode, [('spm_motion_file', 'motion_params')])
    ])

    return workflow
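
The eddy-configuration fallback above (packaged defaults vs. a user-supplied JSON file), isolated as a helper; eddy_params.json ships with qsiprep, per the code:

import json
from pkg_resources import resource_filename as pkgr_fn

def load_eddy_config(eddy_config=None):
    # Fall back to qsiprep's packaged defaults when no file is given.
    cfg_file = eddy_config or pkgr_fn('qsiprep.data', 'eddy_params.json')
    with open(cfg_file) as f:
        return json.load(f)

# eddy = pe.Node(ExtendedEddy(**load_eddy_config()), name='eddy')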
Example #9
def init_brain_extraction_wf(name='brain_extraction_wf',
                             in_template='OASIS',
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             modality='T1',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Perform several clerical tasks (add padding, calculate the
         Laplacian of the inputs, initialize the affine) and run the core
         spatial normalization.
      2. Map the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask.
      4. Superstep 6: apply ATROPOS and massage its outputs.
      5. Superstep 7: use the results from 4 to refine the brain mask.


    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()


    **Parameters**

        in_template : str
            Name of the skull-stripping template ('OASIS', 'NKI', or a
            path): the brain template from which regions will be projected.
            Such an anatomical template can be created with, e.g., the
            LPBA40 data set and ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask, created from, e.g., the LPBA40 data set (for which brain
            masks are defined), warped to the anatomical template and
            averaged into a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        modality : str
            Sequence type of the first input image ('T1', 'T2', or 'FLAIR')
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows specifying a particular segmentation model, overriding
            the defaults based on ``modality``
        name : str, optional
            Workflow name (default: ``brain_extraction_wf``)


    **Inputs**

        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.


    **Outputs**

        bias_corrected
            The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
            correction.
        out_mask
            Calculated brain mask
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for each
            input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS


    """
    wf = pe.Workflow(name)

    if in_template in TEMPLATE_MAP:
        template_path = get_dataset(in_template)
    else:
        template_path = in_template

    mod = ('%sw' % modality[:2].upper()
           if modality.upper().startswith('T') else modality.upper())

    # Append template modality
    potential_targets = list(Path(template_path).glob('*_%s.nii.gz' % mod))
    if not potential_targets:
        raise ValueError('No %s template was found under "%s".' %
                         (mod, template_path))

    tpl_target_path = str(potential_targets[0])
    target_basename = '_'.join(tpl_target_path.split('_')[:-1])

    # Get probabilistic brain mask if available
    tpl_mask_path = '%s_class-brainmask_probtissue.nii.gz' % target_basename
    # Fall-back to a binary mask just in case
    if not os.path.exists(tpl_mask_path):
        tpl_mask_path = '%s_brainmask.nii.gz' % target_basename

    if not os.path.exists(tpl_mask_path):
        raise ValueError(
            'Probability map for the brain mask associated to this template '
            '"%s" not found.' % tpl_mask_path)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Try to find a registration mask, set if available
    tpl_regmask_path = '%s_label-BrainCerebellumRegistration_roi.nii.gz' % target_basename
    if os.path.exists(tpl_regmask_path):
        inputnode.inputs.in_mask = tpl_regmask_path

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bias_corrected', 'out_mask', 'bias_image', 'out_segm']),
                         name='outputnode')

    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(dimension=3,
                                              save_bias=True,
                                              copy_header=True,
                                              n_iterations=[50] * 4,
                                              convergence_threshold=1e-7,
                                              shrink_factor=4,
                                              bspline_fitting_distance=200),
                        n_procs=omp_nthreads,
                        name='inu_n4',
                        iterfield=['input_image'])

    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                       name='lap_tmpl')
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                         name='lap_target')
    mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name='mrg_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.2),
                          transform=('Affine', 0.1),
                          search_factor=(20, 0.12),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    if parseversion(Registration().version) > Version('2.2.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    norm = pe.Node(Registration(from_file=pkgr_fn(
        'niworkflows.data', 'antsBrainExtraction_%s.json' %
        normalization_quality)),
                   name='norm',
                   n_procs=omp_nthreads,
                   mem_gb=mem_gb)
    norm.inputs.float = use_float
    fixed_mask_trait = 'fixed_image_mask'
    if parseversion(Registration().version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = tpl_mask_path

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (inu_n4, mrg_target, [('output_image', 'in1')]),
        (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
        (lap_target, mrg_target, [('output_image', 'in2')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (mrg_tmpl, norm, [('out', 'fixed_image')]),
        (mrg_target, norm, [('out', 'moving_image')]),
        (norm, map_brainmask, [('reverse_invert_flags',
                                'invert_transform_flags'),
                               ('reverse_transforms', 'transforms')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, outputnode, [('output_image', 'out_mask')]),
        (apply_mask, outputnode, [('out_file', 'bias_corrected')]),
        (inu_n4, outputnode, [('bias_image', 'bias_image')]),
    ])

    if atropos_refine:
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model
            or list(ATROPOS_MODELS[modality].values()))

        wf.disconnect([
            (get_brainmask, outputnode, [('output_image', 'out_mask')]),
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (get_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask')]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')])
        ])
    return wf
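
The fixed_image_mask(s) switch above depends on the installed ANTs version. Isolated as a sketch, assuming parseversion and Version come from packaging.version (a common import pattern in these codebases):

from packaging.version import Version, parse as parseversion

def fixed_mask_trait(ants_version):
    # nipype's Registration pluralized the trait name as of ANTs 2.2.0,
    # per the gate in the example above.
    trait = 'fixed_image_mask'
    if parseversion(ants_version or '0.0.0') >= Version('2.2.0'):
        trait += 's'
    return trait

assert fixed_mask_trait('2.3.1') == 'fixed_image_masks'
assert fixed_mask_trait(None) == 'fixed_image_mask'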
Example #10
def init_enhance_and_skullstrip_bold_wf(name='enhance_and_skullstrip_bold_wf',
                                        pre_mask=False,
                                        omp_nthreads=1):
    """
    This workflow takes in a :abbr:`BOLD (blood-oxygen level-dependent)`
    :abbr:`fMRI (functional MRI)` average/summary (e.g., a reference image
    averaging non-steady-state timepoints), sharpens the histogram by
    applying the N4 algorithm to remove the
    :abbr:`INU (intensity non-uniformity)` bias field, and calculates a
    signal mask.

    Steps of this workflow are:

      1. Calculate a tentative mask by registering (9-parameters) to *fMRIPrep*'s
         :abbr:`EPI (echo-planar imaging)` -*boldref* template, which
         is in MNI space.
         The tentative mask is obtained by resampling the MNI template's
         brainmask into *boldref*-space.
      2. Binary dilation of the tentative mask with a sphere of 3mm diameter.
      3. Run ANTs' ``N4BiasFieldCorrection`` on the input
         :abbr:`BOLD (blood-oxygen level-dependent)` average, using the
         mask generated in 1) instead of the internal Otsu thresholding.
      4. Calculate a loose mask using FSL's ``bet``, followed by one
         binary-dilation iteration with a 6mm sphere as the structuring
         element.
      5. Mask the :abbr:`INU (intensity non-uniformity)`-corrected image
         with the mask calculated in 4), then use AFNI's ``3dUnifize``
         to *standardize* the T2* contrast distribution.
      6. Calculate a mask using AFNI's ``3dAutomask`` after the contrast
         enhancement of 5).
      7. Calculate a final mask as the intersection of 4) and 6).
      8. Apply final mask on the enhanced reference.

    Step 1 can be skipped if the ``pre_mask`` argument is set to ``True`` and
    a tentative mask is passed in to the workflow through the ``pre_mask``
    Nipype input.


    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.func.util import init_enhance_and_skullstrip_bold_wf
        wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=1)

    **Parameters**
        name : str
            Name of workflow (default: ``enhance_and_skullstrip_bold_wf``)
        pre_mask : bool
            Indicates whether the ``pre_mask`` input will be set (and thus, step 1
            should be skipped).
        omp_nthreads : int
            number of threads available to parallel nodes

    **Inputs**

        in_file
            BOLD image (single volume)
        pre_mask
            A tentative brain mask to initialize the workflow (requires the
            ``pre_mask`` parameter set to ``True``).


    **Outputs**

        bias_corrected_file
            the ``in_file`` after `N4BiasFieldCorrection`_
        skull_stripped_file
            the ``bias_corrected_file`` after skull-stripping
        mask_file
            mask of the skull-stripped input file
        out_report
            reportlet for the skull-stripping

    .. _N4BiasFieldCorrection: https://hdl.handle.net/10380/3053
    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['mask_file', 'skull_stripped_file', 'bias_corrected_file']),
                         name='outputnode')

    # Dilate pre_mask
    pre_dilate = pe.Node(fsl.DilateImage(operation='max',
                                         kernel_shape='sphere',
                                         kernel_size=3.0,
                                         internal_datatype='char'),
                         name='pre_mask_dilate')

    # Ensure mask's header matches reference's
    check_hdr = pe.Node(MatchHeader(),
                        name='check_hdr',
                        run_without_submitting=True)

    # Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(
        dimension=3, copy_header=True, bspline_fitting_distance=200),
                         name='n4_correct',
                         n_procs=1)

    # Create a generous BET mask out of the bias-corrected EPI
    skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
                                    name='skullstrip_first_pass')
    bet_dilate = pe.Node(fsl.DilateImage(operation='max',
                                         kernel_shape='sphere',
                                         kernel_size=6.0,
                                         internal_datatype='char'),
                         name='skullstrip_first_dilate')
    bet_mask = pe.Node(fsl.ApplyMask(), name='skullstrip_first_mask')

    # Use AFNI's unifize for T2 contrast & fix header
    unifize = pe.Node(
        afni.Unifize(
            t2=True,
            outputtype='NIFTI_GZ',
            # Default -clfrac is 0.1, 0.4 was too conservative
            # -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
            args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
            out_file="uni.nii.gz"),
        name='unifize')
    fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1)

    # Run AFNI's 3dAutomask to extract a refined brain mask
    skullstrip_second_pass = pe.Node(afni.Automask(dilate=1,
                                                   outputtype='NIFTI_GZ'),
                                     name='skullstrip_second_pass')
    fixhdr_skullstrip2 = pe.Node(CopyXForm(),
                                 name='fixhdr_skullstrip2',
                                 mem_gb=0.1)

    # Take intersection of both masks
    combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
                            name='combine_masks')

    # Compute masked brain
    apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')

    if not pre_mask:
        bold_template = get_template('MNI152NLin2009cAsym',
                                     resolution=2,
                                     desc='fMRIPrep',
                                     suffix='boldref')
        brain_mask = get_template('MNI152NLin2009cAsym',
                                  resolution=2,
                                  desc='brain',
                                  suffix='mask')

        # Initialize transforms with antsAI
        init_aff = pe.Node(AI(fixed_image=str(bold_template),
                              fixed_image_mask=str(brain_mask),
                              metric=('Mattes', 32, 'Regular', 0.2),
                              transform=('Affine', 0.1),
                              search_factor=(20, 0.12),
                              principal_axes=False,
                              convergence=(10, 1e-6, 10),
                              verbose=True),
                           name='init_aff',
                           n_procs=omp_nthreads)

        # Registration().version may be None
        if parseversion(Registration().version or '0.0.0') > Version('2.2.0'):
            init_aff.inputs.search_grid = (40, (0, 40, 40))

        # Set up spatial normalization
        norm = pe.Node(Registration(from_file=pkgr_fn(
            'fmriprep.data', 'epi_atlasbased_brainmask.json')),
                       name='norm',
                       n_procs=omp_nthreads)
        norm.inputs.fixed_image = str(bold_template)
        map_brainmask = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                float=True,
                                                input_image=str(brain_mask)),
                                name='map_brainmask')
        workflow.connect([
            (inputnode, init_aff, [('in_file', 'moving_image')]),
            (inputnode, map_brainmask, [('in_file', 'reference_image')]),
            (inputnode, norm, [('in_file', 'moving_image')]),
            (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
            (norm, map_brainmask, [('reverse_invert_flags',
                                    'invert_transform_flags'),
                                   ('reverse_transforms', 'transforms')]),
            (map_brainmask, pre_dilate, [('output_image', 'in_file')]),
        ])
    else:
        workflow.connect([
            (inputnode, pre_dilate, [('pre_mask', 'in_file')]),
        ])

    workflow.connect([
        (inputnode, check_hdr, [('in_file', 'reference')]),
        (pre_dilate, check_hdr, [('out_file', 'in_file')]),
        (check_hdr, n4_correct, [('out_file', 'mask_image')]),
        (inputnode, n4_correct, [('in_file', 'input_image')]),
        (inputnode, fixhdr_unifize, [('in_file', 'hdr_file')]),
        (inputnode, fixhdr_skullstrip2, [('in_file', 'hdr_file')]),
        (n4_correct, skullstrip_first_pass, [('output_image', 'in_file')]),
        (skullstrip_first_pass, bet_dilate, [('mask_file', 'in_file')]),
        (bet_dilate, bet_mask, [('out_file', 'mask_file')]),
        (skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
        (bet_mask, unifize, [('out_file', 'in_file')]),
        (unifize, fixhdr_unifize, [('out_file', 'in_file')]),
        (fixhdr_unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
        (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
        (skullstrip_second_pass, fixhdr_skullstrip2, [('out_file', 'in_file')]),
        (fixhdr_skullstrip2, combine_masks, [('out_file', 'operand_file')]),
        (fixhdr_unifize, apply_mask, [('out_file', 'in_file')]),
        (combine_masks, apply_mask, [('out_file', 'mask_file')]),
        (combine_masks, outputnode, [('out_file', 'mask_file')]),
        (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
        (n4_correct, outputnode, [('output_image', 'bias_corrected_file')]),
    ])

    return workflow
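
The two get_template calls fetch the fMRIPrep boldref template and its brain mask from TemplateFlow, returning local (cached) paths. Standalone, assuming the templateflow client is installed and can reach (or has cached) the archive:

from templateflow.api import get as get_template

boldref = get_template('MNI152NLin2009cAsym', resolution=2,
                       desc='fMRIPrep', suffix='boldref')
mask = get_template('MNI152NLin2009cAsym', resolution=2,
                    desc='brain', suffix='mask')
print(boldref, mask)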
Example #11
def init_infant_brain_extraction_wf(
    age_months=None,
    ants_affine_init=False,
    bspline_fitting_distance=200,
    sloppy=False,
    skull_strip_template="UNCInfant",
    template_specs=None,
    mem_gb=3.0,
    debug=False,
    name="infant_brain_extraction_wf",
    omp_nthreads=None,
):
    """
    Build an atlas-based brain extraction pipeline for infant T2w MRI data.

    Pros/Cons of available templates
    --------------------------------
    * MNIInfant
     + More cohorts available for finer-grain control
     + T1w/T2w images available
     - Template masks are poor

    * UNCInfant
     + Accurate masks
     - No T2w image available


    Parameters
    ----------
    age_months : :obj:`int`
        Age of this participant, in months.
    ants_affine_init : :obj:`bool`, optional
        Set-up a pre-initialization step with ``antsAI`` to account for mis-oriented images.
    bspline_fitting_distance : :obj:`float`
        Distance in mm between B-Spline control points for N4 INU estimation.
    sloppy : :obj:`bool`
        Run in *sloppy* mode.
    skull_strip_template : :obj:`str`
        A TemplateFlow ID indicating which template will be used as target for atlas-based
        segmentation.
    template_specs : :obj:`dict`
        Additional template specifications (e.g., resolution or cohort) to correctly select
        the adequate template instance.
    mem_gb : :obj:`float`
        Base memory fingerprint unit.
    name : :obj:`str`
        This particular workflow's unique name (Nipype requirement).
    omp_nthreads : :obj:`int`
        The number of threads for individual processes in this workflow.
    debug : :obj:`bool`
        Produce intermediate registration files

    Inputs
    ------
    in_t2w : :obj:`str`
        The unprocessed input T2w image.

    Outputs
    -------
    t2w_preproc : :obj:`str`
        The preprocessed T2w image (INU and clipping).
    t2w_brain : :obj:`str`
        The preprocessed, brain-extracted T2w image.
    out_mask : :obj:`str`
        The brainmask projected from the template into the T2w, after
        binarization.
    out_probmap : :obj:`str`
        The same as above, before binarization.

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection, ImageMath

    # niworkflows
    from niworkflows.interfaces.nibabel import ApplyMask, Binarize, IntensityClip
    from niworkflows.interfaces.fixes import (
        FixHeaderRegistration as Registration,
        FixHeaderApplyTransforms as ApplyTransforms,
    )
    from templateflow.api import get as get_template

    from ...interfaces.nibabel import BinaryDilation
    from ...utils.misc import cohort_by_months

    # handle template specifics
    template_specs = template_specs or {}
    if skull_strip_template == "MNIInfant":
        template_specs["resolution"] = 2 if sloppy else 1

    if not template_specs.get("cohort"):
        if age_months is None:
            raise KeyError(
                f"Age or cohort for {skull_strip_template} must be provided!")
        template_specs["cohort"] = cohort_by_months(skull_strip_template,
                                                    age_months)

    tpl_target_path = get_template(
        skull_strip_template,
        suffix="T1w",  # no T2w template
        desc=None,
        **template_specs,
    )
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{skull_strip_template}> with T1w suffix "
            "could not be found.")

    tpl_brainmask_path = get_template(skull_strip_template,
                                      label="brain",
                                      suffix="probseg",
                                      **template_specs) or get_template(
                                          skull_strip_template,
                                          desc="brain",
                                          suffix="mask",
                                          **template_specs)

    tpl_regmask_path = get_template(
        skull_strip_template,
        label="BrainCerebellumExtraction",
        suffix="mask",
        **template_specs,
    )

    # main workflow
    workflow = pe.Workflow(name)

    inputnode = pe.Node(niu.IdentityInterface(fields=["in_t2w"]),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["t2w_preproc", "t2w_brain", "out_mask", "out_probmap"]),
        name="outputnode",
    )

    # Ensure template comes with a range of intensities ANTs will like
    clip_tmpl = pe.Node(IntensityClip(p_max=99), name="clip_tmpl")
    clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # Generate laplacian registration targets
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_t2w = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                      name="lap_t2w")
    norm_lap_tmpl = pe.Node(niu.Function(function=_norm_lap),
                            name="norm_lap_tmpl")
    norm_lap_t2w = pe.Node(niu.Function(function=_norm_lap),
                           name="norm_lap_t2w")

    # Merge image nodes
    mrg_tmpl = pe.Node(niu.Merge(2),
                       name="mrg_tmpl",
                       run_without_submitting=True)
    mrg_t2w = pe.Node(niu.Merge(2),
                      name="mrg_t2w",
                      run_without_submitting=True)
    bin_regmask = pe.Node(Binarize(thresh_low=0.20), name="bin_regmask")
    bin_regmask.inputs.in_file = str(tpl_brainmask_path)
    refine_mask = pe.Node(BinaryDilation(radius=3, iterations=2),
                          name="refine_mask")

    fixed_masks = pe.Node(niu.Merge(4),
                          name="fixed_masks",
                          run_without_submitting=True)
    fixed_masks.inputs.in1 = "NULL"
    fixed_masks.inputs.in2 = "NULL"
    fixed_masks.inputs.in3 = "NULL" if not tpl_regmask_path else _pop(
        tpl_regmask_path)

    # Set up initial spatial normalization
    ants_params = "testing" if sloppy else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "nibabies.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = sloppy
    if debug:
        norm.inputs.args = "--write-interval-volumes 5"

    map_mask_t2w = pe.Node(
        ApplyTransforms(interpolation="Gaussian", float=True),
        name="map_mask_t2w",
        mem_gb=1,
    )

    # map template brainmask to t2w space
    map_mask_t2w.inputs.input_image = str(tpl_brainmask_path)

    thr_t2w_mask = pe.Node(Binarize(thresh_low=0.80), name="thr_t2w_mask")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            bspline_fitting_distance=bspline_fitting_distance,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_clip = pe.Node(IntensityClip(p_min=5.0, p_max=99.5),
                         name="final_clip")
    apply_mask = pe.Node(ApplyMask(), name="apply_mask")

    # fmt:off
    workflow.connect([
        (inputnode, final_n4, [("in_t2w", "input_image")]),
        # 1. Massage T2w
        (inputnode, mrg_t2w, [("in_t2w", "in1")]),
        (inputnode, lap_t2w, [("in_t2w", "op1")]),
        (inputnode, map_mask_t2w, [("in_t2w", "reference_image")]),
        (bin_regmask, refine_mask, [("out_file", "in_file")]),
        (refine_mask, fixed_masks, [("out_file", "in4")]),
        (lap_t2w, norm_lap_t2w, [("output_image", "in_file")]),
        (norm_lap_t2w, mrg_t2w, [("out", "in2")]),
        # 2. Prepare template
        (clip_tmpl, lap_tmpl, [("out_file", "op1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (clip_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # 3. Set normalization node inputs
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        (mrg_t2w, norm, [("out", "moving_image")]),
        (fixed_masks, norm, [("out", "fixed_image_masks")]),
        # 4. Map template brainmask into T2w space
        (norm, map_mask_t2w, [("reverse_transforms", "transforms"),
                              ("reverse_invert_flags",
                               "invert_transform_flags")]),
        (map_mask_t2w, thr_t2w_mask, [("output_image", "in_file")]),
        (thr_t2w_mask, apply_mask, [("out_mask", "in_mask")]),
        (final_n4, apply_mask, [("output_image", "in_file")]),
        # 5. Refine T2w INU correction with brain mask
        (map_mask_t2w, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_clip, [("output_image", "in_file")]),
        # 6. Outputs
        (final_clip, outputnode, [("out_file", "t2w_preproc")]),
        (map_mask_t2w, outputnode, [("output_image", "out_probmap")]),
        (thr_t2w_mask, outputnode, [("out_mask", "out_mask")]),
        (apply_mask, outputnode, [("out_file", "t2w_brain")]),
    ])
    # fmt:on

    if ants_affine_init:
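        # ants_affine_init may be True, "random", or "search"; the two string
        # values swap in random metric sampling or an alternative search grid
        # via the keyword overrides below.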
        from nipype.interfaces.ants.utils import AI

        ants_kwargs = dict(
            metric=("Mattes", 32, "Regular", 0.2),
            transform=("Affine", 0.1),
            search_factor=(20, 0.12),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        )

        if ants_affine_init == "random":
            ants_kwargs["metric"] = ("Mattes", 32, "Random", 0.2)
        if ants_affine_init == "search":
            ants_kwargs["search_grid"] = (20, (20, 40, 40))

        init_aff = pe.Node(
            AI(**ants_kwargs),
            name="init_aff",
            n_procs=omp_nthreads,
        )
        if tpl_regmask_path:
            init_aff.inputs.fixed_image_mask = _pop(tpl_regmask_path)

        # fmt:off
        workflow.connect([
            (clip_tmpl, init_aff, [("out_file", "fixed_image")]),
            (inputnode, init_aff, [("in_t2w", "moving_image")]),
            (init_aff, norm, [("output_transform", "initial_moving_transform")
                              ]),
        ])
        # fmt:on

    return workflow
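
A minimal usage sketch for the workflow above (a sketch only: it assumes nibabies,
niworkflows, and the TemplateFlow data are installed, and the input path is a
hypothetical placeholder):

wf = init_infant_brain_extraction_wf(age_months=6, sloppy=True, omp_nthreads=4)
wf.get_node("inputnode").inputs.in_t2w = "sub-01_T2w.nii.gz"  # hypothetical file
wf.base_dir = "work"  # where intermediate results are written
wf.run()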
Example #12
def init_coregistration_wf(
    *,
    bspline_fitting_distance=200,
    mem_gb=3.0,
    name="coregistration_wf",
    omp_nthreads=None,
    sloppy=False,
    debug=False,
):
    """
    Set up a T2w-to-T1w within-baby co-registration framework.

    See the ANTs' registration config file (under ``nibabies/data``) for further
    details.
    The main surprise in it is that, for some participants, accurate registration
    requires extra degrees of freedom (one affine level and one SyN level) to ensure
    that the T1w and T2w images align well.
    I attribute this requirement to the following potential reasons:

      * The T1w image and the T2w image were acquired in different sessions, far
        enough apart in time for growth to happen.
        Although this is possible in theory, the images we have tested do not appear
        to have been acquired in different sessions.
      * The skull is still so malleable that a change of position of the baby inside the
        coil made an actual change on the overall shape of their head.
      * Nonlinear distortions of the T1w and T2w images are, for some reason, more
        pronounced in babies than in adults.
        We would need to look into each sequence's details to confirm this.

    Parameters
    ----------
    bspline_fitting_distance : :obj:`float`
        Distance in mm between B-Spline control points for N4 INU estimation.
    mem_gb : :obj:`float`
        Base memory fingerprint unit.
    name : :obj:`str`
        This particular workflow's unique name (Nipype requirement).
    omp_nthreads : :obj:`int`
        The number of threads for individual processes in this workflow.
    sloppy : :obj:`bool`
        Run in *sloppy* mode.
    debug : :obj:`bool`
        Produce intermediate registration files


    Inputs
    ------
    in_t1w : :obj:`str`
        The unprocessed input T1w image.
    in_t2w_preproc : :obj:`str`
        The preprocessed input T2w image, from the brain extraction workflow.
    in_mask : :obj:`str`
        The brainmask, as obtained in T2w space.
    in_probmap : :obj:`str`
        The probabilistic brainmask, as obtained in T2w space.

    Outputs
    -------
    t1w_preproc : :obj:`str`
        The preprocessed T1w image (INU-corrected and intensity-clipped).
    t2w_preproc : :obj:`str`
        The preprocessed T2w image (INU-corrected and intensity-clipped), aligned to the T1w space.
    t1w_brain : :obj:`str`
        The preprocessed, brain-extracted T1w image.
    t1w_mask : :obj:`str`
        The binary brainmask projected from the T2w.
    t1w2t2w_xfm : :obj:`str`
        The T1w-to-T2w mapping.

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection
    from niworkflows.interfaces.fixes import (
        FixHeaderRegistration as Registration,
        FixHeaderApplyTransforms as ApplyTransforms,
    )
    from niworkflows.interfaces.nibabel import ApplyMask, Binarize
    from ...interfaces.nibabel import BinaryDilation

    workflow = pe.Workflow(name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["in_t1w", "in_t2w_preproc", "in_mask", "in_probmap"]),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "t1w_preproc",
            "t1w_brain",
            "t1w_mask",
            "t1w2t2w_xfm",
            "t2w_preproc",
        ]),
        name="outputnode",
    )

    fixed_masks_arg = pe.Node(niu.Merge(3),
                              name="fixed_masks_arg",
                              run_without_submitting=True)

    # Dilate t2w mask for easier t1->t2 registration
    reg_mask = pe.Node(BinaryDilation(radius=8, iterations=3), name="reg_mask")
    refine_mask = pe.Node(BinaryDilation(radius=8, iterations=1),
                          name="refine_mask")

    # Set up T2w -> T1w within-subject registration
    coreg = pe.Node(
        Registration(
            from_file=pkgr_fn("nibabies.data", "within_subject_t1t2.json")),
        name="coreg",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    coreg.inputs.float = sloppy
    if debug:
        coreg.inputs.args = "--write-interval-volumes 5"
        coreg.inputs.output_inverse_warped_image = sloppy
        coreg.inputs.output_warped_image = sloppy

    map_mask = pe.Node(ApplyTransforms(interpolation="Gaussian"),
                       name="map_mask",
                       mem_gb=1)
    map_t2w = pe.Node(ApplyTransforms(interpolation="BSpline"),
                      name="map_t2w",
                      mem_gb=1)
    thr_mask = pe.Node(Binarize(thresh_low=0.80), name="thr_mask")

    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            bspline_fitting_distance=bspline_fitting_distance,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    apply_mask = pe.Node(ApplyMask(), name="apply_mask")

    # fmt:off
    workflow.connect([
        (inputnode, map_mask, [("in_t1w", "reference_image")]),
        (inputnode, final_n4, [("in_t1w", "input_image")]),
        (inputnode, coreg, [("in_t1w", "moving_image"),
                            ("in_t2w_preproc", "fixed_image")]),
        (inputnode, map_mask, [("in_probmap", "input_image")]),
        (inputnode, reg_mask, [("in_mask", "in_file")]),
        (inputnode, refine_mask, [("in_mask", "in_file")]),
        (reg_mask, fixed_masks_arg, [("out_file", "in1")]),
        (reg_mask, fixed_masks_arg, [("out_file", "in2")]),
        (refine_mask, fixed_masks_arg, [("out_file", "in3")]),
        (inputnode, map_t2w, [("in_t1w", "reference_image")]),
        (inputnode, map_t2w, [("in_t2w_preproc", "input_image")]),
        (fixed_masks_arg, coreg, [("out", "fixed_image_masks")]),
        (coreg, map_mask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (coreg, map_t2w, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_mask, thr_mask, [("output_image", "in_file")]),
        (map_mask, final_n4, [("output_image", "weight_image")]),
        (final_n4, apply_mask, [("output_image", "in_file")]),
        (thr_mask, apply_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "t1w_preproc")]),
        (map_t2w, outputnode, [("output_image", "t2w_preproc")]),
        (thr_mask, outputnode, [("out_mask", "t1w_mask")]),
        (apply_mask, outputnode, [("out_file", "t1w_brain")]),
        (coreg, outputnode, [("forward_transforms", "t1w2t2w_xfm")]),
    ])
    # fmt:on
    return workflow
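
A minimal usage sketch (hypothetical file names; the T2w-derived inputs would
typically be wired in from the brain-extraction workflow of the previous example):

wf = init_coregistration_wf(omp_nthreads=4, sloppy=True)
inputnode = wf.get_node("inputnode")
inputnode.inputs.in_t1w = "sub-01_T1w.nii.gz"           # hypothetical
inputnode.inputs.in_t2w_preproc = "t2w_preproc.nii.gz"  # hypothetical
inputnode.inputs.in_mask = "t2w_mask.nii.gz"            # hypothetical
inputnode.inputs.in_probmap = "t2w_probmap.nii.gz"      # hypothetical
wf.base_dir = "work"
wf.run()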
Example #13
def init_infant_brain_extraction_wf(
    age_months=None,
    ants_affine_init=False,
    bspline_fitting_distance=200,
    sloppy=False,
    skull_strip_template="UNCInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T1w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
    use_t2w=False,
):
    """
    Build an atlas-based brain extraction pipeline for infant T1w/T2w MRI data.

    Pros/Cons of available templates
    --------------------------------
    * MNIInfant
     + More cohorts available for finer-grained control
     + T1w/T2w images available
     - Template masks are poor

    * UNCInfant
     + Accurate masks
     - No T2w image available


    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set up a pre-initialization step with ``antsAI`` to account for mis-oriented images.

    """
    # handle template specifics
    template_specs = template_specs or {}
    if skull_strip_template == 'MNIInfant':
        template_specs['resolution'] = 2 if sloppy else 1

    if not template_specs.get('cohort'):
        if age_months is None:
            raise KeyError(
                f"Age or cohort for {skull_strip_template} must be provided!")
        template_specs['cohort'] = cohort_by_months(skull_strip_template,
                                                    age_months)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=["t1w", "t2w", "in_mask"]),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(
        fields=["t1w_corrected", "t1w_corrected_brain", "t1w_mask"]),
                         name="outputnode")

    if not use_t2w:
        raise RuntimeError("A T2w image is currently required.")

    tpl_target_path = get_template(
        skull_strip_template,
        suffix='T1w',  # no T2w template
        desc=None,
        **template_specs,
    )
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{skull_strip_template}> with MR scheme "
            f"'{'T1w' or mri_scheme}' could not be found.")

    tpl_brainmask_path = get_template(skull_strip_template,
                                      label="brain",
                                      suffix="probseg",
                                      **template_specs) or get_template(
                                          skull_strip_template,
                                          desc="brain",
                                          suffix="mask",
                                          **template_specs)

    tpl_regmask_path = get_template(skull_strip_template,
                                    label="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **template_specs)

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_t1w = val_tmpl.clone("val_t1w")
    val_t2w = val_tmpl.clone("val_t2w")
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter),
                         name="gauss_tmpl")

    # Spatial normalization step
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_t1w = lap_tmpl.clone("lap_t1w")
    lap_t2w = lap_tmpl.clone("lap_t2w")

    # Merge image nodes
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
    mrg_t2w = mrg_tmpl.clone("mrg_t2w")
    mrg_t1w = mrg_tmpl.clone("mrg_t1w")

    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc),
                            name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_t1w = norm_lap_tmpl.clone('norm_lap_t1w')
    norm_lap_t2w = norm_lap_t1w.clone('norm_lap_t2w')

    # Set up initial spatial normalization
    ants_params = "testing" if sloppy else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float
    if tpl_regmask_path:
        norm.inputs.fixed_image_masks = tpl_regmask_path

    # Set up T2w -> T1w within-subject registration
    norm_subj = pe.Node(
        Registration(
            from_file=pkgr_fn("nibabies.data", "within_subject_t1t2.json")),
        name="norm_subj",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm_subj.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)
    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(
        niu.IdentityInterface(fields=["hires_target", "smooth_target"]),
        name="buffernode")

    # truncate target intensity for N4 correction
    clip_tmpl = pe.Node(niu.Function(function=_trunc), name="clip_tmpl")
    clip_t2w = clip_tmpl.clone('clip_t2w')
    clip_t1w = clip_tmpl.clone('clip_t1w')

    # INU correction of the t1w
    init_t2w_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - sloppy),
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_t2w_n4",
    )
    init_t1w_n4 = init_t2w_n4.clone("init_t1w_n4")

    clip_t2w_inu = pe.Node(niu.Function(function=_trunc), name="clip_t2w_inu")
    clip_t1w_inu = clip_t2w_inu.clone("clip_t1w_inu")

    map_mask_t2w = pe.Node(ApplyTransforms(interpolation="Gaussian",
                                           float=True),
                           name="map_mask_t2w",
                           mem_gb=1)
    map_mask_t1w = map_mask_t2w.clone("map_mask_t1w")

    # map template brainmask to t2w space
    map_mask_t2w.inputs.input_image = str(tpl_brainmask_path)

    thr_t2w_mask = pe.Node(Binarize(thresh_low=0.80), name="thr_t2w_mask")
    thr_t1w_mask = thr_t2w_mask.clone('thr_t1w_mask')

    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            bspline_fitting_distance=bspline_fitting_distance,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                     name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        # 1. massage template
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, lap_tmpl, [("out", "op1")]),
        (clip_tmpl, mrg_tmpl, [("out", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # 2. massage T2w
        (inputnode, val_t2w, [('t2w', 'in_file')]),
        (val_t2w, clip_t2w, [('out_file', 'in_file')]),
        (clip_t2w, init_t2w_n4, [('out', 'input_image')]),
        (init_t2w_n4, clip_t2w_inu, [("output_image", "in_file")]),
        (clip_t2w_inu, lap_t2w, [('out', 'op1')]),
        (clip_t2w_inu, mrg_t2w, [('out', 'in1')]),
        (lap_t2w, norm_lap_t2w, [("output_image", "in_file")]),
        (norm_lap_t2w, mrg_t2w, [("out", "in2")]),
        # 3. normalize T2w to target template (UNC)
        (mrg_t2w, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        # 4. map template brainmask to T2w space
        (inputnode, map_mask_t2w, [('t2w', 'reference_image')]),
        (norm, map_mask_t2w, [("reverse_transforms", "transforms"),
                              ("reverse_invert_flags",
                               "invert_transform_flags")]),
        (map_mask_t2w, thr_t2w_mask, [("output_image", "in_file")]),
        # 5. massage T1w
        (inputnode, val_t1w, [("t1w", "in_file")]),
        (val_t1w, clip_t1w, [("out_file", "in_file")]),
        (clip_t1w, init_t1w_n4, [("out", "input_image")]),
        (init_t1w_n4, clip_t1w_inu, [("output_image", "in_file")]),
        (clip_t1w_inu, lap_t1w, [('out', 'op1')]),
        (clip_t1w_inu, mrg_t1w, [('out', 'in1')]),
        (lap_t1w, norm_lap_t1w, [("output_image", "in_file")]),
        (norm_lap_t1w, mrg_t1w, [("out", "in2")]),
        # 6. normalize within subject T1w to T2w
        (mrg_t1w, norm_subj, [("out", "moving_image")]),
        (mrg_t2w, norm_subj, [("out", "fixed_image")]),
        (thr_t2w_mask, norm_subj, [("out_mask", "fixed_image_mask")]),
        # 7. map mask to T1w space
        (thr_t2w_mask, map_mask_t1w, [("out_mask", "input_image")]),
        (inputnode, map_mask_t1w, [("t1w", "reference_image")]),
        (norm_subj, map_mask_t1w, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_mask_t1w, thr_t1w_mask, [("output_image", "in_file")]),
        # 8. T1w INU
        (inputnode, final_n4, [("t1w", "input_image")]),
        (inputnode, bspline_grid, [("t1w", "in_file")]),
        (bspline_grid, final_n4, [("out", "args")]),
        (map_mask_t1w, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_t1w_mask, final_mask, [("out_mask", "in_mask")]),
        # 9. Outputs
        (final_n4, outputnode, [("output_image", "t1w_corrected")]),
        (thr_t1w_mask, outputnode, [("out_mask", "t1w_mask")]),
        (final_mask, outputnode, [("out_file", "t1w_corrected_brain")]),
    ])

    if ants_affine_init:
        ants_kwargs = dict(
            metric=("Mattes", 32, "Regular", 0.2),
            transform=("Affine", 0.1),
            search_factor=(20, 0.12),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        )

        if ants_affine_init == 'random':
            ants_kwargs['metric'] = ("Mattes", 32, "Random", 0.2)
        if ants_affine_init == 'search':
            ants_kwargs['search_grid'] = (20, (20, 40, 40))

        init_aff = pe.Node(
            AI(**ants_kwargs),
            name="init_aff",
            n_procs=omp_nthreads,
        )
        if tpl_regmask_path:
            init_aff.inputs.fixed_image_mask = _pop(tpl_regmask_path)

        wf.connect([
            (clip_tmpl, init_aff, [("out", "fixed_image")]),
            (clip_t2w_inu, init_aff, [("out", "moving_image")]),
            (init_aff, norm, [("output_transform", "initial_moving_transform")
                              ]),
        ])

    return wf
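
A minimal usage sketch, assuming the module-level names this snippet relies on
(pe, niu, get_template, cohort_by_months, ValidateImage, AI, and friends) are
importable and the file names are placeholders; note that use_t2w=True is
mandatory in this variant:

wf = init_infant_brain_extraction_wf(age_months=6, use_t2w=True, omp_nthreads=4)
inputnode = wf.get_node("inputnode")
inputnode.inputs.t1w = "sub-01_T1w.nii.gz"  # hypothetical
inputnode.inputs.t2w = "sub-01_T2w.nii.gz"  # hypothetical
wf.base_dir = "work"
wf.run()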
Example #14
def init_enhance_and_skullstrip_bold_wf(
        name='enhance_and_skullstrip_bold_wf',
        pre_mask=False,
        omp_nthreads=1):
    """
    This workflow takes in a :abbr:`BOLD (blood-oxygen level-dependent)`
    :abbr:`fMRI (functional MRI)` average/summary (e.g. a reference image
    averaging non-steady-state timepoints), sharpens the histogram by
    applying the N4 algorithm to remove the
    :abbr:`INU (intensity non-uniformity)` bias field, and calculates a
    signal mask.

    Steps of this workflow are:

      1. Calculate a tentative mask by registering (9-parameters) to *fMRIPrep*'s
         :abbr:`EPI (echo-planar imaging)` -*boldref* template, which
         is in MNI space.
         The tentative mask is obtained by resampling the MNI template's
         brainmask into *boldref*-space.
      2. Binary dilation of the tentative mask with a sphere of 3mm diameter.
      3. Run ANTs' ``N4BiasFieldCorrection`` on the input
         :abbr:`BOLD (blood-oxygen level-dependent)` average, using the
         dilated mask generated in 2) instead of the internal Otsu thresholding.
      4. Calculate a loose mask using FSL's ``bet``, with one mathematical morphology
         dilation of one iteration and a sphere of 6mm as structuring element.
      5. Mask the :abbr:`INU (intensity non-uniformity)`-corrected image
         with the latest mask calculated in 4), then use AFNI's ``3dUnifize``
         to *standardize* the T2* contrast distribution.
      6. Calculate a mask using AFNI's ``3dAutomask`` after the contrast
         enhancement of 5).
      7. Calculate a final mask as the intersection of 4) and 6).
      8. Apply final mask on the enhanced reference.

    Step 1 can be skipped if the ``pre_mask`` argument is set to ``True`` and
    a tentative mask is passed in to the workflow through the ``pre_mask``
    Nipype input.


    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.util import init_enhance_and_skullstrip_bold_wf
        wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=1)

    **Parameters**
        name : str
            Name of workflow (default: ``enhance_and_skullstrip_bold_wf``)
        pre_mask : bool
            Indicates whether the ``pre_mask`` input will be set (and thus, step 1
            should be skipped).
        omp_nthreads : int
            number of threads available to parallel nodes

    **Inputs**

        in_file
            BOLD image (single volume)
        pre_mask
            A tentative brain mask to initialize the workflow (requires the
            ``pre_mask`` parameter set to ``True``).


    **Outputs**

        bias_corrected_file
            the ``in_file`` after `N4BiasFieldCorrection`_
        skull_stripped_file
            the ``bias_corrected_file`` after skull-stripping
        mask_file
            mask of the skull-stripped input file
        out_report
            reportlet for the skull-stripping

    .. _N4BiasFieldCorrection: https://hdl.handle.net/10380/3053
    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'mask_file', 'skull_stripped_file', 'bias_corrected_file']), name='outputnode')

    # Dilate pre_mask
    pre_dilate = pe.Node(fsl.DilateImage(
        operation='max', kernel_shape='sphere', kernel_size=3.0,
        internal_datatype='char'), name='pre_mask_dilate')

    # Ensure mask's header matches reference's
    check_hdr = pe.Node(MatchHeader(), name='check_hdr',
                        run_without_submitting=True)

    # Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
                         name='n4_correct', n_procs=1)

    # Create a generous BET mask out of the bias-corrected EPI
    skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
                                    name='skullstrip_first_pass')
    bet_dilate = pe.Node(fsl.DilateImage(
        operation='max', kernel_shape='sphere', kernel_size=6.0,
        internal_datatype='char'), name='skullstrip_first_dilate')
    bet_mask = pe.Node(fsl.ApplyMask(), name='skullstrip_first_mask')

    # Use AFNI's unifize for T2 contrast & fix header
    unifize = pe.Node(afni.Unifize(
        t2=True, outputtype='NIFTI_GZ',
        # Default -clfrac is 0.1, 0.4 was too conservative
        # -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
        args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
        out_file="uni.nii.gz"), name='unifize')
    fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1)

    # Run AFNI's 3dAutomask to extract a refined brain mask
    skullstrip_second_pass = pe.Node(afni.Automask(dilate=1,
                                                   outputtype='NIFTI_GZ'),
                                     name='skullstrip_second_pass')
    fixhdr_skullstrip2 = pe.Node(CopyXForm(), name='fixhdr_skullstrip2', mem_gb=0.1)

    # Take intersection of both masks
    combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
                            name='combine_masks')

    # Compute masked brain
    apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')

    if not pre_mask:
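        # No tentative mask was provided: estimate one by registering the input
        # to fMRIPrep's boldref template (antsAI initialization + ANTs
        # registration) and projecting the MNI brainmask back into boldref space.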
        bold_template = get_template('fMRIPrep') / 'tpl-fMRIPrep_space-MNI_res-02_boldref.nii.gz'
        brain_mask = get_template('MNI152NLin2009cAsym') / \
            'tpl-MNI152NLin2009cAsym_space-MNI_res-02_brainmask.nii.gz'

        # Initialize transforms with antsAI
        init_aff = pe.Node(AI(
            fixed_image=str(bold_template),
            fixed_image_mask=str(brain_mask),
            metric=('Mattes', 32, 'Regular', 0.2),
            transform=('Affine', 0.1),
            search_factor=(20, 0.12),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            verbose=True),
            name='init_aff',
            n_procs=omp_nthreads)

        # Registration().version may be None
        if parseversion(Registration().version or '0.0.0') > Version('2.2.0'):
            init_aff.inputs.search_grid = (40, (0, 40, 40))

        # Set up spatial normalization
        norm = pe.Node(Registration(
            from_file=pkgr_fn(
                'fmriprep.data',
                'epi_atlasbased_brainmask.json')),
            name='norm',
            n_procs=omp_nthreads)
        norm.inputs.fixed_image = str(bold_template)
        map_brainmask = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', float=True, input_image=str(brain_mask)),
            name='map_brainmask'
        )
        workflow.connect([
            (inputnode, init_aff, [('in_file', 'moving_image')]),
            (inputnode, map_brainmask, [('in_file', 'reference_image')]),
            (inputnode, norm, [('in_file', 'moving_image')]),
            (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
            (norm, map_brainmask, [
                ('reverse_invert_flags', 'invert_transform_flags'),
                ('reverse_transforms', 'transforms')]),
            (map_brainmask, pre_dilate, [('output_image', 'in_file')]),
        ])
    else:
        workflow.connect([
            (inputnode, pre_dilate, [('pre_mask', 'in_file')]),
        ])

    workflow.connect([
        (inputnode, check_hdr, [('in_file', 'reference')]),
        (pre_dilate, check_hdr, [('out_file', 'in_file')]),
        (check_hdr, n4_correct, [('out_file', 'mask_image')]),
        (inputnode, n4_correct, [('in_file', 'input_image')]),
        (inputnode, fixhdr_unifize, [('in_file', 'hdr_file')]),
        (inputnode, fixhdr_skullstrip2, [('in_file', 'hdr_file')]),
        (n4_correct, skullstrip_first_pass, [('output_image', 'in_file')]),
        (skullstrip_first_pass, bet_dilate, [('mask_file', 'in_file')]),
        (bet_dilate, bet_mask, [('out_file', 'mask_file')]),
        (skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
        (bet_mask, unifize, [('out_file', 'in_file')]),
        (unifize, fixhdr_unifize, [('out_file', 'in_file')]),
        (fixhdr_unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
        (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
        (skullstrip_second_pass, fixhdr_skullstrip2, [('out_file', 'in_file')]),
        (fixhdr_skullstrip2, combine_masks, [('out_file', 'operand_file')]),
        (fixhdr_unifize, apply_mask, [('out_file', 'in_file')]),
        (combine_masks, apply_mask, [('out_file', 'mask_file')]),
        (combine_masks, outputnode, [('out_file', 'mask_file')]),
        (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
        (n4_correct, outputnode, [('output_image', 'bias_corrected_file')]),
    ])

    return workflow
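
A minimal usage sketch (the boldref path is a hypothetical placeholder; leaving
pre_mask at its default of False exercises the template-driven mask estimation
branch, and the module-level imports of this snippet are assumed to resolve):

wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=4)
wf.get_node("inputnode").inputs.in_file = "boldref.nii.gz"  # hypothetical single volume
wf.base_dir = "work"
wf.run()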
Example #15
def init_infant_brain_extraction_wf(
    ants_affine_init=False,
    bspline_fitting_distance=200,
    debug=False,
    in_template="MNIInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T2w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
):
    """
    Build an atlas-based brain extraction pipeline for infant T2w MRI data.

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set up a pre-initialization step with ``antsAI`` to account for mis-oriented images.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(
        fields=["out_corrected", "out_brain", "out_mask"]),
                         name="outputnode")

    template_specs = template_specs or {}
    # Find a suitable target template in TemplateFlow
    tpl_target_path = get_template(in_template,
                                   suffix=mri_scheme,
                                   **template_specs)
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{in_template}> with MR scheme '{mri_scheme}'"
            " could not be found.")

    # tpl_brainmask_path = get_template(
    #     in_template, desc="brain", suffix="probseg", **template_specs
    # )
    # if not tpl_brainmask_path:

    # ignore probseg for the time being
    tpl_brainmask_path = get_template(in_template,
                                      desc="brain",
                                      suffix="mask",
                                      **template_specs)

    tpl_regmask_path = get_template(in_template,
                                    desc="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **template_specs)

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    val_target = pe.Node(ValidateImage(), name='val_target')

    # Resample both target and template to a controlled, isotropic resolution
    res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS),
                       name="res_tmpl")  # testing
    res_target = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS),
                         name="res_target")  # testing
    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter),
                         name="gauss_tmpl")

    # Spatial normalization step
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_target = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                         name="lap_target")

    # Merge image nodes
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")

    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc),
                            name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_target = pe.Node(niu.Function(function=_trunc),
                              name="norm_lap_target")
    norm_lap_target.inputs.dtype = "float32"
    norm_lap_target.inputs.out_max = 1.0
    norm_lap_target.inputs.percentile = (0.01, 99.99)
    norm_lap_target.inputs.clip_max = None

    # Set up initial spatial normalization
    ants_params = "testing" if debug else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)
    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(
        niu.IdentityInterface(fields=["hires_target", "smooth_target"]),
        name="buffernode")

    # truncate target intensity for N4 correction
    clip_target = pe.Node(
        niu.Function(function=_trunc),
        name="clip_target",
    )
    clip_tmpl = pe.Node(
        niu.Function(function=_trunc),
        name="clip_tmpl",
    )
    #clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # INU correction of the target image
    init_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - debug),
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_n4",
    )
    clip_inu = pe.Node(
        niu.Function(function=_trunc),
        name="clip_inu",
    )
    gauss_target = pe.Node(niu.Function(function=_gauss_filter),
                           name="gauss_target")
    wf.connect([
        # truncation, resampling, and initial N4
        (inputnode, val_target, [(("in_files", _pop), "in_file")]),
        # (inputnode, res_target, [(("in_files", _pop), "in_file")]),
        (val_target, res_target, [("out_file", "in_file")]),
        (res_target, clip_target, [("out_file", "in_file")]),
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, res_tmpl, [("out", "in_file")]),
        (clip_target, init_n4, [("out", "input_image")]),
        (init_n4, clip_inu, [("output_image", "in_file")]),
        (clip_inu, gauss_target, [("out", "in_file")]),
        (clip_inu, buffernode, [("out", "hires_target")]),
        (gauss_target, buffernode, [("out", "smooth_target")]),
        (res_tmpl, gauss_tmpl, [("out_file", "in_file")]),
        # (clip_tmpl, gauss_tmpl, [("out", "in_file")]),
    ])

    # Graft a template registration-mask if present
    if tpl_regmask_path:
        hires_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="NearestNeighbor",
            float=True),
                             name="hires_mask",
                             mem_gb=1)
        wf.connect([
            (res_tmpl, hires_mask, [("out_file", "reference_image")]),
        ])

    map_brainmask = pe.Node(ApplyTransforms(interpolation="Gaussian",
                                            float=True),
                            name="map_brainmask",
                            mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_brainmask_path)

    thr_brainmask = pe.Node(Binarize(thresh_low=0.80), name="thr_brainmask")
    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                     name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (inputnode, final_n4, [(("in_files", _pop), "input_image")]),
        (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
        # (bspline_grid, final_n4, [("out", "bspline_fitting_distance")]),
        (bspline_grid, final_n4, [("out", "args")]),
        # merge laplacian and original images
        (buffernode, lap_target, [("smooth_target", "op1")]),
        (buffernode, mrg_target, [("hires_target", "in1")]),
        (lap_target, norm_lap_target, [("output_image", "in_file")]),
        (norm_lap_target, mrg_target, [("out", "in2")]),
        # Template massaging
        (res_tmpl, lap_tmpl, [("out_file", "op1")]),
        (res_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # spatial normalization
        (mrg_target, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        (norm, map_brainmask, [("reverse_transforms", "transforms"),
                               ("reverse_invert_flags",
                                "invert_transform_flags")]),
        (map_brainmask, thr_brainmask, [("output_image", "in_file")]),
        # take a second pass of N4
        (map_brainmask, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_brainmask, final_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "out_corrected")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (final_mask, outputnode, [("out_file", "out_brain")]),
    ])

    # wf.disconnect([
    #     (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
    #     (copy_xform, outputnode, [('out_mask', 'out_mask')]),
    # ])

    # wf.connect([
    #     (init_n4, atropos_wf, [
    #         ('output_image', 'inputnode.in_files')]),  # intensity image
    #     (thr_brainmask, atropos_wf, [
    #         ('out_mask', 'inputnode.in_mask')]),
    #     (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
    #     (sel_wm, final_n4, [('out', 'weight_image')]),
    # ])
    # wf.connect([
    # (atropos_wf, outputnode, [
    #     ('outputnode.out_mask', 'out_mask'),
    #     ('outputnode.out_segm', 'out_segm'),
    #     ('outputnode.out_tpms', 'out_tpms')]),
    # ])

    if tpl_regmask_path:
        wf.connect([
            (hires_mask, norm, [("output_image", "fixed_image_masks")]),
            # (hires_mask, atropos_wf, [
            #     ("output_image", "inputnode.in_mask_dilated")]),
        ])

    if interim_checkpoints:
        final_apply = pe.Node(ApplyTransforms(interpolation="BSpline",
                                              float=True),
                              name="final_apply",
                              mem_gb=1)
        final_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="final_report.svg"),
                               name="final_report")
        wf.connect([
            (inputnode, final_apply, [(("in_files", _pop), "reference_image")
                                      ]),
            (res_tmpl, final_apply, [("out_file", "input_image")]),
            (norm, final_apply, [("reverse_transforms", "transforms"),
                                 ("reverse_invert_flags",
                                  "invert_transform_flags")]),
            (final_apply, final_report, [("output_image", "before")]),
            (outputnode, final_report, [("out_corrected", "after"),
                                        ("out_mask", "wm_seg")]),
        ])

    if output_dir:
        from nipype.interfaces.io import DataSink
        ds_final_inu = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_inu")
        ds_final_msk = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_msk")
        ds_report = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                            name="ds_report")

        wf.connect([
            (outputnode, ds_final_inu,
             [("out_corrected", f"{output_dir.name}.@inu_corrected")]),
            (outputnode, ds_final_msk, [("out_mask",
                                         f"{output_dir.name}.@brainmask")]),
            (final_report, ds_report, [("out_report",
                                        f"{output_dir.name}.@report")]),
        ])

    if not ants_affine_init:
        return wf

    # Initialize transforms with antsAI
    lowres_tmpl = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS),
                          name="lowres_tmpl")
    lowres_target = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS),
                            name="lowres_target")

    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )
    wf.connect([
        (gauss_tmpl, lowres_tmpl, [("out", "in_file")]),
        (lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
        (gauss_target, lowres_target, [("out", "in_file")]),
        (lowres_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
    ])

    if tpl_regmask_path:
        lowres_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="MultiLabel",
            float=True),
                              name="lowres_mask",
                              mem_gb=1)
        wf.connect([
            (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
            (lowres_mask, init_aff, [("output_image", "fixed_image_mask")]),
        ])

    if interim_checkpoints:
        init_apply = pe.Node(ApplyTransforms(interpolation="BSpline",
                                             float=True),
                             name="init_apply",
                             mem_gb=1)
        init_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="init_report.svg"),
                              name="init_report")
        wf.connect([
            (lowres_target, init_apply, [("out_file", "input_image")]),
            (res_tmpl, init_apply, [("out_file", "reference_image")]),
            (init_aff, init_apply, [("output_transform", "transforms")]),
            (init_apply, init_report, [("output_image", "after")]),
            (res_tmpl, init_report, [("out_file", "before")]),
        ])

        if output_dir:
            ds_init_report = pe.Node(
                DataSink(base_directory=str(output_dir.parent)),
                name="ds_init_report")
            wf.connect(init_report, "out_report", ds_init_report,
                       f"{output_dir.name}.@init_report")
    return wf
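
A minimal usage sketch (hypothetical file name; this variant takes a list of
files and internally selects the first one via _pop, and the cohort spec below
is an assumption since the MNIInfant template is organized by cohort):

wf = init_infant_brain_extraction_wf(
    ants_affine_init=True,
    omp_nthreads=4,
    template_specs={"cohort": 1},  # assumed cohort selection
)
wf.get_node("inputnode").inputs.in_files = ["sub-01_T2w.nii.gz"]  # hypothetical
wf.base_dir = "work"
wf.run()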
Example #16
def init_rodent_brain_extraction_wf(
    ants_affine_init=False,
    factor=20,
    arc=0.12,
    step=4,
    grid=(0, 4, 4),
    debug=False,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T2w",
    name="rodent_brain_extraction_wf",
    omp_nthreads=None,
    output_dir=None,
    template_id="Fischer344",
    template_specs=None,
    use_float=True,
):
    """
    Build an atlas-based brain extraction pipeline for rodent T1w and T2w MRI data.

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set up a pre-initialization step with ``antsAI`` to account for mis-oriented images.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["out_corrected", "out_brain", "out_mask"]),
        name="outputnode",
    )

    template_specs = template_specs or {}
    if template_id == "WHS" and "resolution" not in template_specs:
        template_specs["resolution"] = 2

    # Find a suitable target template in TemplateFlow
    tpl_target_path = get_template(
        template_id,
        suffix=mri_scheme,
        **template_specs,
    )
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{template_id}> with MR scheme '{mri_scheme}'"
            " could not be found.")

    tpl_brainmask_path = get_template(
        template_id,
        atlas=None,
        hemi=None,
        desc="brain",
        suffix="probseg",
        **template_specs,
    ) or get_template(
        template_id,
        atlas=None,
        hemi=None,
        desc="brain",
        suffix="mask",
        **template_specs,
    )

    tpl_regmask_path = get_template(
        template_id,
        atlas=None,
        desc="BrainCerebellumExtraction",
        suffix="mask",
        **template_specs,
    )

    denoise = pe.Node(DenoiseImage(dimension=3, copy_header=True),
                      name="denoise",
                      n_procs=omp_nthreads)

    # Resample template to a controlled, isotropic resolution
    res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS, smooth=True),
                       name="res_tmpl")

    # Create Laplacian images
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", copy_header=True),
                       name="lap_tmpl")
    tmpl_sigma = pe.Node(niu.Function(function=_lap_sigma),
                         name="tmpl_sigma",
                         run_without_submitting=True)
    norm_lap_tmpl = pe.Node(niu.Function(function=_norm_lap),
                            name="norm_lap_tmpl")

    lap_target = pe.Node(ImageMath(operation="Laplacian", copy_header=True),
                         name="lap_target")
    target_sigma = pe.Node(niu.Function(function=_lap_sigma),
                           name="target_sigma",
                           run_without_submitting=True)
    norm_lap_target = pe.Node(niu.Function(function=_norm_lap),
                              name="norm_lap_target")

    # Set up initial spatial normalization
    ants_params = "testing" if debug else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "nirodents",
            f"data/artsBrainExtraction_{ants_params}_{mri_scheme}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)

    # truncate target intensity for N4 correction
    clip_target = pe.Node(IntensityClip(p_min=15, p_max=99.9),
                          name="clip_target")

    # truncate template intensity to match target
    clip_tmpl = pe.Node(IntensityClip(p_min=5, p_max=98), name="clip_tmpl")
    clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # set INU bspline grid based on voxel size
    bspline_grid = pe.Node(niu.Function(function=_bspline_grid),
                           name="bspline_grid")

    # INU correction of the target image
    init_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - debug),
            convergence_threshold=1e-7,
            shrink_factor=4,
            rescale_intensities=True,
        ),
        n_procs=omp_nthreads,
        name="init_n4",
    )
    clip_inu = pe.Node(IntensityClip(p_min=1, p_max=99.8), name="clip_inu")

    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(niu.IdentityInterface(fields=["hires_target"]),
                         name="buffernode")

    # Merge image nodes
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")

    # fmt: off
    wf.connect([
        # Target image massaging
        (inputnode, denoise, [(("in_files", _pop), "input_image")]),
        (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
        (bspline_grid, init_n4, [("out", "args")]),
        (denoise, clip_target, [("output_image", "in_file")]),
        (clip_target, init_n4, [("out_file", "input_image")]),
        (init_n4, clip_inu, [("output_image", "in_file")]),
        (clip_inu, target_sigma, [("out_file", "in_file")]),
        (clip_inu, buffernode, [("out_file", "hires_target")]),
        (buffernode, lap_target, [("hires_target", "op1")]),
        (target_sigma, lap_target, [("out", "op2")]),
        (lap_target, norm_lap_target, [("output_image", "in_file")]),
        (buffernode, mrg_target, [("hires_target", "in1")]),
        (norm_lap_target, mrg_target, [("out", "in2")]),
        # Template massaging
        (clip_tmpl, res_tmpl, [("out_file", "in_file")]),
        (res_tmpl, tmpl_sigma, [("out_file", "in_file")]),
        (res_tmpl, lap_tmpl, [("out_file", "op1")]),
        (tmpl_sigma, lap_tmpl, [("out", "op2")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (res_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # Setup inputs to spatial normalization
        (mrg_target, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
    ])
    # fmt: on

    # Graft a template registration-mask if present
    if tpl_regmask_path:
        hires_mask = pe.Node(
            ApplyTransforms(
                input_image=_pop(tpl_regmask_path),
                transforms="identity",
                interpolation="Gaussian",
                float=True,
            ),
            name="hires_mask",
            mem_gb=1,
        )

        # fmt: off
        wf.connect([
            (res_tmpl, hires_mask, [("out_file", "reference_image")]),
            (hires_mask, norm, [("output_image", "fixed_image_masks")]),
        ])
        # fmt: on

    # Finally project brain mask and refine INU correction
    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian", float=True),
        name="map_brainmask",
        mem_gb=1,
    )
    map_brainmask.inputs.input_image = str(tpl_brainmask_path)

    thr_brainmask = pe.Node(Binarize(thresh_low=0.50), name="thr_brainmask")

    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 4,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    # fmt: off
    wf.connect([
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (bspline_grid, final_n4, [("out", "args")]),
        (denoise, final_n4, [("output_image", "input_image")]),
        # Project template's brainmask into subject space
        (norm, map_brainmask, [("reverse_transforms", "transforms"),
                               ("reverse_invert_flags",
                                "invert_transform_flags")]),
        (map_brainmask, thr_brainmask, [("output_image", "in_file")]),
        # Take a second pass of N4
        (map_brainmask, final_n4, [("output_image", "mask_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_brainmask, final_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "out_corrected")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (final_mask, outputnode, [("out_file", "out_brain")]),
    ])
    # fmt: on

    if interim_checkpoints:
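        # Generate a before/after reportlet comparing the resampled template
        # with the INU-corrected target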
        final_apply = pe.Node(
            ApplyTransforms(interpolation="BSpline", float=True),
            name="final_apply",
            mem_gb=1,
        )
        final_report = pe.Node(
            SimpleBeforeAfter(after_label="target",
                              before_label=f"tpl-{template_id}"),
            name="final_report",
        )
        # fmt: off
        wf.connect([
            (inputnode, final_apply, [(("in_files", _pop), "reference_image")
                                      ]),
            (res_tmpl, final_apply, [("out_file", "input_image")]),
            (norm, final_apply, [("reverse_transforms", "transforms"),
                                 ("reverse_invert_flags",
                                  "invert_transform_flags")]),
            (final_apply, final_report, [("output_image", "before")]),
            (outputnode, final_report, [("out_corrected", "after"),
                                        ("out_mask", "wm_seg")]),
        ])
        # fmt: on

    if ants_affine_init:
        # Initialize transforms with antsAI
        lowres_tmpl = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True),
                              name="lowres_tmpl")
        lowres_trgt = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True),
                              name="lowres_trgt")

        init_aff = pe.Node(
            AI(
                convergence=(100, 1e-6, 10),
                metric=("Mattes", 32, "Random", 0.25),
                principal_axes=False,
                search_factor=(factor, arc),
                search_grid=(step, grid),
                transform=("Affine", 0.1),
                verbose=True,
            ),
            name="init_aff",
            n_procs=omp_nthreads,
        )
        # fmt: off
        wf.connect([
            (clip_inu, lowres_trgt, [("out_file", "in_file")]),
            (lowres_trgt, init_aff, [("out_file", "moving_image")]),
            (clip_tmpl, lowres_tmpl, [("out_file", "in_file")]),
            (lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
            (init_aff, norm, [("output_transform", "initial_moving_transform")
                              ]),
        ])
        # fmt: on

        if tpl_regmask_path:
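            # Resample the registration mask onto the low-resolution grid used by antsAI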
            lowres_mask = pe.Node(
                ApplyTransforms(
                    input_image=_pop(tpl_regmask_path),
                    transforms="identity",
                    interpolation="MultiLabel",
                ),
                name="lowres_mask",
                mem_gb=1,
            )
            # fmt: off
            wf.connect([
                (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
                (lowres_mask, init_aff, [("output_image", "fixed_image_mask")
                                         ]),
            ])
            # fmt: on

        if interim_checkpoints:
            init_apply = pe.Node(
                ApplyTransforms(interpolation="BSpline",
                                invert_transform_flags=[True]),
                name="init_apply",
                mem_gb=1,
            )
            init_mask = pe.Node(
                ApplyTransforms(interpolation="Gaussian",
                                invert_transform_flags=[True]),
                name="init_mask",
                mem_gb=1,
            )
            init_mask.inputs.input_image = str(tpl_brainmask_path)
            init_report = pe.Node(
                SimpleBeforeAfter(
                    out_report="init_report.svg",
                    before_label="target",
                    after_label="template",
                ),
                name="init_report",
            )
            # fmt: off
            wf.connect([
                (lowres_trgt, init_apply, [("out_file", "reference_image")]),
                (lowres_tmpl, init_apply, [("out_file", "input_image")]),
                (init_aff, init_apply, [("output_transform", "transforms")]),
                (lowres_trgt, init_report, [("out_file", "before")]),
                (init_apply, init_report, [("output_image", "after")]),
                (lowres_trgt, init_mask, [("out_file", "reference_image")]),
                (init_aff, init_mask, [("output_transform", "transforms")]),
                (init_mask, init_report, [("output_image", "wm_seg")]),
            ])
            # fmt: on
    else:
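        # Without antsAI, initialize the registration by aligning the images'
        # intensity centers of mass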
        norm.inputs.initial_moving_transform_com = 1

    if output_dir:
        ds_final_inu = pe.Node(DerivativesDataSink(
            base_directory=str(output_dir),
            desc="preproc",
            compress=True,
        ),
                               name="ds_final_inu",
                               run_without_submitting=True)
        ds_final_msk = pe.Node(DerivativesDataSink(
            base_directory=str(output_dir),
            desc="brain",
            suffix="mask",
            compress=True,
        ),
                               name="ds_final_msk",
                               run_without_submitting=True)

        # fmt: off
        wf.connect([
            (inputnode, ds_final_inu, [("in_files", "source_file")]),
            (inputnode, ds_final_msk, [("in_files", "source_file")]),
            (outputnode, ds_final_inu, [("out_corrected", "in_file")]),
            (outputnode, ds_final_msk, [("out_mask", "in_file")]),
        ])
        # fmt: on

        if interim_checkpoints:
            ds_report = pe.Node(DerivativesDataSink(
                base_directory=str(output_dir),
                desc="brain",
                suffix="mask",
                datatype="figures"),
                                name="ds_report",
                                run_without_submitting=True)
            # fmt: off
            wf.connect([
                (inputnode, ds_report, [("in_files", "source_file")]),
                (final_report, ds_report, [("out_report", "in_file")]),
            ])
            # fmt: on

        if ants_affine_init and interim_checkpoints:
            ds_report_init = pe.Node(DerivativesDataSink(
                base_directory=str(output_dir),
                desc="init",
                suffix="mask",
                datatype="figures"),
                                     name="ds_report_init",
                                     run_without_submitting=True)
            # fmt: off
            wf.connect([
                (inputnode, ds_report_init, [("in_files", "source_file")]),
                (init_report, ds_report_init, [("out_report", "in_file")]),
            ])
            # fmt: on

    return wf
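The ``_bspline_grid`` helper wired into ``init_n4`` above is not included in this excerpt. A minimal sketch of what such a helper could look like, assuming it derives N4's ``-b`` mesh argument from the image's field of view (the 200 mm element size is an assumption, mirroring the ``bspline_fitting_distance`` defaults in the examples below):

def _bspline_grid(in_file, fitting_distance=200.0):
    # Hypothetical re-implementation: count how many ~200 mm b-spline mesh
    # elements fit along each axis of the field of view and format them as
    # an extra command-line argument for N4BiasFieldCorrection.
    # Imports live inside the body because niu.Function executes it standalone.
    import nibabel as nb
    import numpy as np

    img = nb.load(in_file)
    zooms = img.header.get_zooms()[:3]
    extent = (np.array(img.shape[:3]) - 1) * zooms
    mesh = np.ceil(extent / fitting_distance).astype(int)
    return f"-b [{'x'.join(str(m) for m in mesh)}]"

Returning a formatted string works because the node's ``out`` is wired into ``init_n4``'s free-form ``args`` input.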
Example #17
def init_enhance_and_skullstrip_bold_wf(
    brainmask_thresh=0.5,
    name="enhance_and_skullstrip_bold_wf",
    omp_nthreads=1,
    pre_mask=False,
):
    """
    Enhance and run brain extraction on a BOLD EPI image.

    This workflow takes in a :abbr:`BOLD (blood-oxygen level-dependent)`
    :abbr:`fMRI (functional MRI)` average/summary (e.g., a reference image
    averaging non-steady-state timepoints), and sharpens the histogram
    with the application of the N4 algorithm for removing the
    :abbr:`INU (intensity non-uniformity)` bias field and calculates a signal
    mask.

    Steps of this workflow are:

      1. Calculate a tentative mask by registering (9-parameters) to *fMRIPrep*'s
         :abbr:`EPI (echo-planar imaging)` *boldref* template, which
         is in MNI space.
         The tentative mask is obtained by resampling the MNI template's
         brainmask into *boldref*-space.
      2. Binary dilation of the tentative mask with a sphere of 3mm diameter.
      3. Run ANTs' ``N4BiasFieldCorrection`` on the input
         :abbr:`BOLD (blood-oxygen level-dependent)` average, using the
         dilated mask generated in 2) instead of the internal Otsu thresholding.
      4. Calculate a loose mask using FSL's ``bet``, with one iteration of
         binary morphological dilation using a sphere of 6mm as structuring element.
      5. Mask the :abbr:`INU (intensity non-uniformity)`-corrected image
         with the dilated mask from 4), then use AFNI's ``3dUnifize``
         to *standardize* the T2* contrast distribution.
      6. Calculate a mask using AFNI's ``3dAutomask`` after the contrast
         enhancement of 5).
      7. Calculate a final mask as the intersection of 4) and 6).
      8. Apply the final mask on the enhanced reference.

    Step 1 can be skipped if the ``pre_mask`` argument is set to ``True`` and
    a tentative mask is passed to the workflow through the ``pre_mask``
    Nipype input.


    Workflow graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.func.util import init_enhance_and_skullstrip_bold_wf
            wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=1)

    .. _N4BiasFieldCorrection: https://hdl.handle.net/10380/3053

    Parameters
    ----------
    brainmask_thresh: :obj:`float`
        Lower threshold for the probabilistic brainmask to obtain
        the final binary mask (default: 0.5).
    name : str
        Name of workflow (default: ``enhance_and_skullstrip_bold_wf``)
    omp_nthreads : int
        number of threads available to parallel nodes
    pre_mask : bool
        Indicates whether the ``pre_mask`` input will be set (and thus, step 1
        should be skipped).

    Inputs
    ------
    in_file : str
        BOLD image (single volume)
    pre_mask : str
        A tentative brain mask to initialize the workflow (requires the
        ``pre_mask`` parameter to be set ``True``).


    Outputs
    -------
    bias_corrected_file : str
        the ``in_file`` after `N4BiasFieldCorrection`_
    skull_stripped_file : str
        the ``bias_corrected_file`` after skull-stripping
    mask_file : str
        mask of the skull-stripped input file
    out_report : str
        reportlet for the skull-stripping

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_file", "pre_mask"]),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["mask_file", "skull_stripped_file", "bias_corrected_file"
                    ]),
        name="outputnode",
    )

    # Dilate pre_mask
    pre_dilate = pe.Node(
        fsl.DilateImage(
            operation="max",
            kernel_shape="sphere",
            kernel_size=3.0,
            internal_datatype="char",
        ),
        name="pre_mask_dilate",
    )

    # Ensure mask's header matches reference's
    check_hdr = pe.Node(MatchHeader(),
                        name="check_hdr",
                        run_without_submitting=True)

    # Run N4 normally; force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(
        N4BiasFieldCorrection(dimension=3,
                              copy_header=True,
                              shrink_factor=2,
                              bspline_fitting_distance=200),
        name="n4_correct",
        n_procs=1,
    )
    n4_correct.inputs.rescale_intensities = True

    # Create a generous BET mask out of the bias-corrected EPI
    skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
                                    name="skullstrip_first_pass")
    bet_dilate = pe.Node(
        fsl.DilateImage(
            operation="max",
            kernel_shape="sphere",
            kernel_size=6.0,
            internal_datatype="char",
        ),
        name="skullstrip_first_dilate",
    )
    bet_mask = pe.Node(fsl.ApplyMask(), name="skullstrip_first_mask")

    # Use AFNI's unifize for T2 contrast & fix header
    unifize = pe.Node(
        afni.Unifize(
            t2=True,
            outputtype="NIFTI_GZ",
            # Default -clfrac is 0.1, 0.4 was too conservative
            # -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
            args="-clfrac 0.2 -rbt 18.3 65.0 90.0",
            out_file="uni.nii.gz",
        ),
        name="unifize",
    )
    fixhdr_unifize = pe.Node(CopyXForm(), name="fixhdr_unifize", mem_gb=0.1)

    # Run AFNI's 3dAutomask to extract a refined brain mask
    skullstrip_second_pass = pe.Node(afni.Automask(dilate=1,
                                                   outputtype="NIFTI_GZ"),
                                     name="skullstrip_second_pass")
    fixhdr_skullstrip2 = pe.Node(CopyXForm(),
                                 name="fixhdr_skullstrip2",
                                 mem_gb=0.1)

    # Take intersection of both masks
    combine_masks = pe.Node(fsl.BinaryMaths(operation="mul"),
                            name="combine_masks")

    # Compute masked brain
    apply_mask = pe.Node(fsl.ApplyMask(), name="apply_mask")

    if not pre_mask:
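        # No tentative mask was provided: run step 1 (atlas-based mask estimation)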
        from ..interfaces.nibabel import Binarize

        bold_template = get_template("MNI152NLin2009cAsym",
                                     resolution=2,
                                     desc="fMRIPrep",
                                     suffix="boldref")
        brain_mask = get_template("MNI152NLin2009cAsym",
                                  resolution=2,
                                  desc="brain",
                                  suffix="mask")

        # Initialize transforms with antsAI
        init_aff = pe.Node(
            AI(
                fixed_image=str(bold_template),
                fixed_image_mask=str(brain_mask),
                metric=("Mattes", 32, "Regular", 0.2),
                transform=("Affine", 0.1),
                search_factor=(20, 0.12),
                principal_axes=False,
                convergence=(10, 1e-6, 10),
                verbose=True,
            ),
            name="init_aff",
            n_procs=omp_nthreads,
        )

        # Registration().version may be None
        if parseversion(Registration().version or "0.0.0") > Version("2.2.0"):
            init_aff.inputs.search_grid = (40, (0, 40, 40))

        # Set up spatial normalization
        norm = pe.Node(
            Registration(from_file=pkgr_fn("niworkflows.data",
                                           "epi_atlasbased_brainmask.json")),
            name="norm",
            n_procs=omp_nthreads,
        )
        norm.inputs.fixed_image = str(bold_template)
        map_brainmask = pe.Node(
            ApplyTransforms(
                interpolation="BSpline",
                float=True,
                # Use the higher resolution and probseg for numerical stability in rounding
                input_image=str(
                    get_template(
                        "MNI152NLin2009cAsym",
                        resolution=1,
                        label="brain",
                        suffix="probseg",
                    )),
            ),
            name="map_brainmask",
        )
        binarize_mask = pe.Node(Binarize(thresh_low=brainmask_thresh),
                                name="binarize_mask")

        # fmt: off
        workflow.connect([
            (inputnode, init_aff, [("in_file", "moving_image")]),
            (inputnode, map_brainmask, [("in_file", "reference_image")]),
            (inputnode, norm, [("in_file", "moving_image")]),
            (init_aff, norm, [("output_transform", "initial_moving_transform")
                              ]),
            (norm, map_brainmask, [
                ("reverse_invert_flags", "invert_transform_flags"),
                ("reverse_transforms", "transforms"),
            ]),
            (map_brainmask, binarize_mask, [("output_image", "in_file")]),
            (binarize_mask, pre_dilate, [("out_mask", "in_file")]),
        ])
        # fmt: on
    else:
        # fmt: off
        workflow.connect([
            (inputnode, pre_dilate, [("pre_mask", "in_file")]),
        ])
        # fmt: on

    # fmt: off
    workflow.connect([
        (inputnode, check_hdr, [("in_file", "reference")]),
        (pre_dilate, check_hdr, [("out_file", "in_file")]),
        (check_hdr, n4_correct, [("out_file", "mask_image")]),
        (inputnode, n4_correct, [("in_file", "input_image")]),
        (inputnode, fixhdr_unifize, [("in_file", "hdr_file")]),
        (inputnode, fixhdr_skullstrip2, [("in_file", "hdr_file")]),
        (n4_correct, skullstrip_first_pass, [("output_image", "in_file")]),
        (skullstrip_first_pass, bet_dilate, [("mask_file", "in_file")]),
        (bet_dilate, bet_mask, [("out_file", "mask_file")]),
        (skullstrip_first_pass, bet_mask, [("out_file", "in_file")]),
        (bet_mask, unifize, [("out_file", "in_file")]),
        (unifize, fixhdr_unifize, [("out_file", "in_file")]),
        (fixhdr_unifize, skullstrip_second_pass, [("out_file", "in_file")]),
        (skullstrip_first_pass, combine_masks, [("mask_file", "in_file")]),
        (skullstrip_second_pass, fixhdr_skullstrip2, [("out_file", "in_file")
                                                      ]),
        (fixhdr_skullstrip2, combine_masks, [("out_file", "operand_file")]),
        (fixhdr_unifize, apply_mask, [("out_file", "in_file")]),
        (combine_masks, apply_mask, [("out_file", "mask_file")]),
        (combine_masks, outputnode, [("out_file", "mask_file")]),
        (apply_mask, outputnode, [("out_file", "skull_stripped_file")]),
        (n4_correct, outputnode, [("output_image", "bias_corrected_file")]),
    ])
    # fmt: on

    return workflow
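Step 7 of the docstring (the intersection of the BET and ``3dAutomask`` masks) is delegated to ``fslmaths`` via ``fsl.BinaryMaths(operation="mul")``. A standalone sketch of the same operation with nibabel/numpy, assuming two binary masks on the same grid (``intersect_masks`` is a hypothetical helper, not part of the workflow):

import nibabel as nb
import numpy as np

def intersect_masks(mask_a_path, mask_b_path, out_path="combined_mask.nii.gz"):
    # Voxelwise AND of two binary masks sharing the same grid/affine
    a = nb.load(mask_a_path)
    b = nb.load(mask_b_path)
    both = (np.asanyarray(a.dataobj) > 0) & (np.asanyarray(b.dataobj) > 0)
    nb.Nifti1Image(both.astype("uint8"), a.affine).to_filename(out_path)
    return out_path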
Example #18
def init_brain_extraction_wf(name='brain_extraction_wf',
                             in_template='OASIS30ANTs',
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             bids_suffix='T1w',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None,
                             use_laplacian=True,
                             bspline_fitting_distance=200):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask


    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()


    **Parameters**

        in_template : str
            Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or
            path).
            The brain template from which regions will be projected
            Anatomical template created using e.g. LPBA40 data set with
            ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask created using e.g. LPBA40 data set which have brain masks
            defined, and warped to anatomical template and
            averaged resulting in a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        bids_suffix : str
            Sequence type of the first input image. For a list of acceptable values
            see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows to specify a particular segmentation model, overwriting
            the defaults based on ``bids_suffix``
        use_laplacian : bool
            Enables or disables alignment of the Laplacian as an additional
            criterion for image registration quality (default: True)
        bspline_fitting_distance : float
            The size of the b-spline mesh grid elements, in mm (default: 200)
        name : str, optional
            Workflow name (default: antsBrainExtraction)


    **Inputs**

        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.


    **Outputs**


        out_file
            Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
        out_mask
            Calculated brain mask
        bias_corrected
            The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
            correction, before skull-stripping.
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for each
            input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS


    """
    from templateflow.api import get as get_template
    wf = pe.Workflow(name)

    tpl_target_path = str(
        get_template(in_template, desc=None, resolution=1, suffix=bids_suffix))

    # Get probabilistic brain mask if available
    tpl_mask_path = get_template(
        in_template, resolution=1, label='brain', suffix='probseg') or \
        get_template(in_template, resolution=1, desc='brain', suffix='mask')

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Try to find a registration mask, set if available
    tpl_regmask_path = get_template(
        in_template, resolution=1,
        desc='BrainCerebellumExtraction', suffix='mask')
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm']),
        name='outputnode')

    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity', op2='0.01 0.999 256'),
                       name='truncate_images', iterfield=['op1'])
    inu_n4 = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3, save_bias=False, copy_header=True,
            n_iterations=[50] * 4, convergence_threshold=1e-7, shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance),
        n_procs=omp_nthreads, name='inu_n4', iterfield=['input_image'])

    res_tmpl = pe.Node(ResampleImageBySpacing(
        out_spacing=(4, 4, 4), apply_smoothing=True), name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(
        out_spacing=(4, 4, 4), apply_smoothing=True), name='res_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(
        metric=('Mattes', 32, 'Regular', 0.25),
        transform=('Affine', 0.1),
        search_factor=(15, 0.1),
        principal_axes=False,
        convergence=(10, 1e-6, 10),
        verbose=True),
        name='init_aff',
        n_procs=omp_nthreads)

    # Registration().version may be None
    if parseversion(Registration().version or "0.0.0") > Version('2.2.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    settings_file = 'antsBrainExtraction_%s.json' if use_laplacian \
        else 'antsBrainExtractionNoLaplacian_%s.json'
    norm = pe.Node(Registration(from_file=pkgr_fn(
        'niworkflows.data', settings_file % normalization_quality)),
        name='norm',
        n_procs=omp_nthreads,
        mem_gb=mem_gb)
    norm.inputs.float = use_float
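    # ANTs >= 2.2.0 accepts a list of fixed-image masks (one per registration
    # stage), hence the pluralized trait name below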
    fixed_mask_trait = 'fixed_image_mask'
    if parseversion(Registration().version or "0.0.0") >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(
        ApplyTransforms(interpolation='Gaussian', float=True),
        name='map_brainmask',
        mem_gb=1
    )
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(ThresholdImage(
        dimension=3, th_low=0.5, th_high=1.0, inside_value=1,
        outside_value=0), name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3, save_bias=True, copy_header=True,
            n_iterations=[50] * 5, convergence_threshold=1e-7, shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance),
        n_procs=omp_nthreads, name='inu_n4_final', iterfield=['input_image'])

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, inu_n4_final, [('in_files', 'input_image')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [
            (('output_image', _pop), 'input_image')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (norm, map_brainmask, [
            ('reverse_transforms', 'transforms'),
            ('reverse_invert_flags', 'invert_transform_flags')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4_final, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, outputnode, [('output_image', 'out_mask')]),
        (apply_mask, outputnode, [('out_file', 'out_file')]),
        (inu_n4_final, outputnode, [('output_image', 'bias_corrected'),
                                    ('bias_image', 'bias_image')]),
    ])

    if use_laplacian:
        lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                           name='lap_tmpl')
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                             name='lap_target')
        mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
        wf.connect([
            (inu_n4, lap_target, [
                (('output_image', _pop), 'op1')]),
            (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
            (inu_n4, mrg_target, [('output_image', 'in1')]),
            (lap_target, mrg_target, [('output_image', 'in2')]),
            (mrg_tmpl, norm, [('out', 'fixed_image')]),
            (mrg_target, norm, [('out', 'moving_image')]),
        ])
    else:
        norm.inputs.fixed_image = tpl_target_path
        wf.connect([
            (inu_n4, norm, [
                (('output_image', _pop), 'moving_image')]),
        ])

    if atropos_refine:
        atropos_model = atropos_model or list(ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
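        # The WM class is the last label in the Atropos model; select its TPM
        # to weight the final N4 bias-field estimation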
        sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1), name='sel_wm',
                         run_without_submitting=True)

        wf.disconnect([
            (get_brainmask, outputnode, [('output_image', 'out_mask')]),
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [
                ('output_image', 'inputnode.in_files')]),
            (thr_brainmask, atropos_wf, [
                ('output_image', 'inputnode.in_mask')]),
            (get_brainmask, atropos_wf, [
                ('output_image', 'inputnode.in_mask_dilated')]),
            (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
            (sel_wm, inu_n4_final, [('out', 'weight_image')]),
            (atropos_wf, outputnode, [
                ('outputnode.out_mask', 'out_mask')]),
            (atropos_wf, apply_mask, [
                ('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [
                ('outputnode.out_segm', 'out_segm'),
                ('outputnode.out_tpms', 'out_tpms')])
        ])
    return wf
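Several connections above route list-valued fields through a ``_pop`` helper that is outside this excerpt. A sketch of the conventional implementation, assuming it simply unwraps the first element of a list:

def _pop(inlist):
    # Unwrap single images from list-valued inputs: only the first
    # (typically T1w) image in ``in_files`` is used to register the priors
    if isinstance(inlist, (list, tuple)):
        return inlist[0]
    return inlist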
Example #19
def init_fsl_hmc_wf(scan_groups,
                    source_file,
                    b0_threshold,
                    impute_slice_threshold,
                    fmap_demean,
                    fmap_bspline,
                    eddy_config,
                    mem_gb=3,
                    omp_nthreads=1,
                    dwi_metadata=None,
                    slice_quality='outlier_n_sqr_stdev_map',
                    sloppy=False,
                    name="fsl_hmc_wf"):
    """
    This workflow controls the dwi preprocessing stages using FSL tools.

    I couldn't get this to work reliably unless everything was oriented in LAS+ before going to
    TOPUP and eddy. For this reason, if TOPUP is going to be used (for an epi fieldmap or an
    RPE series) or there is no fieldmap correction, operations occurring before eddy are done in
    LAS+. The field coefficients are applied during eddy's run, and the corrected series comes out.
    This is finally converted to LPS+ and sent to the rest of the pipeline.

    If a GRE fieldmap is available, the correction is applied to eddy's outputs after they have
    been converted back to LPS+.

    Finally, if SyN is chosen, it is applied to the LPS+ converted, eddy-resampled data.


    **Parameters**

        scan_groups: dict
            dictionary with fieldmaps and warp space information for the dwis
        impute_slice_threshold: float
            threshold for a slice to be replaced with imputed values. Overrides the
            parameter in ``eddy_config`` if set to a number > 0.
        eddy_config: str
            Path to a JSON file containing settings for the call to ``eddy``.


    **Inputs**

        dwi_file: str
            DWI series
        bvec_file: str
            bvec file
        bval_file: str
            bval file
        b0_indices: list
            Indexes into ``dwi_files`` that correspond to b=0 volumes
        b0_images: list
            List of single b=0 volumes
        original_files: list
            List of the files from which each DWI volume came.
        t1_brain: str
            Skull stripped T1w image
        t1_mask: str
            mask for t1_brain

    """

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'dwi_file', 'bvec_file', 'bval_file', 'b0_indices', 'b0_images',
        'original_files', 't1_brain', 't1_mask', 't1_seg',
        't1_2_mni_reverse_transform'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        "b0_template", "b0_template_mask", "pre_sdc_template", "bval_files",
        "hmc_optimization_data", "sdc_method", 'slice_quality',
        'motion_params', "cnr_map", "bvec_files_to_transform",
        "dwi_files_to_transform", "b0_indices", "to_dwi_ref_affines",
        "to_dwi_ref_warps", "rpe_b0_info"
    ]),
                         name='outputnode')

    workflow = Workflow(name=name)
    gather_inputs = pe.Node(GatherEddyInputs(b0_threshold=b0_threshold),
                            name="gather_inputs")
    if eddy_config is None:
        # load from the defaults
        eddy_cfg_file = pkgr_fn('qsiprep.data', 'eddy_params.json')
    else:
        eddy_cfg_file = eddy_config

    with open(eddy_cfg_file, "r") as f:
        eddy_args = json.load(f)
    enhance_pre_sdc = pe.Node(EnhanceB0(), name='enhance_pre_sdc')

    # Run in parallel if possible
    LOGGER.info("Using %d threads in eddy", omp_nthreads)
    eddy_args["num_threads"] = omp_nthreads
    pre_eddy_b0_ref_wf = init_dwi_reference_wf(register_t1=True,
                                               source_file=source_file,
                                               name='pre_eddy_b0_ref_wf',
                                               gen_report=False)
    eddy = pe.Node(ExtendedEddy(**eddy_args), name="eddy")
    spm_motion = pe.Node(Eddy2SPMMotion(), name="spm_motion")

    # Convert eddy outputs back to LPS+, split them
    back_to_lps = pe.Node(ConformDwi(orientation="LPS"), name='back_to_lps')
    cnr_lps = pe.Node(ConformDwi(orientation="LPS"), name='cnr_lps')
    split_eddy_lps = pe.Node(SplitDWIs(b0_threshold=b0_threshold,
                                       deoblique_bvecs=True),
                             name="split_eddy_lps")

    # Convert the b=0 template from pre_eddy_b0_ref to LPS+
    b0_ref_to_lps = pe.Node(ConformDwi(orientation="LPS"),
                            name='b0_ref_to_lps')
    b0_ref_mask_to_lps = pe.Node(ConformDwi(orientation="LPS"),
                                 name='b0_ref_mask_to_lps')
    b0_ref_brain_to_lps = pe.Node(ConformDwi(orientation="LPS"),
                                  name='b0_ref_brain_to_lps')

    workflow.connect([
        # These images and gradients should be in LAS+
        (inputnode, gather_inputs, [('dwi_file', 'dwi_file'),
                                    ('bval_file', 'bval_file'),
                                    ('bvec_file', 'bvec_file'),
                                    ('original_files', 'original_files')]),
        (inputnode, pre_eddy_b0_ref_wf, [('t1_brain', 'inputnode.t1_brain'),
                                         ('t1_mask', 'inputnode.t1_mask'),
                                         ('t1_seg', 'inputnode.t1_seg')]),
        # Convert distorted ref to LPS+
        (pre_eddy_b0_ref_wf, b0_ref_to_lps, [('outputnode.ref_image',
                                              'dwi_file')]),
        (pre_eddy_b0_ref_wf, b0_ref_mask_to_lps, [('outputnode.dwi_mask',
                                                   'dwi_file')]),
        (pre_eddy_b0_ref_wf, b0_ref_brain_to_lps,
         [('outputnode.ref_image_brain', 'dwi_file')]),
        (gather_inputs, eddy, [('eddy_indices', 'in_index'),
                               ('eddy_acqp', 'in_acqp')]),
        (inputnode, eddy, [('dwi_file', 'in_file'), ('bval_file', 'in_bval'),
                           ('bvec_file', 'in_bvec')]),
        (pre_eddy_b0_ref_wf, eddy, [('outputnode.dwi_mask', 'in_mask')]),
        (gather_inputs, outputnode, [('forward_transforms',
                                      'to_dwi_ref_affines')]),
        (gather_inputs, enhance_pre_sdc, [('pre_topup_image', 'b0_file')]),
        (enhance_pre_sdc, outputnode, [('enhanced_file', 'pre_sdc_template')]),
        (eddy, back_to_lps, [('out_corrected', 'dwi_file'),
                             ('out_rotated_bvecs', 'bvec_file')]),
        (inputnode, back_to_lps, [('bval_file', 'bval_file')]),
        (back_to_lps, split_eddy_lps, [('dwi_file', 'dwi_file'),
                                       ('bval_file', 'bval_file'),
                                       ('bvec_file', 'bvec_file')]),
        (inputnode, outputnode, [('original_files', 'original_files')]),
        (split_eddy_lps, outputnode, [('dwi_files', 'dwi_files_to_transform'),
                                      ('bvec_files',
                                       'bvec_files_to_transform'),
                                      ('bval_files', 'bval_files'),
                                      ('b0_indices', 'b0_indices')]),
        (eddy, cnr_lps, [('out_cnr_maps', 'dwi_file')]),
        (cnr_lps, outputnode, [('dwi_file', 'cnr_map')]),
        (eddy, outputnode, [(slice_quality, 'slice_quality'),
                            (slice_quality, 'hmc_optimization_data')]),
        (eddy, spm_motion, [('out_parameter', 'eddy_motion')]),
        (b0_ref_mask_to_lps, outputnode, [('dwi_file', 'b0_template_mask')]),
        (spm_motion, outputnode, [('spm_motion_file', 'motion_params')])
    ])

    # Fieldmap correction to be done in LAS+: TOPUP for rpe series or epi fieldmap
    # If a topupref is provided, use it for TOPUP
    fieldmap_type = scan_groups['fieldmap_info']['suffix'] or ''
    workflow.__desc__ = boilerplate_from_eddy_config(eddy_args, fieldmap_type)
    if fieldmap_type in ('epi', 'rpe_series'):
        # If there are EPI fieldmaps in fmaps/, make sure they get to TOPUP. It will always use
        # b=0 images from the DWI series regardless
        gather_inputs.inputs.topup_requested = True
        if 'epi' in scan_groups['fieldmap_info']:
            gather_inputs.inputs.epi_fmaps = scan_groups['fieldmap_info'][
                'epi']
        outputnode.inputs.sdc_method = "TOPUP"
        topup = pe.Node(fsl.TOPUP(out_field="fieldmap_HZ.nii.gz", scale=1),
                        name="topup")
        topup_summary = pe.Node(TopupSummary(), name='topup_summary')
        ds_report_topupsummary = pe.Node(DerivativesDataSink(
            suffix='topupsummary', source_file=source_file),
                                         name='ds_report_topupsummary',
                                         run_without_submitting=True,
                                         mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_topupcsv = pe.Node(DerivativesDataSink(suffix='topupcsv',
                                                  source_file=source_file),
                              name='ds_topupcsv',
                              run_without_submitting=True,
                              mem_gb=DEFAULT_MEMORY_MIN_GB)

        # Enhance and skullstrip the TOPUP output to get a mask for eddy
        unwarped_mean = pe.Node(IntraModalMerge(hmc=False, to_lps=False),
                                name='unwarped_mean')
        # Register the first volume of topup imain to the first volume of the merged dwi
        topup_to_eddy_reg = pe.Node(fsl.FLIRT(dof=6, output_type="NIFTI_GZ"),
                                    name="topup_to_eddy_reg")
        workflow.connect([
            # There will be no SDC warps, they are applied by eddy
            (gather_inputs, outputnode, [('forward_warps', 'to_dwi_ref_warps')]
             ),
            (gather_inputs, topup, [('topup_datain', 'encoding_file'),
                                    ('topup_imain', 'in_file'),
                                    ('topup_config', 'config')]),
            (topup, eddy, [('out_field', 'field')]),
            (gather_inputs, topup_to_eddy_reg, [('topup_first', 'in_file'),
                                                ('eddy_first', 'reference')]),
            (gather_inputs, ds_topupcsv, [('b0_csv', 'in_file')]),
            (topup_to_eddy_reg, eddy, [('out_matrix_file', 'field_mat')]),
            # Use corrected images from TOPUP to make a mask for eddy
            (topup, unwarped_mean, [('out_corrected', 'in_files')]),
            (unwarped_mean, pre_eddy_b0_ref_wf, [('out_avg',
                                                  'inputnode.b0_template')]),
            (b0_ref_to_lps, outputnode, [('dwi_file', 'b0_template')]),
            # Save reports
            (gather_inputs, topup_summary, [('topup_report', 'summary')]),
            (topup_summary, ds_report_topupsummary, [('out_report', 'in_file')
                                                     ]),
        ])

        return workflow

    # The topup inputs will only have one PE direction,
    # so they can be used to make a b=0 reference to mask for eddy
    distorted_merge = pe.Node(IntraModalMerge(hmc=True, to_lps=False),
                              name='distorted_merge')
    workflow.connect([
        # Use the distorted mask for eddy
        (gather_inputs, distorted_merge, [('topup_imain', 'in_files')]),
        (distorted_merge, pre_eddy_b0_ref_wf, [('out_avg',
                                                'inputnode.b0_template')])
    ])

    if fieldmap_type in ('fieldmap',
                         'syn') or fieldmap_type.startswith("phase"):

        outputnode.inputs.sdc_method = fieldmap_type
        b0_sdc_wf = init_sdc_wf(scan_groups['fieldmap_info'],
                                dwi_metadata,
                                omp_nthreads=omp_nthreads,
                                fmap_demean=fmap_demean,
                                fmap_bspline=fmap_bspline)

        workflow.connect([
            # Send to SDC workflow
            (b0_ref_to_lps, b0_sdc_wf, [('dwi_file', 'inputnode.b0_ref')]),
            (b0_ref_brain_to_lps, b0_sdc_wf, [('dwi_file',
                                               'inputnode.b0_ref_brain')]),
            (b0_ref_mask_to_lps, b0_sdc_wf, [('dwi_file', 'inputnode.b0_mask')
                                             ]),
            (inputnode, b0_sdc_wf, [('t1_brain', 'inputnode.t1_brain'),
                                    ('t1_2_mni_reverse_transform',
                                     'inputnode.t1_2_mni_reverse_transform')]),
            # These deformations will be applied later, use the unwarped image now
            (b0_sdc_wf, outputnode, [('outputnode.out_warp',
                                      'to_dwi_ref_warps'),
                                     ('outputnode.method', 'sdc_method'),
                                     ('outputnode.b0_ref', 'b0_template')])
        ])

    else:
        outputnode.inputs.sdc_method = "None"
        workflow.connect([(b0_ref_to_lps, outputnode, [('dwi_file',
                                                        'b0_template')])])
    return workflow
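``gather_inputs`` supplies ``topup_datain``, the ``--datain`` encoding file consumed by ``fsl.TOPUP`` above. As an illustration of that file format (``write_topup_datain`` and all values are hypothetical): each row describes one volume of ``topup_imain`` as a phase-encoding unit vector plus the total readout time in seconds.

def write_topup_datain(pe_specs, out_path="topup_datain.txt"):
    # One row per volume in ``topup_imain``: a phase-encoding unit vector
    # (x y z) followed by the total readout time in seconds
    rows = ["%d %d %d %.6f" % spec for spec in pe_specs]
    with open(out_path, "w") as f:
        f.write("\n".join(rows) + "\n")
    return out_path

# e.g., one AP and one PA b=0 volume, 50 ms total readout time each:
write_topup_datain([(0, -1, 0, 0.05), (0, 1, 0, 0.05)])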
Example #20
def init_fsl_hmc_wf(scan_groups,
                    b0_threshold,
                    impute_slice_threshold,
                    fmap_demean,
                    fmap_bspline,
                    eddy_config,
                    mem_gb=3,
                    omp_nthreads=1,
                    dwi_metadata=None,
                    slice_quality='outlier_n_sqr_stdev_map',
                    sloppy=False,
                    name="fsl_hmc_wf"):
    """
    This workflow controls the dwi preprocessing stages using FSL tools.

    I couldn't get this to work reliably unless everything was oriented in LAS+ before going to
    TOPUP and eddy. For this reason, if TOPUP is going to be used (for an epi fieldmap or an
    RPE series) or there is no fieldmap correction, operations occurring before eddy are done in
    LAS+. The field coefficients are applied during eddy's run, and the corrected series comes out.
    This is finally converted to LPS+ and sent to the rest of the pipeline.

    If a GRE fieldmap is available, the correction is applied to eddy's outputs after they have
    been converted back to LPS+.

    Finally, if SyN is chosen, it is applied to the LPS+ converted, eddy-resampled data.


    **Parameters**

        scan_groups: dict
            dictionary with fieldmaps and warp space information for the dwis
        impute_slice_threshold: float
            threshold for a slice to be replaced with imputed values. Overrides the
            parameter in ``eddy_config`` if set to a number > 0.
        eddy_config: str
            Path to a JSON file containing settings for the call to ``eddy``.


    **Inputs**

        dwi_files: list
            List of single-volume files across all DWI series
        b0_indices: list
            Indexes into ``dwi_files`` that correspond to b=0 volumes
        bvecs: list
            List of paths to single-line bvec files
        bvals: list
            List of paths to single-line bval files
        b0_images: list
            List of single b=0 volumes
        original_files: list
            List of the files from which each DWI volume came.


    """

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['dwi_files', 'b0_indices', 'bvec_files', 'bval_files', 'b0_images',
                    'original_files', 't1_brain', 't1_2_mni_reverse_transform']),
        name='inputnode')

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["b0_template", "b0_template_mask", "pre_sdc_template",
                    "hmc_optimization_data", "sdc_method", 'slice_quality', 'motion_params',
                    "cnr_map", "bvec_files_to_transform", "dwi_files_to_transform", "b0_indices",
                    "to_dwi_ref_affines", "to_dwi_ref_warps", "rpe_b0_info"]),
        name='outputnode')

    workflow = Workflow(name=name)
    gather_inputs = pe.Node(GatherEddyInputs(), name="gather_inputs")
    if eddy_config is None:
        # load from the defaults
        eddy_cfg_file = pkgr_fn('qsiprep.data', 'eddy_params.json')
    else:
        eddy_cfg_file = eddy_config

    with open(eddy_cfg_file, "r") as f:
        eddy_args = json.load(f)

    # Run in parallel if possible
    LOGGER.info("Using %d threads in eddy", omp_nthreads)
    eddy_args["num_threads"] = omp_nthreads
    eddy = pe.Node(ExtendedEddy(**eddy_args), name="eddy")
    # These should be in LAS+
    dwi_merge = pe.Node(MergeDWIs(), name="dwi_merge")
    spm_motion = pe.Node(Eddy2SPMMotion(), name="spm_motion")
    # Convert eddy outputs back to LPS+, split them
    pre_topup_lps = pe.Node(ConformDwi(orientation="LPS"), name='pre_topup_lps')
    pre_topup_enhance = init_enhance_and_skullstrip_dwi_wf(name='pre_topup_enhance')
    back_to_lps = pe.Node(ConformDwi(orientation="LPS"), name='back_to_lps')
    cnr_lps = pe.Node(ConformDwi(orientation="LPS"), name='cnr_lps')
    split_eddy_lps = pe.Node(SplitDWIs(b0_threshold=b0_threshold), name="split_eddy_lps")
    mean_b0_lps = pe.Node(ants.AverageImages(dimension=3, normalize=True), name='mean_b0_lps')
    lps_b0_enhance = init_enhance_and_skullstrip_dwi_wf(name='lps_b0_enhance')

    workflow.connect([
        # These images and gradients should be in LAS+
        (inputnode, gather_inputs, [
            ('dwi_files', 'dwi_files'),
            ('bval_files', 'bval_files'),
            ('bvec_files', 'bvec_files'),
            ('b0_indices', 'b0_indices'),
            ('b0_images', 'b0_images'),
            ('original_files', 'original_files')]),
        # Re-concatenate
        (inputnode, dwi_merge, [
            ('dwi_files', 'dwi_files'),
            ('bval_files', 'bval_files'),
            ('bvec_files', 'bvec_files'),
            ('original_files', 'bids_dwi_files')]),
        (gather_inputs, eddy, [
            ('eddy_indices', 'in_index'),
            ('eddy_acqp', 'in_acqp')]),
        (dwi_merge, eddy, [
            ('out_dwi', 'in_file'),
            ('out_bval', 'in_bval'),
            ('out_bvec', 'in_bvec')]),
        (gather_inputs, pre_topup_lps, [
            ('pre_topup_image', 'dwi_file')]),
        (gather_inputs, outputnode, [('forward_transforms', 'to_dwi_ref_affines')]),
        (pre_topup_lps, pre_topup_enhance, [
            ('dwi_file', 'inputnode.in_file')]),
        (pre_topup_enhance, outputnode, [
            ('outputnode.bias_corrected_file', 'pre_sdc_template')]),
        (eddy, back_to_lps, [
            ('out_corrected', 'dwi_file'),
            ('out_rotated_bvecs', 'bvec_file')]),
        (dwi_merge, back_to_lps, [('out_bval', 'bval_file')]),
        (back_to_lps, split_eddy_lps, [
            ('dwi_file', 'dwi_file'),
            ('bval_file', 'bval_file'),
            ('bvec_file', 'bvec_file')]),
        (dwi_merge, outputnode, [
            ('original_images', 'original_files')]),
        (split_eddy_lps, outputnode, [
            ('dwi_files', 'dwi_files_to_transform'),
            ('bvec_files', 'bvec_files_to_transform')]),
        (split_eddy_lps, mean_b0_lps, [('b0_images', 'images')]),
        (mean_b0_lps, lps_b0_enhance, [('output_average_image', 'inputnode.in_file')]),
        (eddy, cnr_lps, [('out_cnr_maps', 'dwi_file')]),
        (cnr_lps, outputnode, [('dwi_file', 'cnr_map')]),
        (eddy, outputnode, [
            (slice_quality, 'slice_quality'),
            (slice_quality, 'hmc_optimization_data')]),
        (eddy, spm_motion, [('out_parameter', 'eddy_motion')]),
        (spm_motion, outputnode, [('spm_motion_file', 'motion_params')])
    ])

    # Fieldmap correction to be done in LAS+: TOPUP for rpe series or epi fieldmap
    # If a topupref is provided, use it for TOPUP
    rpe_b0 = None
    fieldmap_type = scan_groups['fieldmap_info']['suffix']
    if fieldmap_type == 'epi':
        rpe_b0 = scan_groups['fieldmap_info']['epi']
    elif fieldmap_type == 'rpe_series':
        rpe_b0 = scan_groups['fieldmap_info']['rpe_series']
    using_topup = rpe_b0 is not None

    if using_topup:
        outputnode.inputs.sdc_method = "TOPUP"
        # Whether an rpe series (from dwi/) or an epi fmap (in fmap/) extract just the
        # b=0s for topup
        prepare_rpe_b0 = pe.Node(
            B0RPEFieldmap(b0_file=rpe_b0, orientation='LAS', output_3d_images=False),
            name="prepare_rpe_b0")
        topup = pe.Node(fsl.TOPUP(out_field="fieldmap_HZ.nii.gz"), name="topup")
        # Enhance and skullstrip the TOPUP output to get a mask for eddy
        unwarped_mean = pe.Node(afni.TStat(outputtype='NIFTI_GZ'), name='unwarped_mean')
        unwarped_enhance = init_enhance_and_skullstrip_dwi_wf(name='unwarped_enhance')

        workflow.connect([
            (prepare_rpe_b0, outputnode, [('fmap_info', 'rpe_b0_info')]),
            (prepare_rpe_b0, gather_inputs, [('fmap_file', 'rpe_b0')]),
            (gather_inputs, topup, [
                ('topup_datain', 'encoding_file'),
                ('topup_imain', 'in_file'),
                ('topup_config', 'config')]),
            (gather_inputs, outputnode, [('forward_warps', 'to_dwi_ref_warps')]),
            (topup, unwarped_mean, [('out_corrected', 'in_file')]),
            (unwarped_mean, unwarped_enhance, [('out_file', 'inputnode.in_file')]),
            (unwarped_enhance, eddy, [('outputnode.mask_file', 'in_mask')]),
            (topup, eddy, [
                ('out_field', 'field')]),
            (lps_b0_enhance, outputnode, [
                ('outputnode.bias_corrected_file', 'b0_template'),
                ('outputnode.mask_file', 'b0_template_mask')]),
            ])
        return workflow

    # Enhance and skullstrip the TOPUP output to get a mask for eddy
    distorted_enhance = init_enhance_and_skullstrip_dwi_wf(name='distorted_enhance')
    workflow.connect([
        # Use the distorted mask for eddy
        (gather_inputs, distorted_enhance, [('pre_topup_image', 'inputnode.in_file')]),
        (distorted_enhance, eddy, [('outputnode.mask_file', 'in_mask')]),
    ])
    if fieldmap_type in ('fieldmap', 'phasediff', 'phase', 'syn'):

        outputnode.inputs.sdc_method = fieldmap_type
        b0_sdc_wf = init_sdc_wf(
            scan_groups['fieldmap_info'], dwi_metadata, omp_nthreads=omp_nthreads,
            fmap_demean=fmap_demean, fmap_bspline=fmap_bspline)

        workflow.connect([
            # Calculate distortion correction on eddy-corrected data
            (lps_b0_enhance, b0_sdc_wf, [
                ('outputnode.bias_corrected_file', 'inputnode.b0_ref'),
                ('outputnode.skull_stripped_file', 'inputnode.b0_ref_brain'),
                ('outputnode.mask_file', 'inputnode.b0_mask')]),
            (inputnode, b0_sdc_wf, [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_2_mni_reverse_transform',
                 'inputnode.t1_2_mni_reverse_transform')]),

            # These deformations will be applied later, use the unwarped image now
            (b0_sdc_wf, outputnode, [
                ('outputnode.out_warp', 'to_dwi_ref_warps'),
                ('outputnode.method', 'sdc_method'),
                ('outputnode.b0_ref', 'b0_template'),
                ('outputnode.b0_mask', 'b0_template_mask')])])

    else:
        outputnode.inputs.sdc_method = "None"
        workflow.connect([
            (lps_b0_enhance, outputnode, [
                ('outputnode.skull_stripped_file', 'b0_template'),
                ('outputnode.mask_file', 'b0_template_mask')]),
            ])

    return workflow
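Both versions of ``init_fsl_hmc_wf`` share the same configuration pattern: load packaged eddy defaults from JSON unless the user supplies ``eddy_config``, then override run-time settings in the resulting dictionary. A condensed sketch of that pattern (``load_eddy_args`` is a hypothetical name; the real code inlines these steps):

import json

def load_eddy_args(eddy_config=None, num_threads=1,
                   default_cfg="eddy_params.json"):
    # Fall back to the packaged defaults when no user config is given,
    # then override run-time settings such as the thread count
    cfg_file = eddy_config or default_cfg
    with open(cfg_file) as f:
        eddy_args = json.load(f)
    eddy_args["num_threads"] = num_threads
    return eddy_args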
Example #21
def init_brain_extraction_wf(tpl_target_path,
                             tpl_mask_path,
                             tpl_regmask_path,
                             name='brain_extraction_wf',
                             template_spec=None,
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             bids_suffix='T1w',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None,
                             use_laplacian=True,
                             bspline_fitting_distance=200):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()

    **Parameters**
        tpl_target_path : str
            Path to the skull-stripping template, e.g. one resolved from the
            'OASIS30ANTs' or 'NKI' TemplateFlow names.
            The brain template from which regions will be projected; an
            anatomical template created using e.g. the LPBA40 data set with
            ``buildtemplateparallel.sh`` in ANTs.
        tpl_mask_path : str
            Path to the corresponding brain-probability mask, built by
            warping individual brain masks onto the anatomical template and
            averaging them into a probability image.
        tpl_regmask_path : str or None
            Optional registration mask restricting the similarity-metric
            computation; when provided it is set on ``inputnode.in_mask``.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        bids_suffix : str
            Sequence type of the first input image. For a list of acceptable values
            see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows specifying a particular segmentation model, overriding
            the defaults based on ``bids_suffix``
        use_laplacian : bool
            Enables or disables alignment of the Laplacian as an additional
            criterion for image registration quality (default: True)
        bspline_fitting_distance : float
            The size of the b-spline mesh grid elements, in mm (default: 200)
        name : str, optional
            Workflow name (default: ``brain_extraction_wf``)
    **Inputs**
        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.
    **Outputs**
        out_file
            Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
        out_mask
            Calculated brain mask
        bias_corrected
            The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
            correction, before skull-stripping.
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for each
            input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS
    """
    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec['suffix'] = template_spec.get('suffix', bids_suffix)

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Set the registration mask on the inputnode, if one was provided
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm',
        'out_tpms'
    ]),
                         name='outputnode')

    copy_xform = pe.Node(CopyXForm(
        fields=['out_file', 'out_mask', 'bias_corrected', 'bias_image']),
                         name='copy_xform',
                         run_without_submitting=True,
                         mem_gb=2.5)

    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=False,
        copy_header=True,
        n_iterations=[50] * 4,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
                        n_procs=omp_nthreads,
                        name='inu_n4',
                        iterfield=['input_image'])

    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
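    # Both template and target are smoothed and resampled to 4 mm so that the
    # antsAI affine initialization below runs quickly on coarse data.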
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.25),
                          transform=('Affine', 0.1),
                          search_factor=(15, 0.1),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    # Tolerate missing ANTs at construction time
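    # antsAI's ``--search-grid`` option was added in ANTs 2.3.0, so it is
    # only set when a sufficiently recent version is detected.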
    _ants_version = Registration().version
    if _ants_version and parseversion(_ants_version) >= Version('2.3.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    settings_file = 'antsBrainExtraction_%s.json' if use_laplacian \
        else 'antsBrainExtractionNoLaplacian_%s.json'
    norm = pe.Node(Registration(
        from_file=pkgr_fn('CPAC.anat_preproc', 'data/' +
                          settings_file % normalization_quality)),
                   name='norm',
                   n_procs=omp_nthreads,
                   mem_gb=mem_gb)
    norm.inputs.float = use_float
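    # Registration's fixed-mask input was pluralized to ``fixed_image_masks``
    # in ANTs 2.2.0; pick whichever trait name the installed version expects.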
    fixed_mask_trait = 'fixed_image_mask'
    if _ants_version and parseversion(_ants_version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Refine INU correction
    inu_n4_final = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=True,
        copy_header=True,
        n_iterations=[50] * 5,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
                              n_procs=omp_nthreads,
                              name='inu_n4_final',
                              iterfield=['input_image'])

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, copy_xform, [(('in_files', _pop), 'hdr_file')]),
        (inputnode, inu_n4_final, [('in_files', 'input_image')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (norm, map_brainmask, [('reverse_transforms', 'transforms'),
                               ('reverse_invert_flags',
                                'invert_transform_flags')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4_final, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, copy_xform, [('output_image', 'out_mask')]),
        (apply_mask, copy_xform, [('out_file', 'out_file')]),
        (inu_n4_final, copy_xform, [('output_image', 'bias_corrected'),
                                    ('bias_image', 'bias_image')]),
        (copy_xform, outputnode, [('out_file', 'out_file'),
                                  ('out_mask', 'out_mask'),
                                  ('bias_corrected', 'bias_corrected'),
                                  ('bias_image', 'bias_image')]),
    ])

    if use_laplacian:
        lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                           name='lap_tmpl')
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                             name='lap_target')
        mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
        wf.connect([
            (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
            (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
            (inu_n4, mrg_target, [('output_image', 'in1')]),
            (lap_target, mrg_target, [('output_image', 'in2')]),
            (mrg_tmpl, norm, [('out', 'fixed_image')]),
            (mrg_target, norm, [('out', 'moving_image')]),
        ])
    else:
        norm.inputs.fixed_image = tpl_target_path
        wf.connect([
            (inu_n4, norm, [(('output_image', _pop), 'moving_image')]),
        ])

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
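        # The last entry of the ATROPOS model is the (1-based) WM class label;
        # shift it to a 0-based index to select the WM TPM as the N4 weights.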
        sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                         name='sel_wm',
                         run_without_submitting=True)

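        # Re-route the brain mask: the ATROPOS-refined mask now replaces the
        # dilated largest-component mask for ``apply_mask`` and the outputnode.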
        wf.disconnect([
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
            (copy_xform, outputnode, [('out_mask', 'out_mask')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (thr_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask')]),
            (get_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask_dilated')]),
            (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
            (sel_wm, inu_n4_final, [('out', 'weight_image')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask'),
                                      ('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')]),
        ])
    return wf
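
Unlike Example #22 below, this variant expects the caller to resolve the template files. A minimal sketch of obtaining them with TemplateFlow (mirroring the queries Example #22 runs internally; the ``resolution`` filter is an illustrative assumption):

from templateflow.api import get as get_template

# Resolve the OASIS30ANTs template, its brain-probability mask, and the
# registration mask; each call downloads (if needed) and returns a path.
tpl_target_path = get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w')
tpl_mask_path = (get_template('OASIS30ANTs', label='brain', suffix='probseg')
                 or get_template('OASIS30ANTs', desc='brain', suffix='mask'))
tpl_regmask_path = get_template('OASIS30ANTs', desc='BrainCerebellumExtraction',
                                suffix='mask')

wf = init_brain_extraction_wf(tpl_target_path, tpl_mask_path, tpl_regmask_path)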
Example #22
def init_brain_extraction_wf(
    name="brain_extraction_wf",
    in_template="OASIS30ANTs",
    template_spec=None,
    use_float=True,
    normalization_quality="precise",
    omp_nthreads=None,
    mem_gb=3.0,
    bids_suffix="T1w",
    atropos_refine=True,
    atropos_use_random_seed=True,
    atropos_model=None,
    use_laplacian=True,
    bspline_fitting_distance=200,
):
    """
    Build a workflow for atlas-based brain extraction on anatomical MRI data.

    This is a Nipype implementation of atlas-based brain extraction inspired by
    the official ANTs' ``antsBrainExtraction.sh`` workflow (only for 3D images).

    The workflow is organized as follows:

      1. Step 1 performs several clerical tasks (preliminary INU correction,
         calculating the Laplacian of inputs, affine initialization) and the
         core spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: binarization of the brain mask
      4. Maps the WM (white matter) probability map from the template, if such
         a prior exists, combining it with the BS (brainstem) probability map
         before mapping when the WM and BS are given separately (as is the
         case for ``OASIS30ANTs``).
      5. Run a second N4 INU correction round, using the prior mapped into
         individual space in step 4 if available.
      6. Superstep 6: apply ATROPOS on the INU-corrected result of step 5, and
         massage its outputs
      7. Superstep 7: use results from 4 to refine the brain mask
      8. If priors were found in step 4, calculate the overlap of each posterior
         estimated in step 6 with the WM+BS prior from the template, and select
         the posterior overlapping it the most. Combine that posterior with the
         refined brain mask and pass it on to the next step.
      9. Apply a final N4 using the refined brain mask (or the map calculated in
         step 8 if priors were found) as weights map for the algorithm.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_brain_extraction_wf
            wf = init_brain_extraction_wf()

    Parameters
    ----------
    in_template : str
        Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or
        path).
        The brain template from which regions will be projected; an
        anatomical template created using e.g. the LPBA40 data set with
        ``buildtemplateparallel.sh`` in ANTs.
        The workflow will automatically search for a brain probability
        mask, created e.g. from a data set such as LPBA40 whose individual
        brain masks were warped to the anatomical template and averaged
        into a probability image.
    use_float : bool
        Whether single precision should be used
    normalization_quality : str
        Use more precise or faster registration parameters
        (default: ``precise``, other possible values: ``testing``)
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
        in the workflow
    bids_suffix : str
        Sequence type of the first input image. For a list of acceptable values
        see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
    atropos_refine : bool
        Enables or disables the whole ATROPOS sub-workflow
    atropos_use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    atropos_model : tuple or None
        Allows specifying a particular segmentation model, overriding
        the defaults based on ``bids_suffix``
    use_laplacian : bool
        Enables or disables alignment of the Laplacian as an additional
        criterion for image registration quality (default: True)
    bspline_fitting_distance : float
        The size of the b-spline mesh grid elements, in mm (default: 200)
    name : str, optional
        Workflow name (default: ``brain_extraction_wf``)

    Inputs
    ------
    in_files : list
        List of input anatomical images to be brain-extracted,
        typically T1-weighted.
        If a list of anatomical images is provided, subsequently
        specified images are used during the segmentation process.
        However, only the first image is used in the registration
        of priors.
        Our suggestion would be to specify the T1w as the first image.
    in_mask : str, optional
        Mask used for registration to limit the metric
        computation to a specific region.

    Outputs
    -------
    out_file : str
        Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
    out_mask : str
        Calculated brain mask
    bias_corrected : str
        The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
        correction, before skull-stripping.
    bias_image : str
        The :abbr:`INU (intensity non-uniformity)` field estimated for each
        input in ``in_files``
    out_segm : str
        Output segmentation by ATROPOS
    out_tpms : str
        Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from packaging.version import parse as parseversion, Version
    from templateflow.api import get as get_template

    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec["suffix"] = template_spec.get("suffix", bids_suffix)

    tpl_target_path, common_spec = get_template_specs(
        in_template, template_spec=template_spec)

    # Get probabilistic brain mask if available
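    # (falling back to the binary brain mask when the template ships no
    # probabilistic segmentation)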
    tpl_mask_path = get_template(
        in_template, label="brain", suffix="probseg", **common_spec
    ) or get_template(in_template, desc="brain", suffix="mask", **common_spec)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")

    # Try to find a registration mask, set if available
    tpl_regmask_path = get_template(in_template,
                                    desc="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **common_spec)
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "out_file",
            "out_mask",
            "bias_corrected",
            "bias_image",
            "out_segm",
            "out_tpms",
        ]),
        name="outputnode",
    )

    trunc = pe.MapNode(
        ImageMath(operation="TruncateImageIntensity",
                  op2="0.01 0.999 256",
                  copy_header=True),
        name="truncate_images",
        iterfield=["op1"],
    )
    inu_n4 = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * 4,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4",
        iterfield=["input_image"],
    )

    res_tmpl = pe.Node(
        RegridToZooms(in_file=tpl_target_path, zooms=(4, 4, 4), smooth=True),
        name="res_tmpl",
    )
    res_target = pe.Node(RegridToZooms(zooms=(4, 4, 4), smooth=True),
                         name="res_target")

    # Initialize transforms with antsAI
    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )

    # Tolerate missing ANTs at construction time
    try:
        init_aff.inputs.search_grid = (40, (0, 40, 40))
    except ValueError:
        warn("antsAI's option --search-grid was added in ANTS 2.3.0 "
             f"({init_aff.interface.version} found.)")

    # Set up spatial normalization
    settings_file = ("antsBrainExtraction_%s.json" if use_laplacian else
                     "antsBrainExtractionNoLaplacian_%s.json")
    norm = pe.Node(
        Registration(from_file=pkgr_fn("niworkflows.data", settings_file %
                                       normalization_quality)),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float
    fixed_mask_trait = "fixed_image_mask"

    if norm.interface.version and parseversion(
            norm.interface.version) >= Version("2.2.0"):
        fixed_mask_trait += "s"

    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian"),
        name="map_brainmask",
        mem_gb=1,
    )
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(
        ThresholdImage(
            dimension=3,
            th_low=0.5,
            th_high=1.0,
            inside_value=1,
            outside_value=0,
            copy_header=True,
        ),
        name="thr_brainmask",
    )

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=["in_file"],
                            name="apply_mask")

    # fmt: off
    wf.connect([
        (inputnode, trunc, [("in_files", "op1")]),
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, init_aff, [("in_mask", "fixed_image_mask")]),
        (inputnode, norm, [("in_mask", fixed_mask_trait)]),
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (trunc, inu_n4, [("output_image", "input_image")]),
        (inu_n4, res_target, [(("output_image", _pop), "in_file")]),
        (res_tmpl, init_aff, [("out_file", "fixed_image")]),
        (res_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
        (norm, map_brainmask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_brainmask, thr_brainmask, [("output_image", "input_image")]),
        (map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
        (inu_n4_final, apply_mask, [("output_image", "in_file")]),
        (thr_brainmask, apply_mask, [("output_image", "in_mask")]),
        (thr_brainmask, outputnode, [("output_image", "out_mask")]),
        (inu_n4_final, outputnode, [("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
        (apply_mask, outputnode, [("out_file", "out_file")]),
    ])
    # fmt: on

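    # Prefer the template's WM probability map (combined with the brainstem
    # prior via the module's ``_imsum`` helper when they ship separately) over
    # the mapped brain mask as the weight image for the final N4 round.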
    wm_tpm = (get_template(
        in_template, label="WM", suffix="probseg", **common_spec) or None)
    if wm_tpm:
        map_wmmask = pe.Node(
            ApplyTransforms(interpolation="Gaussian"),
            name="map_wmmask",
            mem_gb=1,
        )

        # Add the brain stem if it is found.
        bstem_tpm = (get_template(
            in_template, label="BS", suffix="probseg", **common_spec) or None)
        if bstem_tpm:
            full_wm = pe.Node(niu.Function(function=_imsum), name="full_wm")
            full_wm.inputs.op1 = str(wm_tpm)
            full_wm.inputs.op2 = str(bstem_tpm)
            # fmt: off
            wf.connect([(full_wm, map_wmmask, [("out", "input_image")])])
            # fmt: on
        else:
            map_wmmask.inputs.input_image = str(wm_tpm)
        # fmt: off
        wf.disconnect([
            (map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
        ])
        wf.connect([
            (inputnode, map_wmmask, [(("in_files", _pop), "reference_image")]),
            (norm, map_wmmask, [
                ("reverse_transforms", "transforms"),
                ("reverse_invert_flags", "invert_transform_flags"),
            ]),
            (map_wmmask, inu_n4_final, [("output_image", "weight_image")]),
        ])
        # fmt: on

    if use_laplacian:
        lap_tmpl = pe.Node(
            ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
            name="lap_tmpl",
        )
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(
            ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
            name="lap_target",
        )
        mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
        # fmt: off
        wf.connect([
            (inu_n4, lap_target, [(("output_image", _pop), "op1")]),
            (lap_tmpl, mrg_tmpl, [("output_image", "in2")]),
            (inu_n4, mrg_target, [("output_image", "in1")]),
            (lap_target, mrg_target, [("output_image", "in2")]),
            (mrg_tmpl, norm, [("out", "fixed_image")]),
            (mrg_target, norm, [("out", "moving_image")]),
        ])
        # fmt: on

    else:
        norm.inputs.fixed_image = tpl_target_path
        # fmt: off
        wf.connect([
            (inu_n4, norm, [(("output_image", _pop), "moving_image")]),
        ])
        # fmt: on

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
            bspline_fitting_distance=bspline_fitting_distance,
            wm_prior=bool(wm_tpm),
        )

        # fmt: off
        wf.disconnect([
            (thr_brainmask, outputnode, [("output_image", "out_mask")]),
            (inu_n4_final, outputnode, [("output_image", "bias_corrected"),
                                        ("bias_image", "bias_image")]),
            (apply_mask, outputnode, [("out_file", "out_file")]),
        ])
        wf.connect([
            (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
            (inu_n4_final, atropos_wf, [("output_image", "inputnode.in_corrected")]),
            (thr_brainmask, atropos_wf, [("output_image", "inputnode.in_mask")]),
            (atropos_wf, outputnode, [
                ("outputnode.out_file", "out_file"),
                ("outputnode.bias_corrected", "bias_corrected"),
                ("outputnode.bias_image", "bias_image"),
                ("outputnode.out_mask", "out_mask"),
                ("outputnode.out_segm", "out_segm"),
                ("outputnode.out_tpms", "out_tpms"),
            ]),
        ])
        # fmt: on
        if wm_tpm:
            # fmt: off
            wf.connect([
                (map_wmmask, atropos_wf, [("output_image",
                                           "inputnode.wm_prior")]),
            ])
            # fmt: on
    return wf
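
A minimal usage sketch for this variant (the input path, working directory, and thread count are illustrative; assumes ANTs and the workflow's Python dependencies are installed):

# Build the workflow against the default OASIS30ANTs template, point it at a
# T1w image, and run it; results are exposed on ``outputnode`` (out_file,
# out_mask, bias_corrected, ...).
wf = init_brain_extraction_wf(in_template='OASIS30ANTs', omp_nthreads=4)
wf.inputs.inputnode.in_files = ['/data/sub-01/anat/sub-01_T1w.nii.gz']
wf.base_dir = '/tmp/work'  # where nipype stores intermediate results
wf.run()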