# Example #1 (score: 0)
def build_opts(opts):
    """Trigger a new process that builds the workflow graph, based on the input options.

    The graph is built by ``build_workflow`` (defined elsewhere in this
    module) inside a separate forkserver process, so the heavy Nipype graph
    construction does not pollute this parent process before it forks
    workers.  After the child finishes, this function checks external
    dependencies, runs the workflow, and — in a ``finally`` block — always
    writes reports and the derivative dataset description before exiting.

    Parameters
    ----------
    opts :
        Parsed command-line namespace.  Attributes read here:
        ``fs_license_file``, ``verbose_count``, ``write_graph``,
        ``reports_only``, ``boilerplate`` and ``run_reconall``.

    Raises
    ------
    RuntimeError
        If no valid FreeSurfer license file can be located.

    Notes
    -----
    This function never returns normally; every path ends in ``sys.exit``.
    """
    import os
    from pathlib import Path
    import logging
    import sys
    import gc
    import warnings
    from multiprocessing import set_start_method, Process, Manager
    from nipype import logging as nlogging
    from niworkflows.utils.misc import check_valid_fs_license

    # forkserver keeps child processes from inheriting this process's full
    # state (as plain fork would on Linux).  NOTE(review): this raises
    # RuntimeError if a start method was already set — assumes this is the
    # first multiprocessing call in the process.
    set_start_method('forkserver')

    logging.addLevelName(
        25, 'IMPORTANT')  # Add a new level between INFO and WARNING
    logging.addLevelName(15,
                         'VERBOSE')  # Add a new level between INFO and DEBUG
    logger = logging.getLogger('cli')

    def _warn_redirect(message,
                       category,
                       filename,
                       lineno,
                       file=None,
                       line=None):
        # Funnel Python warnings into the CLI logger instead of stderr.
        logger.warning('Captured warning (%s): %s', category, message)

    warnings.showwarning = _warn_redirect

    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    if opts.fs_license_file is not None:
        os.environ["FS_LICENSE"] = os.path.abspath(opts.fs_license_file)

    if not check_valid_fs_license():
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'sMRIPrep looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')

    # Retrieve logging level: each -v lowers the level by 5 from 25
    # ("IMPORTANT"), clamped at DEBUG (10).
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval) in a child process; the Manager
    # dict is the only channel for results back to this process.
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        # Propagate a crashed/failed child verbatim.
        if p.exitcode != 0:
            sys.exit(p.exitcode)

        # Copy results out before the Manager (and its proxy dict) shuts down.
        smriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if smriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        smriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    # Both short-circuit modes exit with 0/1 depending on the child's
    # return code.
    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(smriprep_wf)
    if missing:
        print("Cannot run sMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        smriprep_wf.run(**plugin_settings)
    except RuntimeError:
        # Nipype wraps node failures in RuntimeError; remember the failure
        # but still fall through to report generation below.
        errno = 1
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            # Ship the fsaverage segmentation labels next to the outputs.
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(
                dseg_tsv,
                str(Path(output_dir) / 'smriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(
                dseg_tsv,
                str(Path(output_dir) / 'smriprep' / 'desc-aparcaseg_dseg.tsv'))
        logger.log(25, 'sMRIPrep finished without errors')
    finally:
        # Reports and dataset description are written even when the run
        # failed, so partial results remain inspectable.
        from niworkflows.reports import generate_reports
        from ..utils.bids import write_derivative_description

        logger.log(25, 'Writing reports for participants: %s',
                   ', '.join(subject_list))
        # Generate reports phase; generate_reports returns an error count
        # which is folded into the exit status.
        errno += generate_reports(subject_list,
                                  output_dir,
                                  run_uuid,
                                  packagename='smriprep')
        write_derivative_description(bids_dir,
                                     str(Path(output_dir) / 'smriprep'))
    sys.exit(int(errno > 0))
# Example #2 (score: 0)
def parse_args(args=None, namespace=None):
    """Parse args and run further checks on the command line.

    Parameters
    ----------
    args : list of str, optional
        Argument strings to parse; argparse defaults to ``sys.argv[1:]``.
    namespace : argparse.Namespace, optional
        Existing namespace to populate.

    Side effects: populates the module-level ``config`` object, validates
    the FreeSurfer license and (optionally) the BIDS input directory, wipes
    the working directory when requested, and creates the output, working
    and log directories.

    Raises
    ------
    RuntimeError
        If no valid FreeSurfer license file can be located.
    """
    import logging
    from niworkflows.utils.spaces import Reference, SpatialReferences
    from niworkflows.utils.misc import check_valid_fs_license

    parser = _build_parser()
    opts = parser.parse_args(args, namespace)
    # Each -v lowers the level by 5 from 25 ("IMPORTANT"), clamped at DEBUG.
    config.execution.log_level = int(
        max(25 - 5 * opts.verbose_count, logging.DEBUG))
    config.from_dict(vars(opts))

    # Initialize --output-spaces if not defined
    if config.execution.output_spaces is None:
        config.execution.output_spaces = SpatialReferences(
            [Reference("MNI152NLin2009cAsym", {"res": "native"})])

    # Retrieve logging level
    build_log = config.loggers.cli

    if not check_valid_fs_license(lic=config.execution.fs_license_file):
        raise RuntimeError("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
                           )

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        # BUGFIX: the original used bare ``yaml.load``, which is unsafe
        # (can execute arbitrary Python from the file) and raises TypeError
        # without a Loader argument on PyYAML >= 6.
        from yaml import safe_load as loadyml

        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        _plugin = plugin_settings.get("plugin")
        if _plugin:
            config.nipype.plugin = _plugin
            config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
            config.nipype.nprocs = config.nipype.plugin_args.get(
                "nprocs", config.nipype.nprocs)

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
        build_log.warning(
            f"Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed "
            f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})")

    # Inform the user about the risk of using brain-extracted images
    if config.workflow.skull_strip_t1w == "auto":
        build_log.warning("""\
Option ``--skull-strip-t1w`` was set to 'auto'. A heuristic will be \
applied to determine whether the input T1w image(s) have already been skull-stripped.
If that were the case, brain extraction and INU correction will be skipped for those T1w \
inputs. Please, BEWARE OF THE RISKS TO THE CONSISTENCY of results when using varying \
processing workflows across participants. To determine whether a participant has been run \
through the shortcut pipeline (meaning, brain extraction was skipped), please check the \
citation boilerplate. When reporting results with varying pipelines, please make sure you \
mention this particular variant of fMRIPrep listing the participants for which it was \
applied.""")

    bids_dir = config.execution.bids_dir
    output_dir = config.execution.output_dir
    work_dir = config.execution.work_dir
    version = config.environment.version

    if config.execution.fs_subjects_dir is None:
        config.execution.fs_subjects_dir = output_dir / "freesurfer"

    # Wipe out existing work_dir
    if opts.clean_workdir and work_dir.exists():
        from niworkflows.utils.misc import clean_directory

        build_log.info(
            f"Clearing previous fMRIPrep working directory: {work_dir}")
        if not clean_directory(work_dir):
            build_log.warning(
                f"Could not clear all contents of working directory: {work_dir}"
            )

    # Ensure input and output folders are not the same
    if output_dir == bids_dir:
        # BUGFIX: the original inlined this as
        # ``"... %s)." % bids_dir / "derivatives" / ...``; ``%`` and ``/``
        # share precedence and associate left-to-right, so the string was
        # formatted first and then ``str / str`` raised TypeError instead of
        # showing the error message.  Build the suggested path first.
        suggested_path = bids_dir / "derivatives" / (
            "fmriprep-%s" % version.split("+")[0])
        parser.error(
            "The selected output folder is the same as the input BIDS folder. "
            "Please modify the output path (suggestion: %s)." % suggested_path)

    if bids_dir in work_dir.parents:
        parser.error(
            "The selected working directory is a subdirectory of the input BIDS folder. "
            "Please modify the output path.")

    # Validate inputs
    if not opts.skip_bids_validation:
        from ..utils.bids import validate_input_dir

        build_log.info(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases).")
        validate_input_dir(config.environment.exec_env, opts.bids_dir,
                           opts.participant_label)

    # Setup directories
    config.execution.log_dir = output_dir / "fmriprep" / "logs"
    # Check and create output and working directories
    config.execution.log_dir.mkdir(exist_ok=True, parents=True)
    output_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Force initialization of the BIDSLayout
    config.execution.init()
    all_subjects = config.execution.layout.get_subjects()
    if config.execution.participant_label is None:
        config.execution.participant_label = all_subjects

    participant_label = set(config.execution.participant_label)
    missing_subjects = participant_label - set(all_subjects)
    if missing_subjects:
        parser.error(
            "One or more participant labels were not found in the BIDS directory: "
            "%s." % ", ".join(missing_subjects))

    config.execution.participant_label = sorted(participant_label)
    # argparse collects a list; only the first template is used downstream.
    config.workflow.skull_strip_template = config.workflow.skull_strip_template[
        0]
# Example #3 (score: 0)
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph.

    Intended to run in a child process (see how ``retval`` is filled in
    rather than returned directly): loads the saved config, builds the
    fMRIPrep workflow, and records outcome information in ``retval``.

    Parameters
    ----------
    config_file :
        Path to a serialized config accepted by ``config.load``.
    retval : MutableMapping
        Output channel; on return it carries ``"workflow"`` (the built
        workflow or ``None``) and ``"return_code"`` (0 on success, 126 when
        the FreeSurfer license is missing, 127 when external commands are
        missing, or the ``generate_reports`` result in --reports-only mode).

    Returns
    -------
    ``retval`` itself, for convenience.
    """
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.utils.misc import check_valid_fs_license
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    output_dir = config.execution.output_dir
    version = config.environment.version

    # Pessimistic defaults so any early return/crash reads as failure.
    retval["return_code"] = 1
    retval["workflow"] = None

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(
        version, output_dir / "fmriprep-rodents" / "dataset_description.json")
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / "dataset_description.json"
    if dset_desc_path.exists():
        from hashlib import sha256

        # Fingerprint the input dataset description for provenance.
        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(
            desc_content).hexdigest()

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label)

    # Called with reports only: short-circuit before any workflow is built.
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, "Running --reports-only on participants %s",
                      ", ".join(subject_list))
        retval["return_code"] = generate_reports(
            subject_list,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep_rodents", "data/reports-spec.yml"),
            packagename="fmriprep-rodents",
        )
        return retval

    # Build main workflow
    init_msg = f"""
    Running fMRIPREP version {config.environment.version}:
      * BIDS dataset path: {config.execution.bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {config.execution.run_uuid}.
      * Output spaces: {config.execution.output_spaces}."""

    if config.execution.anat_derivatives:
        init_msg += f"""
      * Anatomical derivatives: {config.execution.anat_derivatives}."""

    if config.execution.fs_subjects_dir:
        init_msg += f"""
      * Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."""
    build_log.log(25, init_msg)

    retval["workflow"] = init_fmriprep_wf()

    # Check for FS license after building the workflow
    if not check_valid_fs_license():
        build_log.critical("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
                           )
        retval["return_code"] = 126  # 126 == Command invoked cannot execute.
        return retval

    # Check workflow for missing commands
    missing = check_deps(retval["workflow"])
    if missing:
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            "\n\t* ".join(
                [""] +
                [f"{cmd} (Interface: {iface})" for iface, cmd in missing]),
        )
        retval["return_code"] = 127  # 127 == command not found.
        return retval

    # Persist the (possibly updated) config so the parent/workers see it.
    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
# Example #4 (score: 0)
def run_stage_run(opts):
    """Execute previously built execution graphs ("chunks") with a Nipype plugin.

    Graph sources, in order of precedence: ``opts.graphs`` handed over from
    a previous in-process step, a pickled graphs file (``--graphs-file``),
    or a cached object looked up by ``--uuid``.  The mapping's keys are
    subject identifiers (plus an optional ``"model"`` entry for the
    group-level graph); subject graphs are grouped into up to ``n_chunks``
    composed graphs and each is run with the configured Nipype run plugin.

    Raises
    ------
    RuntimeError
        If no graph source was specified, or FreeSurfer nodes are present
        without a valid license.
    ValueError
        On an invalid graphs file/cache entry, an unknown run plugin, or
        when there is nothing to run.
    """
    workdir: Path = opts.workdir

    if opts.graphs is not None:
        graphs: Mapping[str, Any] = opts.graphs
        logger.info("Using graphs from previous step")

    else:
        if opts.graphs_file is not None:
            from ..utils.pickle import load_pickle

            graphs_file = str(resolve(opts.graphs_file, opts.fs_root))

            logger.info(f'Using graphs defined in file "{graphs_file}"')

            # NOTE(review): unpickling is only safe for trusted inputs;
            # this assumes the file was produced by a previous HALFpipe run.
            obj = load_pickle(graphs_file)
            if not isinstance(obj, Mapping):
                raise ValueError(f'Invalid graphs file "{graphs_file}"')

            graphs = obj

        elif opts.uuid is not None:
            from ..utils.cache import uncache_obj

            obj = uncache_obj(workdir, type_str="graphs", uuid=opts.uuid)
            if not isinstance(obj, Mapping):
                raise ValueError(f'Could not find graphs for "{opts.uuid}"')

            graphs = obj

        else:
            raise RuntimeError(
                'Please specify the uuid of the execution graphs to run using "--uuid"'
            )

    if opts.nipype_resource_monitor is True:
        import nipype

        nipype.config.enable_resource_monitor()

    # Arguments forwarded to the Nipype execution plugin below.
    plugin_args: Dict[str, Union[Path, bool, float]] = dict(
        workdir=workdir,
        watchdog=opts.watchdog,
        stop_on_first_crash=opts.debug,
        resource_monitor=opts.nipype_resource_monitor,
        raise_insufficient=False,
        keep=opts.keep,
    )

    if opts.nipype_n_procs is not None:
        plugin_args["n_procs"] = opts.nipype_n_procs

    if opts.nipype_memory_gb is not None:
        plugin_args["memory_gb"] = opts.nipype_memory_gb
    else:
        # No explicit limit: fall back to the detected system memory limit,
        # if one can be determined.
        from ..memory import memory_limit

        memory_gb = memory_limit()
        if memory_gb is not None:
            plugin_args["memory_gb"] = memory_gb

    runnername = f"{opts.nipype_run_plugin}Plugin"

    import nipype.pipeline.plugins as nip

    import halfpipe.plugins as ppp

    # Prefer HALFpipe's patched plugin of the same name over stock Nipype.
    if hasattr(ppp, runnername):
        logger.info(
            f'Using a patched version of nipype_run_plugin "{runnername}"')
        runnercls = getattr(ppp, runnername)

    elif hasattr(nip, runnername):
        logger.warning(f'Using unsupported nipype_run_plugin "{runnername}"')
        runnercls = getattr(nip, runnername)

    else:
        raise ValueError(f'Unknown nipype_run_plugin "{runnername}"')

    logger.debug(f"Using plugin arguments\n{pformat(plugin_args)}")

    from ..workflows.execgraph import filter_subjects

    chunks = list(graphs.keys())
    subjects = filter_subjects(chunks, opts)

    # Decide how many subject-level chunks to run: one per subject when
    # requested (or on a cluster), otherwise bounded by --max-chunk-size.
    n_chunks = opts.n_chunks
    if n_chunks is None:
        if opts.subject_chunks or opts.use_cluster:
            n_chunks = len(subjects)
        else:
            n_chunks = ceil(len(subjects) / float(opts.max_chunk_size))

    # Partition subject indices into n_chunks roughly equal groups.
    index_arrays = np.array_split(np.arange(len(subjects)), n_chunks)

    if opts.only_chunk_index is not None:
        # --only-chunk-index is one-based on the command line.
        zero_based_chunk_index = opts.only_chunk_index - 1
        if zero_based_chunk_index >= n_chunks or zero_based_chunk_index < 0:
            logger.info(
                f"Not running chunk {opts.only_chunk_index} as is not defined")
            return

        logger.info(
            f"Will run subject level chunk {opts.only_chunk_index} of {n_chunks}"
        )

        index_arrays = [index_arrays[zero_based_chunk_index]]

    elif opts.only_model_chunk:
        # Skip all subject-level chunks; only the model chunk (below) runs.
        index_arrays = list()

    chunks_to_run: List[nx.DiGraph] = list()
    for index_array in index_arrays:
        graph_list = [graphs[subjects[i]] for i in index_array]
        chunks_to_run.append(nx.compose_all(
            graph_list))  # take len(index_array) subjects and compose

    # The group-level "model" graph runs last, unless a specific subject
    # chunk was requested.
    if opts.only_chunk_index is not None:
        logger.info("Will not run model chunk")

    elif "model" in graphs:
        logger.info("Will run model chunk")

        chunks_to_run.append(graphs["model"])

    if len(chunks_to_run) == 0:
        raise ValueError("No graphs to run")

    from nipype.interfaces import freesurfer as fs

    # Only require a FreeSurfer license if any node actually runs a
    # FreeSurfer command.
    if any(
            isinstance(node.interface, fs.FSCommand) for chunk in chunks_to_run
            for node in chunk.nodes):
        from niworkflows.utils.misc import check_valid_fs_license

        if not check_valid_fs_license():
            raise RuntimeError(
                "fMRIPrep needs to use FreeSurfer commands, but a valid license file for FreeSurfer could not be found. \n"
                "HALFpipe looked for an existing license file at several paths, in this order: \n"
                '1) a "license.txt" file in your HALFpipe working directory \n'
                '2) command line argument "--fs-license-file" \n'
                "Get it (for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"
            )

    from nipype.pipeline import engine as pe

    for i, chunk in enumerate(chunks_to_run):
        if len(chunks_to_run) > 1:
            logger.info(f"Running chunk {i+1} of {len(chunks_to_run)}")

        try:
            assert isinstance(chunk, nx.DiGraph)

            # A fresh runner per chunk; the first node's config seeds the
            # execution settings for the whole chunk.
            runner = runnercls(plugin_args=plugin_args)
            firstnode = next(iter(chunk.nodes()))
            if firstnode is not None:
                assert isinstance(firstnode, pe.Node)
                runner.run(chunk, updatehash=False, config=firstnode.config)
        except Exception as e:
            # In debug mode fail fast; otherwise log and continue with the
            # remaining chunks (best-effort execution).
            if opts.debug:
                raise e
            else:
                logger.warning(f"Ignoring exception in chunk {i+1}",
                               exc_info=True)

        if len(chunks_to_run) > 1:
            logger.info(f"Completed chunk {i+1} of {len(chunks_to_run)}")