Example #1
def main():
    """Entry point"""
    import gc
    import logging
    import os
    import sys
    import warnings
    from pathlib import Path
    from multiprocessing import set_start_method, Process, Manager
    from nipype import logging as nlogging
    from ..utils.bids import write_derivative_description, validate_input_dir
    set_start_method('forkserver')
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup
        sentry_setup(opts, exec_env)

    if opts.debug:
        print('WARNING: Option --debug is deprecated and has no effect',
              file=sys.stderr)

    # Validate inputs
    if not opts.skip_bids_validation:
        print(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    # $FREESURFER_HOME may be unset; fall back to a non-existent default path
    # rather than crashing on Path(None)
    default_license = str(
        Path(os.getenv('FREESURFER_HOME') or '') / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or Path(
        os.getenv('FS_LICENSE', default_license))
    if not license_file.exists():
        raise RuntimeError("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
                           )
    os.environ['FS_LICENSE'] = str(license_file.resolve())

    # Retrieve logging level
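    # Each -v lowers the threshold by 5: 0 -> 25 (IMPORTANT), 1 -> 20 (INFO),
    # 2 -> 15 (VERBOSE), 3+ -> 10 (DEBUG)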
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)
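        # A nonzero exit code from the child wins; otherwise use the return
        # code the child reported through the managed dict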

        bids_dir = Path(retval.get('bids_dir'))
        output_dir = Path(retval.get('output_dir'))
        work_dir = Path(retval.get('work_dir'))
        plugin_settings = retval.get('plugin_settings', None)
        subject_list = retval.get('subject_list', None)
        fmriprep_wf = retval.get('workflow', None)
        run_uuid = retval.get('run_uuid', None)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    if fmriprep_wf and opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    retcode = retcode or int(fmriprep_wf is None)
    if retcode != 0:
        sys.exit(retcode)

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:", file=sys.stderr)
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface), file=sys.stderr)
        sys.exit(2)
    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if not opts.notrack:
        from ..utils.sentry import start_ping
        start_ping(run_uuid, len(subject_list))

    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**plugin_settings)
    except Exception as e:
        if not opts.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [
                output_dir / 'fmriprep' / 'sub-{}'.format(s) / 'log' / run_uuid
                for s in subject_list
            ]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob('crash*.*'):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        logger.critical('fMRIPrep failed: %s', e)
        raise
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(dseg_tsv,
                      str(output_dir / 'fmriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(dseg_tsv,
                      str(output_dir / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
        errno = 0
        logger.log(25, 'fMRIPrep finished without errors')
        if not opts.notrack:
            sentry_sdk.capture_message('fMRIPrep finished without errors',
                                       level='info')
    finally:
        from niworkflows.reports import generate_reports
        from subprocess import check_call, CalledProcessError, TimeoutExpired
        from pkg_resources import resource_filename as pkgrf
        from shutil import copyfile

        citation_files = {
            ext: output_dir / 'fmriprep' / 'logs' / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }

        if citation_files['md'].exists():
            # Generate HTML file resolving citations
            cmd = [
                'pandoc', '-s', '--bibliography',
                pkgrf('fmriprep',
                      'data/boilerplate.bib'), '--filter', 'pandoc-citeproc',
                '--metadata', 'pagetitle="fMRIPrep citation boilerplate"',
                str(citation_files['md']), '-o',
                str(citation_files['html'])
            ]

            logger.info(
                'Generating an HTML version of the citation boilerplate...')
            try:
                check_call(cmd, timeout=10)
            except (FileNotFoundError, CalledProcessError, TimeoutExpired):
                logger.warning('Could not generate CITATION.html file:\n%s',
                               ' '.join(cmd))

            # Generate LaTex file resolving citations
            cmd = [
                'pandoc', '-s', '--bibliography',
                pkgrf('fmriprep', 'data/boilerplate.bib'), '--natbib',
                str(citation_files['md']), '-o',
                str(citation_files['tex'])
            ]
            logger.info(
                'Generating a LaTeX version of the citation boilerplate...')
            try:
                check_call(cmd, timeout=10)
            except (FileNotFoundError, CalledProcessError, TimeoutExpired):
                logger.warning('Could not generate CITATION.tex file:\n%s',
                               ' '.join(cmd))
            else:
                copyfile(pkgrf('fmriprep', 'data/boilerplate.bib'),
                         citation_files['bib'])
        else:
            logger.warning(
                'fMRIPrep could not find the markdown version of '
                'the citation boilerplate (%s). HTML and LaTeX versions'
                ' of it will not be available', citation_files['md'])

        # Generate reports phase
        failed_reports = generate_reports(subject_list,
                                          output_dir,
                                          work_dir,
                                          run_uuid,
                                          packagename='fmriprep')
        write_derivative_description(bids_dir, output_dir / 'fmriprep')

        if failed_reports and not opts.notrack:
            sentry_sdk.capture_message(
                'Report generation failed for %d subjects' % failed_reports,
                level='error')
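        # Collapse any accumulated error count into a single 0/1 exit status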
        sys.exit(int((errno + failed_reports) > 0))
Example #2
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution
    graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows fmriprep to enforce
    a hard-limited memory-scope.

    """
    import re
    import uuid
    from os import cpu_count
    from time import strftime

    from bids import BIDSLayout
    from nipype import logging as nlogging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from niworkflows.reports import generate_reports
    from ..__about__ import __version__
    from ..workflows.base import init_fmriprep_wf

    build_log = nlogging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
    """.format

    bids_dir = opts.bids_dir.resolve()
    output_dir = opts.output_dir.resolve()
    work_dir = opts.work_dir.resolve()

    retval['return_code'] = 1
    retval['workflow'] = None
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)

    if output_dir == bids_dir:
        build_log.error(
            'The selected output folder is the same as the input BIDS folder. '
            'Please modify the output path (suggestion: %s).', bids_dir /
            'derivatives' / ('fmriprep-%s' % __version__.split('+')[0]))
        retval['return_code'] = 1
        return retval

    output_spaces = parse_spaces(opts)

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
    retval['run_uuid'] = run_uuid

    # First check that bids_dir looks like a BIDS folder
    layout = BIDSLayout(str(bids_dir),
                        validate=False,
                        ignore=("code", "stimuli", "sourcedata", "models",
                                "derivatives", re.compile(r'^\.')))
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)
    retval['subject_list'] = subject_list

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        # yaml.load requires an explicit Loader in PyYAML >= 6; safe_load avoids that
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads

    if opts.mem_mb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)

    if 1 < nthreads < omp_nthreads:
        build_log.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
    retval['plugin_settings'] = plugin_settings

    # Set up directories
    log_dir = output_dir / 'fmriprep' / 'logs'
    # Check and create output and working directories
    output_dir.mkdir(exist_ok=True, parents=True)
    log_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    # Called with reports only
    if opts.reports_only:
        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
            retval['run_uuid'] = run_uuid
        retval['return_code'] = generate_reports(subject_list,
                                                 output_dir,
                                                 work_dir,
                                                 run_uuid,
                                                 packagename='fmriprep')
        return retval

    # Build main workflow
    build_log.log(
        25,
        INIT_MSG(version=__version__,
                 bids_dir=bids_dir,
                 subject_list=subject_list,
                 uuid=run_uuid))

    retval['workflow'] = init_fmriprep_wf(
        anat_only=opts.anat_only,
        aroma_melodic_dim=opts.aroma_melodic_dimensionality,
        bold2t1w_dof=opts.bold2t1w_dof,
        cifti_output=opts.cifti_output,
        debug=opts.sloppy,
        dummy_scans=opts.dummy_scans,
        echo_idx=opts.echo_idx,
        err_on_aroma_warn=opts.error_on_aroma_warnings,
        fmap_bspline=opts.fmap_bspline,
        fmap_demean=opts.fmap_no_demean,
        force_syn=opts.force_syn,
        freesurfer=opts.run_reconall,
        hires=opts.hires,
        ignore=opts.ignore,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        medial_surface_nan=opts.medial_surface_nan,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        output_spaces=output_spaces,
        run_uuid=run_uuid,
        regressors_all_comps=opts.return_all_components,
        regressors_fd_th=opts.fd_spike_threshold,
        regressors_dvars_th=opts.dvars_spike_threshold,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_template=opts.skull_strip_template,
        subject_list=subject_list,
        t2s_coreg=opts.t2s_coreg,
        task_id=opts.task_id,
        use_aroma=opts.use_aroma,
        use_bbr=opts.use_bbr,
        use_syn=opts.use_syn_sdc,
        work_dir=str(work_dir),
    )
    retval['return_code'] = 0

    logs_path = Path(output_dir) / 'fmriprep' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        citation_files = {
            ext: logs_path / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files['md'].write_text(boilerplate)
        build_log.log(
            25, 'Works derived from this fMRIPrep execution should '
            'include the following boilerplate:\n\n%s', boilerplate)
    return retval
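
The docstring above explains why ``build_workflow`` runs inside a ``multiprocessing.Process``: only pickleable values placed in ``retval`` survive the process boundary, and the child's memory is reclaimed when it exits. A minimal, self-contained sketch of that pattern (``_build`` and its contents are illustrative, not fMRIPrep code):

from multiprocessing import Manager, Process

def _build(opts, retval):
    # Runs in the child: memory spent here is reclaimed when the process exits.
    # Only pickleable values may be stored in the managed dict.
    retval['return_code'] = 0
    retval['message'] = 'built with options: %r' % (opts,)

if __name__ == '__main__':
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=_build, args=({'nprocs': 2}, retval))
        p.start()
        p.join()
        # Copy results out before the Manager shuts down
        retcode = p.exitcode or retval.get('return_code', 0)
        message = retval.get('message')
    print(retcode, message)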
Example #3
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows smriprep to enforce
    a hard-limited memory-scope.

    """
    from shutil import copyfile
    from os import cpu_count
    import uuid
    from time import strftime
    from subprocess import check_call, CalledProcessError, TimeoutExpired
    from pkg_resources import resource_filename as pkgrf

    import json
    from bids import BIDSLayout
    from nipype import logging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from ..__about__ import __version__
    from ..workflows.base import init_smriprep_wf

    logger = logging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running sMRIPrep version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.

    {spaces}
    """.format

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())

    # First check that bids_dir looks like a BIDS folder
    bids_dir = opts.bids_dir.resolve()
    layout = BIDSLayout(str(bids_dir), validate=False)
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)

    bids_filters = json.loads(
        opts.bids_filter_file.read_text()) if opts.bids_filter_file else None

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        # yaml.load requires an explicit Loader in PyYAML >= 6; safe_load avoids that
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nprocs = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nprocs is None or opts.nprocs is not None:
        nprocs = opts.nprocs
        if nprocs is None or nprocs < 1:
            nprocs = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nprocs

    if opts.mem_gb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nprocs - 1 if nprocs > 1 else cpu_count(), 8)

    if 1 < nprocs < omp_nthreads:
        logger.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'available CPUs (--nprocs/--ncpus=%d)', omp_nthreads, nprocs)

    # Set up directories
    output_dir = opts.output_dir.resolve()
    log_dir = output_dir / 'smriprep' / 'logs'
    work_dir = opts.work_dir.resolve()

    # Check and create output and working directories
    log_dir.mkdir(parents=True, exist_ok=True)
    work_dir.mkdir(parents=True, exist_ok=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    retval['return_code'] = 0
    retval['plugin_settings'] = plugin_settings
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)
    retval['subject_list'] = subject_list
    retval['run_uuid'] = run_uuid
    retval['workflow'] = None

    # Called with reports only
    if opts.reports_only:
        from niworkflows.reports import generate_reports

        logger.log(25, 'Running --reports-only on participants %s',
                   ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
        retval['return_code'] = generate_reports(subject_list,
                                                 str(output_dir),
                                                 run_uuid,
                                                 packagename="smriprep")
        return retval

    logger.log(
        25,
        INIT_MSG(version=__version__,
                 bids_dir=bids_dir,
                 subject_list=subject_list,
                 uuid=run_uuid,
                 spaces=opts.output_spaces))

    # Build main workflow
    retval['workflow'] = init_smriprep_wf(
        debug=opts.sloppy,
        fast_track=opts.fast_track,
        freesurfer=opts.run_reconall,
        fs_subjects_dir=opts.fs_subjects_dir,
        hires=opts.hires,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        run_uuid=run_uuid,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_mode=opts.skull_strip_mode,
        skull_strip_template=opts.skull_strip_template[0],
        spaces=opts.output_spaces,
        subject_list=subject_list,
        work_dir=str(work_dir),
        bids_filters=bids_filters,
    )
    retval['return_code'] = 0

    boilerplate = retval['workflow'].visit_desc()
    (log_dir / 'CITATION.md').write_text(boilerplate)
    logger.log(
        25, 'Works derived from this sMRIPrep execution should '
        'include the following boilerplate:\n\n%s', boilerplate)

    # Generate HTML file resolving citations
    cmd = [
        'pandoc', '-s', '--bibliography',
        pkgrf('smriprep',
              'data/boilerplate.bib'), '--filter', 'pandoc-citeproc',
        '--metadata', 'pagetitle="sMRIPrep citation boilerplate"',
        str(log_dir / 'CITATION.md'), '-o',
        str(log_dir / 'CITATION.html')
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning('Could not generate CITATION.html file:\n%s',
                       ' '.join(cmd))

    # Generate LaTex file resolving citations
    cmd = [
        'pandoc', '-s', '--bibliography',
        pkgrf('smriprep', 'data/boilerplate.bib'), '--natbib',
        str(log_dir / 'CITATION.md'), '-o',
        str(log_dir / 'CITATION.tex')
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning('Could not generate CITATION.tex file:\n%s',
                       ' '.join(cmd))
    else:
        copyfile(pkgrf('smriprep', 'data/boilerplate.bib'),
                 str(log_dir / 'CITATION.bib'))
    return retval
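
The two ``check_call`` blocks above treat pandoc as an optional dependency: a missing binary (``FileNotFoundError``), a nonzero exit (``CalledProcessError``), or a hang (``TimeoutExpired``) is downgraded to a warning instead of aborting the run. The same guard in isolation (the command here is a placeholder):

from subprocess import check_call, CalledProcessError, TimeoutExpired

cmd = ['pandoc', '--version']  # placeholder for the real citation command
try:
    check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
    print('pandoc unavailable or failed; skipping citation rendering')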
Example #4
def main():
    """Entry point."""
    import os
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description

    parse_args()

    popylar = None
    if not config.execution.notrack:
        import popylar
        from ..__about__ import __ga_id__
        config.loggers.cli.info(
            "Your usage of dmriprep is being recorded using popylar "
            "(https://popylar.github.io/). For details, see "
            "https://nipreps.github.io/dmriprep/usage.html. "
            "To opt out, call dmriprep with a `--notrack` flag.")
        popylar.track_event(__ga_id__, 'run', 'cli_run')

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / '.dmriprep.toml'
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)
        dmriprep_wf = retval.get('workflow', None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of dMRIPrep).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(retcode > 0))

    if dmriprep_wf and config.execution.write_graph:
        dmriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    retcode = retcode or (dmriprep_wf is None) * os.EX_SOFTWARE
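    # os.EX_SOFTWARE (70) flags an internal error; the boolean multiply yields
    # 0 when a workflow was built and 70 when construction returned nothing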
    if retcode != 0:
        sys.exit(retcode)

    # Generate boilerplate
    with Manager() as mgr:
        from .workflow import build_boilerplate
        p = Process(target=build_boilerplate,
                    args=(str(config_file), dmriprep_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    if popylar is not None:
        popylar.track_event(__ga_id__, 'run', 'started')

    config.loggers.workflow.log(
        15, '\n'.join(['dMRIPrep config:'] +
                      ['\t\t%s' % s for s in config.dumps().splitlines()]))
    config.loggers.workflow.log(25, 'dMRIPrep started!')
    errno = 1  # Default is error exit unless otherwise set
    try:
        dmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            popylar.track_event(__ga_id__, 'run', 'error')
        config.loggers.workflow.critical('dMRIPrep failed: %s', e)
        raise
    else:
        config.loggers.workflow.log(25, 'dMRIPrep finished successfully!')

        # Bother users with the boilerplate only iff the workflow went okay.
        if (config.execution.output_dir / 'dmriprep' / 'logs' /
                'CITATION.md').exists():
            config.loggers.workflow.log(
                25, 'Works derived from this dMRIPrep execution should '
                'include the following boilerplate:\n\n%s',
                (config.execution.output_dir / 'dmriprep' / 'logs' /
                 'CITATION.md').read_text())

        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / 'dmriprep' /
                    'desc-aseg_dseg.tsv'))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / 'dmriprep' /
                    'desc-aparcaseg_dseg.tsv'))
        errno = 0
    finally:
        from niworkflows.reports import generate_reports
        from pkg_resources import resource_filename as pkgrf

        # Generate reports phase
        failed_reports = generate_reports(config.execution.participant_label,
                                          config.execution.output_dir,
                                          config.execution.work_dir,
                                          config.execution.run_uuid,
                                          config=pkgrf(
                                              'dmriprep',
                                              'config/reports-spec.yml'),
                                          packagename='dmriprep')
        write_derivative_description(config.execution.bids_dir,
                                     config.execution.output_dir / 'dmriprep')

        if failed_reports and not config.execution.notrack:
            popylar.track_event(__ga_id__, 'run', 'reporting_error')
        sys.exit(int((errno + failed_reports) > 0))
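
The CRITICAL comments above describe a filesystem hand-off: the parent serializes its configuration, the child process re-reads it from disk, and the parent re-loads it afterwards to pick up any changes the child made. A reduced sketch of that round trip, with JSON standing in for the TOML config (all names here are illustrative):

import json
from multiprocessing import Process
from pathlib import Path
from tempfile import mkdtemp

def _child(config_path):
    # The child gets its settings from disk, not from inherited globals
    settings = json.loads(Path(config_path).read_text())
    settings['touched_by_child'] = True
    Path(config_path).write_text(json.dumps(settings))

if __name__ == '__main__':
    config_file = Path(mkdtemp()) / 'settings.json'
    config_file.write_text(json.dumps({'nprocs': 4}))
    p = Process(target=_child, args=(str(config_file),))
    p.start()
    p.join()
    # Re-load: the child may have updated the configuration
    print(json.loads(config_file.read_text()))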
Example #5
def main():
    """Entry point."""
    import os
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description

    parse_args()

    sentry_sdk = None
    if not config.execution.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup
        sentry_setup()

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / '.fmriprep.toml'
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)
        fmriprep_wf = retval.get('workflow', None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of fMRIPrep).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(retcode > 0))

    if fmriprep_wf and config.execution.write_graph:
        fmriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    retcode = retcode or (fmriprep_wf is None) * os.EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)

    # Generate boilerplate
    with Manager() as mgr:
        from .workflow import build_boilerplate
        p = Process(target=build_boilerplate,
                    args=(str(config_file), fmriprep_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if sentry_sdk is not None:
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('run_uuid', config.execution.run_uuid)
            scope.set_tag('npart', len(config.execution.participant_label))
        sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
        sentry_sdk.capture_message('fMRIPrep started', level='info')

    config.loggers.workflow.log(
        15, '\n'.join(['fMRIPrep config:'] +
                      ['\t\t%s' % s for s in config.dumps().splitlines()]))
    config.loggers.workflow.log(25, 'fMRIPrep started!')
    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [
                config.execution.output_dir / 'fmriprep' / 'sub-{}'.format(s) /
                'log' / config.execution.run_uuid
                for s in config.execution.participant_label
            ]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob('crash*.*'):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        config.loggers.workflow.critical('fMRIPrep failed: %s', e)
        raise
    else:
        config.loggers.workflow.log(25, 'fMRIPrep finished successfully!')
        if not config.execution.notrack:
            success_message = 'fMRIPrep finished without errors'
            sentry_sdk.add_breadcrumb(message=success_message, level='info')
            sentry_sdk.capture_message(success_message, level='info')

        # Bother users with the boilerplate only iff the workflow went okay.
        if (config.execution.output_dir / 'fmriprep' / 'logs' /
                'CITATION.md').exists():
            config.loggers.workflow.log(
                25, 'Works derived from this fMRIPrep execution should '
                'include the following boilerplate:\n\n%s',
                (config.execution.output_dir / 'fmriprep' / 'logs' /
                 'CITATION.md').read_text())

        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / 'fmriprep' /
                    'desc-aseg_dseg.tsv'))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / 'fmriprep' /
                    'desc-aparcaseg_dseg.tsv'))
        errno = 0
    finally:
        from niworkflows.reports import generate_reports
        from pkg_resources import resource_filename as pkgrf

        # Generate reports phase
        failed_reports = generate_reports(config.execution.participant_label,
                                          config.execution.output_dir,
                                          config.execution.work_dir,
                                          config.execution.run_uuid,
                                          config=pkgrf(
                                              'fmriprep',
                                              'data/reports-spec.yml'),
                                          packagename='fmriprep')
        write_derivative_description(config.execution.bids_dir,
                                     config.execution.output_dir / 'fmriprep')

        if failed_reports and not config.execution.notrack:
            sentry_sdk.capture_message(
                'Report generation failed for %d subjects' % failed_reports,
                level='error')
        sys.exit(int((errno + failed_reports) > 0))
Example #6
def build_opts(opts):
    """Trigger a new process that builds the workflow graph, based on the input options."""
    import os
    from pathlib import Path
    import logging
    import sys
    import gc
    import warnings
    from multiprocessing import set_start_method, Process, Manager
    from nipype import logging as nlogging
    from niworkflows.utils.misc import check_valid_fs_license

    set_start_method('forkserver')

    logging.addLevelName(
        25, 'IMPORTANT')  # Add a new level between INFO and WARNING
    logging.addLevelName(15,
                         'VERBOSE')  # Add a new level between INFO and DEBUG
    logger = logging.getLogger('cli')

    def _warn_redirect(message,
                       category,
                       filename,
                       lineno,
                       file=None,
                       line=None):
        logger.warning('Captured warning (%s): %s', category, message)

    warnings.showwarning = _warn_redirect

    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    if opts.fs_license_file is not None:
        os.environ["FS_LICENSE"] = os.path.abspath(opts.fs_license_file)

    if not check_valid_fs_license():
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'sMRIPrep looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        smriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if smriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        smriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(smriprep_wf)
    if missing:
        print("Cannot run sMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        smriprep_wf.run(**plugin_settings)
    except RuntimeError:
        errno = 1
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(
                dseg_tsv,
                str(Path(output_dir) / 'smriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(
                dseg_tsv,
                str(Path(output_dir) / 'smriprep' / 'desc-aparcaseg_dseg.tsv'))
        logger.log(25, 'sMRIPrep finished without errors')
    finally:
        from niworkflows.reports import generate_reports
        from ..utils.bids import write_derivative_description

        logger.log(25, 'Writing reports for participants: %s',
                   ', '.join(subject_list))
        # Generate reports phase
        errno += generate_reports(subject_list,
                                  output_dir,
                                  run_uuid,
                                  packagename='smriprep')
        write_derivative_description(bids_dir,
                                     str(Path(output_dir) / 'smriprep'))
    sys.exit(int(errno > 0))
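
This example registers two custom log levels between the standard ones. A standalone demonstration of how ``logging.addLevelName`` and the numeric thresholds interact:

import logging

logging.addLevelName(25, 'IMPORTANT')  # between INFO (20) and WARNING (30)
logging.addLevelName(15, 'VERBOSE')    # between DEBUG (10) and INFO (20)
logging.basicConfig(level=15, format='%(levelname)s %(message)s')
logger = logging.getLogger('cli')

logger.log(25, 'always interesting')   # emitted: IMPORTANT
logger.log(15, 'only when verbose')    # emitted: VERBOSE
logger.debug('hidden at this level')   # suppressed: 10 < 15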
Example #7
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph."""
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    output_dir = config.execution.output_dir
    version = config.environment.version

    retval["return_code"] = 1
    retval["workflow"] = None

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(
        version, output_dir / "fmriprep" / "dataset_description.json")
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / "dataset_description.json"
    if dset_desc_path.exists():
        from hashlib import sha256

        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(
            desc_content).hexdigest()
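        # Fingerprint the input dataset's metadata so this run can be tied to
        # the exact dataset_description.json it was given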

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label)

    # Called with reports only
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, "Running --reports-only on participants %s",
                      ", ".join(subject_list))
        retval["return_code"] = generate_reports(
            subject_list,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        return retval

    # Build main workflow
    init_msg = f"""
    Running fMRIPREP version {config.environment.version}:
      * BIDS dataset path: {config.execution.bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {config.execution.run_uuid}.
      * Output spaces: {config.execution.output_spaces}."""

    if config.execution.anat_derivatives:
        init_msg += f"""
      * Anatomical derivatives: {config.execution.anat_derivatives}."""

    if config.execution.fs_subjects_dir:
        init_msg += f"""
      * Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."""
    build_log.log(25, init_msg)

    retval["workflow"] = init_fmriprep_wf()

    # Check workflow for missing commands
    missing = check_deps(retval["workflow"])
    if missing:
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            "\n\t* ".join(
                [""] +
                [f"{cmd} (Interface: {iface})" for iface, cmd in missing]),
        )
        retval["return_code"] = 127  # 127 == command not found.
        return retval

    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
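
``check_deps`` (imported from ``..utils.misc``) inspects the built workflow for external commands that cannot be found on ``PATH``. A reduced stand-in for that probe using only the standard library (``find_missing`` is hypothetical, not the fMRIPrep helper):

from shutil import which

def find_missing(commands):
    """Return the external commands that cannot be found on PATH."""
    return sorted(cmd for cmd in commands if which(cmd) is None)

missing = find_missing(['pandoc', 'antsRegistration', 'ls'])
if missing:
    print('Missing dependencies:\n\t* ' + '\n\t* '.join(missing))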
Example #8
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph."""
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.utils.misc import check_valid_fs_license
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    output_dir = config.execution.output_dir
    version = config.environment.version

    retval["return_code"] = 1
    retval["workflow"] = None

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(
        version, output_dir / "fmriprep-rodents" / "dataset_description.json")
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / "dataset_description.json"
    if dset_desc_path.exists():
        from hashlib import sha256

        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(
            desc_content).hexdigest()

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label)

    # Called with reports only
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, "Running --reports-only on participants %s",
                      ", ".join(subject_list))
        retval["return_code"] = generate_reports(
            subject_list,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep_rodents", "data/reports-spec.yml"),
            packagename="fmriprep-rodents",
        )
        return retval

    # Build main workflow
    init_msg = f"""
    Running fMRIPREP version {config.environment.version}:
      * BIDS dataset path: {config.execution.bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {config.execution.run_uuid}.
      * Output spaces: {config.execution.output_spaces}."""

    if config.execution.anat_derivatives:
        init_msg += f"""
      * Anatomical derivatives: {config.execution.anat_derivatives}."""

    if config.execution.fs_subjects_dir:
        init_msg += f"""
      * Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."""
    build_log.log(25, init_msg)

    retval["workflow"] = init_fmriprep_wf()

    # Check for FS license after building the workflow
    if not check_valid_fs_license():
        build_log.critical("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
                           )
        retval["return_code"] = 126  # 126 == Command invoked cannot execute.
        return retval

    # Check workflow for missing commands
    missing = check_deps(retval["workflow"])
    if missing:
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            "\n\t* ".join(
                [""] +
                [f"{cmd} (Interface: {iface})" for iface, cmd in missing]),
        )
        retval["return_code"] = 127  # 127 == command not found.
        return retval

    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
Example #9
def main():
    """Entry point."""
    from os import EX_SOFTWARE
    from pathlib import Path
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description

    parse_args()

    sentry_sdk = None
    if not config.execution.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup

        sentry_setup()

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / f"config-{config.execution.run_uuid}.toml"
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow

        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get("return_code", 0)
        fmriprep_wf = retval.get("workflow", None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of fMRIPrep).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(retcode > 0))

    if fmriprep_wf and config.execution.write_graph:
        fmriprep_wf.write_graph(graph2use="colored",
                                format="svg",
                                simple_form=True)

    retcode = retcode or (fmriprep_wf is None) * EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)

    # Generate boilerplate
    with Manager() as mgr:
        from .workflow import build_boilerplate

        p = Process(target=build_boilerplate,
                    args=(str(config_file), fmriprep_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if sentry_sdk is not None:
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag("run_uuid", config.execution.run_uuid)
            scope.set_tag("npart", len(config.execution.participant_label))
        sentry_sdk.add_breadcrumb(message="fMRIPrep started", level="info")
        sentry_sdk.capture_message("fMRIPrep started", level="info")

    config.loggers.workflow.log(
        15,
        "\n".join(["fMRIPrep config:"] +
                  ["\t\t%s" % s for s in config.dumps().splitlines()]),
    )
    config.loggers.workflow.log(25, "fMRIPrep started!")
    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            from ..utils.sentry import process_crashfile

            crashfolders = [
                config.execution.output_dir / "fmriprep" / "sub-{}".format(s) /
                "log" / config.execution.run_uuid
                for s in config.execution.participant_label
            ]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob("crash*.*"):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        config.loggers.workflow.critical("fMRIPrep failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "fMRIPrep finished successfully!")
        if not config.execution.notrack:
            success_message = "fMRIPrep finished without errors"
            sentry_sdk.add_breadcrumb(message=success_message, level="info")
            sentry_sdk.capture_message(success_message, level="info")

        # Bother users with the boilerplate only iff the workflow went okay.
        boiler_file = config.execution.output_dir / "fmriprep" / "logs" / "CITATION.md"
        if boiler_file.exists():
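            # Inside a container the host-visible path differs from the
            # in-container output_dir, so show a mount-point placeholder instead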
            if config.environment.exec_env in (
                    "singularity",
                    "docker",
                    "fmriprep-docker",
            ):
                boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
                    config.execution.output_dir)
            config.loggers.workflow.log(
                25,
                "Works derived from this fMRIPrep execution should include the "
                f"boilerplate text found in {boiler_file}.",
            )

        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any

            dseg_tsv = str(
                api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / "fmriprep" /
                    "desc-aseg_dseg.tsv"),
            )
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / "fmriprep" /
                    "desc-aparcaseg_dseg.tsv"),
            )
        errno = 0
    finally:
        from niworkflows.reports import generate_reports
        from pkg_resources import resource_filename as pkgrf

        # Generate reports phase
        failed_reports = generate_reports(
            config.execution.participant_label,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        write_derivative_description(config.execution.bids_dir,
                                     config.execution.output_dir / "fmriprep")

        if failed_reports and not config.execution.notrack:
            sentry_sdk.capture_message(
                "Report generation failed for %d subjects" % failed_reports,
                level="error",
            )
        sys.exit(int((errno + failed_reports) > 0))
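
``pkgrf`` above is ``pkg_resources.resource_filename``: it resolves a data file shipped inside an installed package to a real filesystem path. Usage in isolation (this only succeeds if the named package is installed and actually ships the resource):

from pkg_resources import resource_filename as pkgrf

# Resolve a bundled data file; raises if the package is not importable
spec_path = pkgrf('fmriprep', 'data/reports-spec.yml')
print(spec_path)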
Example #10
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph."""
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    output_dir = config.execution.output_dir
    version = config.environment.version

    retval['return_code'] = 1
    retval['workflow'] = None

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(
        version, output_dir / 'fmriprep' / 'dataset_description.json')
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / 'dataset_description.json'
    if dset_desc_path.exists():
        from hashlib import sha256
        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(
            desc_content).hexdigest()

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label)

    # Called with reports only
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        retval['return_code'] = generate_reports(subject_list,
                                                 config.execution.output_dir,
                                                 config.execution.work_dir,
                                                 config.execution.run_uuid,
                                                 config=pkgrf(
                                                     'fmriprep',
                                                     'data/reports-spec.yml'),
                                                 packagename='fmriprep')
        return retval

    # Build main workflow
    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
      * Output spaces: {spaces}.
    """.format
    build_log.log(
        25,
        INIT_MSG(version=config.environment.version,
                 bids_dir=config.execution.bids_dir,
                 subject_list=subject_list,
                 uuid=config.execution.run_uuid,
                 spaces=config.execution.output_spaces))

    retval['workflow'] = init_fmriprep_wf()

    # Check workflow for missing commands
    missing = check_deps(retval['workflow'])
    if missing:
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            "\n\t* ".join([""] + [
                "{} (Interface: {})".format(cmd, iface)
                for iface, cmd in missing
            ]))
        retval['return_code'] = 127  # 127 == command not found.
        return retval

    config.to_filename(config_file)
    build_log.info("fMRIPrep workflow graph with %d nodes built successfully.",
                   len(retval['workflow']._get_all_nodes()))
    retval['return_code'] = 0
    return retval
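
The ``INIT_MSG = """...""".format`` construction used here (and in Examples #2 and #3) stores the bound ``str.format`` method, turning the triple-quoted string into a reusable template:

INIT_MSG = """
    Running version {version}:
      * BIDS dataset path: {bids_dir}.
""".format

print(INIT_MSG(version='20.0.0', bids_dir='/data/bids'))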
Example #11
def main():
    """Entry point"""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..utils.bids import write_derivative_description, validate_input_dir
    from ..__about__ import __ga_id__
    set_start_method('forkserver')
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    if not opts.notrack:
        logger.info(
            "Your usage of dmriprep is being recorded using popylar (https://popylar.github.io/). "  # noqa
            "For details, see https://nipreps.github.io/dmriprep/usage.html. "
            "To opt out, call dmriprep with a `--notrack` flag")
        import popylar
        popylar.track_event(__ga_id__, 'run', 'cli_run')

    # Validate inputs
    if not opts.skip_bids_validation:
        print(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or Path(
        os.getenv('FS_LICENSE', default_license))
    if not license_file.exists():
        raise RuntimeError("""\
ERROR: a valid license file is required for FreeSurfer to run. dMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
                           )
    os.environ['FS_LICENSE'] = str(license_file.resolve())

    # Retrieve logging level
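    # 25 is nipype's IMPORTANT level; each -v lowers the threshold by 5,
    # bottoming out at logging.DEBUG (10), e.g. -vv yields 15.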
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)

        bids_dir = Path(retval.get('bids_dir'))
        output_dir = Path(retval.get('output_dir'))
        work_dir = Path(retval.get('work_dir'))
        plugin_settings = retval.get('plugin_settings', None)
        subject_list = retval.get('subject_list', None)
        dmriprep_wf = retval.get('workflow', None)
        run_uuid = retval.get('run_uuid', None)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    if dmriprep_wf and opts.write_graph:
        dmriprep_wf.write_graph(graph2use="colored",
                                format='svg',
                                simple_form=True)

    retcode = retcode or int(dmriprep_wf is None)
    if retcode != 0:
        sys.exit(retcode)

    # Check workflow for missing commands
    missing = check_deps(dmriprep_wf)
    if missing:
        print("Cannot run dMRIPrep. Missing dependencies:", file=sys.stderr)
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)
    # Clean up master process before running workflow, which may create forks
    gc.collect()

    errno = 1  # Default is error exit unless otherwise set
    try:
        dmriprep_wf.run(**plugin_settings)
    except Exception as e:
        if not opts.notrack:
            popylar.track_event(__ga_id__, 'run', 'cli_error')
        logger.critical('dMRIPrep failed: %s', e)
        raise
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
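            # Fetch the fsaverage segmentation label table (a TSV) from
            # TemplateFlow and expose it under both FreeSurfer-style names.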
            dseg_tsv = str(
                api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(dseg_tsv,
                      str(output_dir / 'dmriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(dseg_tsv,
                      str(output_dir / 'dmriprep' / 'desc-aparcaseg_dseg.tsv'))
        errno = 0
        logger.log(25, 'dMRIPrep finished without errors')
        if not opts.notrack:
            popylar.track_event(__ga_id__, 'run', 'cli_finished')

    finally:
        from niworkflows.reports import generate_reports
        from subprocess import check_call, CalledProcessError, TimeoutExpired
        from pkg_resources import resource_filename as pkgrf
        from shutil import copyfile

        citation_files = {
            ext: output_dir / 'dmriprep' / 'logs' / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }

        if citation_files['md'].exists():
            # Generate HTML file resolving citations
            cmd = [
                'pandoc', '-s', '--bibliography',
                pkgrf('dmriprep',
                      'data/boilerplate.bib'), '--filter', 'pandoc-citeproc',
                '--metadata', 'pagetitle="dMRIPrep citation boilerplate"',
                str(citation_files['md']), '-o',
                str(citation_files['html'])
            ]
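            # Roughly: pandoc -s --bibliography boilerplate.bib \
            #   --filter pandoc-citeproc --metadata pagetitle=... \
            #   CITATION.md -o CITATION.html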

            logger.info(
                'Generating an HTML version of the citation boilerplate...')
            try:
                check_call(cmd, timeout=10)
            except (FileNotFoundError, CalledProcessError, TimeoutExpired):
                logger.warning('Could not generate CITATION.html file:\n%s',
                               ' '.join(cmd))

            # Generate LaTeX file resolving citations
            cmd = [
                'pandoc', '-s', '--bibliography',
                pkgrf('dmriprep', 'data/boilerplate.bib'), '--natbib',
                str(citation_files['md']), '-o',
                str(citation_files['tex'])
            ]
            logger.info(
                'Generating a LaTeX version of the citation boilerplate...')
            try:
                check_call(cmd, timeout=10)
            except (FileNotFoundError, CalledProcessError, TimeoutExpired):
                logger.warning('Could not generate CITATION.tex file:\n%s',
                               ' '.join(cmd))
            else:
                copyfile(pkgrf('dmriprep', 'data/boilerplate.bib'),
                         citation_files['bib'])
        else:
            logger.warning(
                'dMRIPrep could not find the markdown version of '
                'the citation boilerplate (%s). HTML and LaTeX versions'
                ' of it will not be available', citation_files['md'])

        # Generate reports phase
        failed_reports = generate_reports(subject_list,
                                          output_dir,
                                          work_dir,
                                          run_uuid,
                                          config=pkgrf(
                                              'dmriprep',
                                              'config/reports-spec.yml'),
                                          packagename='dmriprep')
        write_derivative_description(bids_dir, output_dir / 'dmriprep')

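        # Exit non-zero if the workflow errored (errno == 1) or any
        # report failed to generate.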
        sys.exit(int((errno + failed_reports) > 0))
Example #12
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution graph, given the inputs.

    All checks and the construction of the workflow happen inside this
    function, which takes pickleable inputs and writes its results to an
    output dictionary (``retval``). This permits isolating it in a
    ``multiprocessing.Process``, so that sMRIPrep can enforce a
    hard-limited memory scope.

    """
    from shutil import copyfile
    from os import cpu_count
    import uuid
    from time import strftime
    from subprocess import check_call, CalledProcessError, TimeoutExpired
    from pkg_resources import resource_filename as pkgrf

    import json
    from bids import BIDSLayout
    from nipype import logging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from ..__about__ import __version__
    from ..workflows.base import init_smriprep_wf

    logger = logging.getLogger("nipype.workflow")

    INIT_MSG = """
    Running sMRIPrep version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.

    {spaces}
    """.format

    # Set up some instrumental utilities
    run_uuid = "%s_%s" % (strftime("%Y%m%d-%H%M%S"), uuid.uuid4())

    # First check that bids_dir looks like a BIDS folder
    bids_dir = opts.bids_dir.resolve()
    layout = BIDSLayout(str(bids_dir), validate=False)
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)

    bids_filters = (json.loads(opts.bids_filter_file.read_text())
                    if opts.bids_filter_file else None)
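    # A hypothetical filter file might contain, e.g.:
    #     {"t1w": {"reconstruction": "none"}}
    # restricting which files each BIDS query may match.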

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import safe_load as loadyml

        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault("plugin_args", {})
    else:
        # Defaults
        plugin_settings = {
            "plugin": "MultiProc",
            "plugin_args": {
                "raise_insufficient": False,
                "maxtasksperchild": 1,
            },
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nprocs = plugin_settings["plugin_args"].get("n_procs")
    # Permit overriding plugin config with specific CLI options
    if nprocs is None or opts.nprocs is not None:
        nprocs = opts.nprocs
        if nprocs is None or nprocs < 1:
            nprocs = cpu_count()
        plugin_settings["plugin_args"]["n_procs"] = nprocs

    if opts.mem_gb:
        plugin_settings["plugin_args"]["memory_gb"] = opts.mem_gb

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nprocs - 1 if nprocs > 1 else cpu_count(), 8)
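    # i.e., leave one CPU for the orchestrating process when possible,
    # and never default to more than 8 threads per process.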

    if 1 < nprocs < omp_nthreads:
        logger.warning(
            "Per-process threads (--omp-nthreads=%d) exceed total "
            "available CPUs (--nprocs/--ncpus=%d)",
            omp_nthreads,
            nprocs,
        )

    # Set up directories
    output_dir = opts.output_dir.resolve()
    log_dir = output_dir / "smriprep" / "logs"
    work_dir = opts.work_dir.resolve()

    # Check and create output and working directories
    log_dir.mkdir(parents=True, exist_ok=True)
    work_dir.mkdir(parents=True, exist_ok=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        "logging": {
            "log_directory": str(log_dir),
            "log_to_file": True
        },
        "execution": {
            "crashdump_dir": str(log_dir),
            "crashfile_format": "txt",
            "get_linked_libs": False,
            "stop_on_first_crash": opts.stop_on_first_crash,
        },
        "monitoring": {
            "enabled": opts.resource_monitor,
            "sample_frequency": "0.5",
            "summary_append": True,
        },
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    retval["return_code"] = 0
    retval["plugin_settings"] = plugin_settings
    retval["bids_dir"] = str(bids_dir)
    retval["output_dir"] = str(output_dir)
    retval["work_dir"] = str(work_dir)
    retval["subject_list"] = subject_list
    retval["run_uuid"] = run_uuid
    retval["workflow"] = None

    # Called with reports only
    if opts.reports_only:
        from niworkflows.reports import generate_reports

        logger.log(25, "Running --reports-only on participants %s",
                   ", ".join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
        retval["return_code"] = generate_reports(subject_list,
                                                 str(output_dir),
                                                 run_uuid,
                                                 packagename="smriprep")
        return retval

    output_spaces = opts.output_spaces
    if not output_spaces.is_cached():
        output_spaces.checkpoint()
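        # ``checkpoint()`` (presumably) freezes the set of requested output
        # spaces so the workflow sees a stable list from here on.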

    logger.log(
        25,
        INIT_MSG(
            version=__version__,
            bids_dir=bids_dir,
            subject_list=subject_list,
            uuid=run_uuid,
            spaces=output_spaces,
        ),
    )

    # Build main workflow
    retval["workflow"] = init_smriprep_wf(
        debug=opts.sloppy,
        fast_track=opts.fast_track,
        freesurfer=opts.run_reconall,
        fs_subjects_dir=opts.fs_subjects_dir,
        hires=opts.hires,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        run_uuid=run_uuid,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_mode=opts.skull_strip_mode,
        skull_strip_template=opts.skull_strip_template[0],
        spaces=output_spaces,
        subject_list=subject_list,
        work_dir=str(work_dir),
        bids_filters=bids_filters,
    )
    retval["return_code"] = 0

    boilerplate = retval["workflow"].visit_desc()
    (log_dir / "CITATION.md").write_text(boilerplate)
    logger.log(
        25,
        "Works derived from this sMRIPrep execution should "
        "include the following boilerplate:\n\n%s",
        boilerplate,
    )

    # Generate HTML file resolving citations
    cmd = [
        "pandoc",
        "-s",
        "--bibliography",
        pkgrf("smriprep", "data/boilerplate.bib"),
        "--citeproc",
        "--metadata",
        'pagetitle="sMRIPrep citation boilerplate"',
        str(log_dir / "CITATION.md"),
        "-o",
        str(log_dir / "CITATION.html"),
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning("Could not generate CITATION.html file:\n%s",
                       " ".join(cmd))

    # Generate LaTeX file resolving citations
    cmd = [
        "pandoc",
        "-s",
        "--bibliography",
        pkgrf("smriprep", "data/boilerplate.bib"),
        "--natbib",
        str(log_dir / "CITATION.md"),
        "-o",
        str(log_dir / "CITATION.tex"),
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning("Could not generate CITATION.tex file:\n%s",
                       " ".join(cmd))
    else:
        copyfile(pkgrf("smriprep", "data/boilerplate.bib"),
                 str(log_dir / "CITATION.bib"))
    return retval
Example #13
def build_workflow(opts, retval):
    import re
    import uuid
    from os import cpu_count
    from pathlib import Path
    from time import strftime
    from bids import BIDSLayout
    from nipype import logging as nlogging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from niworkflows.reports import generate_reports
    from ..__about__ import __version__
    from ..workflows.base import init_base_wf

    build_log = nlogging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running atlasTransform version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
    """.format

    bids_dir = opts.bids_dir.resolve()
    output_dir = opts.output_dir.resolve()
    work_dir = opts.work_dir.resolve()

    retval['return_code'] = 1
    retval['workflow'] = None
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)

    if output_dir == bids_dir:
        build_log.error(
            'The selected output folder is the same as the input BIDS folder. '
            'Please modify the output path (suggestion: %s).', bids_dir /
            'derivatives' / ('atlasTransform-%s' % __version__.split('+')[0]))
        retval['return_code'] = 1
        return retval

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
    retval['run_uuid'] = run_uuid

    # First check that bids_dir looks like a BIDS folder
    layout = BIDSLayout(str(bids_dir),
                        validate=False,
                        ignore=("code", "stimuli", "sourcedata", "models",
                                "derivatives", re.compile(r'^\.')))
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)
    retval['subject_list'] = subject_list

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads

    if opts.mem_mb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)

    if 1 < nthreads < omp_nthreads:
        build_log.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
    retval['plugin_settings'] = plugin_settings

    # Set up directories
    log_dir = output_dir / 'atlasTransform' / 'logs'
    # Check and create output and working directories
    output_dir.mkdir(exist_ok=True, parents=True)
    log_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    # Called with reports only
    if opts.reports_only:
        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
            retval['run_uuid'] = run_uuid
        retval['return_code'] = generate_reports(subject_list,
                                                 output_dir,
                                                 work_dir,
                                                 run_uuid,
                                                 packagename='atlasTransform')
        return retval

    # Build main workflow
    build_log.log(
        25,
        INIT_MSG(version=__version__,
                 bids_dir=bids_dir,
                 subject_list=subject_list,
                 uuid=run_uuid))

    retval['workflow'] = init_base_wf(
        opts=opts,
        layout=layout,
        run_uuid=run_uuid,
        subject_list=subject_list,
        work_dir=str(work_dir),
        output_dir=str(output_dir),
    )
    retval['return_code'] = 0

    logs_path = Path(output_dir) / 'atlasTransform' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        citation_files = {
            ext: logs_path / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files['md'].write_text(boilerplate)
        build_log.log(
            25, 'Works derived from this atlasTransform execution should '
            'include the following boilerplate:\n\n%s', boilerplate)
    return retval