def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph."""
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.utils.misc import check_valid_fs_license
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    # Pessimistic defaults; flipped to success only at the very end.
    retval["return_code"] = 1
    retval["workflow"] = None

    # Warn when the output folder already holds results from a different
    # (possibly incompatible) pipeline version.
    stale_warning = check_pipeline_version(
        config.environment.version,
        config.execution.output_dir / "fmriprep-rodents" / "dataset_description.json",
    )
    if stale_warning is not None:
        build_log.warning(stale_warning)

    # Fingerprint the *input* dataset's dataset_description.json (not the
    # output folder's) so the run can be tied to a dataset state.
    descriptor = config.execution.bids_dir / "dataset_description.json"
    if descriptor.exists():
        from hashlib import sha256

        config.execution.bids_description_hash = sha256(
            descriptor.read_bytes()
        ).hexdigest()

    # Also serves as a sanity check that bids_dir looks like a BIDS folder.
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label,
    )

    # --reports-only short-circuits: regenerate reports, skip the workflow.
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(
            25, "Running --reports-only on participants %s", ", ".join(subject_list)
        )
        retval["return_code"] = generate_reports(
            subject_list,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep_rodents", "data/reports-spec.yml"),
            packagename="fmriprep-rodents",
        )
        return retval

    # Build main workflow
    init_msg = f"""
    Running fMRIPREP version {config.environment.version}:
    * BIDS dataset path: {config.execution.bids_dir}.
    * Participant list: {subject_list}.
    * Run identifier: {config.execution.run_uuid}.
    * Output spaces: {config.execution.output_spaces}."""
    if config.execution.anat_derivatives:
        init_msg += f"""
    * Anatomical derivatives: {config.execution.anat_derivatives}."""
    if config.execution.fs_subjects_dir:
        init_msg += f"""
    * Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."""
    build_log.log(25, init_msg)

    retval["workflow"] = init_fmriprep_wf()

    # Check for FS license after building the workflow
    if not check_valid_fs_license():
        build_log.critical("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html""")
        retval["return_code"] = 126  # 126 == Command invoked cannot execute.
        return retval

    # Refuse to run when any external command a node needs is not installed.
    missing = check_deps(retval["workflow"])
    if missing:
        bullets = "".join(
            f"\n\t* {cmd} (Interface: {iface})" for iface, cmd in missing
        )
        build_log.critical("Cannot run fMRIPrep. Missing dependencies:%s", bullets)
        retval["return_code"] = 127  # 127 == command not found.
        return retval

    # Persist the (possibly updated) config next to the run.
    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph.

    Loads the run configuration from ``config_file``, performs sanity checks
    on the BIDS input, and populates ``retval`` with a ``'workflow'`` and a
    shell-style ``'return_code'`` (0 success, 1 generic failure,
    127 missing external dependency).
    """
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    output_dir = config.execution.output_dir
    version = config.environment.version

    # Pessimistic defaults until the workflow is fully built.
    retval['return_code'] = 1
    retval['workflow'] = None

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(
        version, output_dir / 'fmriprep' / 'dataset_description.json')
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / 'dataset_description.json'
    if dset_desc_path.exists():
        from hashlib import sha256
        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(
            desc_content).hexdigest()

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label)

    # Called with reports only
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        retval['return_code'] = generate_reports(
            subject_list, config.execution.output_dir,
            config.execution.work_dir, config.execution.run_uuid,
            config=pkgrf('fmriprep', 'data/reports-spec.yml'),
            packagename='fmriprep')
        return retval

    # Build main workflow
    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
      * Output spaces: {spaces}.
    """.format

    build_log.log(
        25, INIT_MSG(version=config.environment.version,
                     bids_dir=config.execution.bids_dir,
                     subject_list=subject_list,
                     uuid=config.execution.run_uuid,
                     spaces=config.execution.output_spaces))

    retval['workflow'] = init_fmriprep_wf()

    # Check workflow for missing commands
    missing = check_deps(retval['workflow'])
    if missing:
        # BUG FIX: the separator used to be '\n\t* %s', which left a literal,
        # never-substituted "%s" between entries and no bullet before the
        # first one. Join with the plain bullet and seed with an empty string
        # so every entry (including the first) is prefixed.
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            '\n\t* '.join([''] + [
                "{} (Interface: {})".format(cmd, iface)
                for iface, cmd in missing
            ]))
        retval['return_code'] = 127  # 127 == command not found.
        return retval

    config.to_filename(config_file)
    build_log.info("fMRIPrep workflow graph with %d nodes built successfully.",
                   len(retval['workflow']._get_all_nodes()))
    retval['return_code'] = 0
    return retval
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph."""
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.reports import generate_reports
    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    # Assume failure until everything checks out.
    retval["return_code"] = 1
    retval["workflow"] = None

    # Emit a warning when a previous (possibly older) run left a
    # dataset_description.json in the output folder.
    version_warning = check_pipeline_version(
        config.environment.version,
        config.execution.output_dir / "fmriprep" / "dataset_description.json",
    )
    if version_warning is not None:
        build_log.warning(version_warning)

    # Hash the *input* dataset's description file — not the output's.
    input_desc = config.execution.bids_dir / "dataset_description.json"
    if input_desc.exists():
        from hashlib import sha256

        config.execution.bids_description_hash = sha256(
            input_desc.read_bytes()
        ).hexdigest()

    # Doubles as a check that bids_dir looks like a BIDS folder.
    subject_list = collect_participants(
        config.execution.layout,
        participant_label=config.execution.participant_label,
    )

    # --reports-only: regenerate reports and return without a workflow.
    if config.execution.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(
            25, "Running --reports-only on participants %s", ", ".join(subject_list)
        )
        retval["return_code"] = generate_reports(
            subject_list,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        return retval

    # Build main workflow
    init_msg = f"""
    Running fMRIPREP version {config.environment.version}:
    * BIDS dataset path: {config.execution.bids_dir}.
    * Participant list: {subject_list}.
    * Run identifier: {config.execution.run_uuid}.
    * Output spaces: {config.execution.output_spaces}."""
    if config.execution.anat_derivatives:
        init_msg += f"""
    * Anatomical derivatives: {config.execution.anat_derivatives}."""
    if config.execution.fs_subjects_dir:
        init_msg += f"""
    * Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."""
    build_log.log(25, init_msg)

    retval["workflow"] = init_fmriprep_wf()

    # Bail out (127 == command not found) if any node's external tool is absent.
    missing = check_deps(retval["workflow"])
    if missing:
        bullets = "".join(
            f"\n\t* {cmd} (Interface: {iface})" for iface, cmd in missing
        )
        build_log.critical("Cannot run fMRIPrep. Missing dependencies:%s", bullets)
        retval["return_code"] = 127
        return retval

    # Persist the effective configuration alongside the run.
    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows fmriprep to enforce
    a hard-limited memory-scope.

    ``retval`` is populated with (at least) ``'return_code'`` (0 success,
    1 generic failure), ``'workflow'``, ``'run_uuid'``, ``'subject_list'``,
    ``'plugin_settings'`` and the resolved directory paths.
    """
    from bids import BIDSLayout
    from nipype import logging as nlogging, config as ncfg
    from niworkflows.utils.bids import collect_participants, check_pipeline_version
    from niworkflows.reports import generate_reports
    from ..__about__ import __version__
    from ..workflows.base import init_fmriprep_wf

    build_log = nlogging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
    {spaces}
    """.format

    bids_dir = opts.bids_dir.resolve()
    output_dir = opts.output_dir.resolve()
    work_dir = opts.work_dir.resolve()

    if opts.clean_workdir:
        from niworkflows.utils.misc import clean_directory

        # BUG FIX: ``Logger.log`` takes a numeric level as its first
        # argument; the previous call passed the message string there and
        # raised TypeError whenever --clean-workdir was used. Level 25 is
        # the same "IMPORTANT" level used elsewhere in this function.
        build_log.log(25, "Clearing previous fMRIPrep working directory: %s" % work_dir)
        if not clean_directory(work_dir):
            build_log.warning(
                "Could not clear all contents of working directory: %s" % work_dir)

    # Pessimistic defaults; the paths are echoed back for the parent process.
    retval['return_code'] = 1
    retval['workflow'] = None
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)

    if output_dir == bids_dir:
        build_log.error(
            'The selected output folder is the same as the input BIDS folder. '
            'Please modify the output path (suggestion: %s).',
            bids_dir / 'derivatives' / ('fmriprep-%s' % __version__.split('+')[0]))
        retval['return_code'] = 1
        return retval

    # warn if older results exist
    msg = check_pipeline_version(
        __version__, output_dir / 'fmriprep' / 'dataset_description.json')
    if msg is not None:
        build_log.warning(msg)

    if bids_dir in work_dir.parents:
        build_log.error(
            'The selected working directory is a subdirectory of the input BIDS folder. '
            'Please modify the output path.')
        retval['return_code'] = 1
        return retval

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
    retval['run_uuid'] = run_uuid

    # First check that bids_dir looks like a BIDS folder
    layout = BIDSLayout(str(bids_dir), validate=False,
                        ignore=("code", "stimuli", "sourcedata", "models",
                                "derivatives", re.compile(r'^\.')))
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)
    retval['subject_list'] = subject_list

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml

        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # construct arbitrary Python objects; the plugin file is
        # user-supplied. Consider switching to yaml.safe_load — left as-is
        # here to avoid breaking plugin files that rely on custom tags.
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads

    if opts.mem_mb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        # Leave one thread for orchestration, but never more than 8 per process.
        omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)

    if 1 < nthreads < omp_nthreads:
        build_log.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
    retval['plugin_settings'] = plugin_settings

    # Set up directories
    log_dir = output_dir / 'fmriprep' / 'logs'
    # Check and create output and working directories
    output_dir.mkdir(exist_ok=True, parents=True)
    log_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    # Called with reports only
    if opts.reports_only:
        from pkg_resources import resource_filename as pkgrf

        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
            retval['run_uuid'] = run_uuid
        retval['return_code'] = generate_reports(
            subject_list, output_dir, work_dir, run_uuid,
            config=pkgrf('fmriprep', 'data/reports-spec.yml'),
            packagename='fmriprep')
        return retval

    # Build main workflow
    build_log.log(
        25, INIT_MSG(version=__version__,
                     bids_dir=bids_dir,
                     subject_list=subject_list,
                     uuid=run_uuid,
                     spaces=opts.output_spaces))

    retval['workflow'] = init_fmriprep_wf(
        anat_only=opts.anat_only,
        aroma_melodic_dim=opts.aroma_melodic_dimensionality,
        bold2t1w_dof=opts.bold2t1w_dof,
        cifti_output=opts.cifti_output,
        debug=opts.sloppy,
        dummy_scans=opts.dummy_scans,
        echo_idx=opts.echo_idx,
        err_on_aroma_warn=opts.error_on_aroma_warnings,
        fmap_bspline=opts.fmap_bspline,
        fmap_demean=opts.fmap_no_demean,
        force_syn=opts.force_syn,
        freesurfer=opts.run_reconall,
        fs_subjects_dir=opts.fs_subjects_dir,
        hires=opts.hires,
        ignore=opts.ignore,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        medial_surface_nan=opts.medial_surface_nan,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        run_uuid=run_uuid,
        regressors_all_comps=opts.return_all_components,
        regressors_fd_th=opts.fd_spike_threshold,
        regressors_dvars_th=opts.dvars_spike_threshold,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_template=opts.skull_strip_template[0],
        spaces=parse_spaces(opts),
        subject_list=subject_list,
        t2s_coreg=opts.t2s_coreg,
        task_id=opts.task_id,
        use_aroma=opts.use_aroma,
        use_bbr=opts.use_bbr,
        use_syn=opts.use_syn_sdc,
        work_dir=str(work_dir),
    )
    retval['return_code'] = 0

    logs_path = Path(output_dir) / 'fmriprep' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        citation_files = {
            ext: logs_path / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files['md'].write_text(boilerplate)
        build_log.log(
            25, 'Works derived from this fMRIPrep execution should '
            'include the following boilerplate:\n\n%s', boilerplate)
    return retval