def _list_outputs(self):
    """Assemble the interface's output dictionary of merged maps and matrices.

    Loads the BIDS layout from the cached database file, merges the
    effect/variance/dof/brain-mask images per contrast via ``_merge_maps``,
    then builds the design, contrast and covariance matrices for those
    contrast entities via ``_produce_matrices``.
    """
    from bids import BIDSLayout

    layout = BIDSLayout.load(self.inputs.database_path)
    organization = self._get_organization()

    merged = self._merge_maps(organization=organization, layout=layout)
    contrast_entities, effect_maps, variance_maps, dof_maps, brain_masks = merged

    matrices = self._produce_matrices(
        contrast_entities=contrast_entities, layout=layout
    )
    design_matrices, contrast_matrices, covariance_matrices = matrices

    return {
        "effect_maps": effect_maps,
        "variance_maps": variance_maps,
        "dof_maps": dof_maps,
        "contrast_metadata": contrast_entities,
        "contrast_matrices": contrast_matrices,
        "design_matrices": design_matrices,
        "covariance_matrices": covariance_matrices,
        "brain_mask": brain_masks,
    }
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow for a graph given the inputs.

    All the checks and the construction of the workflow are done inside
    this function, which has pickleable inputs and an output dictionary
    (``retval``) to allow isolation using a ``multiprocessing.Process``
    so funcworks can enforce a hard-limited memory-scope.

    Parameters
    ----------
    opts : argparse.Namespace
        Parsed command-line options (paths, plugin settings, model flags).
    retval : dict
        Mutable dictionary populated with ``workflow``, ``return_code``,
        directories, ``runtime_uuid``, ``participant_label`` and
        ``plugin_settings``.

    Returns
    -------
    dict
        The populated ``retval``; ``return_code`` is 0 on success, 1 on
        any validation failure.
    """
    from bids import BIDSLayout
    from nipype import logging as nlogging, config as ncfg
    from ..workflows.base import init_funcworks_wf
    from .. import __version__

    build_log = nlogging.getLogger("nipype.workflow")

    output_dir = opts.output_dir.resolve()
    bids_dir = opts.bids_dir.resolve()
    # BUGFIX: mkdtemp() returns a str; wrap in Path so the later
    # work_dir.mkdir() and work_dir.parents calls work in both branches.
    work_dir = Path(mkdtemp()) if opts.work_dir is None else opts.work_dir.resolve()

    retval["return_code"] = 1
    retval["workflow"] = None
    retval["bids_dir"] = bids_dir
    retval["output_dir"] = output_dir
    retval["work_dir"] = work_dir

    if not opts.database_path:
        # BUGFIX: use the resolved work_dir; opts.work_dir may be None here,
        # in which case opts.work_dir.resolve() would raise AttributeError.
        database_path = str(work_dir / "dbcache")
        layout = BIDSLayout(
            bids_dir,
            derivatives=opts.derivatives,
            validate=True,
            database_file=database_path,
            reset_database=True,
        )
    else:
        database_path = opts.database_path
        layout = BIDSLayout.load(database_path)

    if output_dir == bids_dir:
        build_log.error(
            "The selected output folder is the same as the input BIDS folder. "
            "Please modify the output path (suggestion: %s).",
            bids_dir / "derivatives" / ("funcworks-%s" % __version__.split("+")[0]),
        )
        retval["return_code"] = 1
        return retval

    # BUGFIX: check against the resolved work_dir (opts.work_dir may be None).
    if bids_dir in work_dir.parents:
        build_log.error(
            "The selected working directory is a subdirectory "
            "of the input BIDS folder. "
            "Please modify the output path."
        )
        retval["return_code"] = 1
        return retval

    # Set up some instrumental utilities.
    runtime_uuid = "%s_%s" % (strftime("%Y%m%d-%H%M%S"), uuid.uuid4())
    retval["runtime_uuid"] = runtime_uuid

    if opts.participant_label:
        retval["participant_label"] = opts.participant_label
    else:
        # No explicit selection: process every subject in the dataset.
        retval["participant_label"] = layout.get_subjects()

    # Load base plugin_settings from file if --use-plugin was given;
    # otherwise fall back to a conservative MultiProc default.
    plugin_settings = {
        "plugin": "MultiProc",
        "plugin_args": {"raise_insufficient": False, "maxtasksperchild": 1},
    }
    if opts.use_plugin is not None:
        with open(opts.use_plugin) as f:
            plugin_settings = json.load(f)
    # NOTE: CLI overrides for n_procs / memory_gb / omp_nthreads are
    # currently disabled; plugin files are taken verbatim.
    retval["plugin_settings"] = plugin_settings

    # Check and create output and working directories.
    output_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution).
    ncfg.update_config({
        "logging": {"log_to_file": True},
        "execution": {
            "crashfile_format": "txt",
            "get_linked_libs": False,
        },
        "monitoring": {
            "enabled": opts.resource_monitor,
            "sample_frequency": "0.5",
            "summary_append": True,
        },
    })
    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    # NOTE: --reports-only handling is currently disabled.

    # Build main workflow.
    build_log.log(
        25,
        (f"""
        Running FUNCWORKS version {__version__}:
          * BIDS dataset path: {bids_dir}.
          * Participant list: {retval['participant_label']}.
          * Run identifier: {runtime_uuid}.
        """),
    )

    if not opts.model_file:
        model_file = Path(bids_dir) / "models" / "model-default_smdl.json"
        if not model_file.exists():
            raise ValueError("Default Model File not Found")
    else:
        model_file = opts.model_file

    retval["workflow"] = init_funcworks_wf(
        model_file=model_file,
        bids_dir=opts.bids_dir,
        output_dir=opts.output_dir,
        work_dir=opts.work_dir,
        database_path=str(database_path),
        participants=retval["participant_label"],
        analysis_level=opts.analysis_level,
        smoothing=opts.smoothing,
        runtime_uuid=runtime_uuid,
        use_rapidart=opts.use_rapidart,
        detrend_poly=opts.detrend_poly,
        align_volumes=opts.align_volumes,
        smooth_autocorrelations=opts.smooth_autocorrelations,
        despike=opts.despike,
    )
    retval["return_code"] = 0
    return retval
def _list_outputs(self):
    """Save the design matrix TSV and render design/contrast/correlation plots.

    Parses the FSL ``.mat``/``.con`` files into matrices, plots them as SVG
    report figures using BIDS path patterns, writes the design matrix as a
    TSV under ``output_dir``, and returns the resulting file paths.

    Returns
    -------
    dict
        Paths (as str) for ``design_matrix``, ``design_plot``,
        ``contrasts_plot`` and ``correlation_plot``.
    """
    # CONSISTENCY FIX: sibling interfaces import BIDSLayout locally inside
    # the method; this one used the name without a visible import.
    from bids import BIDSLayout

    ents = self.inputs.entities
    run_info = self.inputs.run_info
    regressor_names = run_info.conditions
    confound_names = run_info.regressor_names
    output_dir = Path(self.inputs.output_dir)
    layout = BIDSLayout.load(self.inputs.database_path)

    # BIDS path pattern for the SVG report figures.
    image_pattern = (
        "reports/[sub-{subject}/][ses-{session}/]"
        "figures/[run-{run}/]"
        "[sub-{subject}_][ses-{session}_]"
        "task-{task}[_acq-{acquisition}]"
        "[_rec-{reconstruction}][_run-{run}][_echo-{echo}]_"
        "{suffix<design|corr|contrasts>}.svg"
    )
    # BIDS path pattern for the design matrix TSV.
    design_matrix_patt = (
        "[sub-{subject}/][ses-{session}/]"
        "[sub-{subject}_][ses-{session}_]"
        "task-{task}_[acq-{acquisition}_]"
        "[rec-{reconstruction}_][run-{run}_]"
        "[echo-{echo}_]{suffix<design>}.tsv"
    )

    (design_matrix, corr_matrix, contrast_matrix) = self._parse_matrices(
        regressor_names=regressor_names,
        confound_names=confound_names,
        mat_file=self.inputs.mat_file,
        con_file=self.inputs.con_file,
    )
    des_plot = self._plot_matrix(
        matrix=design_matrix,
        path_pattern=image_pattern,
        suffix="design",
        cmap="viridis",
        layout=layout,
    )
    con_plot = self._plot_matrix(
        matrix=contrast_matrix,
        path_pattern=image_pattern,
        suffix="contrasts",
        cmap="RdBu_r",
        layout=layout,
    )
    corr_plot = self._plot_corr_matrix(
        corr_matrix=corr_matrix,
        path_pattern=image_pattern,
        regressor_names=regressor_names,
        cmap="RdBu_r",
        layout=layout,
    )

    ents.update({"suffix": "design"})
    design_path = layout.build_path(
        ents, path_patterns=design_matrix_patt, validate=False
    )
    design_path = output_dir / design_path
    design_path.parent.mkdir(exist_ok=True, parents=True)
    design_matrix.to_csv(design_path, sep="\t", index=None)

    return {
        "design_matrix": str(design_path),
        "design_plot": str(des_plot),
        "contrasts_plot": str(con_plot),
        "correlation_plot": str(corr_plot),
    }
def _run_interface(self, runtime):
    """Locate preprocessed BOLD images and their companion files via pybids.

    For every functional file matching ``fixed_entities``, finds exactly one
    events, metadata, regressor, brain-mask and boldref file, optionally
    redirecting mask/reference queries to the run given by
    ``align_volumes``. Results are stored on ``self._results``.
    """
    from bids import BIDSLayout

    layout = BIDSLayout.load(database_path=self.inputs.database_path)
    fixed_entities = self.inputs.fixed_entities

    functional_entities = {
        **fixed_entities,
        "datatype": "func",
        "desc": "preproc",
        "extension": "nii.gz",
        "suffix": "bold",
    }
    functional_files = layout.get(**functional_entities)
    if not functional_files:
        raise FileNotFoundError(
            f"Unable to find functional image with "
            f"specified entities {functional_entities}")

    collected = dict(
        mask_files=[],
        reference_files=[],
        events_files=[],
        metadata_files=[],
        regressor_files=[],
        entities=[],
    )
    for func_file in functional_files:
        parsed = layout.parse_file_entities(func_file.path)
        # Guarantee a "space" key so companion queries don't over-match.
        if "space" not in parsed:
            parsed["space"] = None

        companion_queries = dict(
            events={
                **parsed,
                "desc": None,
                "extension": "tsv",
                "suffix": "events",
                "space": None,
            },
            metadata={**parsed, "extension": "json"},
            regressor={
                **parsed,
                "desc": "confounds",
                "space": None,
                "suffix": "regressors",
                "extension": "tsv",
            },
            mask={**parsed, "desc": "brain", "suffix": "mask"},
            reference={**parsed, "suffix": "boldref", "desc": None},
        )

        if self.inputs.align_volumes and "run" not in parsed:
            raise ValueError(
                f"Attempted to align to when run entity is not present in "
                f"{func_file.path}.")
        elif self.inputs.align_volumes:
            # Point mask/reference lookups at the alignment target run.
            companion_queries["mask"]["run"] = self.inputs.align_volumes
            companion_queries["reference"]["run"] = self.inputs.align_volumes

        parsed.pop("suffix", None)
        parsed.pop("desc", None)
        collected["entities"].append(parsed)

        for filetype, entities in companion_queries.items():
            matches = layout.get(**entities)
            if len(matches) > 1:
                raise ValueError(
                    f"More than one {filetype} produced for given "
                    f"entities {entities}\n"
                    f"{[x.path for x in matches]}"
                    f"{parsed}")
            elif not matches:
                raise FileNotFoundError(
                    f"No {filetype} found for given entities "
                    f"{entities}")
            else:
                collected[f"{filetype}_files"].append(matches[0])

    self._results["functional_files"] = functional_files
    for key in (
        "mask_files",
        "reference_files",
        "metadata_files",
        "events_files",
        "regressor_files",
        "entities",
    ):
        self._results[key] = collected[key]
    return runtime