def analysis():
    """Fixture: a ds005 Analysis set up for subjects 01 and 02."""
    data_dir = join(get_test_data_path(), 'ds005')
    model_path = join(data_dir, 'models', 'ds-005_type-test_model.json')
    bids_layout = BIDSLayout(data_dir)
    result = Analysis(bids_layout, model_path)
    result.setup(scan_length=480, subject=['01', '02'])
    return result
def _run_interface(self, runtime):
    """Build a BIDSLayout and run model setup, then load design matrices.

    ``force_index`` / ``ignore`` entries of the form ``/<pattern>/`` are
    treated as regular expressions; everything else is passed through as a
    literal path.
    """
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout
    import re

    def _compile_patterns(entries):
        # Shared helper for force_index/ignore (previously duplicated
        # comprehensions). Guards against empty strings, which would
        # raise IndexError on entry[0]; a bare "/" is kept literal rather
        # than compiled as an empty regex.
        patterns = []
        for entry in entries or ():  # iterate over empty tuple if undefined
            if len(entry) > 1 and entry[0] == '/' and entry[-1] == '/':
                patterns.append(re.compile(entry[1:-1]))
            else:
                patterns.append(entry)
        return patterns

    force_index = _compile_patterns(self.inputs.force_index)
    ignore = _compile_patterns(self.inputs.ignore)

    # If empty, then None
    derivatives = self.inputs.derivatives or None

    layout = BIDSLayout(self.inputs.bids_dir,
                        force_index=force_index,
                        ignore=ignore,
                        derivatives=derivatives)

    selectors = self.inputs.selectors
    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, **selectors)
    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)
    return runtime
def analysis():
    """Fixture: a ds005 Analysis set up for subjects 01 and 02."""
    root = join(get_test_data_path(), "ds005")
    spec = join(root, "models", "ds-005_type-test_model.json")
    built = Analysis(BIDSLayout(root), spec)
    built.setup(scan_length=480, subject=["01", "02"])
    return built
def test_auto_model_analysis(model):
    """Check that an auto-generated model sets up and has the expected blocks."""
    ds_path = join(get_test_data_path(), 'ds005')
    ds_layout = BIDSLayout(ds_path)

    # Test to make sure an analysis can be set up from the generated model
    auto_analysis = Analysis(ds_layout, model)
    auto_analysis.setup(scan_length=480)

    assert model['name'] == 'ds005_mixedgamblestask'

    # run level
    run_block = model['blocks'][0]
    assert run_block['name'] == 'run'
    assert run_block['level'] == 'run'
    assert run_block['transformations'][0]['name'] == 'factor'
    assert run_block['model']['HRF_variables'][0] == 'trial_type.parametric gain'
    assert run_block['contrasts'][0]['name'] == 'run_parametric gain'
    assert run_block['contrasts'][0]['weights'] == [1]

    # subject level
    subj_block = model['blocks'][1]
    assert subj_block['name'] == 'subject'
    assert subj_block['level'] == 'subject'
    assert subj_block['model']['variables'][0] == 'run_parametric gain'
    assert subj_block['contrasts'][0]['name'] == 'subject_run_parametric gain'

    # dataset level
    ds_block = model['blocks'][2]
    assert ds_block['name'] == 'dataset'
    assert ds_block['level'] == 'dataset'
    assert ds_block['model']['variables'][0] == 'subject_run_parametric gain'
    assert ds_block['contrasts'][0]['name'] == 'dataset_subject_run_parametric gain'
def analysis_force_auto_contrasts():
    """Fixture: a ds005 Analysis with auto_contrasts forced on, derivatives excluded."""
    data_dir = join(get_test_data_path(), 'ds005')
    spec_path = join(data_dir, 'models', 'ds-005_type-test_model.json')
    bids_layout = BIDSLayout(data_dir, exclude='derivatives/')
    result = Analysis(bids_layout, spec_path)
    result.setup(scan_length=480, subject=['01', '02'], auto_contrasts=True)
    return result
def build_analysis(analysis, predictor_events, bids_dir, run_id=None, build=True):
    """Write out events for *analysis* and optionally build a BIDS analysis.

    Returns a tuple of (temporary events dir, written paths, bids_analysis);
    ``bids_analysis`` is None when ``build`` is False.
    """
    out_dir = Path(mkdtemp())

    # Collect entities from the requested runs and merge into kwargs
    collected = [{}]
    if run_id is not None:
        for rid in run_id:
            match = next((r for r in analysis['runs'] if r['id'] == rid), None)
            if match is not None:
                collected.append(get_entities(match))
    entities = merge_dictionaries(*collected)

    entities['scan_length'] = max(r['duration'] for r in analysis['runs'])
    entities['task'] = analysis['task_name']

    # Write out all events
    paths = writeout_events(analysis, predictor_events, out_dir)

    if build is False:
        bids_analysis = None
    else:
        # Load events and try applying transformations
        events_layout = BIDSLayout(bids_dir, derivatives=str(out_dir),
                                   validate=False)
        bids_analysis = BIDSAnalysis(events_layout,
                                     deepcopy(analysis.get('model')))
        bids_analysis.setup(**entities)

    return out_dir, paths, bids_analysis
def test_auto_model_analysis(model):
    """Check the auto-generated BIDS-StatsModel sets up and has expected Steps."""
    ds_path = join(get_test_data_path(), 'ds005')
    ds_layout = BIDSLayout(ds_path)

    # Test to make sure an analysis can be set up from the generated model
    auto_analysis = Analysis(ds_layout, model)
    auto_analysis.setup(scan_length=480)

    assert model['Name'] == 'ds005_mixedgamblestask'

    # run level
    run_step = model['Steps'][0]
    assert run_step['Name'] == 'Run'
    assert run_step['Level'] == 'Run'
    assert run_step['Transformations'][0]['Name'] == 'Factor'
    assert run_step['Contrasts'][0]['Name'] == 'run_parametric gain'
    assert run_step['Contrasts'][0]['Weights'] == [1]

    # subject level
    subj_step = model['Steps'][1]
    assert subj_step['Name'] == 'Subject'
    assert subj_step['Level'] == 'Subject'
    assert subj_step['Model']['X'][0] == 'run_parametric gain'
    assert subj_step['Contrasts'][0]['Name'] == 'subject_run_parametric gain'

    # dataset level
    ds_step = model['Steps'][2]
    assert ds_step['Name'] == 'Dataset'
    assert ds_step['Level'] == 'Dataset'
    assert ds_step['Model']['X'][0] == 'subject_run_parametric gain'
    assert ds_step['Contrasts'][0]['Name'] == 'dataset_subject_run_parametric gain'
def _run_interface(self, runtime):
    """Load a cached BIDSLayout, run model setup, and load design matrices."""
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout

    layout = BIDSLayout.load(database_path=self.inputs.database_path)
    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, **self.inputs.selectors)

    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)
    return runtime
def test_incremental_data_loading():
    """Two non-finalized setup() calls then finalize() should yield 2 collections."""
    root = join(get_test_data_path(), "ds005")
    spec = join(root, "models", "ds-005_type-test_model.json")
    incremental = Analysis(BIDSLayout(root), spec)
    incremental.setup(scan_length=480, subject=["01"], run=[1], finalize=False)
    incremental.setup(scan_length=480, subject=["02"], run=[2], finalize=False)
    incremental.finalize()
    assert len(incremental["run"].get_collections()) == 2
def test_incremental_data_loading():
    """Two non-finalized setup() calls then finalize() should yield 2 collections."""
    data_dir = join(get_test_data_path(), 'ds005')
    model_path = join(data_dir, 'models', 'ds-005_type-test_model.json')
    staged = Analysis(BIDSLayout(data_dir), model_path)
    staged.setup(scan_length=480, subject=['01'], run=[1], finalize=False)
    staged.setup(scan_length=480, subject=['02'], run=[2], finalize=False)
    staged.finalize()
    assert len(staged['run'].get_collections()) == 2
def _run_interface(self, runtime):
    """Build a BIDSLayout from include/exclude patterns and load design matrices.

    Undefined traits are normalized: include/exclude become None,
    derivatives becomes False.
    """
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout

    include = self.inputs.include_pattern
    exclude = self.inputs.exclude_pattern
    derivatives = self.inputs.derivatives
    if not isdefined(include):
        include = None
    if not isdefined(exclude):
        exclude = None
    if not isdefined(derivatives):
        # BUG FIX: this previously re-assigned ``exclude = False``,
        # clobbering the exclude pattern and leaving derivatives undefined.
        derivatives = False

    layout = BIDSLayout(self.inputs.bids_dir, include=include,
                        exclude=exclude, derivatives=derivatives)

    selectors = self.inputs.selectors
    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, desc='preproc', **selectors)
    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)
    return runtime
def run_fitlins(argv=None):
    """Parse CLI arguments, run the fitlins workflow, and write reports.

    Returns 0 on success, 1 if the workflow raised during execution.
    """
    import re
    from nipype import logging as nlogging

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)

    force_index = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.force_index or ()
    ]
    ignore = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.ignore or ()
    ]

    # -v/-q shift the log level in steps of 5 around the default of 25
    log_level = 25 + 5 * (opts.quiet - opts.verbose)
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    if not opts.space:
        # make it an explicit None
        opts.space = None
    if not opts.desc_label:
        # make it an explicit None
        opts.desc_label = None

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }
    if opts.mem_gb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    model = default_path(opts.model, opts.bids_dir, 'model-default_smdl.json')
    if opts.model in (None, 'default') and not op.exists(model):
        # fall back to auto-generating a model later on
        model = 'default'

    derivatives = True if not opts.derivatives else opts.derivatives
    # Need this when specifying args directly (i.e. neuroscout)
    # god bless neuroscout, but let's make it work for others!
    if isinstance(derivatives, list) and len(derivatives) == 1:
        # WRONG AND EVIL to those who have spaces in their paths...
        # bad bad practice
        # TODO - fix neuroscout
        derivatives = derivatives[0].split(" ")

    pipeline_name = 'fitlins'
    if opts.derivative_label:
        pipeline_name += '_' + opts.derivative_label
    deriv_dir = op.join(opts.output_dir, pipeline_name)
    os.makedirs(deriv_dir, exist_ok=True)

    bids.write_derivative_description(opts.bids_dir, deriv_dir)

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    # Go ahead and initialize the layout database
    if opts.database_path is None:
        database_path = Path(work_dir) / 'dbcache'
        reset_database = True
    else:
        database_path = opts.database_path
        reset_database = False

    layout = BIDSLayout(opts.bids_dir,
                        derivatives=derivatives,
                        ignore=ignore,
                        force_index=force_index,
                        database_path=database_path,
                        reset_database=reset_database)

    subject_list = None
    if opts.participant_label is not None:
        subject_list = bids.collect_participants(
            layout, participant_label=opts.participant_label)

    # Build main workflow
    logger.log(25, INIT_MSG(version=__version__, subject_list=subject_list))

    fitlins_wf = init_fitlins_wf(
        database_path, deriv_dir,
        analysis_level=opts.analysis_level, model=model,
        space=opts.space, desc=opts.desc_label,
        participants=subject_list, base_dir=work_dir,
        smoothing=opts.smoothing, drop_missing=opts.drop_missing,
    )
    fitlins_wf.config = deepcopy(config.get_fitlins_config()._sections)

    if opts.work_dir:
        # dump crashes in working directory (non /tmp)
        fitlins_wf.config['execution']['crashdump_dir'] = opts.work_dir

    retcode = 0
    if not opts.reports_only:
        try:
            fitlins_wf.run(**plugin_settings)
        except Exception:
            # A workflow failure is reported via the return code; report
            # generation below still proceeds on whatever outputs exist.
            retcode = 1

    models = auto_model(layout) if model == 'default' else [model]

    run_context = {
        'version': __version__,
        'command': ' '.join(sys.argv),
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
    }

    selectors = {'desc': opts.desc_label, 'space': opts.space}
    if subject_list is not None:
        selectors['subject'] = subject_list

    for model in models:
        analysis = Analysis(layout, model=model)
        analysis.setup(**selectors)
        report_dict = build_report_dict(deriv_dir, work_dir, analysis)
        write_full_report(report_dict, run_context, deriv_dir)

    return retcode
def run_fitlins(argv=None):
    """Parse CLI arguments, run the fitlins workflow, and write reports.

    Returns 0 on success, 1 if the workflow raised during execution.
    """
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)
    if opts.debug:
        logger.setLevel(logging.DEBUG)

    if not opts.space:
        # make it an explicit None
        opts.space = None

    subject_list = None
    if opts.participant_label is not None:
        subject_list = bids.collect_participants(
            opts.bids_dir, participant_label=opts.participant_label)

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }

    # Build main workflow
    logger.log(25, INIT_MSG(
        version=__version__,
        subject_list=subject_list)
    )

    model = default_path(opts.model, opts.bids_dir, 'model-default_smdl.json')
    if opts.model in (None, 'default') and not op.exists(model):
        # fall back to auto-generating a model later on
        model = 'default'

    derivatives = True if not opts.derivatives else opts.derivatives
    # Need this when specifying args directly (i.e. neuroscout)
    # god bless neuroscout, but let's make it work for others!
    if isinstance(derivatives, list) and len(derivatives) == 1:
        # WRONG AND EVIL to those who have spaces in their paths...
        # bad bad practice
        # TODO - fix neuroscout
        derivatives = derivatives[0].split(" ")

    pipeline_name = 'fitlins'
    if opts.derivative_label:
        pipeline_name += '_' + opts.derivative_label
    deriv_dir = op.join(opts.output_dir, pipeline_name)
    os.makedirs(deriv_dir, exist_ok=True)

    bids.write_derivative_description(opts.bids_dir, deriv_dir)

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    fitlins_wf = init_fitlins_wf(
        opts.bids_dir, derivatives, deriv_dir,
        analysis_level=opts.analysis_level, model=model,
        space=opts.space, desc=opts.desc_label,
        participants=subject_list, base_dir=work_dir,
        force_index=opts.force_index, ignore=opts.ignore,
        smoothing=opts.smoothing,
    )

    if opts.work_dir:
        # dump crashes in working directory (non /tmp)
        fitlins_wf.config['execution']['crashdump_dir'] = opts.work_dir
    # easy to read crashfiles
    fitlins_wf.config['execution']['crashfile_format'] = 'txt'

    retcode = 0
    if not opts.reports_only:
        try:
            fitlins_wf.run(**plugin_settings)
        except Exception:
            # A workflow failure is reported via the return code; report
            # generation below still proceeds on whatever outputs exist.
            retcode = 1

    layout = BIDSLayout(opts.bids_dir, derivatives=derivatives)
    models = auto_model(layout) if model == 'default' else [model]

    run_context = {'version': __version__,
                   'command': ' '.join(sys.argv),
                   'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
                   }

    selectors = {'desc': opts.desc_label}
    if opts.space is not None:
        selectors['space'] = opts.space
    if subject_list is not None:
        selectors['subject'] = subject_list

    for model in models:
        analysis = Analysis(layout, model=model)
        analysis.setup(**selectors)
        report_dict = build_report_dict(deriv_dir, work_dir, analysis)
        write_full_report(report_dict, run_context, deriv_dir)

    return retcode
def run_fitlins(argv=None):
    """Parse CLI arguments, run the fitlins workflow, and write reports.

    Returns 0 on success, 1 if the workflow raised during execution.
    """
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)
    if opts.debug:
        logger.setLevel(logging.DEBUG)

    subject_list = None
    if opts.participant_label is not None:
        subject_list = bids.collect_participants(
            opts.bids_dir, participant_label=opts.participant_label)

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }

    # Build main workflow
    logger.log(25, INIT_MSG(
        version=__version__,
        subject_list=subject_list)
    )

    model = default_path(opts.model, opts.bids_dir, 'model-default_smdl.json')
    if opts.model in (None, 'default') and not op.exists(model):
        # fall back to auto-generating a model later on
        model = 'default'

    derivatives = True if not opts.derivatives else opts.derivatives
    # Need this when specifying args directly (i.e. neuroscout)
    # BUG FIX: ``derivatives`` is the bool True when --derivatives was not
    # given, and ``len(True)`` raises TypeError. Only apply the neuroscout
    # space-splitting workaround when it is actually a list.
    if isinstance(derivatives, list) and len(derivatives) == 1:
        derivatives = derivatives[0].split(" ")

    pipeline_name = 'fitlins'
    if opts.derivative_label:
        pipeline_name += '_' + opts.derivative_label
    deriv_dir = op.join(opts.output_dir, pipeline_name)
    os.makedirs(deriv_dir, exist_ok=True)

    bids.write_derivative_description(opts.bids_dir, deriv_dir)

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    fitlins_wf = init_fitlins_wf(
        opts.bids_dir, derivatives, deriv_dir, opts.space,
        model=model, participants=subject_list, base_dir=work_dir,
        include_pattern=opts.include, exclude_pattern=opts.exclude
    )

    retcode = 0
    try:
        fitlins_wf.run(**plugin_settings)
    except Exception:
        # A workflow failure is reported via the return code; report
        # generation below still proceeds on whatever outputs exist.
        retcode = 1

    layout = BIDSLayout(opts.bids_dir)
    models = auto_model(layout) if model == 'default' else [model]

    run_context = {'version': __version__,
                   'command': ' '.join(sys.argv),
                   'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
                   }

    for model in models:
        analysis = Analysis(layout, model=model)
        report_dicts = parse_directory(deriv_dir, work_dir, analysis)
        write_report('unknown', report_dicts, run_context, deriv_dir)

    return retcode