Example #1
from os.path import join

from bids.layout import BIDSLayout
from bids.modeling import BIDSStatsModelsGraph
from bids.tests import get_test_data_path


def graph():
    layout_path = join(get_test_data_path(), "ds005")
    layout = BIDSLayout(layout_path)
    json_file = join(layout_path, "models", "ds-005_type-test_model.json")
    graph = BIDSStatsModelsGraph(layout, json_file)
    graph.load_collections(scan_length=480, subject=["01", "02"])
    return graph
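
A minimal sketch of how the helper above might be exercised, assuming the ds005 test data that ships with pybids (the test name and assertion are illustrative, not part of the excerpt):

def test_graph_builds():
    g = graph()
    # the first (root) node of the ds-005 test model is the run-level node
    outputs = g.root_node.run(force_dense=False)
    assert len(outputs) > 0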
Example #2
def test_incremental_data_loading():
    layout_path = join(get_test_data_path(), "ds005")
    layout = BIDSLayout(layout_path)
    json_file = join(layout_path, "models", "ds-005_type-test_model.json")
    graph = BIDSStatsModelsGraph(layout, json_file)
    graph.load_collections(scan_length=480, subject=["01"], run=[1])
    graph.load_collections(scan_length=480, subject=["02"], run=[2])
    assert len(graph["run"].get_collections()) == 2
Example #3
    def _run_interface(self, runtime):
        from bids.layout import BIDSLayout
        from bids.modeling import BIDSStatsModelsGraph

        layout = BIDSLayout.load(database_path=self.inputs.database_path)
        selectors = self.inputs.selectors

        graph = BIDSStatsModelsGraph(layout, self.inputs.model)
        graph.load_collections(**selectors)
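        # selectors (e.g. subject/run/session filters) restrict which variable collections are loaded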

        self._results['all_specs'] = self._load_graph(runtime, graph)

        return runtime
Example #4
def test_automodel_runs(model):
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)

    # Test to make sure an analysis can be set up from the generated model
    graph = BIDSStatsModelsGraph(layout, model)
    graph.load_collections(scan_length=480, subject=["01", "02"])
    outputs = graph["Run"].run()
    assert len(outputs) == 6
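    # the run-level contrast outputs become the inputs to the subject-level node below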
    cis = list(chain(*[op.contrasts for op in outputs]))
    assert len(cis) == 6
    outputs = graph["Subject"].run(cis)
    # 2 subjects x 1 contrast
    assert len(outputs) == 2
    cis = list(chain(*[op.contrasts for op in outputs]))
    assert len(cis) == 2
Example #5
def test_auto_model_graph(model):

    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)

    # Test to make sure an analysis can be set up from the generated model
    graph = BIDSStatsModelsGraph(layout, model)
    graph.load_collections(scan_length=480)

    assert model['Name'] == 'ds005_mixedgamblestask'

    # run level
    block = model['Nodes'][0]
    assert block['Name'] == 'Run'
    assert block['Level'] == 'Run'
    assert block['Transformations'][0]['Name'] == 'Factor'
    assert block['Contrasts'][0]['Name'] == 'run_parametric gain'
    assert block['Contrasts'][0]['Weights'] == [1]
    assert block['Contrasts'][0]['Test'] == 't'

    # subject level
    block = model['Nodes'][1]
    assert block['Name'] == 'Subject'
    assert block['Level'] == 'Subject'
    assert block['Model']['X'][0] == 'run_parametric gain'
    assert block['Contrasts'][0]['Name'] == 'subject_run_parametric gain'
    assert block['Contrasts'][0]['Test'] == 'FEMA'

    # dataset level
    block = model['Nodes'][2]
    assert block['Name'] == 'Dataset'
    assert block['Level'] == 'Dataset'
    assert block['Model']['X'][0] == 'subject_run_parametric gain'
    assert block['Contrasts'][0]['Name'] == 'dataset_subject_run_parametric gain'
    assert block['Contrasts'][0]['Test'] == 't'
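
For reference, the assertions above imply an auto-generated model of roughly the following shape (a sketch reconstructed from the asserts; fields that are not asserted on, such as transformation instructions and the run-level design matrix, are omitted):

model = {
    'Name': 'ds005_mixedgamblestask',
    'Nodes': [
        {
            'Name': 'Run',
            'Level': 'Run',
            'Transformations': [{'Name': 'Factor'}],  # remaining transformation fields omitted
            'Contrasts': [{'Name': 'run_parametric gain', 'Weights': [1], 'Test': 't'}],
        },
        {
            'Name': 'Subject',
            'Level': 'Subject',
            'Model': {'X': ['run_parametric gain']},
            'Contrasts': [{'Name': 'subject_run_parametric gain', 'Test': 'FEMA'}],
        },
        {
            'Name': 'Dataset',
            'Level': 'Dataset',
            'Model': {'X': ['subject_run_parametric gain']},
            'Contrasts': [{'Name': 'dataset_subject_run_parametric gain', 'Test': 't'}],
        },
    ],
}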
Example #6
def run_fitlins(argv=None):
    import re
    from nipype import logging as nlogging

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)

    force_index = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.force_index or ()
    ]
    ignore = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.ignore or ()
    ]

    log_level = 25 + 5 * (opts.quiet - opts.verbose)
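    # (25 sits between INFO=20 and WARNING=30; each -q raises the level by 5, each -v lowers it by 5)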
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    if not opts.space:
        # make it an explicit None
        opts.space = None
    if not opts.desc_label:
        # make it an explicit None
        opts.desc_label = None

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        },
    }

    if opts.mem_gb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    model = default_path(opts.model, opts.bids_dir, 'model-default_smdl.json')
    if opts.model in (None, 'default') and not op.exists(model):
        model = 'default'

    derivatives = True if not opts.derivatives else opts.derivatives
    # Need this when specifying args directly (i.e. neuroscout)
    # god bless neuroscout, but let's make it work for others!
    if isinstance(derivatives, list) and len(derivatives) == 1:
        # WRONG AND EVIL to those who have spaces in their paths... bad bad practice
        # TODO - fix neuroscout
        derivatives = derivatives[0].split(" ")

    if opts.estimator != 'afni':
        if opts.error_ts:
            raise NotImplementedError(
                "Saving the error time series is only implemented for"
                " the afni estimator. If this is a feature you want"
                f" for {opts.estimator} please let us know on github.")

    if opts.derivative_label:
        logger.warning('--derivative-label no longer has any effect; '
                       'set output directory name directly')
    os.makedirs(opts.output_dir, exist_ok=True)
    fub.write_derivative_description(opts.bids_dir, opts.output_dir,
                                     vars(opts))

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    # Go ahead and initialize the layout database
    if opts.database_path is None:
        database_path = Path(work_dir) / 'dbcache'
        reset_database = True
    else:
        database_path = opts.database_path
        reset_database = False

    indexer = bids.BIDSLayoutIndexer(ignore=ignore, force_index=force_index)
    layout = bids.BIDSLayout(
        opts.bids_dir,
        derivatives=derivatives,
        database_path=database_path,
        reset_database=reset_database,
        indexer=indexer,
    )

    subject_list = None
    if opts.participant_label is not None:
        subject_list = fub.collect_participants(
            layout, participant_label=opts.participant_label)

    # Build main workflow
    logger.log(25, INIT_MSG(version=__version__, subject_list=subject_list))

    # TODO: Fix AUTO_MODEL
    # if model == 'default':
    #     models = auto_model(layout)
    # else:
    #     import json
    #     if op.exists(model):
    #         model_dict = json.loads(Path(model).read_text())
    #     models = [model_dict]

    model_dict = None
    if model == 'default':
        retcode = 1
        raise NotImplementedError(
            "The default model has not been implemented yet.")
    else:
        import json

        if op.exists(model):
            model_dict = json.loads(Path(model).read_text())

    if not model_dict:
        raise ValueError(
            f'model_dict cannot be empty. Invalid model filepath {model}.')

    graph = BIDSStatsModelsGraph(layout, model_dict)

    fitlins_wf = init_fitlins_wf(
        database_path,
        opts.output_dir,
        graph=graph,
        analysis_level=opts.analysis_level,
        model=model,
        space=opts.space,
        desc=opts.desc_label,
        participants=subject_list,
        base_dir=work_dir,
        smoothing=opts.smoothing,
        drop_missing=opts.drop_missing,
        drift_model=opts.drift_model,
        estimator=opts.estimator,
        errorts=opts.error_ts,
    )
    fitlins_wf.config = deepcopy(config.get_fitlins_config()._sections)

    if opts.work_dir:
        # dump crashes in working directory (non /tmp)
        fitlins_wf.config['execution']['crashdump_dir'] = opts.work_dir
    retcode = 0
    if not opts.reports_only:
        try:
            fitlins_wf.run(**plugin_settings)
        except Exception as e:
            logger.critical(f"FitLins failed: {e}")
            raise

    run_context = {
        'version': __version__,
        'command': ' '.join(sys.argv),
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
    }

    selectors = {'desc': opts.desc_label, 'space': opts.space}
    if subject_list is not None:
        selectors['subject'] = subject_list

    graph.load_collections(**selectors)
    report_dict = build_report_dict(opts.output_dir, work_dir, graph)
    write_full_report(report_dict, run_context, opts.output_dir)

    return retcode
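
A minimal sketch of wiring this function up as a console entry point (the `main` wrapper is an assumption; fitlins' actual packaging may differ):

import sys

def main():
    # forward CLI arguments and propagate the return code to the shell
    sys.exit(run_fitlins(sys.argv[1:]))

if __name__ == '__main__':
    main()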
Example #7
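The cells below come from an interactive (Jupyter-style) session. A sketch of the setup they assume follows; the dataset path is hypothetical, and `api(...)` refers to a helper defined earlier in that session that is not shown here:

import json
from pathlib import Path

from bids.layout import BIDSLayout
from bids.modeling import BIDSStatsModelsGraph
from nilearn.plotting import plot_design_matrix

layout = BIDSLayout('/path/to/narps')  # hypothetical path to the dataset used by the model
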
json_file = './model-narps_smdl.json'
spec = json.loads(Path(json_file).read_text())
spec

# %%
graph = BIDSStatsModelsGraph(layout, spec)

# %%
graph.write_graph(format='svg')

# %%
root_node = graph.root_node

# %%
try:
    graph.load_collections()
except ValueError:
    graph.load_collections(scan_length=453)  # TR = 1, nvols = 453

# %%
specs = root_node.run(group_by=root_node.group_by, force_dense=False)
len(specs)

# %%
api(specs[0])

# %%
specs[0]

# %%
plot_design_matrix(specs[0].X)