Example #1
    def _run_interface(self, runtime):
        models = self.inputs.model
        if not isinstance(models, list):
            layout = gb.BIDSLayout(self.inputs.bids_dir)

            if not isdefined(models):
                models = layout.get(type='model')
                if not models:
                    raise ValueError("No models found")
            elif models == 'default':
                models = ba.auto_model(layout)

        models = [_ensure_model(m) for m in models]

        if self.inputs.selectors:
            # This is almost certainly incorrect
            models = [
                model for model in models
                if all(val in model['input'].get(key, [val])
                       for key, val in self.inputs.selectors.items())
            ]

        self._results['model_spec'] = models

        return runtime
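The selector filter above is easiest to see on toy data. Below is a minimal, hypothetical run of the same comprehension (the model dicts and selectors are invented): a key missing from a model's 'input' section falls back to [val] and therefore always passes, so only keys a model actually declares can exclude it.

models = [
    {'name': 'one', 'input': {'task': ['stroop']}},
    {'name': 'two', 'input': {'task': ['rest']}},
]
selectors = {'task': 'stroop', 'subject': '01'}

kept = [
    model for model in models
    if all(val in model['input'].get(key, [val])
           for key, val in selectors.items())
]
# 'subject' is absent from both 'input' sections, so that check defaults to
# passing; only 'task' filters, leaving kept == [models[0]]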
Example #2
    def _list_outputs(self):
        layout = gb.BIDSLayout(self.inputs.base_dir)

        for key in self._outfields:
            if key not in self.inputs.output_query:
                raise ValueError("Define query for all outputs")

        # If an infield is not given an input value, silently ignore it
        filters = {}
        for key in self._infields:
            value = getattr(self.inputs, key)
            if isdefined(value):
                filters[key] = value

        outputs = {}
        for key, query in self.inputs.output_query.items():
            args = query.copy()
            args.update(filters)
            filelist = layout.get(return_type=self.inputs.return_type,
                                  **args)
            if len(filelist) == 0:
                msg = 'Output key: %s returned no files' % key
                if self.inputs.raise_on_empty:
                    raise IOError(msg)
                else:
                    LOGGER.warning(msg)
                    filelist = Undefined

            outputs[key] = filelist
        return outputs
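For reference, the stored query and the infield filters are merged with plain dict semantics before being passed to layout.get, so an explicitly set input overrides the query. A minimal, hypothetical illustration (the query and filter values are invented):

output_query = {'bold': {'type': 'bold', 'extensions': ['.nii', '.nii.gz']}}
filters = {'subject': '01', 'task': 'rest'}

args = output_query['bold'].copy()
args.update(filters)
# args == {'type': 'bold', 'extensions': ['.nii', '.nii.gz'],
#          'subject': '01', 'task': 'rest'}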
Example #3
def write_report(level, report_dicts, run_context, deriv_dir):
    fl_layout = grabbids.BIDSLayout(deriv_dir,
                                    config=[
                                        'bids', 'derivatives',
                                        pkgr.resource_filename(
                                            'fitlins', 'data/fitlins.json')
                                    ])
    fl_layout.path_patterns = PATH_PATTERNS

    env = jinja2.Environment(loader=jinja2.FileSystemLoader(
        searchpath=pkgr.resource_filename('fitlins', '/')))

    tpl = env.get_template('data/report.tpl')

    for context in report_dicts:
        ents = context['ents'].copy()
        ents['model'] = snake_to_camel(context['model_name'])
        target_file = op.join(deriv_dir, fl_layout.build_path(ents))
        html = tpl.render(
            deroot({
                'level': level,
                **context,
                **run_context
            }, op.dirname(target_file)))
        with open(target_file, 'w') as fobj:
            fobj.write(html)
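deroot is a helper defined elsewhere; a minimal sketch of what it presumably does, assuming it rewrites absolute paths under the given root as relative paths so links in the rendered HTML resolve from the report's directory:

import os.path as op

def deroot(val, root):
    # Recursively replace any string path under ``root`` with its relative form.
    if isinstance(val, str):
        if val.startswith(root):
            val = op.relpath(val, root)
    elif isinstance(val, dict):
        val = {key: deroot(value, root) for key, value in val.items()}
    elif isinstance(val, list):
        val = [deroot(elem, root) for elem in val]
    return val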
Example #4
def init(model_fname, bids_dir, preproc_dir):
    if preproc_dir is not None:
        config = [('bids', [bids_dir, preproc_dir]),
                  ('derivatives', preproc_dir)]
    else:
        config = None

    layout = grabbids.BIDSLayout(bids_dir, config=config)

    analysis = ba.Analysis(model=model_fname, layout=layout)
    analysis.setup()
    analysis.layout.path_patterns[:0] = PATH_PATTERNS
    return analysis
Example #5
    def _run_interface(self, runtime):
        if isdefined(self.inputs.preproc_dir):
            config = [('bids', [self.inputs.bids_dir, self.inputs.preproc_dir]),
                      ('derivatives', self.inputs.preproc_dir)]
        else:
            config = None
        layout = gb.BIDSLayout(self.inputs.bids_dir, config=config)

        bold_files = []
        mask_files = []
        entities = []
        for ents in self.inputs.entities:
            selectors = {**self.inputs.selectors, **ents}
            bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors)

            if len(bold_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(bold_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(self.inputs.bids_dir, selectors,
                              "\n\t".join(
                                  '{} ({})'.format(
                                      f.filename,
                                      layout.files[f.filename].entities)
                                  for f in bold_file)))

            # Select exactly matching mask file (may be over-cautious)
            bold_ents = layout.parse_file_entities(
                bold_file[0].filename)
            bold_ents['type'] = 'brainmask'
            mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents)
            bold_ents.pop('type')

            bold_files.append(bold_file[0].filename)
            mask_files.append(mask_file[0].filename if mask_file else None)
            entities.append(bold_ents)

        self._results['bold_files'] = bold_files
        self._results['mask_files'] = mask_files
        self._results['entities'] = entities

        return runtime
Example #6
def parse_directory(deriv_dir, analysis):
    fl_layout = grabbids.BIDSLayout(deriv_dir,
                                    config=[
                                        'bids', 'derivatives',
                                        pkgr.resource_filename(
                                            'fitlins', 'data/fitlins.json')
                                    ])
    contrast_svgs = fl_layout.get(extensions='.svg', type='contrasts')

    analyses = []
    for contrast_svg in contrast_svgs:
        ents = fl_layout.parse_file_entities(contrast_svg.filename)
        ents.pop('type')
        ents.setdefault('subject', None)
        correlation_matrix = fl_layout.get(extensions='.svg',
                                           type='corr',
                                           **ents)
        design_matrix = fl_layout.get(extensions='.svg', type='design', **ents)
        job_desc = {
            'ents': {k: v
                     for k, v in ents.items() if v is not None},
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'contrasts_svg': contrast_svg.filename,
        }
        if ents.get('subject'):
            job_desc['subject_id'] = ents.get('subject')
        if correlation_matrix:
            job_desc['correlation_matrix_svg'] = correlation_matrix[0].filename
        if design_matrix:
            job_desc['design_matrix_svg'] = design_matrix[0].filename

        contrasts = fl_layout.get(extensions='.png', type='ortho', **ents)
        # TODO: Split contrasts from estimates
        job_desc['contrasts'] = [{
            'image_file': c.filename,
            'name': fl_layout.parse_file_entities(c.filename)['contrast'],
        } for c in contrasts]
        analyses.append(job_desc)

    return analyses
Example #7
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    if len(args) != 2:
        raise RuntimeError('Usage: bids2netjs.py indir outdir')

    indir  = args[0]
    outdir = args[1]

    basedir = op.dirname(__file__)
    config  = op.join(basedir, 'bids-connectivity-derivatives.json')
    layout  = grabbids.BIDSLayout(indir, config=config)

    if not op.exists(outdir):
        os.makedirs(outdir)

    getConnectivity(layout, indir, outdir)
    getNodeLabels(  layout, indir, outdir)
    getNodeOrders(  layout, indir, outdir)
    getNodeGroups(  layout, indir, outdir)
    getThumbnails(  layout, indir, outdir)
Example #8
    def _list_outputs(self):
        base_dir = self.inputs.base_directory

        layout = gb.BIDSLayout(base_dir)
        if self.inputs.path_patterns:
            layout.path_patterns[:0] = self.inputs.path_patterns

        out_files = []
        for entities, in_file in zip(self.inputs.entities,
                                     self.inputs.in_file):
            ents = {**self.inputs.fixed_entities}
            ents.update(entities)

            ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}

            out_fname = os.path.join(
                base_dir, layout.build_path(ents))
            makedirs(os.path.dirname(out_fname), exist_ok=True)

            _copy_or_convert(in_file, out_fname)
            out_files.append(out_fname)

        return {'out_file': out_files}
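snake_to_camel and _copy_or_convert are helpers defined elsewhere; a plausible sketch of the former, assuming its job is to turn snake_case values into camelCase so they are valid BIDS entity labels (no underscores):

def snake_to_camel(string):
    # e.g. 'word_error_rate' -> 'wordErrorRate'
    words = string.split('_')
    return words[0] + ''.join(word.title() for word in words[1:])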
Example #9
    def _run_interface(self, runtime):
        include = self.inputs.include_pattern
        exclude = self.inputs.exclude_pattern
        if not isdefined(include):
            include = None
        if not isdefined(exclude):
            exclude = None

        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = gb.BIDSLayout(paths, include=include, exclude=exclude)

        selectors = self.inputs.selectors

        analysis = ba.Analysis(model=self.inputs.model, layout=layout)
        analysis.setup(drop_na=False, **selectors)
        self._load_level1(runtime, analysis)
        self._load_higher_level(runtime, analysis)

        # Debug - remove, eventually
        runtime.analysis = analysis

        return runtime
Example #10
def run_fitlins(argv=None):
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)
    if opts.debug:
        logger.setLevel(logging.DEBUG)

    subject_list = None
    if opts.participant_label is not None:
        subject_list = bids.collect_participants(
            opts.bids_dir, participant_label=opts.participant_label)

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }

    # Build main workflow
    logger.log(25, INIT_MSG(version=__version__, subject_list=subject_list))

    model = default_path(opts.model, opts.bids_dir, 'model.json')
    if opts.model in (None, 'default') and not op.exists(model):
        model = 'default'

    preproc_dir = default_path(opts.preproc_dir,
                               op.join(opts.bids_dir, 'derivatives'),
                               'fmriprep')
    if not op.exists(preproc_dir):
        preproc_dir = default_path(opts.preproc_dir, opts.output_dir,
                                   'fmriprep')
        if not op.exists(preproc_dir):
            raise RuntimeError("Preprocessed data could not be found")

    pipeline_name = 'fitlins'
    if opts.derivative_label:
        pipeline_name += '_' + opts.derivative_label
    deriv_dir = op.join(opts.output_dir, pipeline_name)
    os.makedirs(deriv_dir, exist_ok=True)

    bids.write_derivative_description(opts.bids_dir, deriv_dir)

    # BIDS-Apps prefers 'participant', BIDS-Model prefers 'subject'
    level = 'subject' if opts.analysis_level == 'participant' else opts.analysis_level

    fitlins_wf = init_fitlins_wf(opts.bids_dir,
                                 preproc_dir,
                                 deriv_dir,
                                 opts.space,
                                 model=model,
                                 participants=subject_list,
                                 base_dir=opts.work_dir,
                                 include_pattern=opts.include,
                                 exclude_pattern=opts.exclude)

    try:
        fitlins_wf.run(**plugin_settings)
        if model != 'default':
            retcode = run_model(model, opts.space, level, opts.bids_dir,
                                preproc_dir, deriv_dir)
        else:
            retcode = 0
    except Exception:
        retcode = 1

    layout = gb.BIDSLayout(opts.bids_dir)
    models = ba.auto_model(layout) if model == 'default' else [model]

    run_context = {
        'version': __version__,
        'command': ' '.join(sys.argv),
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
    }

    for model in models:
        analysis = ba.Analysis(layout, model=model)
        report_dicts = parse_directory(deriv_dir, analysis)
        write_report('unknown', report_dicts, run_context, deriv_dir)

    return retcode
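default_path is not shown here; a minimal sketch under the assumption that it returns the user-supplied path when one is given and otherwise falls back to a default location:

import os.path as op

def default_path(opt, base_dir, name):
    # Use the explicit value if provided, else fall back to <base_dir>/<name>.
    return opt if opt is not None else op.join(base_dir, name)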
Example #11
def init(model_fname, bids_dir, preproc_dir):
    analysis = ba.Analysis(model=model_fname,
                           layout=grabbids.BIDSLayout([bids_dir, preproc_dir]))
    analysis.setup()
    analysis.layout.path_patterns[:0] = PATH_PATTERNS
    return analysis
Example #12
def second_level(analysis, block, space, deriv_dir):
    fl_layout = grabbids.BIDSLayout(
        deriv_dir,
        config=['bids', 'derivatives',
                pkgr.resource_filename('fitlins', 'data/fitlins.json')])
    fl_layout.path_patterns[:0] = PATH_PATTERNS

    analyses = []

    # pybids likes to give us a lot of extraneous columns
    cnames = [contrast['name'] for contrast in block.contrasts] + block.model['variables']
    fmri_glm = level2.SecondLevelModel()

    for contrasts, idx, ents in block.get_contrasts(names=cnames):
        if contrasts.empty:
            continue

        data = []
        for in_name, sub_ents in zip(contrasts.index, idx.to_dict(orient='records')):
            # The underlying contrast name might have been added to by a transform
            for option in [in_name] + in_name.split('.'):
                files = fl_layout.get(contrast=snake_to_camel(option),
                                      type='stat', space=space, **sub_ents)
                if files:
                    data.append(files[0].filename)
                    break
            else:
                raise ValueError("Unknown input: {}".format(in_name))

        out_ents = reduce(dict_intersection,
                          map(fl_layout.parse_file_entities, data))

        contrasts_ents = out_ents.copy()
        contrasts_ents['type'] = 'contrasts'
        contrasts_ents.pop('contrast', None)
        contrasts_ents.pop('space', None)
        contrasts_fname = op.join(
            deriv_dir,
            fl_layout.build_path(contrasts_ents, strict=True))

        # Make parent results directory
        os.makedirs(os.path.dirname(contrasts_fname), exist_ok=True)
        plot_and_save(contrasts_fname, plot_contrast_matrix, contrasts,
                      ornt='horizontal')

        job_desc = {
            'ents': out_ents,
            'subject_id': ents.get('subject'),
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'contrasts_svg': contrasts_fname,
            }

        for contrast in contrasts:
            out_ents['contrast'] = snake_to_camel(contrast)

            stat_fname = op.join(deriv_dir,
                                 fl_layout.build_path(out_ents, strict=True))

            ortho_ents = out_ents.copy()
            ortho_ents['type'] = 'ortho'
            ortho_fname = op.join(deriv_dir,
                                  analysis.layout.build_path(ortho_ents,
                                                             strict=True))

            desc = {'name': contrast, 'image_file': ortho_fname}
            job_desc.setdefault('contrasts', []).append(desc)

            if op.exists(stat_fname):
                continue

            cols = {'intercept': np.ones(len(data))}
            cname = 'intercept'
            if not np.allclose(contrasts[contrast], 1):
                cname = contrast
                cols[contrast] = contrasts[contrast]

            paradigm = pd.DataFrame(cols)

            fmri_glm.fit(data, design_matrix=paradigm)
            stat_type = [c['type'] for c in block.contrasts if c['name'] == contrast] or ['T']
            stat_type = stat_type[0]
            stat = fmri_glm.compute_contrast(
                cname,
                second_level_stat_type={'T': 't', 'F': 'F'}[stat_type],
                )
            # Use a separate name so the list of input files in ``data`` is not clobbered
            stat_data = stat.get_data()
            masked_vals = stat_data[stat_data != 0]
            if np.isnan(masked_vals).all():
                raise ValueError("nistats was unable to perform this contrast")
            stat.to_filename(stat_fname)

            nlp.plot_glass_brain(stat, colorbar=True,
                                 threshold=sps.norm.isf(0.001), plot_abs=False,
                                 display_mode='lyrz', output_file=ortho_fname)

        analyses.append(job_desc)

    return analyses
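dict_intersection (combined with functools.reduce above) is defined elsewhere; a minimal sketch, assuming it keeps only the key/value pairs on which both dicts agree, so out_ents ends up with the entities shared by every input file:

def dict_intersection(a, b):
    # Keep key/value pairs present with the same value in both dicts.
    return {key: val for key, val in a.items() if b.get(key) == val}

# reduce(dict_intersection, [{'subject': '01', 'task': 'a'},
#                            {'subject': '01', 'task': 'b'}]) -> {'subject': '01'}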
Example #13
    def __init__(self, root_dir):
        self._root_dir = root_dir
        derivatives_path = op.join(root_dir, self.DERIVATIVES_SUB_PATH)
        makedirs(derivatives_path, exist_ok=True)
        DirectoryRepository.__init__(self, derivatives_path)
        self._layout = gb.BIDSLayout(self.base_dir)
Example #14
    def _run_interface(self, runtime):
        include = self.inputs.include_pattern
        exclude = self.inputs.exclude_pattern
        if not isdefined(include):
            include = None
        if not isdefined(exclude):
            exclude = None

        if isdefined(self.inputs.preproc_dir):
            config = [('bids', [self.inputs.bids_dir, self.inputs.preproc_dir]),
                      ('derivatives', self.inputs.preproc_dir)]
        else:
            config = None
        layout = gb.BIDSLayout(self.inputs.bids_dir, config=config,
                               include=include, exclude=exclude)

        selectors = self.inputs.selectors

        analysis = ba.Analysis(model=self.inputs.model, layout=layout)
        analysis.setup(drop_na=False, **selectors)
        block = analysis.blocks[0]

        entities = []
        session_info = []
        contrast_info = []
        for paradigm, _, ents in block.get_design_matrix(
                block.model['HRF_variables'], mode='sparse', force=True):
            info = {}

            space = layout.get_spaces(type='preproc',
                                      extensions=['.nii', '.nii.gz'])[0]
            preproc_files = layout.get(type='preproc',
                                       extensions=['.nii', '.nii.gz'],
                                       space=space,
                                       **ents)
            if len(preproc_files) != 1:
                raise ValueError('Expected exactly one preprocessed BOLD file; '
                                 'found {}'.format(len(preproc_files)))

            fname = preproc_files[0].filename

            # Required field in seconds
            TR = layout.get_metadata(fname, type='bold')['RepetitionTime']
            dense_vars = set(block.model['variables']) - set(block.model['HRF_variables'])

            _, confounds, _ = block.get_design_matrix(dense_vars,
                                                      mode='dense',
                                                      sampling_rate=1/TR,
                                                      **ents)[0]

            # Note that FMRIPREP includes CosineXX columns to accompany
            # t/aCompCor
            # We may want to add criteria to include HPF columns that are not
            # explicitly listed in the model
            names = [col for col in confounds.columns
                     if col.startswith('NonSteadyStateOutlier') or
                     col in block.model['variables']]

            ent_string = '_'.join('{}-{}'.format(key, val)
                                  for key, val in ents.items())
            events_file = os.path.join(runtime.cwd,
                                       '{}_events.h5'.format(ent_string))
            confounds_file = os.path.join(runtime.cwd,
                                          '{}_confounds.h5'.format(ent_string))
            paradigm.to_hdf(events_file, key='events')
            confounds[names].fillna(0).to_hdf(confounds_file, key='confounds')
            info['events'] = events_file
            info['confounds'] = confounds_file
            info['repetition_time'] = TR

            contrasts = block.get_contrasts([contrast['name']
                                             for contrast in block.contrasts],
                                            **ents)[0][0].T
            # Add test indicator column
            contrasts['type'] = [contrast['type']
                                 for contrast in block.contrasts]

            contrasts_file = os.path.join(runtime.cwd,
                                          '{}_contrasts.h5'.format(ent_string))
            contrasts.to_hdf(contrasts_file, key='contrasts')

            entities.append(ents)
            session_info.append(info)
            contrast_info.append(contrasts_file)

        runtime.analysis = analysis

        self._results['entities'] = entities
        self._results['session_info'] = session_info
        self._results['contrast_info'] = contrast_info
        return runtime