Example #1
    def __init__(
        self,
        out_dir,
        run_uuid,
        config=None,
        out_filename="report.html",
        packagename=None,
        reportlets_dir=None,
        subject_id=None,
    ):
        self.root = Path(reportlets_dir or out_dir)

        # Initialize structuring elements
        self.sections = []
        self.errors = []
        self.out_dir = Path(out_dir)
        self.out_filename = out_filename
        self.run_uuid = run_uuid
        self.packagename = packagename
        self.subject_id = subject_id
        if subject_id is not None:
            self.subject_id = (subject_id[4:] if subject_id.startswith("sub-")
                               else subject_id)
            self.out_filename = f"sub-{self.subject_id}.html"

        # Default template from niworkflows
        self.template_path = Path(
            pkgrf("aslprep", "niworkflows/reports/report.tpl"))
        self._load_config(
            Path(config
                 or pkgrf("aslprep", "niworkflows/reports/default.yml")))
        assert self.template_path.exists()
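For context, pkgrf is pkg_resources.resource_filename (see the imports in later examples): it resolves the absolute path of a data file shipped inside an installed package. A minimal, self-contained illustration, resolving a file that ships with pkg_resources itself so it runs anywhere setuptools is installed:

from pkg_resources import resource_filename as pkgrf

# Resolve the on-disk location of a resource bundled with an installed package.
path = pkgrf("pkg_resources", "__init__.py")
print(path)  # absolute path inside the installed distribution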
Example #2
def run_reports(reportlets_dir, out_dir, subject_label, run_uuid, report_type='qsiprep'):
    """
    Runs the reports

    >>> import os
    >>> from shutil import copytree
    >>> from tempfile import TemporaryDirectory
    >>> filepath = os.path.dirname(os.path.realpath(__file__))
    >>> test_data_path = os.path.realpath(os.path.join(filepath,
    ...                                   '../data/tests/work'))
    >>> curdir = os.getcwd()
    >>> tmpdir = TemporaryDirectory()
    >>> os.chdir(tmpdir.name)
    >>> data_dir = copytree(test_data_path, os.path.abspath('work'))
    >>> os.makedirs('out/qsiprep', exist_ok=True)
    >>> run_reports(os.path.abspath('work/reportlets'),
    ...             os.path.abspath('out'),
    ...             '01', 'madeoutuuid')
    0
    >>> os.chdir(curdir)
    >>> tmpdir.cleanup()

    """
    reportlet_path = str(Path(reportlets_dir) / report_type / ("sub-%s" % subject_label))
    if report_type == 'qsiprep':
        config = pkgrf('qsiprep', 'viz/config.json')
    else:
        config = pkgrf('qsiprep', 'viz/recon_config.json')

    out_filename = 'sub-{}.html'.format(subject_label)
    report = Report(reportlet_path, config, out_dir, run_uuid, out_filename,
                    pipeline_type=report_type)
    return report.generate_report()
Example #3
def mock_config():
    """Create a mock config for documentation and testing purposes."""
    from .. import config
    _old_fs = os.getenv('FREESURFER_HOME')
    if not _old_fs:
        os.environ['FREESURFER_HOME'] = mkdtemp()

    filename = Path(pkgrf('aslprep', 'data/tests/config.toml'))
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != 'environment':
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.init()
    config.loggers.init()
    config.init_spaces()

    config.execution.work_dir = Path(mkdtemp())
    config.execution.bids_dir = Path(pkgrf('aslprep', 'data/tests/ds000240')).absolute()
    config.execution.init()

    yield

    if not _old_fs:
        del os.environ["FREESURFER_HOME"]
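Since mock_config is written as a generator (setup before the yield, teardown after it), it is presumably consumed as a context manager; a hedged sketch of one way to do that, assuming mock_config above is in scope:

from contextlib import contextmanager

# Wrap the generator so the mocked settings are active only inside the block.
with contextmanager(mock_config)():
    pass  # code under test sees the mocked configuration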
Example #4
    def generate_report(self):
        """

        Returns
        -------

        """
        logs_path = self.out_dir / 'cmp' / 'logs'

        boilerplate = []
        boiler_idx = 0

        if (logs_path / 'CITATION.html').exists():
            text = (logs_path / 'CITATION.html').read_text(encoding='UTF-8')
            text = '<div class="boiler-html">%s</div>' % re.compile(
                '<body>(.*?)</body>',
                re.DOTALL | re.IGNORECASE).findall(text)[0].strip()
            boilerplate.append((boiler_idx, 'HTML', text))
            boiler_idx += 1

        if (logs_path / 'CITATION.md').exists():
            text = '<pre>%s</pre>\n' % (logs_path / 'CITATION.md').read_text(
                encoding='UTF-8')
            boilerplate.append((boiler_idx, 'Markdown', text))
            boiler_idx += 1

        if (logs_path / 'CITATION.tex').exists():
            text = (logs_path / 'CITATION.tex').read_text(encoding='UTF-8')
            text = re.compile(r'\\begin{document}(.*?)\\end{document}',
                              re.DOTALL
                              | re.IGNORECASE).findall(text)[0].strip()
            text = '<pre>%s</pre>\n' % text
            text += '<h3>Bibliography</h3>\n'
            text += '<pre>%s</pre>\n' % Path(
                pkgrf('cmp',
                      'data/boilerplate.bib')).read_text(encoding='UTF-8')
            boilerplate.append((boiler_idx, 'LaTeX', text))
            boiler_idx += 1

        searchpath = pkgrf('cmp', '/')
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath=searchpath),
            trim_blocks=True,
            lstrip_blocks=True)
        report_tpl = env.get_template('viz/report.tpl')
        report_render = report_tpl.render(sections=self.sections,
                                          errors=self.errors,
                                          boilerplate=boilerplate)

        # Write out report
        (self.out_dir / 'cmp' / self.out_filename).write_text(report_render,
                                                              encoding='UTF-8')
        return len(self.errors)
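The <body> extraction above hinges on a non-greedy, case-insensitive regex that spans newlines; a stand-alone check of that idiom:

import re

# DOTALL lets '.' cross newlines; IGNORECASE also matches <BODY>.
html = "<html><BODY>\n<p>boilerplate</p>\n</BODY></html>"
body = re.compile('<body>(.*?)</body>',
                  re.DOTALL | re.IGNORECASE).findall(html)[0].strip()
print(body)  # -> <p>boilerplate</p>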
Example #5
def test_generated_reportlets(bids_sessions, ordering):
    # make independent report
    out_dir = tempfile.mkdtemp()
    report = Report(Path(bids_sessions),
                    Path(out_dir),
                    'fakeuuid',
                    subject_id='01',
                    packagename='fmriprep')
    config = Path(pkgrf('niworkflows', 'reports/fmriprep.yml'))
    settings = load(config.read_text())
    # change settings to only include some missing ordering
    settings['sections'][3]['ordering'] = ordering
    report.index(settings['sections'])
    # expected number of reportlets
    expected_reportlets_num = len(report.layout.get(extension='svg'))
    # bids_session uses these entities
    needed_entities = ['session', 'task', 'run']
    # the last section is the most recently run
    reportlets_num = len(report.sections[-1].reportlets)
    # if ordering does not contain all the relevant entities
    # then there should be fewer reportlets than expected
    if all(ent in ordering for ent in needed_entities):
        assert reportlets_num == expected_reportlets_num
    else:
        assert reportlets_num < expected_reportlets_num
Example #6
def run_reports(reportlets_dir, out_dir, subject_label, run_uuid):
    reportlet_path = os.path.join(reportlets_dir, 'fmriprep', "sub-" + subject_label)
    config = pkgrf('fmriprep', 'viz/config.json')

    out_filename = 'sub-{}.html'.format(subject_label)
    report = Report(reportlet_path, config, out_dir, run_uuid, out_filename)
    return report.generate_report()
Example #7
def run_reports(reportlets_dir, out_dir, subject_label, run_uuid):
    """
    Runs the reports

    >>> import os
    >>> from shutil import copytree
    >>> from tempfile import TemporaryDirectory
    >>> filepath = os.path.dirname(os.path.realpath(__file__))
    >>> test_data_path = os.path.realpath(os.path.join(filepath,
    ...                                   '../data/tests/work'))
    >>> curdir = os.getcwd()
    >>> tmpdir = TemporaryDirectory()
    >>> os.chdir(tmpdir.name)
    >>> data_dir = copytree(test_data_path, os.path.abspath('work'))
    >>> os.makedirs('out/fmriprep', exist_ok=True)
    >>> run_reports(os.path.abspath('work/reportlets'),
    ...             os.path.abspath('out'),
    ...             '01', 'madeoutuuid')
    0
    >>> os.chdir(curdir)
    >>> tmpdir.cleanup()

    """
    reportlet_path = os.path.join(reportlets_dir, 'fmriprep', "sub-" + subject_label)
    config = pkgrf('fmriprep', 'viz/config.json')

    out_filename = 'sub-{}.html'.format(subject_label)
    report = Report(reportlet_path, config, out_dir, run_uuid, out_filename)
    return report.generate_report()
Example #8
def get_outputnode_spec():
    """
    Generate outputnode's fields from I/O spec file.

    Examples
    --------
    >>> get_outputnode_spec()  # doctest: +NORMALIZE_WHITESPACE
    ['t1w_preproc', 't1w_mask', 't1w_dseg', 't1w_tpms',
    'std_preproc', 'std_mask', 'std_dseg', 'std_tpms',
    'anat2std_xfm', 'std2anat_xfm',
    't1w_aseg', 't1w_aparc',
    't1w2fsnative_xfm', 'fsnative2t1w_xfm',
    'surfaces']

    """
    spec = loads(
        Path(pkgrf('aslprep',
                   'smriprep/data/io_spec.json')).read_text())["queries"]
    fields = [
        '_'.join((m, s)) for m in ('t1w', 'std')
        for s in spec["baseline"].keys()
    ]
    fields += [s for s in spec["std_xfms"].keys()]
    fields += [s for s in spec["surfaces"].keys()]
    return fields
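To make the field-name construction concrete, here is the same logic run against a made-up spec dict; the real layout of io_spec.json is only partially visible here, so the keys below are assumptions:

# Hypothetical stand-in for the "queries" section of io_spec.json.
spec = {
    "baseline": {"preproc": None, "mask": None},
    "std_xfms": {"anat2std_xfm": None},
    "surfaces": {"surfaces": None},
}
fields = ['_'.join((m, s)) for m in ('t1w', 'std') for s in spec["baseline"]]
fields += list(spec["std_xfms"])
fields += list(spec["surfaces"])
print(fields)
# -> ['t1w_preproc', 't1w_mask', 'std_preproc', 'std_mask',
#     'anat2std_xfm', 'surfaces']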
Example #9
    def __init__(self,
                 reportlets_dir,
                 out_dir,
                 run_uuid,
                 config=None,
                 subject_id=None,
                 out_filename='report.html',
                 packagename=None):
        self.root = reportlets_dir

        # Initialize structuring elements
        self.sections = []
        self.errors = []
        self.out_dir = Path(out_dir)
        self.out_filename = out_filename
        self.run_uuid = run_uuid
        self.template_path = None
        self.packagename = packagename
        self.subject_id = subject_id
        if subject_id is not None and subject_id.startswith('sub-'):
            self.subject_id = self.subject_id[4:]

        if self.subject_id is not None:
            self.out_filename = 'sub-{}.html'.format(self.subject_id)

        if config is None:
            config = pkgrf('niworkflows', 'reports/fmriprep.yml')
        self._load_config(Path(config))
Example #10
    def _get_params_dist(self):
        preparams = {
            'std__by': ['site'],
            'std__with_centering': [True, False],
            'std__with_scaling': [True, False],
            'std__columns': [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
            'sel_cols__columns': [self._ftnames + ['site']],
            'ft_sites__disable': [False, True],
            'ft_noise__disable': [False, True],
        }

        prefix = self._model + '__'
        if self._multiclass:
            prefix += 'estimator__'

        clfparams = _load_parameters(
            (pkgrf('mriqc', 'data/model_selection.yml')
                if self._param_file is None else self._param_file)
        )
        modparams = {prefix + k: v for k, v in list(clfparams[self._model][0].items())}
        if self._debug:
            preparams = {
                'std__by': ['site'],
                'std__with_centering': [True],
                'std__with_scaling': [True],
                'std__columns': [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
                'sel_cols__columns': [self._ftnames + ['site']],
                'ft_sites__disable': [True],
                'ft_noise__disable': [True],
            }
            modparams = {k: [v[0]] for k, v in list(modparams.items())}

        return {**preparams, **modparams}
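The return value merges the two parameter grids with a double splat; a tiny runnable check of that merging idiom (later keys win on collisions):

preparams = {'std__by': ['site'], 'ft_sites__disable': [False, True]}
modparams = {'rfc__n_estimators': [100, 500]}  # hypothetical model grid
print({**preparams, **modparams})
# -> {'std__by': ['site'], 'ft_sites__disable': [False, True],
#     'rfc__n_estimators': [100, 500]}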
Example #11
def test_config_spaces():
    """Check that all necessary spaces are recorded in the config."""
    filename = Path(pkgrf("fprodents", "data/tests/config.toml"))
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != "environment":
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.init()
    config.loggers.init()
    config.init_spaces()

    spaces = config.workflow.spaces
    assert "Fischer344:res-native" not in [
        str(s) for s in spaces.get_standard(full_spec=True)
    ]

    config.init_spaces()
    spaces = config.workflow.spaces

    assert "Fischer344:res-native" in [
        str(s) for s in spaces.get_standard(full_spec=True)
    ]

    config.execution.output_spaces = None
    config.workflow.use_aroma = False
    config.init_spaces()
    spaces = config.workflow.spaces

    assert not [str(s) for s in spaces.get_standard(full_spec=True)]

    assert [
        format_reference((s.fullname, s.spec)) for s in spaces.references
        if s.standard and s.dim == 3
    ] == ["Fischer344"]
Example #12
def save_html(template, report_file_name, unique_string, **kwargs):
    '''Save an actual HTML file with name report_file_name. The first
    character of unique_string must be alphabetical, and every call to
    save_html must receive a unique unique_string. All kwargs values should
    contain valid HTML that will be sent to the jinja2 renderer.'''

    if not unique_string[0].isalpha():
        raise ValueError('unique_string must be a valid id value in html; '
                         'the first character must be alphabetical. Received unique_string={}'
                         .format(unique_string))

    # validate html: the values (not the keys) hold the HTML fragments
    validator = HTMLValidator(unique_string=unique_string)
    for html in kwargs.values():
        validator.feed(html)
        validator.close()

    searchpath = pkgrf('niworkflows', '/')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath),
        trim_blocks=True, lstrip_blocks=True
    )
    report_tpl = env.get_template('viz/' + template)
    kwargs['unique_string'] = unique_string
    report_render = report_tpl.render(kwargs)

    with open(report_file_name, 'w' if PY3 else 'wb') as handle:
        handle.write(report_render)
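The jinja2 pattern used above (a FileSystemLoader rooted at the package directory, then get_template with a package-relative path) can be exercised stand-alone; the template name and content below are placeholders:

import tempfile
from pathlib import Path
import jinja2

tmpdir = Path(tempfile.mkdtemp())
(tmpdir / "report.tpl").write_text('<h1 id="{{ unique_string }}">ok</h1>')

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(searchpath=str(tmpdir)),
    trim_blocks=True, lstrip_blocks=True,
)
print(env.get_template("report.tpl").render(unique_string="a1"))
# -> <h1 id="a1">ok</h1>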
Example #14
def test_generated_reportlets(bids_sessions, ordering):
    # make independent report
    out_dir = tempfile.mkdtemp()
    report = Report(
        Path(out_dir),
        "fakeuuid",
        reportlets_dir=Path(bids_sessions),
        subject_id="01",
        packagename="fmriprep",
    )
    config = Path(pkgrf("niworkflows", "reports/default.yml"))
    settings = load(config.read_text())
    # change settings to only include some missing ordering
    settings["sections"][3]["ordering"] = ordering
    report.index(settings["sections"])
    # expected number of reportlets
    expected_reportlets_num = len(report.layout.get(extension=".svg"))
    # bids_session uses these entities
    needed_entities = ["session", "task", "ceagent", "run"]
    # the last section is the most recently run
    reportlets_num = len(report.sections[-1].reportlets)
    # get the number of figures in the output directory
    out_layout = BIDSLayout(out_dir, config="figures", validate=False)
    out_figs = len(out_layout.get())
    # if ordering does not contain all the relevant entities
    # then there should be fewer reportlets than expected
    if all(ent in ordering for ent in needed_entities):
        assert reportlets_num == expected_reportlets_num == out_figs
    else:
        assert reportlets_num < expected_reportlets_num == out_figs
Example #15
def test_report1():
    test_data_path = pkgrf(
        'niworkflows',
        os.path.join('data', 'tests', 'work', 'reportlets'))
    out_dir = tempfile.mkdtemp()

    return Report(Path(test_data_path), Path(out_dir), 'fakeuuid',
                  subject_id='01', packagename='aslprep')
Example #16
def mock_config():
    """Create a mock config for documentation and testing purposes."""
    from . import config

    filename = Path(pkgrf("mriqc", "data/config-example.toml"))
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != "environment":
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.init()
    config.loggers.init()

    config.execution.work_dir = Path(mkdtemp())
    config.execution.bids_dir = Path(pkgrf("mriqc",
                                           "data/tests/ds000005")).absolute()
    config.execution.init()

    yield
Example #17
    def generate_report(self):
        logs_path = self.out_dir / 'fmriprep' / 'logs'

        boilerplate = []
        boiler_idx = 0

        if (logs_path / 'CITATION.html').exists():
            text = (logs_path / 'CITATION.html').read_text(encoding='UTF-8')
            text = '<div class="boiler-html">%s</div>' % re.compile(
                '<body>(.*?)</body>',
                re.DOTALL | re.IGNORECASE).findall(text)[0].strip()
            boilerplate.append((boiler_idx, 'HTML', text))
            boiler_idx += 1

        if (logs_path / 'CITATION.md').exists():
            text = '<pre>%s</pre>\n' % (logs_path / 'CITATION.md').read_text(encoding='UTF-8')
            boilerplate.append((boiler_idx, 'Markdown', text))
            boiler_idx += 1

        if (logs_path / 'CITATION.tex').exists():
            text = (logs_path / 'CITATION.tex').read_text(encoding='UTF-8')
            text = re.compile(
                r'\\begin{document}(.*?)\\end{document}',
                re.DOTALL | re.IGNORECASE).findall(text)[0].strip()
            text = '<pre>%s</pre>\n' % text
            text += '<h3>Bibliography</h3>\n'
            text += '<pre>%s</pre>\n' % Path(
                pkgrf('fmriprep', 'data/boilerplate.bib')).read_text(encoding='UTF-8')
            boilerplate.append((boiler_idx, 'LaTeX', text))
            boiler_idx += 1

        searchpath = pkgrf('fmriprep', '/')
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath=searchpath),
            trim_blocks=True, lstrip_blocks=True
        )
        report_tpl = env.get_template('viz/report.tpl')
        report_render = report_tpl.render(sections=self.sections, errors=self.errors,
                                          boilerplate=boilerplate)

        # Write out report
        (self.out_dir / 'fmriprep' / self.out_filename).write_text(report_render, encoding='UTF-8')
        return len(self.errors)
Example #18
def generate_report(self):
    searchpath = pkgrf('fmriprep', '/')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath),
        trim_blocks=True, lstrip_blocks=True
    )
    report_tpl = env.get_template('viz/report.tpl')
    report_render = report_tpl.render(sections=self.sections, errors=self.errors)
    with open(os.path.join(self.out_dir, "fmriprep", self.out_filename), 'w') as fp:
        fp.write(report_render)
    return len(self.errors)
Example #19
    def _get_params(self):
        preparams = [
            {
                'std__by': ['site'],
                'std__with_centering': [True],
                'std__with_scaling': [True],
                'std__columns':
                [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
                'sel_cols__columns': [self._ftnames + ['site']],
                'ft_sites__disable': [False, True],
                'ft_noise__disable': [False, True],
            },
            {
                'std__by': ['site'],
                'std__with_centering': [True, False],
                'std__with_scaling': [True, False],
                'std__columns':
                [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
                'sel_cols__columns': [self._ftnames + ['site']],
                'ft_sites__disable': [True],
                'ft_noise__disable': [True],
            },
        ]

        if self._debug:
            preparams = [
                {
                    'std__by': ['site'],
                    'std__with_centering': [False],
                    'std__with_scaling': [False],
                    'std__columns':
                    [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
                    'sel_cols__columns': [self._ftnames + ['site']],
                    'ft_sites__disable': [True],
                    'ft_noise__disable': [True],
                },
            ]

        prefix = self._model + '__'
        if self._multiclass:
            prefix += 'estimator__'

        clfparams = _load_parameters(
            (pkgrf('mriqc', 'data/classifier_settings.yml')
             if self._param_file is None else self._param_file))
        modparams = {
            prefix + k: v
            for k, v in list(clfparams[self._model][0].items())
        }
        if self._debug:
            modparams = {k: [v[0]] for k, v in list(modparams.items())}

        return [{**prep, **modparams} for prep in preparams]
Example #20
def test_report1():
    test_data_path = pkgrf("niworkflows",
                           os.path.join("data", "tests", "work", "reportlets"))
    out_dir = tempfile.mkdtemp()

    return Report(
        Path(out_dir),
        "fakeuuid",
        reportlets_dir=Path(test_data_path),
        subject_id="01",
        packagename="fmriprep",
    )
Example #21
    def _get_params(self):

        # Some baseline parameters
        baseparam = {
            'std__by': ['site'],
            'std__columns':
            [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
            'sel_cols__columns': [self._ftnames + ['site']],
        }

        # Load in classifier parameters
        clfparams = _load_parameters(
            (pkgrf('mriqc', 'data/classifier_settings.yml')
             if self._param_file is None else self._param_file))

        # Read preprocessing parameters
        if 'preproc' in clfparams:
            preparams = []
            for el in clfparams['preproc']:
                pcombination = {}
                for pref, subel in list(el.items()):
                    for k, v in list(subel.items()):
                        pcombination[pref + '__' + k] = v
                preparams.append(pcombination)
        else:
            preparams = [{
                'std__with_centering': [True],
                'std__with_scaling': [True],
                'ft_sites__disable': [False],
                'ft_noise__disable': [False],
            }]

        # Set base parameters
        preparams = [{**baseparam, **prep} for prep in preparams]

        # Extract this model parameters
        prefix = self._model + '__'
        if self._multiclass:
            prefix += 'estimator__'
        modparams = {
            prefix + k: v
            for k, v in list(clfparams[self._model][0].items())
        }

        # Evaluate just one model if debug: truncate each grid to its first
        # value while modparams is still a dict
        if self._debug:
            modparams = {k: [v[0]] for k, v in list(modparams.items())}

        # Merge model parameters + preprocessing
        return [{**prep, **modparams} for prep in preparams]
Example #22
def generate_report(self):
    searchpath = pkgrf('fmriprep', '/')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath),
        trim_blocks=True, lstrip_blocks=True
    )
    report_tpl = env.get_template('viz/report.tpl')
    report_render = report_tpl.render(sub_reports=self.sub_reports, errors=self.errors,
                                      date=time.strftime("%Y-%m-%d %H:%M:%S %z"),
                                      version=__version__)
    with open(os.path.join(self.out_dir, "fmriprep", self.out_filename), 'w') as fp:
        fp.write(report_render)
    return len(self.errors)
Example #23
def _read_model(model_name):
    from sys import version_info
    import simplejson as json
    from pkg_resources import resource_filename as pkgrf

    with open(pkgrf('regseg', 'data/model_%s.json' % model_name.lower()),
              'rb' if version_info[0] < 3 else 'r') as sfh:
        model = json.load(sfh)

    name = ['%s.surf' % m for m in model.keys()]
    labels = list(model.values())

    return name, labels
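A hypothetical model_*.json payload, to make the key/value split concrete; the real file layout is an assumption here:

# Assumed shape: surface-name stems mapping to label lists.
model = {"lh.white": [1, 2], "rh.white": [3, 4]}
name = ['%s.surf' % m for m in model.keys()]
labels = list(model.values())
print(name, labels)  # -> ['lh.white.surf', 'rh.white.surf'] [[1, 2], [3, 4]]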
Example #24
def test_bunch_hash():
    # NOTE: Since the path to the json file is included in the Bunch,
    # the hash will be unique to each machine.
    json_pth = pkgrf("nipype", os.path.join("testing", "data", "realign_json.json"))

    b = nib.Bunch(infile=json_pth, otherthing="blue", yat=True)
    newbdict, bhash = b._get_bunch_hash()
    assert bhash == "d1f46750044c3de102efc847720fc35f"
    # Make sure the hash stored in the json file for `infile` is correct.
    jshash = md5()
    with open(json_pth, "r") as fp:
        jshash.update(fp.read().encode("utf-8"))
    assert newbdict["infile"][0][1] == jshash.hexdigest()
    assert newbdict["yat"] is True
Example #25
    def _get_params(self):

        # Some baseline parameters
        baseparam = {
            'std__by': ['site'],
            'std__columns': [[ft for ft in self._ftnames if ft in FEATURE_NORM]],
            'sel_cols__columns': [self._ftnames + ['site']],
        }

        # Load in classifier parameters
        clfparams = _load_parameters(
            (pkgrf('mriqc', 'data/classifier_settings.yml')
                if self._param_file is None else self._param_file)
        )

        # Read preprocessing parameters
        if 'preproc' in clfparams:
            preparams = []
            for el in clfparams['preproc']:
                pcombination = {}
                for pref, subel in list(el.items()):
                    for k, v in list(subel.items()):
                        pcombination[pref + '__' + k] = v
                preparams.append(pcombination)
        else:
            preparams = [{
                'std__with_centering': [True],
                'std__with_scaling': [True],
                'ft_sites__disable': [False],
                'ft_noise__disable': [False],
            }]

        # Set base parameters
        preparams = [{**baseparam, **prep} for prep in preparams]

        # Extract this model parameters
        prefix = self._model + '__'
        if self._multiclass:
            prefix += 'estimator__'
        modparams = {prefix + k: v for k, v in list(clfparams[self._model][0].items())}

        # Evaluate just one model if debug: truncate each grid to its first
        # value while modparams is still a dict
        if self._debug:
            modparams = {k: [v[0]] for k, v in list(modparams.items())}

        # Merge model parameters + preprocessing
        return [{**prep, **modparams} for prep in preparams]
Example #26
def test_bunch_hash():
    # NOTE: Since the path to the json file is included in the Bunch,
    # the hash will be unique to each machine.
    json_pth = pkgrf('nipype',
                     os.path.join('testing', 'data', 'realign_json.json'))

    b = nib.Bunch(infile=json_pth, otherthing='blue', yat=True)
    newbdict, bhash = b._get_bunch_hash()
    assert bhash == 'd1f46750044c3de102efc847720fc35f'
    # Make sure the hash stored in the json file for `infile` is correct.
    jshash = md5()
    with open(json_pth, 'r') as fp:
        jshash.update(fp.read().encode('utf-8'))
    assert newbdict['infile'][0][1] == jshash.hexdigest()
    assert newbdict['yat'] is True
Example #27
    def __init__(self,
                 reportlets_dir,
                 out_dir,
                 run_uuid,
                 config=None,
                 subject_id=None,
                 out_filename='report.html',
                 packagename=None):
        self.root = reportlets_dir

        # Add a new figures spec
        try:
            add_config_paths(
                figures=pkgrf('niworkflows', 'reports/figures.json'))
        except ValueError as e:
            if "Configuration 'figures' already exists" != str(e):
                raise

        # Initialize structuring elements
        self.sections = []
        self.errors = []
        self.out_dir = Path(out_dir)
        self.out_filename = out_filename
        self.run_uuid = run_uuid
        self.template_path = None
        self.packagename = packagename
        self.subject_id = subject_id
        if subject_id is not None and subject_id.startswith('sub-'):
            self.subject_id = self.subject_id[4:]

        if self.subject_id is not None:
            self.out_filename = 'sub-{}.html'.format(self.subject_id)

        if config is None:
            config = pkgrf('niworkflows', 'reports/fmriprep.yml')
        self._load_config(Path(config))
Example #28
def get_parser():
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
    parser = ArgumentParser(description='MRIQC model selection and held-out evaluation',
                            formatter_class=RawTextHelpFormatter)

    g_clf = parser.add_mutually_exclusive_group()
    g_clf.add_argument('--train', nargs='*',
                       help='training data tables, X and Y, leave empty for ABIDE.')
    g_clf.add_argument('--load-classifier', nargs="?", type=str, default='',
                       help='load a previously saved classifier')

    parser.add_argument('--test', nargs='*',
                        help='test data tables, X and Y, leave empty for DS030.')
    parser.add_argument('-X', '--evaluation-data', help='classify this CSV table of IQMs')

    parser.add_argument('--train-balanced-leaveout', action='store_true', default=False,
                        help='leave out a balanced, random, sample of training examples')
    parser.add_argument('--multiclass', '--ms', action='store_true', default=False,
                        help='do not binarize labels')

    g_input = parser.add_argument_group('Options')
    g_input.add_argument('-P', '--parameters', action='store',
                         default=pkgrf('mriqc', 'data/classifier_settings.yml'))
    g_input.add_argument('-M', '--model', action='store', choices=['rfc', 'xgb'],
                         default='rfc', help='model')

    g_input.add_argument('-S', '--scorer', action='store', default='roc_auc')
    g_input.add_argument('--cv', action='store', default='loso',
                         choices=['kfold', 'loso', 'balanced-kfold', 'batch'])
    g_input.add_argument('--debug', action='store_true', default=False)

    g_input.add_argument('--log-file', nargs="?", action='store', default='',
                         help='write log to this file, leave empty for a default log name')

    g_input.add_argument("-v", "--verbose", dest="verbose_count",
                         action="count", default=0,
                         help="increases log verbosity for each occurence.")
    g_input.add_argument('--njobs', action='store', default=-1, type=int,
                         help='number of jobs')

    g_input.add_argument('-o', '--output', action='store', default='predicted_qa.csv',
                         help='file containing the labels assigned by the classifier')

    g_input.add_argument('-t', '--threshold', action='store', default=0.5, type=float,
                         help='decision threshold of the classifier')

    return parser
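A hedged usage sketch, assuming get_parser above is in scope: passing --train with no values yields an empty list, which downstream code (see _parse_set) treats as a request for the packaged ABIDE tables:

opts = get_parser().parse_args(['--train'])
print(opts.model, opts.train)  # -> rfc []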
Example #29
def test_preproc_plus_controllability(bids_singlescan_data, tmpdir):
    preproc_plus_controllability = get_default_cli_args()
    preproc_plus_controllability.bids_dir = WORKING_SINGLE_DIR + "/DSCSDSI"
    preproc_plus_controllability.work_dir = tmpdir
    preproc_plus_controllability.recon_only = False
    preproc_plus_controllability.output_dir = op.join(tmpdir,
                                                      "preproc_control")
    preproc_plus_controllability.recon_spec = pkgrf(
        "qsiprep", "data/pipelines/controllability.json")

    with mock.patch.object(argparse.ArgumentParser,
                           'parse_args',
                           return_value=preproc_plus_controllability):

        with pytest.raises(SystemExit):
            cli_main()
Example #30
def test_config_spaces():
    """Check that all necessary spaces are recorded in the config."""
    filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != 'environment':
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.init()
    config.loggers.init()
    config.init_spaces()

    spaces = config.workflow.spaces
    assert "MNI152NLin6Asym:res-2" not in [
        str(s) for s in spaces.get_standard(full_spec=True)
    ]

    assert "MNI152NLin6Asym_res-2" not in [
        format_reference((s.fullname, s.spec)) for s in spaces.references
        if s.standard and s.dim == 3
    ]

    config.workflow.use_aroma = True
    config.init_spaces()
    spaces = config.workflow.spaces

    assert "MNI152NLin6Asym:res-2" in [
        str(s) for s in spaces.get_standard(full_spec=True)
    ]

    assert "MNI152NLin6Asym_res-2" in [
        format_reference((s.fullname, s.spec)) for s in spaces.references
        if s.standard and s.dim == 3
    ]

    config.execution.output_spaces = None
    config.workflow.use_aroma = False
    config.init_spaces()
    spaces = config.workflow.spaces

    assert [str(s) for s in spaces.get_standard(full_spec=True)] == []

    assert [
        format_reference((s.fullname, s.spec)) for s in spaces.references
        if s.standard and s.dim == 3
    ] == ['MNI152NLin2009cAsym']
    _reset_config()
Example #31
def _parse_set(arg, default):
    if arg is not None and len(arg) == 0:
        return [pkgrf('mriqc', 'data/csv/%s' % name) for name in (
            'x_%s-0.9.6-2017-06-03-99db97c9be2e.csv' % default,
            'y_%s.csv' % default)]

    if arg is not None and len(arg) not in (0, 2):
        raise RuntimeError('Wrong number of parameters.')

    if arg is None:
        return None

    if len(arg) == 2:
        train_exists = [isfile(fname) for fname in arg]
        if len(train_exists) > 0 and not all(train_exists):
            errors = ['file "%s" not found' % fname
                      for fexists, fname in zip(train_exists, arg)
                      if not fexists]
            raise RuntimeError('Errors (%d) loading training set: %s.' % (
                len(errors), ', '.join(errors)))
    return arg
Example #32
def _load_recon_spec(spec_name, sloppy=False):
    prepackaged_dir = pkgrf("qsiprep", "data/pipelines")
    prepackaged = [op.split(fname)[1][:-5] for fname in glob(prepackaged_dir+"/*.json")]
    if op.exists(spec_name):
        recon_spec = spec_name
    elif spec_name in prepackaged:
        recon_spec = op.join(prepackaged_dir + "/{}.json".format(spec_name))
    else:
        raise Exception("{} is neither an existing file nor one of the "
                        "prepackaged pipelines: {}".format(spec_name, prepackaged))
    with open(recon_spec, "r") as f:
        try:
            spec = json.load(f)
        except Exception:
            raise Exception("Unable to read JSON spec. Check the syntax.")
    if sloppy:
        LOGGER.warning("Forcing reconstruction to use unrealistic parameters")
        spec = make_sloppy(spec)
    return spec
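The prepackaged-pipeline discovery strips the directory and the trailing ".json" (five characters) from each path; a stand-alone check of that idiom, with a placeholder path:

import os.path as op

fname = "/opt/qsiprep/data/pipelines/controllability.json"  # placeholder
print(op.split(fname)[1][:-5])  # -> controllability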
Example #33
def _load_recon_spec(spec_name):
    prepackaged_dir = pkgrf("qsiprep", "data/pipelines")
    prepackaged = [
        op.split(fname)[1][:-5] for fname in glob(prepackaged_dir + "/*.json")
    ]
    if op.exists(spec_name):
        recon_spec = spec_name
    elif spec_name in prepackaged:
        recon_spec = op.join(prepackaged_dir + "/{}.json".format(spec_name))
    else:
        raise Exception("{} is neither an existing file nor one of the "
                        "prepackaged pipelines: {}".format(spec_name, prepackaged))
    with open(recon_spec, "r") as f:
        try:
            spec = json.load(f)
        except Exception:
            raise Exception("Unable to read JSON spec. Check the syntax.")
    return spec
Example #35
    def _load_config(self, config):
        from yaml import safe_load as load
        settings = load(config.read_text())
        self.packagename = self.packagename or settings.get('package', None)

        if self.packagename is not None:
            self.root = self.root / self.packagename
            self.out_dir = self.out_dir / self.packagename

        if self.subject_id is not None:
            self.root = self.root / 'sub-{}'.format(self.subject_id)

        # Default template from niworkflows
        template_path = Path(pkgrf('niworkflows', 'reports/report.tpl'))
        if 'template_path' in settings:
            template_path = config.parent / settings['template_path']
        self.template_path = template_path.absolute()
        self.index(settings['sections'])
Example #36
def generate_report(self):
    searchpath = pkgrf('fmriprep', '/')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath),
        trim_blocks=True,
        lstrip_blocks=True)
    report_tpl = env.get_template('viz/report.tpl')
    # Ignore subreports with no children
    sub_reports = [
        sub_report for sub_report in self.sub_reports
        if len(sub_report.run_reports) > 0 or any(
            elem.files_contents for elem in sub_report.elements)
    ]
    report_render = report_tpl.render(sub_reports=sub_reports,
                                      errors=self.errors)
    with open(os.path.join(self.out_dir, "fmriprep", self.out_filename),
              'w') as fp:
        fp.write(report_render)
    return len(self.errors)
Example #37
def test_preproc_pepolar_sdc(tmp_path):
    # Get the empty bids data
    bids_root = pkgrf('qsiprep', 'data/abcd')
    work_dir = str(tmp_path.absolute() / "preproc_pepolar_sdc_work")
    output_dir = str(tmp_path.absolute() / "preproc_pepolar_sdc_output")
    bids_dir = bids_root
    subject_list = ['abcd']
    wf = init_qsiprep_wf(subject_list=subject_list,
                         run_uuid="test",
                         work_dir=work_dir,
                         output_dir=output_dir,
                         bids_dir=bids_dir,
                         ignore=[],
                         debug=False,
                         low_mem=False,
                         anat_only=False,
                         longitudinal=False,
                         freesurfer=False,
                         hires=False,
                         force_spatial_normalization=False,
                         denoise_before_combining=True,
                         dwi_denoise_window=7,
                         combine_all_dwis=True,
                         omp_nthreads=1,
                         skull_strip_template='OASIS',
                         skull_strip_fixed_seed=False,
                         output_spaces=['T1w', 'template'],
                         template='MNI152NLin2009cAsym',
                         output_resolution=1.25,
                         motion_corr_to='iterative',
                         b0_to_t1w_transform='Rigid',
                         hmc_transform='Affine',
                         hmc_model='3dSHORE',
                         impute_slice_threshold=0,
                         write_local_bvecs=False,
                         fmap_bspline=False,
                         fmap_demean=True,
                         use_syn=False,
                         prefer_dedicated_fmaps=True,
                         force_syn=False)

    assert len(wf.list_node_names())
Example #38
def _parse_set(arg, default):
    if arg is not None and len(arg) == 0:
        names = f"x_{default}.csv", f"y_{default}.csv"
        return [pkgrf("mriqc", f"data/csv/{name}") for name in names]

    if arg is not None and len(arg) not in (0, 2):
        raise RuntimeError(messages.CLF_WRONG_PARAMETER_COUNT)

    if arg is None:
        return None

    if len(arg) == 2:
        train_exists = [isfile(fname) for fname in arg]
        if len(train_exists) > 0 and not all(train_exists):
            errors = [
                f"file '{fname}' not found"
                for fexists, fname in zip(train_exists, arg) if not fexists
            ]
            runtime_error = messages.CLF_TRAIN_LOAD_ERROR.format(
                n_errors=len(errors), errors=", ".join(errors))
            raise RuntimeError(runtime_error)
    return arg
Example #39
def epi_mni_align(settings, name='SpatialNormalization'):
    """
    Uses ANTs registration (via niworkflows' RobustMNINormalization) to find
    the transform that maps the EPI space into the MNI152-nonlinear-symmetric
    atlas.

    The input epi_mean is the averaged and brain-masked EPI timeseries.

    Returns the EPI mean resampled in MNI space (to check the registration)
    and the associated "lobe" parcellation in EPI space.

    .. workflow::

      from mriqc.workflows.functional import epi_mni_align
      wf = epi_mni_align({})

    """
    from niworkflows.data import get_mni_icbm152_nlin_asym_09c as get_template
    from niworkflows.interfaces.registration import (
        RobustMNINormalizationRPT as RobustMNINormalization
    )
    from pkg_resources import resource_filename as pkgrf

    # Get settings
    testing = settings.get('testing', False)
    n_procs = settings.get('n_procs', 1)
    ants_nthreads = settings.get('ants_nthreads', DEFAULTS['ants_nthreads'])

    # Init template
    mni_template = get_template()

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['epi_mean', 'epi_mask']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['epi_mni', 'epi_parc', 'report']), name='outputnode')

    epimask = pe.Node(fsl.ApplyMask(), name='EPIApplyMask')

    n4itk = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='SharpenEPI')

    norm = pe.Node(RobustMNINormalization(
        num_threads=ants_nthreads,
        float=settings.get('ants_float', False),
        template='mni_icbm152_nlin_asym_09c',
        reference_image=pkgrf('mriqc', 'data/mni/2mm_T2_brain.nii.gz'),
        flavor='testing' if testing else 'precise',
        moving='EPI',
        generate_report=True,),
        name='EPI2MNI',
        num_threads=n_procs,
        mem_gb=3)

    # Warp segmentation into EPI space
    invt = pe.Node(ants.ApplyTransforms(
        float=True,
        input_image=str(Path(mni_template) / '1mm_parc.nii.gz'),
        dimension=3, default_value=0, interpolation='NearestNeighbor'),
        name='ResampleSegmentation')

    workflow.connect([
        (inputnode, invt, [('epi_mean', 'reference_image')]),
        (inputnode, n4itk, [('epi_mean', 'input_image')]),
        (inputnode, epimask, [('epi_mask', 'mask_file')]),
        (n4itk, epimask, [('output_image', 'in_file')]),
        (epimask, norm, [('out_file', 'moving_image')]),
        (norm, invt, [
            ('inverse_composite_transform', 'transforms')]),
        (invt, outputnode, [('output_image', 'epi_parc')]),
        (norm, outputnode, [('warped_image', 'epi_mni'),
                            ('out_report', 'report')]),

    ])
    return workflow
Example #40
def main():
    """Entry point"""
    import yaml
    from io import open
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
    from pkg_resources import resource_filename as pkgrf
    from ..classifier.cv import NestedCVHelper
    from .. import logging, LOG_FORMAT

    warnings.showwarning = warn_redirect

    parser = ArgumentParser(description='MRIQC Nested cross-validation evaluation',
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('training_data', help='input data')
    parser.add_argument('training_labels', help='input data')

    g_input = parser.add_argument_group('Inputs')
    g_input.add_argument('-P', '--parameters', action='store',
                         default=pkgrf('mriqc', 'data/grid_nested_cv.yml'))

    g_input.add_argument('--cv-inner', action='store', default=10,
                         help='inner loop of cross-validation')
    g_input.add_argument('--cv-outer', action='store', default='loso',
                         help='outer loop of cross-validation')

    g_input.add_argument('--log-file', action='store', help='write log to this file')
    g_input.add_argument('--log-level', action='store', default='INFO',
                         choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'])

    g_input.add_argument('-o', '--output-file', action='store', default='cv_inner_loop.csv',
                         help='the output table with cross validated scores')
    g_input.add_argument('-O', '--output-outer-cv', action='store', default='cv_outer_loop.csv',
                         help='the output table with cross validated scores')

    g_input.add_argument('--njobs', action='store', default=-1, type=int,
                         help='number of jobs')
    g_input.add_argument('--task-id', action='store')


    opts = parser.parse_args()

    logger = logging.getLogger()
    if opts.log_file is not None:
        fhl = logging.FileHandler(opts.log_file)
        fhl.setFormatter(fmt=logging.Formatter(LOG_FORMAT))
        logger.addHandler(fhl)
    logger.setLevel(opts.log_level)

    parameters = None
    if opts.parameters is not None:
        with open(opts.parameters) as paramfile:
            parameters = yaml.safe_load(paramfile)

    cvhelper = NestedCVHelper(opts.training_data, opts.training_labels,
                              n_jobs=opts.njobs, param=parameters,
                              task_id=opts.task_id)

    cvhelper.cv_inner = read_cv(opts.cv_inner)
    cvhelper.cv_outer = read_cv(opts.cv_outer)

    # Run inner loop before setting held-out data, for hygiene
    cvhelper.fit()
    with open(opts.output_file, 'a' if PY3 else 'ab') as outfile:
        flock(outfile, LOCK_EX)
        save_headers = op.getsize(opts.output_file) == 0
        cvhelper.get_inner_cv_scores().to_csv(
            outfile, index=False, header=save_headers)
        flock(outfile, LOCK_UN)

    with open(opts.output_outer_cv, 'a' if PY3 else 'ab') as outfile:
        flock(outfile, LOCK_EX)
        save_headers = op.getsize(opts.output_outer_cv) == 0
        cvhelper.get_outer_cv_scores().to_csv(
            outfile, index=False, header=save_headers)
        flock(outfile, LOCK_UN)
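The CSV appends above are serialized with an advisory file lock so concurrent tasks do not interleave rows; a minimal, POSIX-only sketch of the same lock/append/unlock pattern (the file name is a placeholder):

from fcntl import flock, LOCK_EX, LOCK_UN

with open('scores.csv', 'a') as outfile:
    flock(outfile, LOCK_EX)   # block until the exclusive lock is held
    outfile.write('row\n')
    flock(outfile, LOCK_UN)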
Example #41
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution
    graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows fmriprep to enforce
    a hard-limited memory-scope.

    """
    from subprocess import check_call, CalledProcessError, TimeoutExpired
    from pkg_resources import resource_filename as pkgrf
    from shutil import copyfile

    from nipype import logging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from ..__about__ import __version__
    from ..workflows.base import init_fmriprep_wf
    from ..viz.reports import generate_reports

    logger = logging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
    """.format

    output_spaces = opts.output_space or []

    # Validity of some inputs
    # ERROR check if use_aroma was specified, but the correct template was not
    if opts.use_aroma and (opts.template != 'MNI152NLin2009cAsym' or
                           'template' not in output_spaces):
        output_spaces.append('template')
        logger.warning(
            'Option "--use-aroma" requires functional images to be resampled to MNI space. '
            'The argument "template" has been automatically added to the list of output '
            'spaces (option "--output-space").'
        )

    if opts.cifti_output and (opts.template != 'MNI152NLin2009cAsym' or
                              'template' not in output_spaces):
        output_spaces.append('template')
        logger.warning(
            'Option "--cifti-output" requires functional images to be resampled to MNI space. '
            'The argument "template" has been automatically added to the list of output '
            'spaces (option "--output-space").'
        )

    # Check output_space
    if 'template' not in output_spaces and (opts.use_syn_sdc or opts.force_syn):
        msg = ['SyN SDC correction requires T1 to MNI registration, but '
               '"template" is not specified in "--output-space" arguments.',
               'Option --use-syn will be cowardly dismissed.']
        if opts.force_syn:
            output_spaces.append('template')
            msg[1] = (' Since --force-syn has been requested, "template" has been added to'
                      ' the "--output-space" list.')
        logger.warning(' '.join(msg))

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())

    # First check that bids_dir looks like a BIDS folder
    bids_dir = os.path.abspath(opts.bids_dir)
    subject_list = collect_participants(
        bids_dir, participant_label=opts.participant_label)

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads

    if opts.mem_mb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)

    if 1 < nthreads < omp_nthreads:
        logger.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)

    # Set up directories
    output_dir = op.abspath(opts.output_dir)
    log_dir = op.join(output_dir, 'fmriprep', 'logs')
    work_dir = op.abspath(opts.work_dir or 'work')  # Set work/ as default

    # Check and create output and working directories
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(work_dir, exist_ok=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': log_dir,
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash or opts.work_dir is None,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    retval['return_code'] = 0
    retval['plugin_settings'] = plugin_settings
    retval['bids_dir'] = bids_dir
    retval['output_dir'] = output_dir
    retval['work_dir'] = work_dir
    retval['subject_list'] = subject_list
    retval['run_uuid'] = run_uuid
    retval['workflow'] = None

    # Called with reports only
    if opts.reports_only:
        logger.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
        retval['return_code'] = generate_reports(subject_list, output_dir, work_dir, run_uuid)
        return retval

    # Build main workflow
    logger.log(25, INIT_MSG(
        version=__version__,
        bids_dir=bids_dir,
        subject_list=subject_list,
        uuid=run_uuid)
    )

    template_out_grid = opts.template_resampling_grid
    if opts.output_grid_reference is not None:
        logger.warning(
            'Option --output-grid-reference is deprecated, please use '
            '--template-resampling-grid')
        template_out_grid = template_out_grid or opts.output_grid_reference
    if opts.debug:
        logger.warning('Option --debug is deprecated and has no effect')

    retval['workflow'] = init_fmriprep_wf(
        subject_list=subject_list,
        task_id=opts.task_id,
        echo_idx=opts.echo_idx,
        run_uuid=run_uuid,
        ignore=opts.ignore,
        debug=opts.sloppy,
        low_mem=opts.low_mem,
        anat_only=opts.anat_only,
        longitudinal=opts.longitudinal,
        t2s_coreg=opts.t2s_coreg,
        omp_nthreads=omp_nthreads,
        skull_strip_template=opts.skull_strip_template,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        work_dir=work_dir,
        output_dir=output_dir,
        bids_dir=bids_dir,
        freesurfer=opts.run_reconall,
        output_spaces=output_spaces,
        template=opts.template,
        medial_surface_nan=opts.medial_surface_nan,
        cifti_output=opts.cifti_output,
        template_out_grid=template_out_grid,
        hires=opts.hires,
        use_bbr=opts.use_bbr,
        bold2t1w_dof=opts.bold2t1w_dof,
        fmap_bspline=opts.fmap_bspline,
        fmap_demean=opts.fmap_no_demean,
        use_syn=opts.use_syn_sdc,
        force_syn=opts.force_syn,
        use_aroma=opts.use_aroma,
        aroma_melodic_dim=opts.aroma_melodic_dimensionality,
        ignore_aroma_err=opts.ignore_aroma_denoising_errors,
    )
    retval['return_code'] = 0

    logs_path = Path(output_dir) / 'fmriprep' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        (logs_path / 'CITATION.md').write_text(boilerplate)
        logger.log(25, 'Works derived from this fMRIPrep execution should '
                   'include the following boilerplate:\n\n%s', boilerplate)

        # Generate HTML file resolving citations
        cmd = ['pandoc', '-s', '--bibliography',
               pkgrf('fmriprep', 'data/boilerplate.bib'),
               '--filter', 'pandoc-citeproc',
               str(logs_path / 'CITATION.md'),
               '-o', str(logs_path / 'CITATION.html')]
        try:
            check_call(cmd, timeout=10)
        except (FileNotFoundError, CalledProcessError, TimeoutExpired):
            logger.warning('Could not generate CITATION.html file:\n%s',
                           ' '.join(cmd))

        # Generate LaTeX file resolving citations
        cmd = ['pandoc', '-s', '--bibliography',
               pkgrf('fmriprep', 'data/boilerplate.bib'),
               '--natbib', str(logs_path / 'CITATION.md'),
               '-o', str(logs_path / 'CITATION.tex')]
        try:
            check_call(cmd, timeout=10)
        except (FileNotFoundError, CalledProcessError, TimeoutExpired):
            logger.warning('Could not generate CITATION.tex file:\n%s',
                           ' '.join(cmd))
        else:
            copyfile(pkgrf('fmriprep', 'data/boilerplate.bib'),
                     (logs_path / 'CITATION.bib'))

    return retval
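The boilerplate conversion shells out to pandoc and degrades gracefully when it is missing or slow; a generic, runnable illustration of that guarded-subprocess pattern:

from subprocess import check_call, CalledProcessError, TimeoutExpired

cmd = ['pandoc', '--version']  # placeholder command
try:
    check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
    print('Could not run: %s' % ' '.join(cmd))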
Example #42
    def _run_interface(self, runtime):
        nii = nb.load(self.inputs.in_file)
        zooms = nii.header.get_zooms()
        size_diff = np.array(zooms[:3]) - (self.inputs.pixel_size - 0.1)
        if np.all(size_diff >= -1e-3):
            IFLOGGER.info('Voxel size is large enough')
            self._results['out_file'] = self.inputs.in_file
            if isdefined(self.inputs.in_mask):
                self._results['out_mask'] = self.inputs.in_mask
            return runtime

        IFLOGGER.info('One or more voxel dimensions (%f, %f, %f) are smaller than '
                      'the requested voxel size (%f) - diff=(%f, %f, %f)', zooms[0],
                      zooms[1], zooms[2], self.inputs.pixel_size, size_diff[0],
                      size_diff[1], size_diff[2])

        # Figure out new matrix
        # 1) Get base affine
        aff_base = nii.header.get_base_affine()
        aff_base_inv = np.linalg.inv(aff_base)

        # 2) Find center pixel in mm
        center_idx = (np.array(nii.shape[:3]) - 1) * 0.5
        center_mm = aff_base.dot(center_idx.tolist() + [1])

        # 3) Find extent of each dimension
        min_mm = aff_base.dot([-0.5, -0.5, -0.5, 1])
        max_mm = aff_base.dot((np.array(nii.shape[:3]) - 0.5).tolist() + [1])
        extent_mm = np.abs(max_mm - min_mm)[:3]

        # 4) Find new matrix size
        new_size = np.array(extent_mm / self.inputs.pixel_size, dtype=int)

        # 5) Initialize new base affine
        new_base = aff_base[:3, :3] * np.abs(aff_base_inv[:3, :3]) * self.inputs.pixel_size

        # 6) Find new center
        new_center_idx = (new_size - 1) * 0.5
        new_affine_base = np.eye(4)
        new_affine_base[:3, :3] = new_base
        new_affine_base[:3, 3] = center_mm[:3] - new_base.dot(new_center_idx)

        # 7) Rotate new matrix
        rotation = nii.affine.dot(aff_base_inv)
        new_affine = rotation.dot(new_affine_base)

        # 8) Generate new reference image
        hdr = nii.header.copy()
        hdr.set_data_shape(new_size)
        ref_file = 'resample_ref.nii.gz'
        nb.Nifti1Image(np.zeros(new_size, dtype=nii.get_data_dtype()),
                       new_affine, hdr).to_filename(ref_file)

        out_prefix, ext = op.splitext(op.basename(self.inputs.in_file))
        if ext == '.gz':
            out_prefix, ext2 = op.splitext(out_prefix)
            ext = ext2 + ext

        out_file = op.abspath('%s_resampled%s' % (out_prefix, ext))

        # 9) Resample new image
        ApplyTransforms(
            dimension=3,
            input_image=self.inputs.in_file,
            reference_image=ref_file,
            interpolation='LanczosWindowedSinc',
            transforms=[pkgrf('mriqc', 'data/itk_identity.tfm')],
            output_image=out_file,
        ).run()

        self._results['out_file'] = out_file

        if isdefined(self.inputs.in_mask):
            hdr = nii.header.copy()
            hdr.set_data_shape(new_size)
            hdr.set_data_dtype(np.uint8)
            ref_mask = 'mask_ref.nii.gz'
            nb.Nifti1Image(np.zeros(new_size, dtype=np.uint8),
                           new_affine, hdr).to_filename(ref_mask)

            out_mask = op.abspath('%s_resmask%s' % (out_prefix, ext))
            ApplyTransforms(
                dimension=3,
                input_image=self.inputs.in_mask,
                reference_image=ref_mask,
                interpolation='NearestNeighbor',
                transforms=[pkgrf('mriqc', 'data/itk_identity.tfm')],
                output_image=out_mask,
            ).run()

            self._results['out_mask'] = out_mask

        return runtime
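
The affine arithmetic in steps 1-7 above can be checked in isolation. A minimal numpy sketch, assuming a hypothetical axis-aligned 64×64×64 image at 2 mm isotropic being regridded to 1 mm:

import numpy as np

# Hypothetical input: 64**3 voxels at 2 mm isotropic, axis-aligned
shape = np.array([64, 64, 64])
affine = np.diag([2.0, 2.0, 2.0, 1.0])
pixel_size = 1.0

# Steps 3-4: physical extent and new matrix size
extent_mm = shape * 2.0
new_size = (extent_mm / pixel_size).astype(int)   # -> [128 128 128]

# Step 5: base affine rescaled to the requested voxel size
new_base = np.eye(3) * pixel_size

# Steps 2 and 6: keep the field-of-view center fixed
center_mm = affine.dot(np.append((shape - 1) * 0.5, 1))[:3]
new_affine = np.eye(4)
new_affine[:3, :3] = new_base
new_affine[:3, 3] = center_mm - new_base.dot((new_size - 1) * 0.5)

# The physical field of view is preserved: the outer voxel edge at
# index -0.5 maps to the same mm coordinate under both affines
assert np.allclose(affine.dot([-0.5, -0.5, -0.5, 1])[:3],
                   new_affine.dot([-0.5, -0.5, -0.5, 1])[:3])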
Example #43
    def __init__(self):
        super(IndividualTemplate, self).__init__(pkgrf('mriqc', 'data/reports/individual.html'))
Example #44
def main():
    """Entry point"""
    import yaml
    import re
    from datetime import datetime
    from .. import logging, LOG_FORMAT, __version__
    from ..classifier.helper import CVHelper

    warnings.showwarning = warn_redirect

    opts = get_parser().parse_args()
    train_path = _parse_set(opts.train, default='abide')
    test_path = _parse_set(opts.test, default='ds030')

    # Map repeated -v flags to lower (more verbose) logging levels:
    # 0 -> 30 (WARNING), 1 -> 20 (INFO), 2 -> 15, 3 -> 10 (DEBUG), ...
    log_level = int(max(3 - opts.verbose_count, 0) * 10)
    if opts.verbose_count > 1:
        log_level = int(max(25 - 5 * opts.verbose_count, 1))

    log = logging.getLogger('mriqc.classifier')
    log.setLevel(log_level)

    base_name = 'mclf_run-%s_mod-%s_ver-%s_class-%d_cv-%s' % (
        datetime.now().strftime('%Y%m%d-%H%M%S'), opts.model,
        re.sub(r'[\+_@]', '.', __version__),
        3 if opts.multiclass else 2, opts.cv,
    )
    log.info('Results will be saved as %s', abspath(base_name + '*'))

    # Log to a file unless an empty --log-file was passed explicitly
    if opts.log_file is None or len(opts.log_file) > 0:
        log_file = opts.log_file if opts.log_file else base_name + '.log'
        fhl = logging.FileHandler(log_file)
        fhl.setFormatter(fmt=logging.Formatter(LOG_FORMAT))
        fhl.setLevel(log_level)
        log.addHandler(fhl)

    parameters = None
    if opts.parameters is not None:
        with open(opts.parameters) as paramfile:
            parameters = yaml.safe_load(paramfile)

    clf_loaded = False
    if opts.train is not None:
        # Initialize model selection helper
        cvhelper = CVHelper(
            X=train_path[0],
            Y=train_path[1],
            n_jobs=opts.njobs,
            param=parameters,
            scorer=opts.scorer,
            b_leaveout=opts.train_balanced_leaveout,
            multiclass=opts.multiclass,
            verbosity=opts.verbose_count,
            split=opts.cv,
            model=opts.model,
            debug=opts.debug,
            basename=base_name,
        )

        if opts.cv == 'batch':
            # Do not set x_test unless we are going to run batch exp.
            cvhelper.setXtest(test_path[0], test_path[1])

        # Perform model selection before setting held-out data, for hygiene
        cvhelper.fit()

        # Pickle if required
        cvhelper.save(suffix='data-train_estimator')

    # If no training set is given, need a classifier
    else:
        load_classifier = opts.load_classifier
        if load_classifier is None:
            load_classifier = pkgrf('mriqc', 'data/rfc-nzs-full-1.0.pklz')

        if not isfile(load_classifier):
            msg = 'was not provided'
            if load_classifier != '':
                msg = '("%s") was not found' % load_classifier
            raise RuntimeError(
                'No training samples were given, and the --load-classifier '
                'option %s.' % msg)

        cvhelper = CVHelper(load_clf=load_classifier, n_jobs=opts.njobs)
        clf_loaded = True

    if test_path and opts.cv != 'batch':
        # Set held-out data
        cvhelper.setXtest(test_path[0], test_path[1])
        # Evaluate
        cvhelper.evaluate(matrix=True, scoring=[opts.scorer, 'accuracy'],
                          save_pred=True)

        # Pickle if required
        if not clf_loaded:
            cvhelper.fit_full()
            cvhelper.save(suffix='data-all_estimator')

    if opts.evaluation_data:
        cvhelper.predict_dataset(opts.evaluation_data, out_file=opts.output, thres=opts.threshold)
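
A minimal sketch of the training path in main() above, with hypothetical CSV paths; the CVHelper calls mirror the ones used here:

from mriqc.classifier.helper import CVHelper

# Hypothetical feature/label tables; paths are placeholders
cvhelper = CVHelper(X='x_train.csv', Y='y_train.csv', n_jobs=4)
cvhelper.fit()                                   # model selection (CV)
cvhelper.save(suffix='data-train_estimator')     # pickle the fitted model
cvhelper.setXtest('x_test.csv', 'y_test.csv')    # held-out dataset
cvhelper.evaluate(matrix=True, save_pred=True)   # confusion matrix + predictions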
Example #45
    def __init__(self):
        super(GroupTemplate, self).__init__(pkgrf('mriqc', 'data/reports/group.html'))
Example #46
def gen_html(csv_file, mod, csv_failed=None, out_file=None):
    import os.path as op
    import datetime
    from pkg_resources import resource_filename as pkgrf
    from .. import __version__ as ver
    from ..data import GroupTemplate

    from io import StringIO as TextIO  # the Python 2 BytesIO branch is obsolete

    # IQM groups per modality: (list of related CSV columns, units or None)
    QCGROUPS = {
        'T1w': [
            (['cjv'], None),
            (['cnr'], None),
            (['efc'], None),
            (['fber'], None),
            (['wm2max'], None),
            (['snr_csf', 'snr_gm', 'snr_wm'], None),
            (['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
            (['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'vox'),
            (['qi_1', 'qi_2'], None),
            (['inu_range', 'inu_med'], None),
            (['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
            (['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
            (['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
            (['summary_bg_mean', 'summary_bg_median', 'summary_bg_stdv', 'summary_bg_mad',
              'summary_bg_k', 'summary_bg_p05', 'summary_bg_p95'], None),
            (['summary_csf_mean', 'summary_csf_median', 'summary_csf_stdv', 'summary_csf_mad',
              'summary_csf_k', 'summary_csf_p05', 'summary_csf_p95'], None),
            (['summary_gm_mean', 'summary_gm_median', 'summary_gm_stdv', 'summary_gm_mad',
              'summary_gm_k', 'summary_gm_p05', 'summary_gm_p95'], None),
            (['summary_wm_mean', 'summary_wm_median', 'summary_wm_stdv', 'summary_wm_mad',
              'summary_wm_k', 'summary_wm_p05', 'summary_wm_p95'], None)
        ],
        'T2w': [
            (['cjv'], None),
            (['cnr'], None),
            (['efc'], None),
            (['fber'], None),
            (['wm2max'], None),
            (['snr_csf', 'snr_gm', 'snr_wm'], None),
            (['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
            (['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
            (['qi_1', 'qi_2'], None),
            (['inu_range', 'inu_med'], None),
            (['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
            (['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
            (['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
            (['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
              'summary_bg_p05', 'summary_bg_p95'], None),
            (['summary_csf_mean', 'summary_csf_stdv', 'summary_csf_k',
              'summary_csf_p05', 'summary_csf_p95'], None),
            (['summary_gm_mean', 'summary_gm_stdv', 'summary_gm_k',
              'summary_gm_p05', 'summary_gm_p95'], None),
            (['summary_wm_mean', 'summary_wm_stdv', 'summary_wm_k',
              'summary_wm_p05', 'summary_wm_p95'], None)
        ],
        'bold': [
            (['efc'], None),
            (['fber'], None),
            (['fwhm', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
            (['gsr_%s' % a for a in ['x', 'y']], None),
            (['snr'], None),
            (['dvars_std', 'dvars_vstd'], None),
            (['dvars_nstd'], None),
            (['fd_mean'], 'mm'),
            (['fd_num'], '# timepoints'),
            (['fd_perc'], '% timepoints'),
            (['spikes_num'], '# slices'),
            (['dummy_trs'], '# TRs'),
            (['gcor'], None),
            (['tsnr'], None),
            (['aor'], None),
            (['aqi'], None),
            (['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
              'summary_bg_p05', 'summary_bg_p95'], None),
            (['summary_fg_mean', 'summary_fg_stdv', 'summary_fg_k',
              'summary_fg_p05', 'summary_fg_p95'], None),
        ]
    }

    def_comps = list(BIDS_COMP.keys())
    dataframe = pd.read_csv(csv_file, index_col=False,
                            dtype={comp: object for comp in def_comps})

    id_labels = list(set(def_comps) & set(dataframe.columns.ravel().tolist()))
    dataframe['label'] = dataframe[id_labels].apply(_format_labels, axis=1)
    nPart = len(dataframe)

    failed = None
    if csv_failed is not None and op.isfile(csv_failed):
        MRIQC_REPORT_LOG.warning('Found failed-workflows table "%s"', csv_failed)
        failed_df = pd.read_csv(csv_failed, index_col=False)
        cols = list(set(id_labels) & set(failed_df.columns.ravel().tolist()))

        try:
            failed_df = failed_df.sort_values(by=cols)
        except AttributeError:
            #pylint: disable=E1101
            failed_df = failed_df.sort(columns=cols)

        failed = failed_df[cols].apply(myfmt, args=(cols,), axis=1).ravel().tolist()

    csv_groups = []
    datacols = dataframe.columns.ravel().tolist()
    for group, units in QCGROUPS[mod]:
        dfdict = {'iqm': [], 'value': [], 'label': [], 'units': []}

        for iqm in group:
            if iqm in datacols:
                values = dataframe[[iqm]].values.ravel().tolist()
                if values:
                    dfdict['iqm'] += [iqm] * nPart
                    dfdict['units'] += [units] * nPart
                    dfdict['value'] += values
                    dfdict['label'] += dataframe[['label']].values.ravel().tolist()

        # Save only if there are values
        if dfdict['value']:
            csv_df = pd.DataFrame(dfdict)
            csv_str = TextIO()
            csv_df[['iqm', 'value', 'label', 'units']].to_csv(csv_str, index=False)
            csv_groups.append(csv_str.getvalue())

    if out_file is None:
        out_file = op.abspath('group.html')
    tpl = GroupTemplate()

    def _embed(fname):
        # Read an embedded JS/CSS asset, closing the file handle promptly
        with open(pkgrf('mriqc', op.join('data', 'reports',
                                         'embed_resources', fname))) as f:
            return f.read()

    tpl.generate_conf({
        'modality': mod,
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'csv_groups': csv_groups,
        'failed': failed,
        'boxplots_js': _embed('boxplots.js'),
        'd3_js': _embed('d3.min.js'),
        'boxplots_css': _embed('boxplots.css'),
    }, out_file)

    return out_file
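
A hypothetical invocation of gen_html, assuming group_T1w.csv holds IQMs aggregated by MRIQC (file names are placeholders):

html = gen_html('group_T1w.csv', 'T1w',
                csv_failed='failed_T1w.csv',
                out_file='group_T1w.html')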