Example #1
def read_iqms(feat_file):
    """ Reads in the features """

    bids_comps = list(BIDS_COMP.keys())
    x_df = pd.read_csv(feat_file,
                       index_col=False,
                       dtype={col: str
                              for col in bids_comps})

    # Find present bids bits and sort by them
    bids_comps_present = list(
        set(x_df.columns.ravel().tolist()) & set(bids_comps))
    # Keep the canonical BIDS ordering (set intersection does not preserve it)
    bids_comps_present = [bit for bit in bids_comps if bit in bids_comps_present]
    x_df = x_df.sort_values(by=bids_comps_present)

    # Remove the sub- prefix in subject_id
    # (str.lstrip strips characters, not a prefix, so use a regex replace)
    x_df.subject_id = x_df.subject_id.str.replace('^sub-', '', regex=True)

    # Remove columns that are not IQMs
    feat_names = list(x_df._get_numeric_data().columns.ravel())
    for col in bids_comps:
        try:
            feat_names.remove(col)
        except ValueError:
            pass

    # Rebuild the list instead of removing while iterating (which skips items)
    feat_names = [
        col for col in feat_names
        if not col.startswith(('size_', 'spacing_', 'Unnamed'))
    ]

    return x_df, feat_names, bids_comps_present
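
A hedged usage sketch for the function above. The CSV path is a placeholder, and the import assumes read_iqms lives in mriqc.classifier.data, as in the MRIQC releases these snippets are taken from:

# Hypothetical usage -- "group_T1w.csv" is a placeholder for the group-level
# table produced by MRIQC (one row per image, BIDS columns plus IQMs).
from mriqc.classifier.data import read_iqms  # assumed module path

x_df, feat_names, bids_comps_present = read_iqms("group_T1w.csv")
print(bids_comps_present)  # BIDS columns found, e.g. ['subject_id', 'run_id']
print(feat_names[:5])      # first few numeric IQM columns (cjv, cnr, efc, ...)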
Example #2
def combine_datasets(inputs, rating_label="rater_1"):
    mdata = []
    for dataset_x, dataset_y, sitename in inputs:
        sitedata, _ = read_dataset(
            dataset_x,
            dataset_y,
            rate_label=rating_label,
            binarize=True,
            site_name=sitename,
        )
        sitedata["database"] = [sitename] * len(sitedata)

        if "site" not in sitedata.columns.ravel().tolist():
            sitedata["site"] = [sitename] * len(sitedata)

        mdata.append(sitedata)

    mdata = pd.concat(mdata)

    all_cols = mdata.columns.ravel().tolist()

    bids_comps = list(BIDS_COMP.keys())
    bids_comps_present = list(set(mdata.columns.ravel().tolist()) & set(bids_comps))
    bids_comps_present = [bit for bit in bids_comps if bit in bids_comps_present]

    ordered_cols = bids_comps_present + ["database", "site", rating_label]
    ordered_cols += sorted(list(set(all_cols) - set(ordered_cols)))
    return mdata[ordered_cols]
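
A minimal usage sketch, with placeholder file names; combine_datasets is assumed importable from mriqc.classifier.data:

from mriqc.classifier.data import combine_datasets  # assumed module path

# Hypothetical inputs: (features CSV, ratings CSV, site name) per dataset
inputs = [
    ("siteA_T1w.csv", "siteA_ratings.csv", "SiteA"),
    ("siteB_T1w.csv", "siteB_ratings.csv", "SiteB"),
]
mdata = combine_datasets(inputs, rating_label="rater_1")
print(mdata[["database", "site", "rater_1"]].head())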
Example #3
    def _run_interface(self, runtime):
        out_file = self._gen_outfile()

        if isdefined(self.inputs.root):
            self._out_dict = self.inputs.root

        root_adds = []
        for key, val in list(self.inputs._outputs.items()):
            if not isdefined(val) or key == 'trait_added':
                continue

            if self.expr.match(key) is not None:
                root_adds.append(key)
                continue

            key, val = _process_name(key, val)
            self._out_dict[key] = val

        for root_key in root_adds:
            val = self.inputs._outputs.get(root_key, None)
            if isinstance(val, dict):
                self._out_dict.update(val)
            else:
                IFLOGGER.warning(
                    'Output "%s" is not a dictionary (value="%s"), '
                    'discarding output.', root_key, str(val))

        id_dict = {}
        for comp in list(BIDS_COMP.keys()):
            comp_val = getattr(self.inputs, comp, None)
            if isdefined(comp_val) and comp_val is not None:
                id_dict[comp] = comp_val

        if self.inputs.modality == 'bold':
            id_dict['qc_type'] = 'func'
        elif self.inputs.modality == 'T1w':
            id_dict['qc_type'] = 'anat'

        if self._out_dict.get('metadata', None) is None:
            self._out_dict['metadata'] = {}

        self._out_dict['metadata'].update(id_dict)

        with open(out_file, 'w') as f:
            f.write(
                json.dumps(self._out_dict,
                           sort_keys=True,
                           indent=2,
                           ensure_ascii=False))

        return runtime
Example #4
def read_iqms(feat_file):
    """Read in a features table."""
    feat_file = Path(feat_file)

    if feat_file.suffix == ".csv":
        bids_comps = list(BIDS_COMP.keys())
        x_df = pd.read_csv(feat_file,
                           index_col=False,
                           dtype={col: str
                                  for col in bids_comps})
        # Find present bids bits and sort by them
        bids_comps_present = list(
            set(x_df.columns.ravel().tolist()) & set(bids_comps))
        bids_comps_present = [
            bit for bit in bids_comps if bit in bids_comps_present
        ]
        x_df = x_df.sort_values(by=bids_comps_present)
        # Remove the sub- prefix in subject_id
        # (str.lstrip strips characters, not a prefix, so use a regex replace)
        x_df.subject_id = x_df.subject_id.str.replace("^sub-", "", regex=True)

        # Remove columns that are not IQMs
        feat_names = list(x_df._get_numeric_data().columns.ravel())
        for col in bids_comps:
            try:
                feat_names.remove(col)
            except ValueError:
                pass
    else:
        bids_comps_present = ["subject_id"]
        x_df = pd.read_csv(feat_file,
                           index_col=False,
                           sep="\t",
                           dtype={"bids_name": str})
        x_df = x_df.sort_values(by=["bids_name"])
        x_df["subject_id"] = x_df.bids_name.str.lstrip("sub-")
        x_df = x_df.drop(columns=["bids_name"])
        x_df.subject_id = [
            "_".join(v.split("_")[:-1]) for v in x_df.subject_id.ravel()
        ]
        feat_names = list(x_df._get_numeric_data().columns.ravel())

    # Rebuild the list instead of removing while iterating (which skips items)
    feat_names = [
        col for col in feat_names
        if not col.startswith(("size_", "spacing_", "Unnamed"))
    ]

    return x_df, feat_names, bids_comps_present
Example #5
def read_labels(label_file, rate_label='rate', binarize=True):
    """ Reads in the labels """
    # Massage labels table to have the appropriate format

    bids_comps = list(BIDS_COMP.keys())

    y_df = pd.read_csv(label_file,
                       index_col=False,
                       dtype={col: str
                              for col in bids_comps})

    # Find present bids bits and sort by them
    bids_comps_present = list(
        set(y_df.columns.ravel().tolist()) & set(bids_comps))
    # Keep the canonical BIDS ordering (set intersection does not preserve it)
    bids_comps_present = [bit for bit in bids_comps if bit in bids_comps_present]
    y_df = y_df.sort_values(by=bids_comps_present)
    # Remove the sub- prefix (str.lstrip strips characters, not a prefix)
    y_df.subject_id = y_df.subject_id.str.replace('^sub-', '', regex=True)

    # Convert string labels to ints
    try:
        y_df.loc[y_df[rate_label].str.contains('fail', case=False, na=False),
                 rate_label] = -1
        y_df.loc[
            y_df[rate_label].str.contains('exclude', case=False, na=False),
            rate_label] = -1
        y_df.loc[y_df[rate_label].str.contains('maybe', case=False, na=False),
                 rate_label] = 0
        y_df.loc[y_df[rate_label].str.contains('may be', case=False, na=False),
                 rate_label] = 0
        y_df.loc[y_df[rate_label].str.contains('ok', case=False, na=False),
                 rate_label] = 1
        y_df.loc[y_df[rate_label].str.contains('good', case=False, na=False),
                 rate_label] = 1
    except AttributeError:
        pass

    y_df[[rate_label]] = y_df[[rate_label]].apply(pd.to_numeric,
                                                  errors='raise')

    if binarize:
        y_df.loc[y_df[rate_label] >= 0, rate_label] = 0
        y_df.loc[y_df[rate_label] < 0, rate_label] = 1

    return y_df[bids_comps_present + ['site', rate_label]]
Example #6
def get_bids_cols(dataframe):
    """ Returns columns corresponding to BIDS bits """
    bids_comps = list(BIDS_COMP.keys())
    bids_comps_present = list(set(dataframe.columns.ravel().tolist()) & set(bids_comps))
    return [bit for bit in bids_comps if bit in bids_comps_present]
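
Because the intersection is re-filtered against bids_comps, the returned columns always follow the canonical BIDS ordering rather than the DataFrame's column order. A self-contained sketch, using an inline stand-in for BIDS_COMP (the real mapping lives in mriqc.utils.misc):

from collections import OrderedDict

import pandas as pd

# Stand-in for mriqc.utils.misc.BIDS_COMP -- illustrative only; the real
# mapping (and its ordering) is defined by MRIQC itself.
BIDS_COMP = OrderedDict([
    ("subject_id", "sub"), ("session_id", "ses"), ("task_id", "task"),
    ("acq_id", "acq"), ("rec_id", "rec"), ("run_id", "run"),
])

df = pd.DataFrame({"run_id": ["1"], "cjv": [0.42], "subject_id": ["01"]})
bids_comps = list(BIDS_COMP.keys())
present = set(df.columns.tolist()) & set(bids_comps)
print([bit for bit in bids_comps if bit in present])  # ['subject_id', 'run_id']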
Example #7
def read_dataset(
    feat_file,
    label_file,
    merged_name=None,
    binarize=True,
    site_name=None,
    rate_label="rater_1",
    rate_selection="random",
):
    """ Reads in the features and labels """

    x_df, feat_names, _ = read_iqms(feat_file)
    y_df = read_labels(
        label_file,
        rate_label,
        binarize,
        collapse=True,
        site_name=site_name,
        rate_selection=rate_selection,
    )
    if isinstance(rate_label, (list, tuple)):
        rate_label = rate_label[0]

    # Find present bids bits and sort by them
    bids_comps = list(BIDS_COMP.keys())
    bids_comps_x = list(set(x_df.columns.ravel().tolist()) & set(bids_comps))
    bids_comps_x = [bit for bit in bids_comps if bit in bids_comps_x]
    bids_comps_y = list(set(y_df.columns.ravel().tolist()) & set(bids_comps))
    bids_comps_y = [bit for bit in bids_comps if bit in bids_comps_y]

    if bids_comps_x != bids_comps_y:
        raise RuntimeError("Labels and features cannot be merged")

    x_df["bids_ids"] = x_df.subject_id.values.copy()
    y_df["bids_ids"] = y_df.subject_id.values.copy()

    for comp in bids_comps_x[1:]:
        x_df["bids_ids"] = x_df.bids_ids.str.cat(x_df.loc[:, comp].astype(str), sep="_")
        y_df["bids_ids"] = y_df.bids_ids.str.cat(y_df.loc[:, comp].astype(str), sep="_")

    # Remove failed cases from Y, append new columns to X
    y_df = y_df[y_df["bids_ids"].isin(list(x_df.bids_ids.values.ravel()))]

    # Drop indexing column
    del x_df["bids_ids"]
    del y_df["bids_ids"]

    # Merge Y dataframe into X
    x_df = pd.merge(x_df, y_df, on=bids_comps_x, how="left")

    if merged_name is not None:
        x_df.to_csv(merged_name, index=False)

    # Drop samples with invalid rating
    nan_labels = x_df[x_df[rate_label].isnull()].index.ravel().tolist()
    if nan_labels:
        message = DROPPING_NON_NUMERICAL.format(n_labels=len(nan_labels))
        config.loggers.interface.info(message)
        x_df = x_df.drop(nan_labels)

    # Print out some info
    n_samples = len(x_df)
    ds_created_message = CREATED_DATASET.format(
        feat_file=feat_file, label_file=label_file, n_samples=n_samples
    )
    config.loggers.interface.info(ds_created_message)

    # Inform about ratings distribution
    labels = sorted(set(x_df[rate_label].values.ravel().tolist()))
    ldist = [int(np.sum(x_df[rate_label] == label)) for label in labels]

    config.loggers.interface.info(
        "Ratings distribution: %s (%s, %s)",
        "/".join(["%d" % x for x in ldist]),
        "/".join(["%.2f%%" % (100 * x / n_samples) for x in ldist]),
        "accept/exclude" if len(ldist) == 2 else "exclude/doubtful/accept",
    )

    return x_df, feat_names
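
A hedged end-to-end sketch: the file names are placeholders and the import path is an assumption, as above.

from mriqc.classifier.data import read_dataset  # assumed module path

# Hypothetical call: merge IQMs with one rater's labels into a training table
x_df, feat_names = read_dataset(
    "group_T1w.csv",   # features exported at the MRIQC group level
    "ratings.csv",     # manual quality ratings
    rate_label="rater_1",
    binarize=True,     # 0 = accept/doubtful, 1 = exclude after binarization
    site_name="SiteA",
)
X = x_df[feat_names].values
y = x_df["rater_1"].values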
Example #8
def read_labels(
    label_file,
    rate_label="rater_1",
    binarize=True,
    site_name=None,
    rate_selection="random",
    collapse=True,
):
    """
    Reads in the labels. Massage labels table to have the
    appropriate format
    """

    if isinstance(rate_label, str):
        rate_label = [rate_label]
    # Copy so that later inserts do not mutate the caller's list
    output_labels = list(rate_label)

    bids_comps = list(BIDS_COMP.keys())
    y_df = pd.read_csv(
        label_file, index_col=False, dtype={col: str for col in bids_comps}
    )

    # Find present bids bits and sort by them
    bids_comps_present = get_bids_cols(y_df)
    y_df = y_df.sort_values(by=bids_comps_present)
    # Remove the sub- prefix (str.lstrip strips characters, not a prefix)
    y_df.subject_id = y_df.subject_id.str.replace("^sub-", "", regex=True)
    y_df[rate_label] = y_df[rate_label].apply(pd.to_numeric, errors="raise")

    if len(rate_label) == 2:
        np.random.seed(42)
        ratermask_1 = ~np.isnan(y_df[[rate_label[0]]].values.ravel())
        ratermask_2 = ~np.isnan(y_df[[rate_label[1]]].values.ravel())

        all_rated = ratermask_1 & ratermask_2
        mergey = np.array(y_df[[rate_label[0]]].values.ravel().tolist())
        mergey[ratermask_2] = y_df[[rate_label[1]]].values.ravel()[ratermask_2]

        subsmpl = np.random.choice(
            np.where(all_rated)[0], int(0.5 * np.sum(all_rated)), replace=False
        )
        all_rated[subsmpl] = False
        mergey[all_rated] = y_df[[rate_label[0]]].values.ravel()[all_rated]
        y_df["merged_ratings"] = mergey.astype(int)

        # Set default name
        if collapse:
            cols = [
                ("indv_%s" % c) if c.startswith("rater") else c
                for c in y_df.columns.ravel().tolist()
            ]
            cols[y_df.columns.get_loc("merged_ratings")] = rate_label[0]
            y_df.columns = cols
            output_labels = [rate_label[0]]
        else:
            output_labels = ["merged_ratings"] + list(rate_label)

    if binarize:
        mask = y_df[output_labels[0]] >= 0
        y_df.loc[mask, output_labels[0]] = 0
        y_df.loc[~mask, output_labels[0]] = 1

    if "site" in y_df.columns.ravel().tolist():
        output_labels.insert(0, "site")
    elif site_name is not None:
        y_df["site"] = [site_name] * len(y_df)
        output_labels.insert(0, "site")

    return y_df[bids_comps_present + output_labels]
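
A hedged usage sketch of the two-rater path; the file name is a placeholder and the import path an assumption, as above.

from mriqc.classifier.data import read_labels  # assumed module path

# Hypothetical call with two raters: ratings are merged into a single column.
# With collapse=True the merged column takes the name "rater_1", while the
# original columns are kept as "indv_rater_1" / "indv_rater_2".
y_df = read_labels(
    "ratings.csv",
    rate_label=["rater_1", "rater_2"],
    binarize=True,
    site_name="SiteA",
    collapse=True,
)
print(y_df.columns.tolist())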
Example #9
def individual_html(in_iqms, in_plots=None):
    import os.path as op  #pylint: disable=W0404
    import datetime
    from json import load
    from mriqc import logging, __version__ as ver
    from mriqc.utils.misc import BIDS_COMP
    from mriqc.reports.utils import iqms2html, read_report_snippet
    from mriqc.data import IndividualTemplate
    from io import open  #pylint: disable=W0622
    report_log = logging.getLogger('mriqc.report')

    def _get_details(in_iqms, modality):
        in_prov = in_iqms.pop('provenance', {})
        warn_dict = in_prov.pop('warnings', None)
        sett_dict = in_prov.pop('settings', None)

        wf_details = []
        if modality == 'bold':
            bold_exclude_index = in_iqms.get('dumb_trs')
            if bold_exclude_index is None:
                report_log.warning('Building bold report: no exclude index was found')
            elif bold_exclude_index > 0:
                msg = """\
<span class="problematic">Non-steady state (strong T1 contrast) has been detected in the \
first {} volumes</span>. They were excluded before generating any QC measures and plots."""
                wf_details.append(msg.format(bold_exclude_index))

            hmc_fsl = sett_dict.pop('hmc_fsl')
            if hmc_fsl is not None:
                msg = 'Framewise Displacement was computed using '
                if hmc_fsl:
                    msg += 'FSL <code>mcflirt</code>'
                else:
                    msg += 'AFNI <code>3dvolreg</code>'
                wf_details.append(msg)

            fd_thres = sett_dict.pop('fd_thres')
            if fd_thres is not None:
                wf_details.append(
                    'Framewise Displacement threshold was defined at %f mm' % fd_thres)
        elif modality in ('T1w', 'T2w'):
            if warn_dict.pop('small_air_mask', False):
                wf_details.append(
                    '<span class="problematic">Detected hat mask was too small</span>')

            if warn_dict.pop('large_rot_frame', False):
                wf_details.append(
                    '<span class="problematic">Detected a zero-filled frame, has the original '
                    'image been rotated?</span>')

        return in_prov, wf_details

    with open(in_iqms) as jsonfile:
        iqms_dict = load(jsonfile)

    # Now, the in_iqms file should be correctly named
    fname = op.splitext(op.basename(in_iqms))[0]
    out_file = op.abspath(fname + '.html')

    if in_plots is None:
        in_plots = []

    # Extract and prune metadata
    metadata = iqms_dict.pop('bids_meta', None)
    mod = metadata.pop('modality', None)
    prov, wf_details = _get_details(iqms_dict, mod)

    file_id = [metadata.pop(k, None)
               for k in list(BIDS_COMP.keys())]
    file_id = [comp for comp in file_id if comp is not None]

    pred_qa = None #metadata.pop('mriqc_pred', None)

    config = {
        'modality': mod,
        'sub_id': '_'.join(file_id),
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'imparams': iqms2html(iqms_dict, 'iqms-table'),
        'svg_files': [read_report_snippet(pfile) for pfile in in_plots],
        'workflow_details': wf_details,
        'provenance': iqms2html(prov, 'provenance-table'),
        'metadata': iqms2html(metadata, 'metadata-table'),
        'pred_qa': pred_qa
    }

    if config['metadata'] is None:
        config['workflow_details'].append(
            '<span class="warning">File has no metadata</span> '
            '<span>(sidecar JSON file missing or empty)</span>')

    tpl = IndividualTemplate()
    tpl.generate_conf(config, out_file)

    report_log.info('Generated individual log (%s)', out_file)
    return out_file
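
A short hedged usage sketch; the JSON and SVG paths are placeholders, and the import assumes the function lives in mriqc.reports.individual:

from mriqc.reports.individual import individual_html  # assumed module path

# Hypothetical call: in_iqms is the JSON written by the IQMs sink interface,
# in_plots a list of SVG reportlets produced by the workflow.
out_html = individual_html("sub-01_T1w.json", in_plots=["sub-01_T1w_mosaic.svg"])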
Example #10
def individual_html(in_iqms, in_plots=None):
    from os import path as op
    import datetime
    from json import load
    from mriqc import logging, __version__ as ver
    from mriqc.utils.misc import BIDS_COMP
    from mriqc.reports import REPORT_TITLES
    from mriqc.reports.utils import iqms2html, read_report_snippet
    from mriqc.data import IndividualTemplate
    report_log = logging.getLogger('mriqc.report')

    def _get_details(in_iqms, modality):
        in_prov = in_iqms.pop('provenance', {})
        warn_dict = in_prov.pop('warnings', None)
        sett_dict = in_prov.pop('settings', None)

        wf_details = []
        if modality == 'bold':
            bold_exclude_index = in_iqms.get('dumb_trs')
            if bold_exclude_index is None:
                report_log.warning(
                    'Building bold report: no exclude index was found')
            elif bold_exclude_index > 0:
                msg = """\
<span class="problematic">Non-steady state (strong T1 contrast) has been detected in the \
first {} volumes</span>. They were excluded before generating any QC measures and plots."""
                wf_details.append(msg.format(bold_exclude_index))

            hmc_fsl = sett_dict.pop('hmc_fsl')
            if hmc_fsl is not None:
                msg = 'Framewise Displacement was computed using '
                if hmc_fsl:
                    msg += 'FSL <code>mcflirt</code>'
                else:
                    msg += 'AFNI <code>3dvolreg</code>'
                wf_details.append(msg)

            fd_thres = sett_dict.pop('fd_thres')
            if fd_thres is not None:
                wf_details.append(
                    'Framewise Displacement threshold was defined at %f mm' %
                    fd_thres)
        elif modality in ('T1w', 'T2w'):
            if warn_dict.pop('small_air_mask', False):
                wf_details.append(
                    '<span class="problematic">Detected hat mask was too small</span>'
                )

            if warn_dict.pop('large_rot_frame', False):
                wf_details.append(
                    '<span class="problematic">Detected a zero-filled frame, has the original '
                    'image been rotated?</span>')

        return in_prov, wf_details

    with open(in_iqms) as jsonfile:
        iqms_dict = load(jsonfile)

    # Now, the in_iqms file should be correctly named
    fname = op.splitext(op.basename(in_iqms))[0]
    out_file = op.abspath(fname + '.html')

    # Extract and prune metadata
    metadata = iqms_dict.pop('bids_meta', None)
    mod = metadata.pop('modality', None)
    prov, wf_details = _get_details(iqms_dict, mod)

    file_id = [metadata.pop(k, None) for k in list(BIDS_COMP.keys())]
    file_id = [comp for comp in file_id if comp is not None]

    if in_plots is None:
        in_plots = []
    else:
        if any(('melodic_reportlet' in k for k in in_plots)):
            REPORT_TITLES['bold'].insert(3, 'ICA components')

        in_plots = [(REPORT_TITLES[mod][i], read_report_snippet(v))
                    for i, v in enumerate(in_plots)]

    pred_qa = None  # metadata.pop('mriqc_pred', None)
    config = {
        'modality': mod,
        'sub_id': '_'.join(file_id),
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'imparams': iqms2html(iqms_dict, 'iqms-table'),
        'svg_files': in_plots,
        'workflow_details': wf_details,
        'provenance': iqms2html(prov, 'provenance-table'),
        'metadata': iqms2html(metadata, 'metadata-table'),
        'pred_qa': pred_qa
    }

    if config['metadata'] is None:
        config['workflow_details'].append(
            '<span class="warning">File has no metadata</span> '
            '<span>(sidecar JSON file missing or empty)</span>')

    tpl = IndividualTemplate()
    tpl.generate_conf(config, out_file)

    report_log.info('Generated individual log (%s)', out_file)
    return out_file
Example #11
def individual_html(in_iqms, in_plots=None, api_id=None):
    from pathlib import Path
    import datetime
    from json import load
    from mriqc import logging, __version__ as ver
    from mriqc.utils.misc import BIDS_COMP
    from mriqc.reports import REPORT_TITLES
    from mriqc.reports.utils import iqms2html, read_report_snippet
    from mriqc.data import IndividualTemplate

    report_log = logging.getLogger('mriqc.report')

    def _get_details(in_iqms, modality):
        in_prov = in_iqms.pop('provenance', {})
        warn_dict = in_prov.pop('warnings', None)
        sett_dict = in_prov.pop('settings', None)

        wf_details = []
        if modality == 'bold':
            bold_exclude_index = in_iqms.get('dumb_trs')
            if bold_exclude_index is None:
                report_log.warning('Building bold report: no exclude index was found')
            elif bold_exclude_index > 0:
                msg = """\
<span class="problematic">Non-steady state (strong T1 contrast) has been detected in the \
first {} volumes</span>. They were excluded before generating any QC measures and plots."""
                wf_details.append(msg.format(bold_exclude_index))

            hmc_fsl = sett_dict.pop('hmc_fsl')
            if hmc_fsl is not None:
                msg = 'Framewise Displacement was computed using '
                if hmc_fsl:
                    msg += 'FSL <code>mcflirt</code>'
                else:
                    msg += 'AFNI <code>3dvolreg</code>'
                wf_details.append(msg)

            fd_thres = sett_dict.pop('fd_thres')
            if fd_thres is not None:
                wf_details.append(
                    'Framewise Displacement threshold was defined at %f mm' % fd_thres)
        elif modality in ('T1w', 'T2w'):
            if warn_dict.pop('small_air_mask', False):
                wf_details.append(
                    '<span class="problematic">Detected hat mask was too small</span>')

            if warn_dict.pop('large_rot_frame', False):
                wf_details.append(
                    '<span class="problematic">Detected a zero-filled frame, has the original '
                    'image been rotated?</span>')

        return in_prov, wf_details, sett_dict

    in_iqms = Path(in_iqms)
    with in_iqms.open() as jsonfile:
        iqms_dict = load(jsonfile)

    # Now, the in_iqms file should be correctly named
    out_file = str(Path(in_iqms.with_suffix(".html").name).resolve())

    # Extract and prune metadata
    metadata = iqms_dict.pop('bids_meta', None)
    mod = metadata.pop('modality', None)
    prov, wf_details, _ = _get_details(iqms_dict, mod)

    file_id = [metadata.pop(k, None)
               for k in list(BIDS_COMP.keys())]
    file_id = [comp for comp in file_id if comp is not None]

    if in_plots is None:
        in_plots = []
    else:
        if any(('melodic_reportlet' in k for k in in_plots)):
            REPORT_TITLES['bold'].insert(3, ('ICA components', 'ica-comps'))

        in_plots = [(REPORT_TITLES[mod][i] + (read_report_snippet(v), ))
                    for i, v in enumerate(in_plots)]

    pred_qa = None  # metadata.pop('mriqc_pred', None)
    config = {
        'modality': mod,
        'dataset': metadata.pop('dataset', None),
        'bids_name': in_iqms.with_suffix("").name,
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'imparams': iqms2html(iqms_dict, 'iqms-table'),
        'svg_files': in_plots,
        'workflow_details': wf_details,
        'webapi_url': prov.pop('webapi_url'),
        'webapi_port': prov.pop('webapi_port'),
        'provenance': iqms2html(prov, 'provenance-table'),
        'md5sum': prov['md5sum'],
        'metadata': iqms2html(metadata, 'metadata-table'),
        'pred_qa': pred_qa
    }

    if config['metadata'] is None:
        config['workflow_details'].append(
            '<span class="warning">File has no metadata</span> '
            '<span>(sidecar JSON file missing or empty)</span>')

    tpl = IndividualTemplate()
    tpl.generate_conf(config, out_file)

    report_log.info('Generated individual log (%s)', out_file)
    return out_file
Example #12
    def _run_interface(self, runtime):
        out_file = self._gen_outfile()

        if isdefined(self.inputs.root):
            self._out_dict = self.inputs.root

        root_adds = []
        for key, val in list(self.inputs._outputs.items()):
            if not isdefined(val) or key == 'trait_added':
                continue

            if self.expr.match(key) is not None:
                root_adds.append(key)
                continue

            key, val = _process_name(key, val)
            self._out_dict[key] = val

        for root_key in root_adds:
            val = self.inputs._outputs.get(root_key, None)
            if isinstance(val, dict):
                self._out_dict.update(val)
            else:
                IFLOGGER.warning(
                    'Output "%s" is not a dictionary (value="%s"), '
                    'discarding output.', root_key, str(val))

        id_dict = {}
        for comp in list(BIDS_COMP.keys()):
            comp_val = getattr(self.inputs, comp, None)
            if isdefined(comp_val) and comp_val is not None:
                id_dict[comp] = comp_val

        # Predict QA from IQMs and add to metadata
        if self.inputs.modality == 'T1w':
            from pkg_resources import resource_filename as pkgrf
            import numpy as np
            from mriqc.classifier.cv import CVHelper

            cvhelper = CVHelper(load_clf=pkgrf('mriqc',
                                               'data/rfc-nzs-full-1.0.pklz'),
                                n_jobs=1)

            features = tuple(
                [self._out_dict.get(key, None) for key in cvhelper.ftnames])
            id_dict['mriqc_pred'] = int(
                cvhelper.predict(np.array([features]))[0])

        id_dict['modality'] = self.inputs.modality

        if self.inputs.save_extra:
            from mriqc import __version__ as version
            id_dict['version'] = version
            id_dict['software'] = 'mriqc'

        if self._out_dict.get('metadata', None) is None:
            self._out_dict['metadata'] = {}

        self._out_dict['metadata'].update(id_dict)

        with open(out_file, 'w') as f:
            f.write(
                json.dumps(self._out_dict,
                           sort_keys=True,
                           indent=2,
                           ensure_ascii=False))

        return runtime
Example #13
    def _run_interface(self, runtime):
        out_file = self._gen_outfile()

        if isdefined(self.inputs.root):
            self._out_dict = self.inputs.root

        root_adds = []
        for key, val in list(self.inputs._outputs.items()):
            if not isdefined(val) or key == "trait_added":
                continue

            if self.expr.match(key) is not None:
                root_adds.append(key)
                continue

            key, val = _process_name(key, val)
            self._out_dict[key] = val

        for root_key in root_adds:
            val = self.inputs._outputs.get(root_key, None)
            if isinstance(val, dict):
                self._out_dict.update(val)
            else:
                config.loggers.interface.warning(
                    'Output "%s" is not a dictionary (value="%s"), '
                    "discarding output.",
                    root_key,
                    str(val),
                )

        # Fill in the "bids_meta" key
        id_dict = {}
        for comp in list(BIDS_COMP.keys()):
            comp_val = getattr(self.inputs, comp, None)
            if isdefined(comp_val) and comp_val is not None:
                id_dict[comp] = comp_val
        id_dict["modality"] = self.inputs.modality

        if isdefined(self.inputs.metadata) and self.inputs.metadata:
            id_dict.update(self.inputs.metadata)

        if self._out_dict.get("bids_meta") is None:
            self._out_dict["bids_meta"] = {}
        self._out_dict["bids_meta"].update(id_dict)

        if isdefined(self.inputs.dataset):
            self._out_dict["bids_meta"]["dataset"] = self.inputs.dataset

        # Fill in the "provenance" key
        # Predict QA from IQMs and add to metadata
        prov_dict = {}
        if isdefined(self.inputs.provenance) and self.inputs.provenance:
            prov_dict.update(self.inputs.provenance)

        if self._out_dict.get("provenance") is None:
            self._out_dict["provenance"] = {}
        self._out_dict["provenance"].update(prov_dict)

        with open(out_file, "w") as f:
            f.write(
                json.dumps(
                    self._out_dict,
                    sort_keys=True,
                    indent=2,
                    ensure_ascii=False,
                ))

        return runtime
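
For orientation, a rough sketch of the JSON document this interface writes; the keys follow the code above, while the IQM names and values here are placeholders.

# Illustrative structure only -- real files contain the full set of IQMs and
# provenance fields; the names and values below are placeholders.
example_output = {
    "cjv": 0.41,            # the IQMs themselves live at the top level
    "snr_wm": 17.2,
    "bids_meta": {          # assembled from the BIDS_COMP inputs plus metadata
        "subject_id": "01",
        "run_id": "1",
        "modality": "T1w",
    },
    "provenance": {         # copied from the `provenance` input trait
        "md5sum": "placeholder",
    },
}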
Example #14
def gen_html(csv_file, mod, csv_failed=None, out_file=None):
    import os.path as op
    from os import remove
    from shutil import copy
    import datetime
    from pkg_resources import resource_filename as pkgrf
    from mriqc import __version__ as ver
    from mriqc.data import GroupTemplate
    from mriqc.utils.misc import check_folder

    from sys import version_info
    if version_info[0] > 2:
        from io import StringIO as TextIO
    else:
        from io import BytesIO as TextIO

    QCGROUPS = {
        'T1w': [(['cjv'], None), (['cnr'], None), (['efc'], None),
                (['fber'], None), (['wm2max'], None),
                (['snr_csf', 'snr_gm', 'snr_wm'], None),
                (['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
                (['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
                (['qi_1', 'qi_2'], None), (['inu_range', 'inu_med'], None),
                (['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
                (['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
                (['tpm_overlap_csf', 'tpm_overlap_gm',
                  'tpm_overlap_wm'], None),
                ([
                    'summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
                    'summary_bg_p05', 'summary_bg_p95'
                ], None),
                ([
                    'summary_csf_mean', 'summary_csf_stdv', 'summary_csf_k',
                    'summary_csf_p05', 'summary_csf_p95'
                ], None),
                ([
                    'summary_gm_mean', 'summary_gm_stdv', 'summary_gm_k',
                    'summary_gm_p05', 'summary_gm_p95'
                ], None),
                ([
                    'summary_wm_mean', 'summary_wm_stdv', 'summary_wm_k',
                    'summary_wm_p05', 'summary_wm_p95'
                ], None)],
        'T2w': [(['cjv'], None), (['cnr'], None), (['efc'], None),
                (['fber'], None), (['wm2max'], None),
                (['snr_csf', 'snr_gm', 'snr_wm'], None),
                (['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
                (['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
                (['qi_1', 'qi_2'], None), (['inu_range', 'inu_med'], None),
                (['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
                (['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
                (['tpm_overlap_csf', 'tpm_overlap_gm',
                  'tpm_overlap_wm'], None),
                ([
                    'summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
                    'summary_bg_p05', 'summary_bg_p95'
                ], None),
                ([
                    'summary_csf_mean', 'summary_csf_stdv', 'summary_csf_k',
                    'summary_csf_p05', 'summary_csf_p95'
                ], None),
                ([
                    'summary_gm_mean', 'summary_gm_stdv', 'summary_gm_k',
                    'summary_gm_p05', 'summary_gm_p95'
                ], None),
                ([
                    'summary_wm_mean', 'summary_wm_stdv', 'summary_wm_k',
                    'summary_wm_p05', 'summary_wm_p95'
                ], None)],
        'bold': [
            (['efc'], None),
            (['fber'], None),
            (['fwhm', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
            (['gsr_%s' % a for a in ['x', 'y']], None),
            (['snr'], None),
            (['dvars_std', 'dvars_vstd'], None),
            (['dvars_nstd'], None),
            (['fd_mean'], 'mm'),
            (['fd_num'], '# timepoints'),
            (['fd_perc'], '% timepoints'),
            (['spikes_num'], '# slices'),
            (['gcor'], None),
            (['tsnr'], None),
            (['aor'], None),
            (['aqi'], None),
            ([
                'summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
                'summary_bg_p05', 'summary_bg_p95'
            ], None),
            ([
                'summary_fg_mean', 'summary_fg_stdv', 'summary_fg_k',
                'summary_fg_p05', 'summary_fg_p95'
            ], None),
        ]
    }

    def_comps = list(BIDS_COMP.keys())
    dataframe = pd.read_csv(csv_file,
                            index_col=False,
                            dtype={comp: object
                                   for comp in def_comps})

    id_labels = list(set(def_comps) & set(dataframe.columns.ravel().tolist()))
    dataframe['label'] = dataframe[id_labels].apply(_format_labels, axis=1)
    nPart = len(dataframe)

    failed = None
    if csv_failed is not None and op.isfile(csv_failed):
        MRIQC_REPORT_LOG.warning('Found failed-workflows table "%s"', csv_failed)
        failed_df = pd.read_csv(csv_failed, index_col=False)
        cols = list(set(id_labels) & set(failed_df.columns.ravel().tolist()))

        try:
            failed_df = failed_df.sort_values(by=cols)
        except AttributeError:
            #pylint: disable=E1101
            failed_df = failed_df.sort(columns=cols)

        failed = failed_df[cols].apply(myfmt, args=(cols, ),
                                       axis=1).ravel().tolist()

    csv_groups = []
    datacols = dataframe.columns.ravel().tolist()
    for group, units in QCGROUPS[mod]:
        dfdict = {'iqm': [], 'value': [], 'label': [], 'units': []}

        for iqm in group:
            if iqm in datacols:
                values = dataframe[[iqm]].values.ravel().tolist()
                if values:
                    dfdict['iqm'] += [iqm] * nPart
                    dfdict['units'] += [units] * nPart
                    dfdict['value'] += values
                    dfdict['label'] += dataframe[['label']].values.ravel().tolist()

        # Save only if there are values
        if dfdict['value']:
            csv_df = pd.DataFrame(dfdict)
            csv_str = TextIO()
            csv_df[['iqm', 'value', 'label', 'units']].to_csv(csv_str,
                                                              index=False)
            csv_groups.append(csv_str.getvalue())

    if out_file is None:
        out_file = op.abspath('group.html')
    tpl = GroupTemplate()
    tpl.generate_conf(
        {
            'modality': mod,
            'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
            'version': ver,
            'csv_groups': csv_groups,
            'failed': failed,
            'boxplots_js': open(pkgrf(
                'mriqc', op.join('data', 'reports', 'embed_resources',
                                 'boxplots.js'))).read(),
            'd3_js': open(pkgrf(
                'mriqc', op.join('data', 'reports', 'embed_resources',
                                 'd3.min.js'))).read(),
            'boxplots_css': open(pkgrf(
                'mriqc', op.join('data', 'reports', 'embed_resources',
                                 'boxplots.css'))).read(),
        }, out_file)

    return out_file
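
A minimal hedged usage sketch; the CSV path is a placeholder and the import assumes gen_html lives in mriqc.reports.group:

from mriqc.reports.group import gen_html  # assumed module path

# Hypothetical call: build the group-level HTML report from T1w IQMs
out_html = gen_html("group_T1w.csv", "T1w", out_file="group_T1w.html")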
Example #15
def individual_html(in_iqms, in_plots=None, exclude_index=0, wf_details=None):
    import os.path as op  #pylint: disable=W0404
    import datetime
    from json import load
    from mriqc import __version__ as ver
    from mriqc.utils.misc import BIDS_COMP
    from mriqc.reports.utils import iqms2html, anat_flags, read_report_snippet
    from mriqc.data import IndividualTemplate
    from mriqc import logging
    from io import open  #pylint: disable=W0622
    report_log = logging.getLogger('mriqc.report')
    report_log.setLevel(logging.INFO)

    with open(in_iqms) as jsonfile:
        iqms_dict = load(jsonfile)

    # Now, the in_iqms file should be correctly named
    fname = op.splitext(op.basename(in_iqms))[0]
    out_file = op.abspath(fname + '.html')

    if in_plots is None:
        in_plots = []

    if wf_details is None:
        wf_details = []

    # Extract and prune metadata
    metadata = iqms_dict.pop('metadata', None)
    mod = metadata.pop('modality', None)
    file_id = [metadata.pop(k, None) for k in list(BIDS_COMP.keys())]
    file_id = [comp for comp in file_id if comp is not None]

    pred_qa = None  #metadata.pop('mriqc_pred', None)

    # Deal with special IQMs
    if mod in ('T1w', 'T2w'):
        flags = anat_flags(iqms_dict)
        if flags:
            wf_details.append(flags)
    elif mod == 'bold':
        pass
    else:
        raise RuntimeError('Unknown modality "%s"' % mod)

    config = {
        'modality': mod,
        'sub_id': '_'.join(file_id),
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'imparams': iqms2html(iqms_dict, 'iqms-table'),
        'svg_files': [read_report_snippet(pfile) for pfile in in_plots],
        'exclude_index': exclude_index,
        'workflow_details': wf_details,
        'metadata': iqms2html(metadata, 'metadata-table'),
        'pred_qa': pred_qa
    }

    if config['metadata'] is None:
        config['workflow_details'].append(
            '<span class="warning">File has no metadata</span> '
            '<span>(sidecar JSON file missing or empty)</span>')

    tpl = IndividualTemplate()
    tpl.generate_conf(config, out_file)

    report_log.info('Generated individual log (%s)', out_file)
    return out_file