Code example #1
File: t2smap.py  Project: schudds/tedana
def t2smap_workflow(data,
                    tes,
                    out_dir='.',
                    mask=None,
                    fittype='loglin',
                    fitmode='all',
                    combmode='t2s',
                    debug=False,
                    quiet=False):
    """
    Estimate T2* and S0, and optimally combine data across TEs.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str`, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be spatially
        aligned with `data`.
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method.
        'loglin' means to use the default linear fit to the log of
        the data.
        'curvefit' means to use a monoexponential fit to the raw data,
        which is slightly slower but may be more accurate.
    fitmode : {'all', 'ts'}, optional
        Monoexponential model fitting scheme.
        'all' means that the model is fit, per voxel, across all timepoints.
        'ts' means that the model is fit, per voxel and per timepoint.
        Default is 'all'.
    combmode : {'t2s', 'paid'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default), 'paid' (Poser).

    Other Parameters
    ----------------
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppress logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files, which are described below:

    ==========================    =================================================
    Filename                      Content
    ==========================    =================================================
    T2starmap.nii.gz              Limited estimated T2* 3D map or 4D timeseries.
                                  Will be a 3D map if ``fitmode`` is 'all' and a
                                  4D timeseries if it is 'ts'.
    S0map.nii.gz                  Limited S0 3D map or 4D timeseries.
    desc-full_T2starmap.nii.gz    Full T2* map/timeseries. The difference between
                                  the limited and full maps is that, for voxels
                                  affected by dropout where only one echo contains
                                  good data, the full map uses the single echo's
                                  value while the limited map has a NaN.
    desc-full_S0map.nii.gz        Full S0 map/timeseries.
    desc-optcom_bold.nii.gz       Optimally combined timeseries.
    ==========================    =================================================
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    if debug and not quiet:
        logging.basicConfig(level=logging.DEBUG)
    elif quiet:
        logging.basicConfig(level=logging.WARNING)
    else:
        logging.basicConfig(level=logging.INFO)

    LGR.info('Using output directory: {}'.format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # coerce data to samples x echos x time array
    if isinstance(data, str):
        data = [data]

    LGR.info('Loading input data: {}'.format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    n_samp, n_echos, n_vols = catd.shape
    LGR.debug('Resulting data shape: {}'.format(catd.shape))

    if mask is None:
        LGR.info('Computing adaptive mask')
    else:
        LGR.info('Using user-defined mask')
    mask, masksum = utils.make_adaptive_mask(catd,
                                             mask=mask,
                                             getsum=True,
                                             threshold=1)

    LGR.info('Computing adaptive T2* map')
    if fitmode == 'all':
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay(catd, tes, mask, masksum, fittype)
    else:
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay_ts(catd, tes, mask, masksum, fittype)

    # set a hard cap for the T2* map/timeseries
    # anything more than 10x the 99.5 %ile will be reset to the 99.5 %ile
    cap_t2s = stats.scoreatpercentile(t2s_limited.flatten(),
                                      99.5,
                                      interpolation_method='lower')
    cap_t2s_sec = utils.millisec2sec(cap_t2s * 10.)
    LGR.debug('Setting cap on T2* map at {:.5f}s'.format(cap_t2s_sec))
    t2s_limited[t2s_limited > cap_t2s * 10] = cap_t2s

    LGR.info('Computing optimal combination')
    # optimally combine data
    OCcatd = combine.make_optcom(catd,
                                 tes,
                                 masksum,
                                 t2s=t2s_full,
                                 combmode=combmode)

    # clean up numerical errors
    for arr in (OCcatd, s0_limited, t2s_limited):
        np.nan_to_num(arr, copy=False)

    s0_limited[s0_limited < 0] = 0
    t2s_limited[t2s_limited < 0] = 0

    io.filewrite(utils.millisec2sec(t2s_limited),
                 op.join(out_dir, 'T2starmap.nii.gz'), ref_img)
    io.filewrite(s0_limited, op.join(out_dir, 'S0map.nii.gz'), ref_img)
    io.filewrite(utils.millisec2sec(t2s_full),
                 op.join(out_dir, 'desc-full_T2starmap.nii.gz'), ref_img)
    io.filewrite(s0_full, op.join(out_dir, 'desc-full_S0map.nii.gz'), ref_img)
    io.filewrite(OCcatd, op.join(out_dir, 'desc-optcom_bold.nii.gz'), ref_img)
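
A minimal usage sketch for this workflow, assuming tedana is installed; the
file names and echo times below are hypothetical.

from tedana.workflows import t2smap_workflow

t2smap_workflow(
    data=['echo1.nii.gz', 'echo2.nii.gz', 'echo3.nii.gz'],  # ascending TEs
    tes=[14.5, 38.5, 62.5],  # echo times in milliseconds
    out_dir='t2smap_results',
    fittype='loglin',  # default; 'curvefit' is slower but may be more accurate
)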
Code example #2
File: t2smap.py  Project: handwerkerd/tedana
def t2smap_workflow(
    data,
    tes,
    out_dir=".",
    mask=None,
    prefix="",
    convention="bids",
    fittype="loglin",
    fitmode="all",
    combmode="t2s",
    debug=False,
    quiet=False,
):
    """
    Estimate T2* and S0, and optimally combine data across TEs.

    Please remember to cite [1]_.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str`, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be spatially
        aligned with `data`.
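    prefix : :obj:`str`, optional
        Prefix for generated output filenames. Default is ''.
    convention : {'bids', 'orig'}, optional
        Filenaming convention for output files. Default is 'bids'.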
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method.
        'loglin' means to use the default linear fit to the log of
        the data.
        'curvefit' means to use a monoexponential fit to the raw data,
        which is slightly slower but may be more accurate.
    fitmode : {'all', 'ts'}, optional
        Monoexponential model fitting scheme.
        'all' means that the model is fit, per voxel, across all timepoints.
        'ts' means that the model is fit, per voxel and per timepoint.
        Default is 'all'.
    combmode : {'t2s', 'paid'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default), 'paid' (Poser).

    Other Parameters
    ----------------
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppress logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files, which are described below:

    ============================= =================================================
    Filename                      Content
    ============================= =================================================
    T2starmap.nii.gz              Estimated T2* 3D map or 4D timeseries.
                                  Will be a 3D map if ``fitmode`` is 'all' and a
                                  4D timeseries if it is 'ts'.
    S0map.nii.gz                  S0 3D map or 4D timeseries.
    desc-limited_T2starmap.nii.gz Limited T2* map/timeseries. The difference between
                                  the limited and full maps is that, for voxels
                                  affected by dropout where only one echo contains
                                  good data, the full map uses the T2* estimate
                                  from the first two echos, while the limited map
                                  will have a NaN.
    desc-limited_S0map.nii.gz     Limited S0 map/timeseries. The difference between
                                  the limited and full maps is that, for voxels
                                  affected by dropout where only one echo contains
                                  good data, the full map uses the S0 estimate
                                  from the first two echos, while the limited map
                                  will have a NaN.
    desc-optcom_bold.nii.gz       Optimally combined timeseries.
    ============================= =================================================

    References
    ----------
    .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
           Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
           Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
           Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
           Whitaker, K., & Handwerker, D. A. (2021).
           TE-dependent analysis of multi-echo fMRI with tedana.
           Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    utils.setup_loggers(quiet=quiet, debug=debug)

    LGR.info("Using output directory: {}".format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # coerce data to samples x echos x time array
    if isinstance(data, str):
        data = [data]

    LGR.info("Loading input data: {}".format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    io_generator = io.OutputGenerator(
        ref_img,
        convention=convention,
        out_dir=out_dir,
        prefix=prefix,
        config="auto",
        make_figures=False,
    )
    n_samp, n_echos, n_vols = catd.shape
    LGR.debug("Resulting data shape: {}".format(catd.shape))

    if mask is None:
        LGR.info("Computing adaptive mask")
    else:
        LGR.info("Using user-defined mask")
    mask, masksum = utils.make_adaptive_mask(catd,
                                             mask=mask,
                                             getsum=True,
                                             threshold=1)

    LGR.info("Computing adaptive T2* map")
    if fitmode == "all":
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay(catd, tes, mask, masksum, fittype)
    else:
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay_ts(catd, tes, mask, masksum, fittype)

    # set a hard cap for the T2* map/timeseries
    # anything more than 10x the 99.5 %ile will be reset to the 99.5 %ile
    cap_t2s = stats.scoreatpercentile(t2s_full.flatten(),
                                      99.5,
                                      interpolation_method="lower")
    cap_t2s_sec = utils.millisec2sec(cap_t2s * 10.0)
    LGR.debug("Setting cap on T2* map at {:.5f}s".format(cap_t2s_sec))
    t2s_full[t2s_full > cap_t2s * 10] = cap_t2s

    LGR.info("Computing optimal combination")
    # optimally combine data
    OCcatd = combine.make_optcom(catd,
                                 tes,
                                 masksum,
                                 t2s=t2s_full,
                                 combmode=combmode)

    # clean up numerical errors
    for arr in (OCcatd, s0_full, t2s_full):
        np.nan_to_num(arr, copy=False)

    s0_full[s0_full < 0] = 0
    t2s_full[t2s_full < 0] = 0

    io_generator.save_file(
        utils.millisec2sec(t2s_full),
        "t2star img",
    )
    io_generator.save_file(s0_full, "s0 img")
    io_generator.save_file(
        utils.millisec2sec(t2s_limited),
        "limited t2star img",
    )
    io_generator.save_file(
        s0_limited,
        "limited s0 img",
    )
    io_generator.save_file(OCcatd, "combined img")

    # Write out BIDS-compatible description file
    derivative_metadata = {
        "Name": "t2smap Outputs",
        "BIDSVersion": "1.5.0",
        "DatasetType": "derivative",
        "GeneratedBy": [{
            "Name": "tedana",
            "Version": __version__,
            "Description": ("A pipeline estimating T2* from multi-echo fMRI "
                            "data and combining data across echoes."),
            "CodeURL": "https://github.com/ME-ICA/tedana",
        }],
    }
    io_generator.save_file(derivative_metadata, "data description json")

    LGR.info("Workflow completed")
    utils.teardown_loggers()
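
A hedged sketch of the options this variant adds (prefix and convention, both
passed through to io.OutputGenerator); the subject-prefixed file names are
hypothetical.

from tedana.workflows import t2smap_workflow

t2smap_workflow(
    data=['sub-01_echo-1_bold.nii.gz',
          'sub-01_echo-2_bold.nii.gz',
          'sub-01_echo-3_bold.nii.gz'],
    tes=[12.0, 28.0, 44.0],
    out_dir='derivatives/tedana',
    prefix='sub-01',    # prepended to output filenames
    convention='bids',  # BIDS-style derivative naming (the default)
)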
Code example #3
File: t2smap.py  Project: jbteves/tedana
def t2smap_workflow(data,
                    tes,
                    out_dir='.',
                    mask=None,
                    prefix='',
                    convention='bids',
                    fittype='loglin',
                    fitmode='all',
                    combmode='t2s',
                    debug=False,
                    quiet=False):
    """
    Estimate T2* and S0, and optimally combine data across TEs.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str`, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be spatially
        aligned with `data`.
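    prefix : :obj:`str`, optional
        Prefix for generated output filenames. Default is ''.
    convention : {'bids', 'orig'}, optional
        Filenaming convention for output files. Default is 'bids'.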
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method.
        'loglin' means to use the default linear fit to the log of
        the data.
        'curvefit' means to use a monoexponential fit to the raw data,
        which is slightly slower but may be more accurate.
    fitmode : {'all', 'ts'}, optional
        Monoexponential model fitting scheme.
        'all' means that the model is fit, per voxel, across all timepoints.
        'ts' means that the model is fit, per voxel and per timepoint.
        Default is 'all'.
    combmode : {'t2s', 'paid'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default), 'paid' (Poser).

    Other Parameters
    ----------------
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppress logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files, which are described below:

    ============================= =================================================
    Filename                      Content
    ============================= =================================================
    T2starmap.nii.gz              Estimated T2* 3D map or 4D timeseries.
                                  Will be a 3D map if ``fitmode`` is 'all' and a
                                  4D timeseries if it is 'ts'.
    S0map.nii.gz                  S0 3D map or 4D timeseries.
    desc-limited_T2starmap.nii.gz Limited T2* map/timeseries. The difference between
                                  the limited and full maps is that, for voxels
                                  affected by dropout where only one echo contains
                                  good data, the full map uses the T2* estimate
                                  from the first two echos, while the limited map
                                  will have a NaN.
    desc-limited_S0map.nii.gz     Limited S0 map/timeseries. The difference between
                                  the limited and full maps is that, for voxels
                                  affected by dropout where only one echo contains
                                  good data, the full map uses the S0 estimate
                                  from the first two echos, while the limited map
                                  will have a NaN.
    desc-optcom_bold.nii.gz       Optimally combined timeseries.
    ============================= =================================================
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    utils.setup_loggers(quiet=quiet, debug=debug)

    LGR.info('Using output directory: {}'.format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # coerce data to samples x echos x time array
    if isinstance(data, str):
        data = [data]

    LGR.info('Loading input data: {}'.format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    io_generator = io.OutputGenerator(
        ref_img,
        convention=convention,
        out_dir=out_dir,
        prefix=prefix,
        config="auto",
        make_figures=False,
    )
    n_samp, n_echos, n_vols = catd.shape
    LGR.debug('Resulting data shape: {}'.format(catd.shape))

    if mask is None:
        LGR.info('Computing adaptive mask')
    else:
        LGR.info('Using user-defined mask')
    mask, masksum = utils.make_adaptive_mask(catd,
                                             mask=mask,
                                             getsum=True,
                                             threshold=1)

    LGR.info('Computing adaptive T2* map')
    if fitmode == 'all':
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay(catd, tes, mask, masksum, fittype)
    else:
        (t2s_limited, s0_limited, t2s_full,
         s0_full) = decay.fit_decay_ts(catd, tes, mask, masksum, fittype)

    # set a hard cap for the T2* map/timeseries
    # anything more than 10x the 99.5 %ile will be reset to the 99.5 %ile
    cap_t2s = stats.scoreatpercentile(t2s_full.flatten(),
                                      99.5,
                                      interpolation_method='lower')
    cap_t2s_sec = utils.millisec2sec(cap_t2s * 10.)
    LGR.debug('Setting cap on T2* map at {:.5f}s'.format(cap_t2s_sec))
    t2s_full[t2s_full > cap_t2s * 10] = cap_t2s

    LGR.info('Computing optimal combination')
    # optimally combine data
    OCcatd = combine.make_optcom(catd,
                                 tes,
                                 masksum,
                                 t2s=t2s_full,
                                 combmode=combmode)

    # clean up numerical errors
    for arr in (OCcatd, s0_full, t2s_full):
        np.nan_to_num(arr, copy=False)

    s0_full[s0_full < 0] = 0
    t2s_full[t2s_full < 0] = 0

    io_generator.save_file(
        utils.millisec2sec(t2s_full),
        't2star img',
    )
    io_generator.save_file(s0_full, 's0 img')
    io_generator.save_file(
        utils.millisec2sec(t2s_limited),
        'limited t2star img',
    )
    io_generator.save_file(
        s0_limited,
        'limited s0 img',
    )
    io_generator.save_file(OCcatd, 'combined img')

    # Write out BIDS-compatible description file
    derivative_metadata = {
        "Name": "t2smap Outputs",
        "BIDSVersion": "1.5.0",
        "DatasetType": "derivative",
        "GeneratedBy": [{
            "Name": "tedana",
            "Version": __version__,
            "Description": ("A pipeline estimating T2* from multi-echo fMRI "
                            "data and combining data across echoes."),
            "CodeURL": "https://github.com/ME-ICA/tedana",
        }],
    }
    io_generator.save_file(derivative_metadata, 'data description json')

    LGR.info("Workflow completed")
    utils.teardown_loggers()
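
Each of these code examples applies the same T2* capping rule: values more
than 10x the 99.5th percentile are reset to that percentile. A self-contained
sketch of just that step, with toy values:

import numpy as np
from scipy import stats

t2s = np.array([20.0, 35.0, 50.0, 80.0, 5000.0])  # toy T2* values in ms
cap_t2s = stats.scoreatpercentile(t2s.flatten(), 99.5,
                                  interpolation_method='lower')
# cap_t2s is 80.0 here, so the 5000.0 outlier exceeds cap_t2s * 10 (800.0)
t2s[t2s > cap_t2s * 10] = cap_t2s
print(t2s)  # [20. 35. 50. 80. 80.]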
Code example #4
File: tedana.py  Project: jbteves/tedana
def tedana_workflow(data,
                    tes,
                    out_dir='.',
                    mask=None,
                    convention='bids',
                    prefix='',
                    fittype='loglin',
                    combmode='t2s',
                    tedpca='mdl',
                    fixed_seed=42,
                    maxit=500,
                    maxrestart=10,
                    tedort=False,
                    gscontrol=None,
                    no_reports=False,
                    png_cmap='coolwarm',
                    verbose=False,
                    low_mem=False,
                    debug=False,
                    quiet=False,
                    t2smap=None,
                    mixm=None,
                    ctab=None,
                    manacc=None):
    """
    Run the "canonical" TE-Dependent ANAlysis workflow.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str` or None, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be
        spatially aligned with `data`. If an explicit mask is not provided,
        then Nilearn's compute_epi_mask function will be used to derive a mask
        from the first echo's data.
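    convention : {'bids', 'orig'}, optional
        Filenaming convention for output files. Default is 'bids'.
    prefix : :obj:`str`, optional
        Prefix for generated output filenames. Default is ''.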
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method. 'loglin' uses the default linear
        fit to the log of the data. 'curvefit' uses a monoexponential fit to
        the raw data, which is slightly slower but may be more accurate.
        Default is 'loglin'.
    combmode : {'t2s'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default).
    tedpca : {'mdl', 'aic', 'kic', 'kundu', 'kundu-stabilize', float}, optional
        Method with which to select components in TEDPCA.
        If a float is provided, it is interpreted as the fraction of variance
        explained (between 0 and 1) to retain from PCA.
        Default is 'mdl'.
    tedort : :obj:`bool`, optional
        Orthogonalize rejected components w.r.t. accepted ones prior to
        denoising. Default is False.
    gscontrol : {None, 'mir', 'gsr'} or :obj:`list`, optional
        Perform additional denoising to remove spatially diffuse noise. Default
        is None.
    verbose : :obj:`bool`, optional
        Generate intermediate and additional files. Default is False.
    no_reports : :obj:`bool`, optional
        Do not generate .html reports and .png plots. Default is False, in
        which case reports are generated.
    png_cmap : :obj:`str`, optional
        Name of a matplotlib colormap to be used when generating figures.
        Cannot be used with --no-reports. Default is 'coolwarm'.
    t2smap : :obj:`str`, optional
        Precalculated T2* map in the same space as the input data. Values in
        the map must be in seconds.
    mixm : :obj:`str` or None, optional
        File containing mixing matrix, to be used when re-running the workflow.
        If not provided, ME-PCA and ME-ICA are done. Default is None.
    ctab : :obj:`str` or None, optional
        File containing component table from which to extract pre-computed
        classifications, to be used with 'mixm' when re-running the workflow.
        Default is None.
    manacc : :obj:`list` of :obj:`int` or None, optional
        List of manually accepted components. Can be a list of the components
        numbers or None.
        If provided, this parameter requires ``mixm`` and ``ctab`` to be provided as well.
        Default is None.

    Other Parameters
    ----------------
    fixed_seed : :obj:`int`, optional
        Value passed to ``mdp.numx_rand.seed()``.
        Set to a positive integer value for reproducible ICA results;
        otherwise, set to -1 for varying results across calls.
    maxit : :obj:`int`, optional
        Maximum number of iterations for ICA. Default is 500.
    maxrestart : :obj:`int`, optional
        Maximum number of attempts for ICA. If ICA fails to converge, the
        fixed seed will be updated and ICA will be run again. If convergence
        is achieved before maxrestart attempts, ICA will finish early.
        Default is 10.
    low_mem : :obj:`bool`, optional
        Enables low-memory processing, including the use of IncrementalPCA.
        May increase workflow duration. Default is False.
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppresses logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files. For a complete list of the files
    generated by this workflow, please visit
    https://tedana.readthedocs.io/en/latest/outputs.html
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    # boilerplate
    basename = 'report'
    extension = 'txt'
    repname = op.join(out_dir, (basename + '.' + extension))
    repex = op.join(out_dir, (basename + '*'))
    previousreps = glob(repex)
    previousreps.sort(reverse=True)
    for f in previousreps:
        previousparts = op.splitext(f)
        newname = previousparts[0] + '_old' + previousparts[1]
        os.rename(f, newname)
    refname = op.join(out_dir, '_references.txt')

    # create logfile name
    basename = 'tedana_'
    extension = 'tsv'
    start_time = datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')
    logname = op.join(out_dir, (basename + start_time + '.' + extension))
    utils.setup_loggers(logname, repname, refname, quiet=quiet, debug=debug)

    LGR.info('Using output directory: {}'.format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # Coerce gscontrol to list
    if not isinstance(gscontrol, list):
        gscontrol = [gscontrol]

    # Check value of tedpca *if* it is a float
    tedpca = check_tedpca_value(tedpca, is_parser=False)

    LGR.info('Loading input data: {}'.format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    io_generator = io.OutputGenerator(
        ref_img,
        convention=convention,
        out_dir=out_dir,
        prefix=prefix,
        config="auto",
        verbose=verbose,
    )

    n_samp, n_echos, n_vols = catd.shape
    LGR.debug('Resulting data shape: {}'.format(catd.shape))

    # check if TR is 0
    img_t_r = io_generator.reference_img.header.get_zooms()[-1]
    if img_t_r == 0:
        raise IOError(
            'Dataset has a TR of 0. This indicates incorrect'
            ' header information. To correct this, we recommend'
            ' using this snippet:'
            '\n'
            'https://gist.github.com/jbteves/032c87aeb080dd8de8861cb151bff5d6'
            '\n'
            'to correct your TR to the value it should be.')

    if mixm is not None and op.isfile(mixm):
        mixm = op.abspath(mixm)
        # Allow users to re-run on same folder
        mixing_name = io_generator.get_name("ICA mixing tsv")
        if mixm != mixing_name:
            shutil.copyfile(mixm, mixing_name)
            shutil.copyfile(mixm,
                            op.join(io_generator.out_dir, op.basename(mixm)))
    elif mixm is not None:
        raise IOError('Argument "mixm" must be an existing file.')

    if ctab is not None and op.isfile(ctab):
        ctab = op.abspath(ctab)
        # Allow users to re-run on same folder
        metrics_name = io_generator.get_name("ICA metrics tsv")
        if ctab != metrics_name:
            shutil.copyfile(ctab, metrics_name)
            shutil.copyfile(ctab,
                            op.join(io_generator.out_dir, op.basename(ctab)))
    elif ctab is not None:
        raise IOError('Argument "ctab" must be an existing file.')

    if ctab and not mixm:
        LGR.warning('Argument "ctab" requires argument "mixm".')
        ctab = None
    elif manacc is not None and (not mixm or not ctab):
        LGR.warning('Argument "manacc" requires arguments "mixm" and "ctab".')
        manacc = None
    elif manacc is not None:
        # coerce to list of integers
        manacc = [int(m) for m in manacc]

    if t2smap is not None and op.isfile(t2smap):
        t2smap_file = io_generator.get_name('t2star img')
        t2smap = op.abspath(t2smap)
        # Allow users to re-run on same folder
        if t2smap != t2smap_file:
            shutil.copyfile(t2smap, t2smap_file)
    elif t2smap is not None:
        raise IOError('Argument "t2smap" must be an existing file.')

    RepLGR.info("TE-dependence analysis was performed on input data.")
    if mask and not t2smap:
        # TODO: add affine check
        LGR.info('Using user-defined mask')
        RepLGR.info("A user-defined mask was applied to the data.")
    elif t2smap and not mask:
        LGR.info('Using user-defined T2* map to generate mask')
        t2s_limited_sec = utils.load_image(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = (t2s_limited != 0).astype(int)
    elif t2smap and mask:
        LGR.info('Combining user-defined mask and T2* map to generate mask')
        t2s_limited_sec = utils.load_image(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = utils.load_image(mask)
        mask[t2s_limited == 0] = 0  # reduce mask based on T2* map
    else:
        LGR.info('Computing EPI mask from first echo')
        first_echo_img = io.new_nii_like(io_generator.reference_img,
                                         catd[:, 0, :])
        mask = compute_epi_mask(first_echo_img)
        RepLGR.info("An initial mask was generated from the first echo using "
                    "nilearn's compute_epi_mask function.")

    # Create an adaptive mask with at least 1 good echo, for denoising
    mask_denoise, masksum_denoise = utils.make_adaptive_mask(
        catd,
        mask=mask,
        getsum=True,
        threshold=1,
    )
    LGR.debug('Retaining {}/{} samples for denoising'.format(
        mask_denoise.sum(), n_samp))
    io_generator.save_file(masksum_denoise, "adaptive mask img")

    # Create an adaptive mask with at least 3 good echoes, for classification
    masksum_clf = masksum_denoise.copy()
    masksum_clf[masksum_clf < 3] = 0
    mask_clf = masksum_clf.astype(bool)
    RepLGR.info(
        "A two-stage masking procedure was applied, in which a liberal mask "
        "(including voxels with good data in at least the first echo) was used for "
        "optimal combination, T2*/S0 estimation, and denoising, while a more conservative mask "
        "(restricted to voxels with good data in at least the first three echoes) was used for "
        "the component classification procedure.")
    LGR.debug('Retaining {}/{} samples for classification'.format(
        mask_clf.sum(), n_samp))

    if t2smap is None:
        LGR.info('Computing T2* map')
        t2s_limited, s0_limited, t2s_full, s0_full = decay.fit_decay(
            catd, tes, mask_denoise, masksum_denoise, fittype)

        # set a hard cap for the T2* map
        # anything more than 10x the 99.5 %ile will be reset to the 99.5 %ile
        cap_t2s = stats.scoreatpercentile(t2s_full.flatten(),
                                          99.5,
                                          interpolation_method='lower')
        LGR.debug('Setting cap on T2* map at {:.5f}s'.format(
            utils.millisec2sec(cap_t2s)))
        t2s_full[t2s_full > cap_t2s * 10] = cap_t2s
        io_generator.save_file(utils.millisec2sec(t2s_full), 't2star img')
        io_generator.save_file(s0_full, 's0 img')

        if verbose:
            io_generator.save_file(utils.millisec2sec(t2s_limited),
                                   'limited t2star img')
            io_generator.save_file(s0_limited, 'limited s0 img')

    # optimally combine data
    data_oc = combine.make_optcom(catd,
                                  tes,
                                  masksum_denoise,
                                  t2s=t2s_full,
                                  combmode=combmode)

    # regress out global signal unless explicitly not desired
    if 'gsr' in gscontrol:
        catd, data_oc = gsc.gscontrol_raw(catd, data_oc, n_echos, io_generator)

    fout = io_generator.save_file(data_oc, 'combined img')
    LGR.info('Writing optimally combined data set: {}'.format(fout))

    if mixm is None:
        # Identify and remove thermal noise from data
        dd, n_components = decomposition.tedpca(catd,
                                                data_oc,
                                                combmode,
                                                mask_clf,
                                                masksum_clf,
                                                t2s_full,
                                                io_generator,
                                                tes=tes,
                                                algorithm=tedpca,
                                                kdaw=10.,
                                                rdaw=1.,
                                                verbose=verbose,
                                                low_mem=low_mem)
        if verbose:
            io_generator.save_file(utils.unmask(dd, mask_clf), 'whitened img')

        # Perform ICA, calculate metrics, and apply decision tree
        # Restart when ICA fails to converge or too few BOLD components found
        keep_restarting = True
        n_restarts = 0
        seed = fixed_seed
        while keep_restarting:
            mmix, seed = decomposition.tedica(dd,
                                              n_components,
                                              seed,
                                              maxit,
                                              maxrestart=(maxrestart -
                                                          n_restarts))
            seed += 1
            n_restarts = seed - fixed_seed

            # Estimate betas and compute selection metrics for mixing matrix
            # generated from dimensionally reduced data using full data (i.e., data
            # with thermal noise)
            LGR.info(
                'Making second component selection guess from ICA results')
            required_metrics = [
                'kappa', 'rho', 'countnoise', 'countsigFT2', 'countsigFS0',
                'dice_FT2', 'dice_FS0', 'signal-noise_t', 'variance explained',
                'normalized variance explained', 'd_table_score'
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                'ICA',
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(
                comptable, n_echos, n_vols)

            n_bold_comps = comptable[comptable.classification ==
                                     'accepted'].shape[0]
            if (n_restarts < maxrestart) and (n_bold_comps == 0):
                LGR.warning("No BOLD components found. Re-attempting ICA.")
            elif (n_bold_comps == 0):
                LGR.warning(
                    "No BOLD components found, but maximum number of restarts reached."
                )
                keep_restarting = False
            else:
                keep_restarting = False

            RepLGR.disabled = True  # Disable the report to avoid duplicate text
        RepLGR.disabled = False  # Re-enable the report after the while loop is escaped
    else:
        LGR.info('Using supplied mixing matrix from ICA')
        mixing_file = io_generator.get_name("ICA mixing tsv")
        mmix = pd.read_table(mixing_file).values

        if ctab is None:
            required_metrics = [
                'kappa', 'rho', 'countnoise', 'countsigFT2', 'countsigFS0',
                'dice_FT2', 'dice_FS0', 'signal-noise_t', 'variance explained',
                'normalized variance explained', 'd_table_score'
            ]
            comptable = metrics.collect.generate_metrics(
                catd,
                data_oc,
                mmix,
                masksum_clf,
                tes,
                io_generator,
                'ICA',
                metrics=required_metrics,
            )
            comptable, metric_metadata = selection.kundu_selection_v2(
                comptable, n_echos, n_vols)
        else:
            comptable = pd.read_table(ctab)

            if manacc is not None:
                comptable, metric_metadata = selection.manual_selection(
                    comptable, acc=manacc)

    # Write out ICA files.
    comp_names = comptable["Component"].values
    mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
    io_generator.save_file(mixing_df, "ICA mixing tsv")
    betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise),
                            mask_denoise)
    io_generator.save_file(betas_oc, 'z-scored ICA components img')

    # Save component table and associated json
    io_generator.save_file(comptable, "ICA metrics tsv")
    metric_metadata = metrics.collect.get_metadata(comptable)
    io_generator.save_file(metric_metadata, "ICA metrics json")

    decomp_metadata = {
        "Method": ("Independent components analysis with FastICA "
                   "algorithm implemented by sklearn. "),
    }
    for comp_name in comp_names:
        decomp_metadata[comp_name] = {
            "Description":
            "ICA fit to dimensionally-reduced optimally combined data.",
            "Method": "tedana",
        }
    with open(io_generator.get_name("ICA decomposition json"), "w") as fo:
        json.dump(decomp_metadata, fo, sort_keys=True, indent=4)

    if comptable[comptable.classification == 'accepted'].shape[0] == 0:
        LGR.warning('No BOLD components detected! Please check data and '
                    'results!')

    mmix_orig = mmix.copy()
    if tedort:
        acc_idx = comptable.loc[
            ~comptable.classification.str.contains('rejected')].index.values
        rej_idx = comptable.loc[
            comptable.classification.str.contains('rejected')].index.values
        acc_ts = mmix[:, acc_idx]
        rej_ts = mmix[:, rej_idx]
        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
        pred_rej_ts = np.dot(acc_ts, betas)
        resid = rej_ts - pred_rej_ts
        mmix[:, rej_idx] = resid
        comp_names = [
            io.add_decomp_prefix(comp,
                                 prefix='ica',
                                 max_value=comptable.index.max())
            for comp in comptable.index.values
        ]
        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
        io_generator.save_file(mixing_df, "ICA orthogonalized mixing tsv")
        RepLGR.info("Rejected components' time series were then "
                    "orthogonalized with respect to accepted components' time "
                    "series.")

    io.writeresults(data_oc,
                    mask=mask_denoise,
                    comptable=comptable,
                    mmix=mmix,
                    n_vols=n_vols,
                    io_generator=io_generator)

    if 'mir' in gscontrol:
        gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable,
                                     io_generator)

    if verbose:
        io.writeresults_echoes(catd, mmix, mask_denoise, comptable,
                               io_generator)

    # Write out BIDS-compatible description file
    derivative_metadata = {
        "Name": "tedana Outputs",
        "BIDSVersion": "1.5.0",
        "DatasetType": "derivative",
        "GeneratedBy": [{
            "Name": "tedana",
            "Version": __version__,
            "Description": ("A denoising pipeline for the identification and "
                            "removal of non-BOLD noise from multi-echo fMRI "
                            "data."),
            "CodeURL": "https://github.com/ME-ICA/tedana",
        }],
    }
    with open(io_generator.get_name("data description json"), "w") as fo:
        json.dump(derivative_metadata, fo, sort_keys=True, indent=4)

    RepLGR.info("This workflow used numpy (Van Der Walt, Colbert, & "
                "Varoquaux, 2011), scipy (Jones et al., 2001), pandas "
                "(McKinney, 2010), scikit-learn (Pedregosa et al., 2011), "
                "nilearn, and nibabel (Brett et al., 2019).")
    RefLGR.info(
        "Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The "
        "NumPy array: a structure for efficient numerical computation. "
        "Computing in Science & Engineering, 13(2), 22.")
    RefLGR.info("Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source "
                "Scientific Tools for Python, 2001-, http://www.scipy.org/")
    RefLGR.info("McKinney, W. (2010, June). Data structures for statistical "
                "computing in python. In Proceedings of the 9th Python in "
                "Science Conference (Vol. 445, pp. 51-56).")
    RefLGR.info("Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., "
                "Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). "
                "Scikit-learn: Machine learning in Python. Journal of machine "
                "learning research, 12(Oct), 2825-2830.")
    RefLGR.info("Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., "
                "Cipollini, B., McCarthy, P., … freec84. (2019, May 28). "
                "nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118")

    RepLGR.info("This workflow also used the Dice similarity index "
                "(Dice, 1945; Sørensen, 1948).")
    RefLGR.info("Dice, L. R. (1945). Measures of the amount of ecologic "
                "association between species. Ecology, 26(3), 297-302.")
    RefLGR.info(
        "Sørensen, T. J. (1948). A method of establishing groups of "
        "equal amplitude in plant sociology based on similarity of "
        "species content and its application to analyses of the "
        "vegetation on Danish commons. I kommission hos E. Munksgaard.")

    with open(repname, 'r') as fo:
        report = [line.rstrip() for line in fo.readlines()]
        report = ' '.join(report)
    with open(refname, 'r') as fo:
        reference_list = sorted(list(set(fo.readlines())))
        references = '\n'.join(reference_list)
    report += '\n\nReferences:\n\n' + references
    with open(repname, 'w') as fo:
        fo.write(report)

    if not no_reports:
        LGR.info(
            'Making figures folder with static component maps and timecourse plots.'
        )

        dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise,
                                             comptable)

        reporting.static_figures.carpet_plot(
            optcom_ts=data_oc,
            denoised_ts=dn_ts,
            hikts=hikts,
            lowkts=lowkts,
            mask=mask_denoise,
            io_generator=io_generator,
            gscontrol=gscontrol,
        )
        reporting.static_figures.comp_figures(
            data_oc,
            mask=mask_denoise,
            comptable=comptable,
            mmix=mmix_orig,
            io_generator=io_generator,
            png_cmap=png_cmap,
        )

        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            warn_msg = ("Reports requested but Python version is less than "
                        "3.6.0. Dynamic reports will not be generated.")
            LGR.warning(warn_msg)
        else:
            LGR.info('Generating dynamic report')
            reporting.generate_report(io_generator, tr=img_t_r)

    LGR.info('Workflow completed')
    utils.teardown_loggers()
    os.remove(refname)
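
For orientation, a minimal usage sketch for the full denoising workflow
(hypothetical inputs; assumes tedana and its dependencies, including nilearn,
are installed):

from tedana.workflows import tedana_workflow

tedana_workflow(
    data=['echo1.nii.gz', 'echo2.nii.gz', 'echo3.nii.gz'],
    tes=[14.5, 38.5, 62.5],  # echo times in milliseconds
    out_dir='tedana_results',
    tedpca='mdl',   # PCA criterion; a float in (0, 1) keeps that variance fraction
    fixed_seed=42,  # fixed seed for reproducible ICA
)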
Code example #5
File: tedana.py  Project: yulanl22/tedana
def tedana_workflow(data,
                    tes,
                    out_dir='.',
                    mask=None,
                    fittype='loglin',
                    combmode='t2s',
                    tedpca='mdl',
                    fixed_seed=42,
                    maxit=500,
                    maxrestart=10,
                    tedort=False,
                    gscontrol=None,
                    no_png=False,
                    png_cmap='coolwarm',
                    verbose=False,
                    low_mem=False,
                    debug=False,
                    quiet=False,
                    t2smap=None,
                    mixm=None,
                    ctab=None,
                    manacc=None):
    """
    Run the "canonical" TE-Dependent ANAlysis workflow.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    out_dir : :obj:`str`, optional
        Output directory.
    mask : :obj:`str` or None, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be
        spatially aligned with `data`. If an explicit mask is not provided,
        then Nilearn's compute_epi_mask function will be used to derive a mask
        from the first echo's data.
    fittype : {'loglin', 'curvefit'}, optional
        Monoexponential fitting method. 'loglin' uses the default linear
        fit to the log of the data. 'curvefit' uses a monoexponential fit to
        the raw data, which is slightly slower but may be more accurate.
        Default is 'loglin'.
    combmode : {'t2s'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default).
    tedpca : {'kundu', 'kundu-stabilize', 'mdl', 'aic', 'kic'}, optional
        Method with which to select components in TEDPCA. Default is 'mdl'.
    tedort : :obj:`bool`, optional
        Orthogonalize rejected components w.r.t. accepted ones prior to
        denoising. Default is False.
    gscontrol : {None, 't1c', 'gsr'} or :obj:`list`, optional
        Perform additional denoising to remove spatially diffuse noise. Default
        is None.
    verbose : :obj:`bool`, optional
        Generate intermediate and additional files. Default is False.
    no_png : :obj:`bool`, optional
        Do not generate .png plots and figures. Default is False.
    png_cmap : :obj:`str`, optional
        Name of a matplotlib colormap to be used when generating figures.
        Cannot be used with --no-png. Default is 'coolwarm'.
    t2smap : :obj:`str`, optional
        Precalculated T2* map in the same space as the input data. Values in
        the map must be in seconds.
    mixm : :obj:`str` or None, optional
        File containing mixing matrix, to be used when re-running the workflow.
        If not provided, ME-PCA and ME-ICA are done. Default is None.
    ctab : :obj:`str` or None, optional
        File containing component table from which to extract pre-computed
        classifications, to be used with 'mixm' when re-running the workflow.
        Default is None.
    manacc : :obj:`list`, :obj:`str`, or None, optional
        List of manually accepted components. Can be a list of the components,
        a comma-separated string with component numbers, or None. Default is
        None.

    Other Parameters
    ----------------
    fixed_seed : :obj:`int`, optional
        Value passed to ``mdp.numx_rand.seed()``.
        Set to a positive integer value for reproducible ICA results;
        otherwise, set to -1 for varying results across calls.
    maxit : :obj:`int`, optional
        Maximum number of iterations for ICA. Default is 500.
    maxrestart : :obj:`int`, optional
        Maximum number of attempts for ICA. If ICA fails to converge, the
        fixed seed will be updated and ICA will be run again. If convergence
        is achieved before maxrestart attempts, ICA will finish early.
        Default is 10.
    low_mem : :obj:`bool`, optional
        Enables low-memory processing, including the use of IncrementalPCA.
        May increase workflow duration. Default is False.
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppresses logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files. For a complete list of the files
    generated by this workflow, please visit
    https://tedana.readthedocs.io/en/latest/outputs.html
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    # boilerplate
    basename = 'report'
    extension = 'txt'
    repname = op.join(out_dir, (basename + '.' + extension))
    repex = op.join(out_dir, (basename + '*'))
    previousreps = glob(repex)
    previousreps.sort(reverse=True)
    for f in previousreps:
        previousparts = op.splitext(f)
        newname = previousparts[0] + '_old' + previousparts[1]
        os.rename(f, newname)
    refname = op.join(out_dir, '_references.txt')

    # create logfile name
    basename = 'tedana_'
    extension = 'tsv'
    start_time = datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')
    logname = op.join(out_dir, (basename + start_time + '.' + extension))

    # set logging format
    log_formatter = logging.Formatter(
        '%(asctime)s\t%(name)-12s\t%(levelname)-8s\t%(message)s',
        datefmt='%Y-%m-%dT%H:%M:%S')
    text_formatter = logging.Formatter('%(message)s')

    # set up logging file and open it for writing
    log_handler = logging.FileHandler(logname)
    log_handler.setFormatter(log_formatter)
    # Removing handlers after basicConfig doesn't work, so we use filters
    # for the relevant handlers themselves.
    log_handler.addFilter(ContextFilter())
    sh = logging.StreamHandler()
    sh.addFilter(ContextFilter())

    if quiet:
        logging.basicConfig(level=logging.WARNING, handlers=[log_handler, sh])
    elif debug:
        logging.basicConfig(level=logging.DEBUG, handlers=[log_handler, sh])
    else:
        logging.basicConfig(level=logging.INFO, handlers=[log_handler, sh])

    # Loggers for report and references
    rep_handler = logging.FileHandler(repname)
    rep_handler.setFormatter(text_formatter)
    ref_handler = logging.FileHandler(refname)
    ref_handler.setFormatter(text_formatter)
    RepLGR.setLevel(logging.INFO)
    RepLGR.addHandler(rep_handler)
    RefLGR.setLevel(logging.INFO)
    RefLGR.addHandler(ref_handler)

    LGR.info('Using output directory: {}'.format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # Coerce gscontrol to list
    if not isinstance(gscontrol, list):
        gscontrol = [gscontrol]

    LGR.info('Loading input data: {}'.format([f for f in data]))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    n_samp, n_echos, n_vols = catd.shape
    LGR.debug('Resulting data shape: {}'.format(catd.shape))

    if no_png and (png_cmap != 'coolwarm'):
        LGR.warning('Overriding --no-png since --png-cmap provided.')
        no_png = False

    # check if TR is 0
    img_t_r = ref_img.header.get_zooms()[-1]
    if img_t_r == 0 and not no_png:
        raise IOError(
            'Dataset has a TR of 0. This indicates incorrect'
            ' header information. To correct this, we recommend'
            ' using this snippet:'
            '\n'
            'https://gist.github.com/jbteves/032c87aeb080dd8de8861cb151bff5d6'
            '\n'
            'to correct your TR to the value it should be.')

    if mixm is not None and op.isfile(mixm):
        mixm = op.abspath(mixm)
        # Allow users to re-run on same folder
        if mixm != op.join(out_dir, 'ica_mixing.tsv'):
            shutil.copyfile(mixm, op.join(out_dir, 'ica_mixing.tsv'))
            shutil.copyfile(mixm, op.join(out_dir, op.basename(mixm)))
    elif mixm is not None:
        raise IOError('Argument "mixm" must be an existing file.')

    if ctab is not None and op.isfile(ctab):
        ctab = op.abspath(ctab)
        # Allow users to re-run on same folder
        if ctab != op.join(out_dir, 'ica_decomposition.json'):
            shutil.copyfile(ctab, op.join(out_dir, 'ica_decomposition.json'))
            shutil.copyfile(ctab, op.join(out_dir, op.basename(ctab)))
    elif ctab is not None:
        raise IOError('Argument "ctab" must be an existing file.')

    if isinstance(manacc, str):
        manacc = [int(comp) for comp in manacc.split(',')]

    if ctab and not mixm:
        LGR.warning('Argument "ctab" requires argument "mixm".')
        ctab = None
    elif manacc is not None and not mixm:
        LGR.warning('Argument "manacc" requires argument "mixm".')
        manacc = None

    if t2smap is not None and op.isfile(t2smap):
        t2smap = op.abspath(t2smap)
        # Allow users to re-run on same folder
        if t2smap != op.join(out_dir, 't2sv.nii.gz'):
            shutil.copyfile(t2smap, op.join(out_dir, 't2sv.nii.gz'))
            shutil.copyfile(t2smap, op.join(out_dir, op.basename(t2smap)))
    elif t2smap is not None:
        raise IOError('Argument "t2smap" must be an existing file.')

    RepLGR.info("TE-dependence analysis was performed on input data.")
    if mask and not t2smap:
        # TODO: add affine check
        LGR.info('Using user-defined mask')
        RepLGR.info("A user-defined mask was applied to the data.")
    elif t2smap and not mask:
        LGR.info('Using user-defined T2* map to generate mask')
        t2s_limited_sec = utils.load_image(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = (t2s_limited != 0).astype(int)
    elif t2smap and mask:
        LGR.info('Combining user-defined mask and T2* map to generate mask')
        t2s_limited_sec = utils.load_image(t2smap)
        t2s_limited = utils.sec2millisec(t2s_limited_sec)
        t2s_full = t2s_limited.copy()
        mask = utils.load_image(mask)
        mask[t2s_limited == 0] = 0  # reduce mask based on T2* map
    else:
        LGR.info('Computing EPI mask from first echo')
        first_echo_img = io.new_nii_like(ref_img, catd[:, 0, :])
        mask = compute_epi_mask(first_echo_img)
        RepLGR.info("An initial mask was generated from the first echo using "
                    "nilearn's compute_epi_mask function.")

    mask, masksum = utils.make_adaptive_mask(catd, mask=mask, getsum=True)
    LGR.debug('Retaining {}/{} samples'.format(mask.sum(), n_samp))
    io.filewrite(masksum, op.join(out_dir, 'adaptive_mask.nii'), ref_img)

    if t2smap is None:
        LGR.info('Computing T2* map')
        t2s_limited, s0_limited, t2s_full, s0_full = decay.fit_decay(
            catd, tes, mask, masksum, fittype)

        # set a hard cap for the T2* map
        # anything more than 10x the 99.5 %ile will be reset to the 99.5 %ile
        cap_t2s = stats.scoreatpercentile(t2s_limited.flatten(),
                                          99.5,
                                          interpolation_method='lower')
        LGR.debug('Setting cap on T2* map at {:.5f}s'.format(
            utils.millisec2sec(cap_t2s)))
        t2s_limited[t2s_limited > cap_t2s * 10] = cap_t2s
        io.filewrite(utils.millisec2sec(t2s_limited),
                     op.join(out_dir, 't2sv.nii'), ref_img)
        io.filewrite(s0_limited, op.join(out_dir, 's0v.nii'), ref_img)

        if verbose:
            io.filewrite(utils.millisec2sec(t2s_full),
                         op.join(out_dir, 't2svG.nii'), ref_img)
            io.filewrite(s0_full, op.join(out_dir, 's0vG.nii'), ref_img)

    # optimally combine data
    data_oc = combine.make_optcom(catd,
                                  tes,
                                  masksum,
                                  t2s=t2s_full,
                                  combmode=combmode)

    # regress out global signal, if requested via the gscontrol argument
    if 'gsr' in gscontrol:
        catd, data_oc = gsc.gscontrol_raw(catd,
                                          data_oc,
                                          n_echos,
                                          ref_img,
                                          out_dir=out_dir)

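    # With no user-supplied mixing matrix, run the full decomposition:
    # PCA to strip thermal noise and reduce dimensionality, then ICA on
    # the reduced data.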
    if mixm is None:
        # Identify and remove thermal noise from data
        dd, n_components = decomposition.tedpca(catd,
                                                data_oc,
                                                combmode,
                                                mask,
                                                masksum,
                                                t2s_full,
                                                ref_img,
                                                tes=tes,
                                                algorithm=tedpca,
                                                kdaw=10.,
                                                rdaw=1.,
                                                out_dir=out_dir,
                                                verbose=verbose,
                                                low_mem=low_mem)
        mmix_orig = decomposition.tedica(dd, n_components, fixed_seed, maxit,
                                         maxrestart)

        if verbose:
            io.filewrite(utils.unmask(dd, mask),
                         op.join(out_dir, 'ts_OC_whitened.nii.gz'), ref_img)

        LGR.info('Making second component selection guess from ICA results')
        # Estimate betas and compute selection metrics for mixing matrix
        # generated from dimensionally reduced data using full data (i.e., data
        # with thermal noise)
        comptable, metric_maps, betas, mmix = metrics.dependence_metrics(
            catd,
            data_oc,
            mmix_orig,
            masksum,
            tes,
            ref_img,
            reindex=True,
            label='meica_',
            out_dir=out_dir,
            algorithm='kundu_v2',
            verbose=verbose)
        comp_names = [
            io.add_decomp_prefix(comp,
                                 prefix='ica',
                                 max_value=comptable.index.max())
            for comp in comptable.index.values
        ]
        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
        mixing_df.to_csv(op.join(out_dir, 'ica_mixing.tsv'),
                         sep='\t',
                         index=False)
        betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask), mask)
        io.filewrite(betas_oc, op.join(out_dir, 'ica_components.nii.gz'),
                     ref_img)

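        # Compute the remaining Kundu metrics and classify components
        # using the Kundu v2 selection decision tree.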
        comptable = metrics.kundu_metrics(comptable, metric_maps)
        comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
    else:
        LGR.info('Using supplied mixing matrix from ICA')
        mmix_orig = pd.read_table(op.join(out_dir, 'ica_mixing.tsv')).values

        if ctab is None:
            comptable, metric_maps, betas, mmix = metrics.dependence_metrics(
                catd,
                data_oc,
                mmix_orig,
                masksum,
                tes,
                ref_img,
                label='meica_',
                out_dir=out_dir,
                algorithm='kundu_v2',
                verbose=verbose)
            comptable = metrics.kundu_metrics(comptable, metric_maps)
            comptable = selection.kundu_selection_v2(comptable, n_echos,
                                                     n_vols)
        else:
            mmix = mmix_orig.copy()
            comptable = io.load_comptable(ctab)
            if manacc is not None:
                comptable = selection.manual_selection(comptable, acc=manacc)
        betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask), mask)
        io.filewrite(betas_oc, op.join(out_dir, 'ica_components.nii.gz'),
                     ref_img)

    # Save decomposition
    comptable['Description'] = ('ICA fit to dimensionally-reduced '
                                'optimally combined data.')
    mmix_dict = {}
    mmix_dict['Method'] = ('Independent components analysis with FastICA '
                           'algorithm implemented by sklearn. Components '
                           'are sorted by Kappa in descending order. '
                           'Component signs are flipped to best match the '
                           'data.')
    io.save_comptable(comptable,
                      op.join(out_dir, 'ica_decomposition.json'),
                      label='ica',
                      metadata=mmix_dict)

    if comptable[comptable.classification == 'accepted'].shape[0] == 0:
        LGR.warning('No BOLD components detected! Please check data and '
                    'results!')

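    # Keep an unmodified copy of the mixing matrix for the figures below.
    # If tedort was requested, rejected components' time series are
    # replaced by their residuals after regressing out the accepted
    # components' time series.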
    mmix_orig = mmix.copy()
    if tedort:
        acc_idx = comptable.loc[
            ~comptable.classification.str.contains('rejected')].index.values
        rej_idx = comptable.loc[
            comptable.classification.str.contains('rejected')].index.values
        acc_ts = mmix[:, acc_idx]
        rej_ts = mmix[:, rej_idx]
        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
        pred_rej_ts = np.dot(acc_ts, betas)
        resid = rej_ts - pred_rej_ts
        mmix[:, rej_idx] = resid
        comp_names = [
            io.add_decomp_prefix(comp,
                                 prefix='ica',
                                 max_value=comptable.index.max())
            for comp in comptable.index.values
        ]
        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
        mixing_df.to_csv(op.join(out_dir, 'ica_orth_mixing.tsv'),
                         sep='\t',
                         index=False)
        RepLGR.info("Rejected components' time series were then "
                    "orthogonalized with respect to accepted components' time "
                    "series.")

    io.writeresults(data_oc,
                    mask=mask,
                    comptable=comptable,
                    mmix=mmix,
                    n_vols=n_vols,
                    ref_img=ref_img,
                    out_dir=out_dir)

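    # Apply T1-like ("t1c") global signal control to the mixing matrix,
    # if requested.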
    if 't1c' in gscontrol:
        gsc.gscontrol_mmix(data_oc,
                           mmix,
                           mask,
                           comptable,
                           ref_img,
                           out_dir=out_dir)

    if verbose:
        io.writeresults_echoes(catd,
                               mmix,
                               mask,
                               comptable,
                               ref_img,
                               out_dir=out_dir)

    if not no_png:
        LGR.info('Making figures folder with static component maps and '
                 'timecourse plots.')
        # make figure folder first
        if not op.isdir(op.join(out_dir, 'figures')):
            os.mkdir(op.join(out_dir, 'figures'))

        viz.write_comp_figs(data_oc,
                            mask=mask,
                            comptable=comptable,
                            mmix=mmix_orig,
                            ref_img=ref_img,
                            out_dir=op.join(out_dir, 'figures'),
                            png_cmap=png_cmap)

        LGR.info('Making Kappa vs Rho scatter plot')
        viz.write_kappa_scatter(comptable=comptable,
                                out_dir=op.join(out_dir, 'figures'))

        LGR.info('Making Kappa/Rho scree plot')
        viz.write_kappa_scree(comptable=comptable,
                              out_dir=op.join(out_dir, 'figures'))

        LGR.info('Making overall summary figure')
        viz.write_summary_fig(comptable=comptable,
                              out_dir=op.join(out_dir, 'figures'))

    LGR.info('Workflow completed')

    RepLGR.info("This workflow used numpy (Van Der Walt, Colbert, & "
                "Varoquaux, 2011), scipy (Jones et al., 2001), pandas "
                "(McKinney, 2010), scikit-learn (Pedregosa et al., 2011), "
                "nilearn, and nibabel (Brett et al., 2019).")
    RefLGR.info(
        "Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The "
        "NumPy array: a structure for efficient numerical computation. "
        "Computing in Science & Engineering, 13(2), 22.")
    RefLGR.info("Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source "
                "Scientific Tools for Python, 2001-, http://www.scipy.org/")
    RefLGR.info("McKinney, W. (2010, June). Data structures for statistical "
                "computing in python. In Proceedings of the 9th Python in "
                "Science Conference (Vol. 445, pp. 51-56).")
    RefLGR.info("Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., "
                "Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). "
                "Scikit-learn: Machine learning in Python. Journal of machine "
                "learning research, 12(Oct), 2825-2830.")
    RefLGR.info("Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., "
                "Cipollini, B., McCarthy, P., … freec84. (2019, May 28). "
                "nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118")

    RepLGR.info("This workflow also used the Dice similarity index "
                "(Dice, 1945; Sørensen, 1948).")
    RefLGR.info("Dice, L. R. (1945). Measures of the amount of ecologic "
                "association between species. Ecology, 26(3), 297-302.")
    RefLGR.info(
        "Sørensen, T. J. (1948). A method of establishing groups of "
        "equal amplitude in plant sociology based on similarity of "
        "species content and its application to analyses of the "
        "vegetation on Danish commons. I kommission hos E. Munksgaard.")

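    # Assemble the final methods report: collapse the report text into a
    # single paragraph and append the deduplicated, sorted reference list.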
    with open(repname, 'r') as fo:
        report = [line.rstrip() for line in fo.readlines()]
        report = ' '.join(report)
    with open(refname, 'r') as fo:
        reference_list = sorted(list(set(fo.readlines())))
        references = '\n'.join(reference_list)
    report += '\n\nReferences\n' + references
    with open(repname, 'w') as fo:
        fo.write(report)
    os.remove(refname)

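    # Detach all handlers from the root logger so that repeated workflow
    # runs in the same session do not emit duplicate log messages.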
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
Code example #6
import numpy as np

from tedana import utils


def test_millisec2sec():
    """
    Ensure that millisec2sec returns 1/1000x the input values.
    """
    assert utils.millisec2sec(5000) == 5
    assert utils.millisec2sec(np.array([5000])) == np.array([5])
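
The workflow above calls both utils.millisec2sec and utils.sec2millisec, so a round-trip check is a natural companion to this example. The sketch below is an addition, not part of the original example, and assumes sec2millisec follows the same calling convention as millisec2sec:

def test_sec2millisec():
    """
    Ensure that sec2millisec returns 1000x the input values and that it
    inverts millisec2sec.
    """
    assert utils.sec2millisec(5) == 5000
    assert utils.sec2millisec(np.array([5])) == np.array([5000])
    # Round trip: converting to milliseconds and back recovers the input.
    assert utils.millisec2sec(utils.sec2millisec(5)) == 5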