Example #1
def test_integration_four_echo(skip_integration):
    """ Integration test of the full tedana workflow using four-echo test data
    """

    if skip_integration:
        pytest.skip('Skipping four-echo integration test')
    out_dir = '/tmp/data/four-echo/TED.four-echo'
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    # download data and run the test
    download_test_data('https://osf.io/gnj73/download',
                       os.path.dirname(out_dir))
    prepend = '/tmp/data/four-echo/'
    prepend += 'sub-PILOT_ses-01_task-localizerDetection_run-01_echo-'
    suffix = '_space-sbref_desc-preproc_bold+orig.HEAD'
    datalist = [prepend + str(i + 1) + suffix for i in range(4)]
    tedana_workflow(data=datalist,
                    tes=[11.8, 28.04, 44.28, 60.52],
                    out_dir=out_dir,
                    tedpca='kundu-stabilize',
                    gscontrol=['gsr', 't1c'],
                    png_cmap='bone',
                    debug=True,
                    verbose=True)

    # compare the generated output files
    fn = resource_filename('tedana', 'tests/data/fiu_four_echo_outputs.txt')
    check_integration_outputs(fn, out_dir)
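These tests rely on a skip_integration fixture and a check_integration_outputs helper that the listing does not show. A minimal sketch of how such pieces might be wired up is below; the command-line flag name and the exact file comparison are assumptions for illustration, not tedana's actual test utilities.

# Hypothetical conftest.py / test-utility sketch -- not tedana's real code.
import os

import pytest


def pytest_addoption(parser):
    # Assumed flag name: lets ``pytest --skipintegration`` bypass slow tests.
    parser.addoption('--skipintegration', action='store_true', default=False,
                     help='Skip long-running integration tests')


@pytest.fixture
def skip_integration(request):
    return request.config.getoption('--skipintegration')


def check_integration_outputs(fname, outpath):
    """Check the files produced under ``outpath`` against the listing in ``fname``."""
    with open(fname, 'r') as fo:
        expected = [line.strip() for line in fo if line.strip()]
    produced = [os.path.relpath(os.path.join(root, f), outpath)
                for root, _, fnames in os.walk(outpath) for f in fnames]
    assert sorted(expected) == sorted(produced)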
Example #2
def test_integration_three_echo(skip_integration):
    """ Integration test of the full tedana workflow using three-echo test data
    """

    if skip_integration:
        pytest.skip('Skipping three-echo integration test')
    out_dir = '/tmp/data/three-echo/TED.three-echo'
    out_dir2 = '/tmp/data/three-echo/TED.three-echo-rerun'
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    # download data and run the test
    download_test_data('https://osf.io/rqhfc/download',
                       os.path.dirname(out_dir))
    tedana_workflow(data='/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz',
                    tes=[14.5, 38.5, 62.5],
                    out_dir=out_dir,
                    low_mem=True,
                    tedpca='mdl')

    # test rerunning the workflow
    tedana_workflow(data='/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz',
                    tes=[14.5, 38.5, 62.5],
                    out_dir=out_dir2,
                    mixm=os.path.join(out_dir, 'ica_mixing.tsv'),
                    ctab=os.path.join(out_dir, 'ica_decomposition.json'),
                    no_png=True)

    # compare the generated output files
    fn = resource_filename('tedana',
                           'tests/data/cornell_three_echo_outputs.txt')
    check_integration_outputs(fn, out_dir)
Example #3
def test_integration_five_echo(skip_integration):
    """ Integration test of the full tedana workflow using five-echo test data
    """

    if skip_integration:
        pytest.skip('Skipping five-echo integration test')
    out_dir = '/tmp/data/five-echo/TED.five-echo'
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    # download data and run the test
    download_test_data('https://osf.io/9c42e/download',
                       os.path.dirname(out_dir))
    prepend = '/tmp/data/five-echo/p06.SBJ01_S09_Task11_e'
    suffix = '.sm.nii.gz'
    datalist = [prepend + str(i + 1) + suffix for i in range(5)]
    tedana_workflow(data=datalist,
                    tes=[15.4, 29.7, 44.0, 58.3, 72.6],
                    out_dir=out_dir,
                    debug=True,
                    verbose=True)

    # Just a check on the component table pending a unit test of load_comptable
    comptable = os.path.join(out_dir, 'ica_decomposition.json')
    df = io.load_comptable(comptable)
    assert isinstance(df, pd.DataFrame)

    # compare the generated output files
    fn = resource_filename('tedana', 'tests/data/tedana_outputs_verbose.txt')
    check_integration_outputs(fn, out_dir)
Example #4
def test_basic_tedana():
    """
    A very simple test, to confirm that tedana creates output
    files.
    """
    workflows.tedana_workflow([op.expanduser('~/data/zcat_ffd.nii.gz')],
                              [14.5, 38.5, 62.5])
    assert op.isfile('comp_table.txt')
Example #5
def test_integration_three_echo():
    """
    An integration test of the full tedana workflow using three-echo test data.
    """
    tedana_workflow(data='/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz',
                    tes=[14.5, 38.5, 62.5],
                    out_dir='/tmp/data/three-echo/TED.three-echo',
                    tedpca='kundu',
                    png=True)
Example #6
def test_integration_five_echo():
    """
    An integration test of the full tedana workflow using five-echo test data.
    """
    tedana_workflow(
        data='/tmp/data/five-echo/p06.SBJ01_S09_Task11_e[1,2,3,4,5].sm.nii.gz',
        tes=[15.4, 29.7, 44.0, 58.3, 72.6],
        out_dir='/tmp/data/five-echo/TED.five-echo',
        debug=True,
        verbose=True)
Example #7
def run_tedana(files, tes, seed):
    """
    Run tedana workflow across a range of parameters

    Parameters
    ----------
    files : list of str
        Echo-specific preprocessed data files
    tes : list of floats
        Echo times in seconds
    seed : int
        Random seed
    """
    out_dir = '/scratch/tsalo006/reliability_analysis/tedana_outputs/'
    ds_dir = '/home/data/nbc/external-datasets/ds001491/'
    tes = [te * 1000 for te in tes]
    sub = re.findall('sub-[0-9a-zA-Z]+_', files[0])[0][:-1]
    #ds_dir = files[0][:files[0].index(sub)]
    name = 'tedana_seed-{0:03d}'.format(seed)
    ted_dir = op.join(ds_dir, 'derivatives', name, sub, 'func')
    if op.isdir(ted_dir):
        rmtree(ted_dir)
    makedirs(ted_dir)

    mask = op.join(ted_dir, 'nilearn_epi_mask.nii')
    mask_img = compute_epi_mask(files[0])
    mask_img.to_filename(mask)

    tedana_workflow(data=files,
                    tes=tes,
                    fixed_seed=seed,
                    tedpca='mle',
                    mask=mask,
                    out_dir=ted_dir,
                    debug=True,
                    gscontrol=None)
    # Grab the files we care about
    log_file = op.join(ted_dir, 'runlog.tsv')
    out_log_file = op.join(out_dir,
                           '{0}_seed-{1:03d}_log.tsv'.format(sub, seed))
    ct_file = op.join(ted_dir, 'comp_table_ica.txt')
    out_ct_file = op.join(out_dir,
                          '{0}_seed-{1:03d}_comptable.txt'.format(sub, seed))
    dn_file = op.join(ted_dir, 'dn_ts_OC.nii')
    out_dn_file = op.join(out_dir,
                          '{0}_seed-{1:03d}_denoised.nii'.format(sub, seed))
    copyfile(log_file, out_log_file)
    copyfile(ct_file, out_ct_file)
    copyfile(dn_file, out_dn_file)
    if seed != 0:  # keep first seed for t2s map
        rmtree(ted_dir)
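A call to this helper might look like the following; the subject, file paths, echo times, and seed range are placeholders for illustration, not the inputs used in the original reliability analysis.

# Hypothetical invocation -- paths, echo times, and seeds are placeholders.
base = '/home/data/nbc/external-datasets/ds001491/sub-01/func/sub-01_task-rest'
echo_files = ['{0}_echo-{1}_bold.nii.gz'.format(base, echo) for echo in (1, 2, 3)]
echo_times = [0.0145, 0.0385, 0.0625]  # in seconds; converted to ms inside run_tedana

for seed in range(10):
    run_tedana(echo_files, echo_times, seed)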
Example #8
def test_integration_five_echo(skip_integration):
    """ Integration test of the full tedana workflow using five-echo test data
    """

    if skip_integration:
        pytest.skip('Skipping five-echo integration test')
    out_dir = '/tmp/data/five-echo/TED.five-echo'
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    # download data and run the test
    download_test_data('https://osf.io/9c42e/download',
                       os.path.dirname(out_dir))
    prepend = '/tmp/data/five-echo/p06.SBJ01_S09_Task11_e'
    suffix = '.sm.nii.gz'
    datalist = [prepend + str(i + 1) + suffix for i in range(5)]
    tedana_workflow(data=datalist,
                    tes=[15.4, 29.7, 44.0, 58.3, 72.6],
                    out_dir=out_dir,
                    tedpca='aic',
                    fittype='curvefit',
                    tedort=True,
                    verbose=True)

    # Just a check on the component table pending a unit test of load_comptable
    comptable = os.path.join(out_dir, 'ica_decomposition.json')
    df = io.load_comptable(comptable)
    assert isinstance(df, pd.DataFrame)

    out_dir2 = '/tmp/data/five-echo/TED.five-echo-manual'
    acc_comps = df.loc[df['classification'] == 'accepted'].index.values
    mixing = os.path.join(out_dir, 'ica_mixing.tsv')
    tedana_workflow(
        data=datalist,
        tes=[15.4, 29.7, 44.0, 58.3, 72.6],
        out_dir=out_dir2,
        debug=True,
        verbose=True,
        manacc=','.join(acc_comps.astype(str)),
        ctab=comptable,
        mixm=mixing,
    )

    # compare the generated output files
    fn = resource_filename('tedana',
                           'tests/data/nih_five_echo_outputs_verbose.txt')
    check_integration_outputs(fn, out_dir)
Example #9
def run_tedana(files, tes, seed):
    """
    Run tedana workflow across a range of parameters

    Parameters
    ----------
    files : list of str
        Echo-specific preprocessed data files
    tes : list of floats
        Echo times in seconds
    seed : int
        Random seed
    """
    tes = [te * 1000 for te in tes]
    sub = re.findall('sub-[0-9a-zA-Z]+_', files[0])[0][:-1]
    ds_dir = files[0][:files[0].index(sub)]

    for combmode in ['t2s', 'linear']:
        for gscontrol in [['t1c'], ['gsr', 't1c']]:
            for ste in [-1, 0]:
                for tedort in [False, True]:
                    for tedpca in ['mle', 'kundu']:
                        for wvpca in [False, True]:
                            args = {
                                'combmode': combmode,
                                'gscontrol': gscontrol,
                                'ste': ste,
                                'tedort': tedort,
                                'tedpca': tedpca,
                                'wvpca': wvpca
                            }
                            name = make_str(args)  # make_str: helper defined elsewhere that labels this parameter set
                            name = name + '_seed-{0:03d}'.format(seed)
                            out_dir = op.join(ds_dir, 'derivatives', name, sub,
                                              'func')
                            tedana_workflow(data=files,
                                            tes=tes,
                                            fixed_seed=seed,
                                            out_dir=out_dir,
                                            debug=True,
                                            **args)
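The nested loops above enumerate every combination of six two-valued options, i.e. 64 runs per seed. An equivalent, flatter way to express the same sweep, shown here only as a refactoring sketch, uses itertools.product:

# Refactoring sketch only -- behaviour matches the nested loops above.
from itertools import product

grid = {
    'combmode': ['t2s', 'linear'],
    'gscontrol': [['t1c'], ['gsr', 't1c']],
    'ste': [-1, 0],
    'tedort': [False, True],
    'tedpca': ['mle', 'kundu'],
    'wvpca': [False, True],
}
for values in product(*grid.values()):
    args = dict(zip(grid.keys(), values))
    # ...then build the run name and call tedana_workflow(**args) as above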
Example #10
def test_integration_three_echo(skip_integration):
    """ Integration test of the full tedana workflow using three-echo test data
    """

    if skip_integration:
        pytest.skip('Skipping three-echo integration test')
    out_dir = '/tmp/data/three-echo/TED.three-echo'
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    # download data and run the test
    download_test_data('https://osf.io/rqhfc/download',
                       os.path.dirname(out_dir))
    tedana_workflow(data='/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz',
                    tes=[14.5, 38.5, 62.5],
                    out_dir=out_dir,
                    tedpca='kundu')

    # compare the generated output files
    fn = resource_filename('tedana', 'tests/data/tedana_outputs.txt')
    check_integration_outputs(fn, out_dir)
Example #11
            # Excerpt from a loop over datasets: gather this dataset's
            # preprocessed echo files, echo times, and brain mask.
            files, tes, mask_file = get_preproc_data(dset_name, item_cfg,
                                                     DATA_DIR)
            te_str = ','.join([str(te) for te in tes])
            file_str = ','.join(files)
            ds_dir = op.join(DATA_DIR, dset_name, item_cfg['version'],
                             'uncompressed')

            # Run AFNI's meica.py
            out_dir = op.join(ds_dir, 'derivatives/afni/')
            meica_script = op.abspath('../dependencies/afni/meica.py')
            cmd = ('{0} -e {1} -d {2} --prefix {3} '
                   '--OVERWRITE'.format(meica_script, te_str, file_str,
                                        out_dir))
            # subprocess.call(cmd.split(' '))

            # Run ME-ICA/me-ica
            out_dir = op.join(ds_dir, 'derivatives/kundu_v3.2/')
            meica_script = op.abspath('../dependencies/me-ica/meica.py')
            cmd = ('{0} -e {1} -d {2} --prefix {3} '
                   '--OVERWRITE'.format(meica_script, te_str, file_str,
                                        out_dir))
            # subprocess.call(cmd.split(' '))

            # Run ME-ICA/tedana
            # Run v2.5 of the component selection algorithm
            out_dir = op.join(ds_dir, 'derivatives/tedana_v2.5/')
            tedana_workflow(data=files,
                            tes=tes,
                            mask=mask_file,
                            out_dir=out_dir)
Example #12
def run_tedana(files, tes, seed, dset, source, kind):
    """
    Run tedana workflow with a reasonable number of iterations and no
    restarts. The number of iterations is used to allow for convergence
    failures.

    The workflow should be run on data which have been only minimally
    preprocessed (e.g., slice timing and motion correction), but which have not
    yet been subjected to spatial normalization, smoothing, or denoising.

    Parameters
    ----------
    files : list of str
        Echo-specific preprocessed data files
    tes : list of floats
        Echo times in seconds
    seed : int
        Random seed
    dset : str
        Name of the OpenNeuro dataset being analyzed
    source : {'fmriprep', 'afni'}
        Preprocessing pipeline used
    kind : {'simple', 'duration'}
        Whether to allow a large number of iterations (for estimating duration
        and number of iterations required for convergence) or a reasonable
        number of iterations (for evaluating convergence under typical
        parameters)
    """
    if kind == 'simple':
        maxit = 500
    elif kind == 'duration':
        maxit = 100000
    else:
        raise Exception('Unrecognized kind argument: {0}'.format(kind))

    # Constants
    ds_dir = '/home/data/nbc/external-datasets/{0}'.format(dset)
    out_base_dir = '/scratch/tsalo006/reliability_analysis/'

    # Output directory for collected derivatives
    out_dir = op.join(out_base_dir,
                      '{0}_tedana_outputs_{1}_{2}'.format(dset, source, kind))
    tes = [te * 1000 for te in tes]
    sub = re.findall('sub-[0-9a-zA-Z]+_', files[0])[0][:-1]

    # BIDS-structured derivatives folders
    name = 'tedana_{0}_{1}_seed-{2:04d}'.format(source, kind, seed)
    ted_dir = op.join(ds_dir, 'derivatives', name, sub, 'func')
    if op.isdir(ted_dir):
        rmtree(ted_dir)
    makedirs(ted_dir)

    # Use an EPI mask
    mask = op.join(ted_dir, 'nilearn_epi_mask.nii')
    mask_img = compute_epi_mask(files[0])
    mask_img.to_filename(mask)

    tedana_workflow(data=files,
                    tes=tes,
                    fixed_seed=seed,
                    tedpca='mle',
                    mask=mask,
                    out_dir=ted_dir,
                    gscontrol=None,
                    maxit=maxit,
                    maxrestart=1,
                    debug=True,
                    verbose=False)
    # Grab the files we care about
    log_file = sorted(glob(op.join(ted_dir, 'runlog*.tsv')))[::-1][0]  # newest run log; assumes glob is imported
    out_log_file = op.join(out_dir,
                           '{0}_seed-{1:04d}_log.tsv'.format(sub, seed))
    ct_file = op.join(ted_dir, 'comp_table_ica.txt')
    out_ct_file = op.join(out_dir,
                          '{0}_seed-{1:04d}_comptable.txt'.format(sub, seed))
    mmix_file = op.join(ted_dir, 'meica_mix.1D')
    out_mmix_file = op.join(out_dir,
                            '{0}_seed-{1:04d}_mmix.1D'.format(sub, seed))
    dn_file = op.join(ted_dir, 'dn_ts_OC.nii')
    out_dn_file = op.join(out_dir,
                          '{0}_seed-{1:04d}_denoised.nii'.format(sub, seed))
    copyfile(log_file, out_log_file)
    copyfile(ct_file, out_ct_file)
    copyfile(mmix_file, out_mmix_file)
    copyfile(dn_file, out_dn_file)
    if seed != 0:  # keep first seed for t2s map
        rmtree(ted_dir)
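A driver for this function might look like the sketch below; the dataset name, file paths, echo times, and seed count are placeholders rather than the values used in the original analysis.

# Hypothetical driver -- dataset, paths, echo times, and seeds are placeholders.
dset = 'ds000210'
base = '/home/data/nbc/external-datasets/{0}/sub-01/func/sub-01_task-rest'.format(dset)
echo_files = ['{0}_echo-{1}_bold.nii.gz'.format(base, echo) for echo in (1, 2, 3)]
echo_times = [0.0132, 0.0291, 0.0450]  # in seconds; run_tedana converts to ms

for seed in range(25):
    run_tedana(echo_files, echo_times, seed, dset=dset,
               source='fmriprep', kind='simple')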
Example #13
from tedana.workflows import tedana_workflow
from glob import glob
import os.path as op

in_dirs = [
    '/Users/tsalo/Documents/Laird_DIVA/dwidenoised',
    '/Users/tsalo/Documents/Laird_DIVA/complex-dwidenoised',
    '/Users/tsalo/Documents/Laird_DIVA/dset/sub-Blossom/ses-02/func',
]
tasks = ['localizerDetection', 'localizerEstimation']
echo_times = [11.8, 28.04, 44.28, 60.52]

for in_dir in in_dirs:
    for task in tasks:
        pattern = op.join(in_dir, '*{}*_bold.nii.gz'.format(task))
        files = sorted(glob(pattern))
        out_dir = op.join(in_dir, 'tedana-{}'.format(task))
        if not op.isdir(out_dir):
            tedana_workflow(files,
                            echo_times,
                            fittype='curvefit',
                            out_dir=out_dir,
                            fixed_seed=1)