Example #1
0
    def run_command(self, args):
        """Run the T1VolumeParcellation pipeline from parsed CLI arguments.

        Args:
            args: parsed command-line namespace providing caps_directory,
                subjects_sessions_tsv, group_id, atlases, working_directory,
                n_procs and modulation attributes.

        Raises:
            ValueError: If ``args.modulation`` is not ``'on'`` or ``'off'``.
        """
        from tempfile import mkdtemp

        from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import T1VolumeParcellation
        from clinica.utils.stream import cprint

        # Explicit validation instead of `assert` (asserts vanish under -O).
        if args.modulation not in ('on', 'off'):
            raise ValueError(
                "modulation must be 'on' or 'off', got %r" % args.modulation)

        # Fall back to a temporary working directory BEFORE it is used,
        # so that both parameters['wd'] and base_dir see the real path.
        if args.working_directory is None:
            args.working_directory = mkdtemp()

        pipeline = T1VolumeParcellation(
            caps_directory=self.absolute_path(args.caps_directory),
            tsv_file=self.absolute_path(args.subjects_sessions_tsv))
        pipeline.parameters = {
            'group_id': args.group_id,
            'atlases': args.atlases,
            'wd': self.absolute_path(args.working_directory),
            'n_procs': args.n_procs,
            'modulate': args.modulation
        }
        pipeline.base_dir = self.absolute_path(args.working_directory)

        if args.n_procs:
            pipeline.run(plugin='MultiProc',
                         plugin_args={'n_procs': args.n_procs})
        else:
            pipeline.run()

        cprint(
            "The " + self._name +
            " pipeline has completed. You can now delete the working directory ("
            + args.working_directory + ").")
Example #2
0
def test_run_T1VolumeParcellation(cmdopt):
    """Run the T1VolumeParcellation pipeline on test data and compare the
    produced atlas statistics against the reference TSV files.

    Args:
        cmdopt: working directory passed via the pytest command line.
    """
    import shutil
    # `pardir` was used below but never imported — fixed here.
    from os.path import abspath, dirname, join, pardir

    import numpy as np
    import pandas as pds

    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import (
        T1VolumeParcellation, )

    working_dir = cmdopt
    # Resolve <repo>/data/T1VolumeParcellation relative to this test file.
    root = dirname(abspath(join(abspath(__file__), pardir, pardir)))
    root = join(root, "data", "T1VolumeParcellation")
    clean_folder(join(root, "out", "caps"), recreate=False)
    clean_folder(join(working_dir, "T1VolumeParcellation"))

    # Copy data for use of pipeline
    shutil.copytree(join(root, "in", "caps"), join(root, "out", "caps"))

    # Instantiate pipeline
    parameters = {"group_label": "UnitTest"}
    pipeline = T1VolumeParcellation(
        caps_directory=join(root, "in", "caps"),
        tsv_file=join(root, "in", "subjects.tsv"),
        base_dir=join(working_dir, "T1VolumeParcellation"),
        parameters=parameters,
    )
    pipeline.build()
    pipeline.run(plugin="MultiProc",
                 plugin_args={"n_procs": 4},
                 bypass_check=True)

    out_files = [
        join(
            root,
            "out/caps/subjects/sub-ADNI018S4696/ses-M00/t1/spm/dartel/group-UnitTest/atlas_statistics",
            "sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-"
            + atlas + "_map-graymatter_statistics.tsv",
        ) for atlas in pipeline.parameters["atlases"]
    ]
    ref_files = [
        join(
            root,
            "ref/sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-"
            + atlas + "_map-graymatter_statistics.tsv",
        ) for atlas in pipeline.parameters["atlases"]
    ]

    # Compare the mean_scalar column of each atlas; NaNs compare equal.
    for out_file, ref_file in zip(out_files, ref_files):
        out_csv = pds.read_csv(out_file, sep="\t")
        ref_csv = pds.read_csv(ref_file, sep="\t")
        assert np.allclose(
            np.array(out_csv.mean_scalar),
            np.array(ref_csv.mean_scalar),
            rtol=1e-8,
            equal_nan=True,
        )

    clean_folder(join(root, "out", "caps"), recreate=False)
    clean_folder(join(working_dir, "T1VolumeParcellation"), recreate=False)
Example #3
0
def test_run_T1VolumeParcellation(cmdopt):
    """Run the T1VolumeParcellation pipeline and compare the produced
    atlas statistics against the reference TSV files.

    Args:
        cmdopt: working directory passed via the pytest command line.
    """
    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import T1VolumeParcellation
    # `pardir` was used below but never imported; unused `exists` removed.
    from os.path import dirname, join, abspath, pardir
    import shutil
    import pandas as pds
    import numpy as np

    working_dir = cmdopt
    # Resolve <repo>/data/T1VolumeParcellation relative to this test file.
    root = dirname(abspath(join(abspath(__file__), pardir)))
    root = join(root, 'data', 'T1VolumeParcellation')
    clean_folder(join(root, 'out', 'caps'), recreate=False)
    clean_folder(join(working_dir, 'T1VolumeParcellation'))

    # Copy data for use of pipeline
    shutil.copytree(join(root, 'in', 'caps'), join(root, 'out', 'caps'))

    # Instantiate pipeline
    pipeline = T1VolumeParcellation(caps_directory=join(root, 'out', 'caps'),
                                    tsv_file=join(root, 'in', 'subjects.tsv'))
    pipeline.parameters['group_id'] = 'UnitTest'
    pipeline.parameters['atlases'] = [
        'AAL2', 'LPBA40', 'Neuromorphometrics', 'AICHA', 'Hammers'
    ]
    pipeline.parameters['modulate'] = 'on'
    pipeline.base_dir = join(working_dir, 'T1VolumeParcellation')
    pipeline.build()
    pipeline.run(plugin='MultiProc',
                 plugin_args={'n_procs': 4},
                 bypass_check=True)

    out_files = [
        join(
            root,
            'out/caps/subjects/sub-ADNI018S4696/ses-M00/t1/spm/dartel/group-UnitTest/atlas_statistics',
            'sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-'
            + atlas + '_map-graymatter_statistics.tsv')
        for atlas in pipeline.parameters['atlases']
    ]
    ref_files = [
        join(
            root,
            'ref/sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-'
            + atlas + '_map-graymatter_statistics.tsv')
        for atlas in pipeline.parameters['atlases']
    ]

    # Compare the mean_scalar column of each atlas; NaNs compare equal.
    for out_file, ref_file in zip(out_files, ref_files):
        out_csv = pds.read_csv(out_file, sep='\t')
        ref_csv = pds.read_csv(ref_file, sep='\t')
        assert np.allclose(np.array(out_csv.mean_scalar),
                           np.array(ref_csv.mean_scalar),
                           rtol=1e-8,
                           equal_nan=True)

    clean_folder(join(root, 'out', 'caps'), recreate=False)
def test_instantiate_T1VolumeParcellation():
    """Smoke test: instantiate and build the T1VolumeParcellation pipeline."""
    # `pardir` was used below but never imported — fixed here.
    from os.path import dirname, join, abspath, pardir
    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import T1VolumeParcellation

    # Resolve <repo>/data/T1VolumeParcellation relative to this test file.
    root = dirname(abspath(join(abspath(__file__), pardir)))
    root = join(root, 'data', 'T1VolumeParcellation')

    parameters = {'group_label': 'UnitTest'}
    pipeline = T1VolumeParcellation(caps_directory=join(root, 'in', 'caps'),
                                    tsv_file=join(root, 'in', 'subjects.tsv'),
                                    parameters=parameters)
    pipeline.build()
def test_instantiate_T1VolumeParcellation():
    """Smoke test: instantiate and build the T1VolumeParcellation pipeline
    using the legacy per-key parameter style."""
    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import T1VolumeParcellation
    # `pardir` was used below but never imported — fixed here.
    from os.path import dirname, join, abspath, pardir

    # Resolve <repo>/data/T1VolumeParcellation relative to this test file.
    root = dirname(abspath(join(abspath(__file__), pardir)))
    root = join(root, 'data', 'T1VolumeParcellation')
    pipeline = T1VolumeParcellation(caps_directory=join(root, 'in', 'caps'),
                                    tsv_file=join(root, 'in', 'subjects.tsv'))
    pipeline.parameters['group_id'] = 'UnitTest'
    pipeline.parameters['atlases'] = [
        'AAL2', 'LPBA40', 'Neuromorphometrics', 'AICHA', 'Hammers'
    ]
    pipeline.parameters['modulate'] = 'on'
    pipeline.build()
Example #6
0
def test_instantiate_T1VolumeParcellation(cmdopt):
    """Smoke test: instantiate and build the pipeline from the input
    directory supplied on the pytest command line.

    Args:
        cmdopt: mapping with an "input" key pointing at the test-data root.
    """
    # `Path` and `fspath` had no visible import; imported locally to be safe.
    from os import fspath
    from pathlib import Path

    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import (
        T1VolumeParcellation, )

    input_dir = Path(cmdopt["input"])
    root = input_dir / "T1VolumeParcellation"

    parameters = {"group_label": "UnitTest"}
    pipeline = T1VolumeParcellation(
        caps_directory=fspath(root / "in" / "caps"),
        tsv_file=fspath(root / "in" / "subjects.tsv"),
        parameters=parameters,
    )
    pipeline.build()
Example #7
0
def test_instantiate_T1VolumeParcellation():
    """Smoke test: instantiate and build the T1VolumeParcellation pipeline."""
    # `pardir` was used below but never imported — fixed here.
    from os.path import abspath, dirname, join, pardir

    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import (
        T1VolumeParcellation, )

    # Resolve <repo>/data/T1VolumeParcellation relative to this test file.
    root = dirname(abspath(join(abspath(__file__), pardir)))
    root = join(root, "data", "T1VolumeParcellation")

    parameters = {"group_label": "UnitTest"}
    pipeline = T1VolumeParcellation(
        caps_directory=join(root, "in", "caps"),
        tsv_file=join(root, "in", "subjects.tsv"),
        parameters=parameters,
    )
    pipeline.build()
Example #8
0
def run_T1VolumeParcellation(
    input_dir: Path, output_dir: Path, ref_dir: Path, working_dir: Path
) -> None:
    """Run the T1VolumeParcellation pipeline and compare the produced
    atlas statistics against the reference TSV files.

    Args:
        input_dir: directory holding the input CAPS and subjects.tsv.
        output_dir: directory receiving the pipeline's CAPS output.
        ref_dir: directory holding the reference statistics TSVs.
        working_dir: nipype working directory for the pipeline.
    """
    import shutil
    # `fspath` had no visible import; imported locally to be safe.
    # NOTE(review): `Path` in the signature must already be module-level.
    from os import fspath

    import numpy as np
    import pandas as pds

    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_pipeline import (
        T1VolumeParcellation,
    )

    # Copy necessary data from in to out
    shutil.copytree(input_dir / "caps", output_dir / "caps", copy_function=shutil.copy)

    # Instantiate pipeline
    parameters = {"group_label": "UnitTest"}
    pipeline = T1VolumeParcellation(
        caps_directory=fspath(output_dir / "caps"),
        tsv_file=fspath(input_dir / "subjects.tsv"),
        base_dir=fspath(working_dir),
        parameters=parameters,
    )
    pipeline.build()
    pipeline.run(plugin="MultiProc", plugin_args={"n_procs": 4}, bypass_check=True)

    out_files = [
        fspath(
            output_dir
            / "caps"
            / "subjects"
            / "sub-ADNI018S4696"
            / "ses-M00"
            / "t1"
            / "spm"
            / "dartel"
            / "group-UnitTest"
            / "atlas_statistics"
            / (
                "sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-"
                + atlas
                + "_map-graymatter_statistics.tsv"
            )
        )
        for atlas in pipeline.parameters["atlases"]
    ]
    ref_files = [
        fspath(
            ref_dir
            / (
                "sub-ADNI018S4696_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability_space-"
                + atlas
                + "_map-graymatter_statistics.tsv"
            )
        )
        for atlas in pipeline.parameters["atlases"]
    ]

    # Compare the mean_scalar column of each atlas; NaNs compare equal.
    for out_file, ref_file in zip(out_files, ref_files):
        out_csv = pds.read_csv(out_file, sep="\t")
        ref_csv = pds.read_csv(ref_file, sep="\t")
        assert np.allclose(
            np.array(out_csv.mean_scalar),
            np.array(ref_csv.mean_scalar),
            rtol=1e-8,
            equal_nan=True,
        )