Example #1
def demo_spm_auditory(data_dir="/tmp/spm_auditory_data",
                      output_dir="/tmp/spm_auditory_output"):
    """Demo for SPM single-subject Auditory

    Parameters
    ----------
    data_dir: string, optional
        path on disk where the data is located, or where it will be
        downloaded to
    output_dir: string, optional
        path where output will be written

    """

    # fetch data
    spm_auditory_data = fetch_spm_auditory_data(data_dir)

    # subject data factory
    def subject_factory():
        subject_id = "sub001"

        yield SubjectData(subject_id=subject_id,
                          func=[spm_auditory_data.func],
                          output_dir=os.path.join(output_dir, subject_id))

    # invoke the demo runner to run the demo
    _demo_runner(subject_factory(), "SPM single-subject Auditory")
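A minimal way to drive this demo is a standard entry-point guard; the call below simply relies on the function's default paths and is an illustrative addition rather than part of the original example.

if __name__ == "__main__":
    # run the demo with its default data and output locations
    demo_spm_auditory()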
Example #2
def test_bug_fix_issue_36_on_realign():
    from pypreprocess.datasets import fetch_spm_auditory_data
    sd = fetch_spm_auditory_data("/tmp/spm_auditory/")

    # shouldn't throw an IndexError
    MRIMotionCorrection(n_sessions=8, quality=1.).fit(
        [sd.func[:2], sd.func[:3]] * 4).transform("/tmp")
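The fit call above feeds eight "sessions" of unequal length, built by alternating two- and three-scan truncations of the same run (hence n_sessions=8); spelled out, the argument is equivalent to the list below (illustrative only).

# the 8 sessions passed to fit() above, written out explicitly
sessions = [sd.func[:2], sd.func[:3],
            sd.func[:2], sd.func[:3],
            sd.func[:2], sd.func[:3],
            sd.func[:2], sd.func[:3]]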
Example #3
def demo_spm_auditory(data_dir="/tmp/spm_auditory_data", output_dir="/tmp/spm_auditory_output"):
    """Demo for SPM single-subject Auditory

    Parameters
    ----------
    data_dir: string, optional
        path on disk where the data is located, or where it will be
        downloaded to
    output_dir: string, optional
        path where output will be written

    """

    # fetch data
    spm_auditory_data = fetch_spm_auditory_data(data_dir)

    # subject data factory
    def subject_factory():
        subject_id = "sub001"

        yield SubjectData(
            subject_id=subject_id, func=[spm_auditory_data.func], output_dir=os.path.join(output_dir, subject_id)
        )

    # invoke the demo runner to run the demo
    _demo_runner(subject_factory(), "SPM single-subject Auditory")
Example #4
def test_bug_fix_issue_36_on_realign():
    from pypreprocess.datasets import fetch_spm_auditory_data
    sd = fetch_spm_auditory_data("/tmp/spm_auditory/")

    # shouldn't throw an IndexError
    MRIMotionCorrection(n_sessions=8, quality=1.).fit(
        [sd.func[:2], sd.func[:3]] * 4).transform("/tmp")
# additional imports this snippet needs; the nipy and pypreprocess module
# paths below are assumed from the examples this excerpt is drawn from
import os
import sys
import time

import numpy as np

from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
from nipype.caching import Memory as NipypeMemory
import nipype.interfaces.spm as spm

from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

# file containing configuration for preprocessing the data
this_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
jobfile = os.path.join(this_dir, "spm_auditory_preproc.ini")

# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "spm_auditory")


# fetch spm auditory data
fetch_spm_auditory_data(dataset_dir)

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct experimental paradigm
stats_start_time = time.ctime()
tr = 7.
n_scans = 96
_duration = 6
epoch_duration = _duration * tr
conditions = ['rest', 'active'] * 8
duration = epoch_duration * np.ones(len(conditions))
onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                    len(conditions))
paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
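The block design constructed above is fully determined by the constants at the top of the snippet: 16 alternating rest/active epochs, each lasting 6 scans of 7 s (42 s per epoch), covering the 96 scans, with onsets spaced one epoch apart. A small sanity check of that arithmetic, illustrative and not part of the original example:

# sanity-check the block design constructed above (illustrative only)
assert len(conditions) * _duration == n_scans        # 16 epochs * 6 scans == 96
assert np.allclose(np.diff(onset), epoch_duration)   # onsets every 42 s
assert duration.shape == (len(conditions),)          # one 42 s duration per epoch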
# run preproc pipeline
do_subjects_preproc(_abide_factory(), fwhm=[8, 8, 8],
                    output_dir=ABIDE_OUTPUT_DIR,
                    dataset_id='ABIDE',
                    # do_report=False,
                    # do_dartel=True
                    )

if False:  # this block is deliberately disabled
    for (with_anat, do_segment, do_normalize,
         fwhm, hard_link_output) in itertools.product(
            [False, True], [False, True], [False, True],
            [0, 8, [8, 8, 8]], [False, True]):
        # load spm auditory data

        sd = fetch_spm_auditory_data(os.path.join(
                os.environ['HOME'], 'CODE/datasets/spm_auditory'))
        subject_data1 = SubjectData(func=[sd.func],
                                    anat=sd.anat if with_anat else None)
        subject_data1.output_dir = "/tmp/kimbo/sub001/"

        # load spm multimodal fmri data
        sd = fetch_spm_multimodal_fmri_data(os.path.join(
                os.environ['HOME'], 'CODE/datasets/spm_multimodal_fmri'))
        subject_data2 = SubjectData(func=[sd.func1, sd.func2],
                                    anat=sd.anat if with_anat else None,
                                    session_id=['Session 1', 'Session 2'])
        subject_data2.output_dir = "/tmp/kiki/sub001/"

        do_subjects_preproc([subject_data1, subject_data2],
                            do_dartel=True,
                            do_segment=do_segment,
Example #7
"""
:Author: DOHMATOB Elvis Dopgima
:Synopsis: single_subject_pipeline.py demo

"""

import os
from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.purepython_preproc_utils import do_subject_preproc
from pypreprocess.subject_data import SubjectData
import nibabel

# fetch data
sd = fetch_spm_auditory_data(os.path.join(os.path.abspath('.'),
                                          "spm_auditory"))
sd.output_dir = "/tmp/sub001"
sd.func = [sd.func]

# preproc data
do_subject_preproc(sd.__dict__, concat=False, coregister=True,
                   stc=True, cv_tc=True, realign=True,
                   report=True)
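do_subject_preproc takes the subject as a plain dict, and the individual preprocessing steps are toggled with boolean keywords; a variant call that skips slice-timing correction, for instance, only flips the corresponding flag (illustrative, not part of the original example).

# same pipeline as above, but with slice-timing correction disabled
do_subject_preproc(sd.__dict__, concat=False, coregister=True,
                   stc=False, cv_tc=True, realign=True,
                   report=True)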
Example #8
    def _spm_auditory_factory():
        sd = fetch_spm_auditory_data(os.path.join(
                os.environ['HOME'], "CODE/datasets/spm_auditory"))

        return sd.func[0], sd.anat
# additional imports this snippet needs; the nipy and pypreprocess module
# paths below are assumed from the examples this excerpt is drawn from
import os
import sys
import time

import numpy as np

from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
from nipype.caching import Memory as NipypeMemory
import nipype.interfaces.spm as spm

from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc
from pypreprocess.reporting.base_reporter import ProgressReport

# file containing configuration for preprocessing the data
this_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
jobfile = os.path.join(this_dir, "spm_auditory_preproc.ini")

# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "spm_auditory")

# fetch spm auditory data
fetch_spm_auditory_data(dataset_dir)

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct experimental paradigm
stats_start_time = time.ctime()
tr = 7.
n_scans = 96
_duration = 6
epoch_duration = _duration * tr
conditions = ['rest', 'active'] * 8
duration = epoch_duration * np.ones(len(conditions))
onset = np.linspace(0, (len(conditions) - 1) * epoch_duration, len(conditions))
paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
hfcut = 2 * 2 * epoch_duration
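The cutoff is a period expressed in the same units as epoch_duration (seconds) and works out to four block durations; a quick check of the arithmetic, illustrative and not part of the original snippet:

# hfcut = 2 * 2 * 42 s = 168 s, i.e. four epoch durations
assert hfcut == 4 * epoch_duration == 168.0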
Example #10
    def _spm_auditory_factory():
        sd = fetch_spm_auditory_data(
            os.path.join(os.environ['HOME'], "CODE/datasets/spm_auditory"))

        return sd.func[0], sd.anat
"""
:Synopsis: Step-by-step example usage of the purepython_preproc_pipeline module
:Author: DOHMATOB Elvis Dopgima <*****@*****.**>

"""

import os

from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.slice_timing import fMRISTC
from pypreprocess.realign import MRIMotionCorrection
from pypreprocess.coreg import Coregister
from pypreprocess.purepython_preproc_utils import do_subject_preproc  # used below
from pypreprocess.external.joblib import Memory

# create cache
mem = Memory('/tmp/stepwise_cache', verbose=100)

# fetch input data
sd = fetch_spm_auditory_data(
    os.path.join(os.environ['HOME'], "CODE/datasets/spm_auditory"))
n_sessions = 1  # this dataset has 1 session (i.e. 1 fMRI acquisition or run)

do_subject_preproc(sd.__dict__,
                   concat=False,
                   coregister=True,
                   stc=True,
                   cv_tc=True,
                   realign=True,
                   report=True)
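The excerpt imports fMRISTC, MRIMotionCorrection and Coregister but never reaches the step-by-step part its synopsis advertises. As a rough sketch of how the realignment step alone could look for this one-session dataset, reusing only the fit/transform pattern already shown in Example #2 (the quality value and output directory below are illustrative assumptions):

# illustrative sketch only: realign the single session's scans
mc = MRIMotionCorrection(n_sessions=n_sessions, quality=1.)
realigned = mc.fit([sd.func]).transform('/tmp/stepwise_cache')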
Example #12
# construct experimental paradigm
stats_start_time = time.ctime()
tr = 7.
n_scans = 96
_duration = 6
epoch_duration = _duration * tr
conditions = ['rest', 'active'] * 8
duration = epoch_duration * np.ones(len(conditions))
onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                    len(conditions))
paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
hfcut = 2 * 2 * epoch_duration

# fetch spm auditory data
_subject_data = fetch_spm_auditory_data(DATA_DIR)
subject_data = SubjectData()
subject_data.func = _subject_data.func
subject_data.anat = _subject_data.anat
subject_data.subject_id = "sub001"
subject_data.output_dir = os.path.join(OUTPUT_DIR, subject_data.subject_id)

# preprocess the data
results = do_subjects_preproc(
    [subject_data],
    output_dir=OUTPUT_DIR,
    func_to_anat=True,
    # fwhm=8.,
    do_segment=False,
    do_normalize=False,
    dataset_id="SPM single-subject auditory",