Example #1
def preproc(jobfile):
    '''Launch preprocessing on the data with the pypreprocess Python module.
    See: https://github.com/neurospin/pypreprocess/
    Usage: python preprocess.py <jobfile>

    Keyword arguments:
    jobfile -- text file configuring the preprocessing steps
    '''
    do_subjects_preproc(jobfile, report=True)
def run_subject_preproc(jobfile, subject, session=None):
    """ Create jobfile and run it on """
    output_name = os.path.join(
        '/tmp', os.path.basename(jobfile)[:-4] + '_%s.ini' % subject)
    _adapt_jobfile(jobfile, subject, output_name, session)
    # Read the jobfile
    list_subjects, params = _generate_preproc_pipeline(output_name)
    # Preprocess and dump the data
    subject_data = do_subjects_preproc(output_name, report=True)
    return subject_data
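A hedged usage sketch for the two helpers above (the default jobfile name and the subject id are hypothetical):

import sys

if __name__ == "__main__":
    # hypothetical default; adapt to your own configuration file
    jobfile = sys.argv[1] if len(sys.argv) > 1 else "preproc_config.ini"
    preproc(jobfile)  # whole-dataset variant
    # per-subject variant, with a hypothetical subject id:
    # subject_data = run_subject_preproc(jobfile, "sub001")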
# file containing configuration for preprocessing the data
this_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
jobfile = os.path.join(this_dir, "spm_auditory_preproc.ini")

# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "spm_auditory")


# fetch spm auditory data
fetch_spm_auditory_data(dataset_dir)

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct experimental paradigm
stats_start_time = time.ctime()
tr = 7.
n_scans = 96
_duration = 6
epoch_duration = _duration * tr
conditions = ['rest', 'active'] * 8
duration = epoch_duration * np.ones(len(conditions))
onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                    len(conditions))
paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
hfcut = 2 * 2 * epoch_duration

# construct design matrix
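A minimal sketch of the design-matrix step this comment leads into, using the nipy API that matches BlockParadigm above (the hrf/drift settings are assumptions carried over from the other examples):

from nipy.modalities.fmri.design_matrix import make_dmtx

frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
design_matrix = make_dmtx(frametimes, paradigm,
                          hrf_model='canonical with derivative',
                          drift_model='cosine', hfcut=hfcut)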
Example #4
# fetch spm multimodal_faces data
subject_data = fetch_spm_multimodal_fmri_data(dataset_dir)

# preprocess the data
subject_id = "sub001"
subject_data = SubjectData(output_dir=os.path.join(dataset_dir, "pypreprocess",
                                                   subject_id),
                           subject_id=subject_id,
                           func=[subject_data.func1, subject_data.func2],
                           anat=subject_data.anat,
                           trials_ses1=subject_data.trials_ses1,
                           trials_ses2=subject_data.trials_ses2)
subject_data = do_subjects_preproc([subject_data],
                                   realign=True,
                                   coregister=True,
                                   segment=True,
                                   normalize=True)[0]

# experimental paradigm meta-params
stats_start_time = time.ctime()
tr = 2.
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
hfcut = 128.

# make design matrices
first_level_effects_maps = []
mask_images = []
design_matrices = []
for x in range(2):
Example #5
"""
Synopsis: Run pypreprocess using dataset-specific configuration file given
at command line.
Author: DOHMATOB Elvis Dopgima <*****@*****.**> <*****@*****.**>

"""

import sys
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

if __name__ == "__main__":
    # sanitize command-line usage
    if len(sys.argv) < 2:
        print "\r\nUsage: python %s </path/to/job.conf>\r\n" % (sys.argv[0])
        print ("Example:\r\npython %s ~/CODE/datasets/spm_multimodal"
               "/job.conf\r\n") % sys.argv[0]
        sys.exit(1)

    # consume config file and run pypreprocess back-end
    do_subjects_preproc(sys.argv[1])
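The script above only consumes a configuration file whose contents are never shown. A purely illustrative sketch of such a file (the only option name confirmed by these examples is include_only_these_subject_ids, from Example #10; the section name and all other keys are assumptions, not pypreprocess's documented schema):

# job.conf / job.ini -- hypothetical contents
[config]
dataset_dir = /path/to/dataset
output_dir = /path/to/output
include_only_these_subject_ids = sub001
report = True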
Example #7
def preproc(jobfile):
    subject_data = do_subjects_preproc(jobfile, report=True)
    return subject_data
"""

# standard imports
import sys
import os

# import API for preprocessing business
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

# input data-grabber for the NYU resting-state data
from pypreprocess.datasets import fetch_nyu_rest

# file containing configuration for preprocessing the data
this_dir = os.path.dirname(sys.argv[0])
jobfile = os.path.join(this_dir, "nyu_rest_preproc.conf")

# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "nyu_rest")


# # fetch NYU rest data
# fetch_nyu_rest(data_dir=dataset_dir)

# preprocess the data
results = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)
assert len(results) == 1
for subject_id in set([os.path.basename(os.path.dirname(x))
                       for x in haxby_data.func]):
    # instantiate subject_data object
    subject_data = SubjectData()
    subject_data.subject_id = subject_id
    subject_data.session_id = "haxby2001"

    # set func
    subject_data.func = [x for x in haxby_data.func if subject_id in x]

    assert len(subject_data.func) == 1
    subject_data.func = subject_data.func[0]

    # set anat
    subject_data.anat = [x for x in haxby_data.anat if subject_id in x]
    assert len(subject_data.anat) == 1
    subject_data.anat = subject_data.anat[0]

    # set subject output directory
    subject_data.output_dir = os.path.join(OUTPUT_DIR,
                                           subject_data.subject_id)

    # add this subject to list
    subjects.append(subject_data)

# do preprocessing proper
results = do_subjects_preproc(subjects, output_dir=OUTPUT_DIR,
                              dataset_id="HAXBY 2001", realign=False,
                              coregister=False, dartel=DARTEL,
                              tsdiffana=False,
                              dataset_description=DATASET_DESCRIPTION)
Example #10
    
    # Process subjects one at a time; otherwise preprocessing fails when many
    # subjects are run at once
    for sub in subs:
        file_template = "/volatile/test/pypreprocess/test_localizer_bids/script/config_template.ini"
        new_text = ""
        with open(file_template, "r") as fichier:
            for line in fichier.readlines():
                if "include_only_these_subject_ids" in line:
                    new_line = "include_only_these_subject_ids = " + sub + "\n\n"
                else:
                    new_line = line
                new_text = new_text + new_line
    
        jobfile = "/volatile/test/pypreprocess/test_localizer_bids/script/config.ini"
        file_to_write = open(jobfile, "w")
        file_to_write.write(new_text)
        file_to_write.close()

        # Preproc
        subject_data = do_subjects_preproc(jobfile, report=True)
        print(subject_data)
        
        # Launch first level for each subject
        for subject in subject_data:
            z_maps = first_level(subject)
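
A sketch of the same per-subject rewrite using configparser instead of raw line edits (it assumes the option lives in a [config] section, which these examples do not show):

import configparser

def write_subject_jobfile(template_path, jobfile_path, subject_id,
                          section="config"):
    # copy the template, overriding only include_only_these_subject_ids
    parser = configparser.ConfigParser()
    parser.read(template_path)
    parser.set(section, "include_only_these_subject_ids", subject_id)
    with open(jobfile_path, "w") as fh:
        parser.write(fh)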

"""
:Author: DOHMATOB Elvis Dopgima
:Synopsis: Preprocessing of NYU rest data.
"""

import sys
import os

# import API for preprocessing business
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

# input data-grabber for the NYU resting-state data
from nilearn.datasets import fetch_nyu_rest

# file containing configuration for preprocessing the data
jobfile = os.path.join(os.path.dirname(sys.argv[0]), "nyu_rest_preproc.ini")

# fetch NYU rest data
sd = fetch_nyu_rest()

# preprocess the data
dataset_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(sd.anat_skull[0]))))
results = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)
Example #14
# fetch spm auditory data
_subject_data = fetch_spm_auditory_data(DATA_DIR)
subject_data = SubjectData()
subject_data.func = _subject_data.func
subject_data.anat = _subject_data.anat
subject_data.subject_id = "sub001"
subject_data.output_dir = os.path.join(OUTPUT_DIR, subject_data.subject_id)

# preprocess the data
results = do_subjects_preproc(
    [subject_data],
    output_dir=OUTPUT_DIR,
    func_to_anat=True,
    # fwhm=8.,
    do_segment=False,
    do_normalize=False,
    dataset_id="SPM single-subject auditory",
    dataset_description=DATASET_DESCRIPTION,
    do_shutdown_reloaders=False,
    )

# collect preprocessed data
fmri_files = results[0]['func']
fmri_4D_filename = os.path.join(subject_data.output_dir,
                                "fmri_4D_preproc.nii.gz")
do_3Dto4D_merge(fmri_files, output_filename=fmri_4D_filename)
anat_file = results[0]['anat']

# construct design matrix
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
def main(data_dir, output_dir, exclusions=None, dataset_id=None,
         n_jobs=-1):
    """Main function for preprocessing (and analysis ?)

    Parameters
    ----------

    returns list of Bunch objects with fields anat, func, and subject_id
    for each preprocessed subject

    """
    exclusions = [] if exclusions is None else exclusions

    # glob for subject ids
    subject_ids = [
        os.path.basename(x)
        for x in glob.glob(os.path.join(data_dir,
                                        subject_id_wildcard))]

    model_dirs = glob.glob(os.path.join(
        data_dir, subject_ids[0], 'model', '*'))

    session_ids = [
        os.path.basename(x)
        for x in glob.glob(os.path.join(model_dirs[0], 'onsets', '*'))]

    session_ids.sort()
    subject_ids.sort()

    # generator that produces subject data
    def subject_factory():
        for subject_id in subject_ids:
            if subject_id in exclusions:
                continue

            # construct subject data structure
            subject_data = SubjectData()
            subject_data.session_id = session_ids
            subject_data.subject_id = subject_id
            subject_data.func = []

            # glob for bold data
            has_bad_sessions = False
            for session_id in subject_data.session_id:
                bold_dir = os.path.join(
                    data_dir,
                    "%s/BOLD/%s" % (subject_id, session_id))

                # # extract .nii.gz to .nii
                # unzip_nii_gz(bold_dir)

                # glob bold data for this session
                func = glob.glob(os.path.join(bold_dir, "bold.nii.gz"))

                # check that this session is OK (has bold data, etc.)
                if not func:
                    has_bad_sessions = True
                    break

                subject_data.func.append(func[0])

            # exclude subject if necessary
            if has_bad_sessions:
                continue

            # glob for anatomical data
            # anat_dir = os.path.join(
            #     data_dir,
            #     "%s/anatomy" % subject_id)

            # # extract .nii.gz to .nii
            # unzip_nii_gz(anat_dir)

            # glob anatomical data proper
            subject_data.anat = glob.glob(
                os.path.join(
                    data_dir,
                    "%s/anatomy/highres001_brain.nii.gz" % subject_id))[0]

            # set subject output dir (all calculations for
            # this subject go here)
            subject_data.output_dir = os.path.join(
                    output_dir,
                    subject_id)

            yield subject_data

    # # do preprocessing proper
    # report_filename = os.path.join(output_dir,
    #                                "_report.html")
    return do_subjects_preproc(
        subject_factory(),
        n_jobs=n_jobs,
        dataset_id=dataset_id,
        output_dir=output_dir,
        deleteorient=True,  # some openfmri data have garbage orientation
        dartel=DO_DARTEL,
        # do_cv_tc=False,
        dataset_description=DATASET_DESCRIPTION,
        # report_filename=report_filename,
        # do_shutdown_reloaders=True
        )
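A hedged invocation sketch for main above (all paths and the dataset id are hypothetical; subject_id_wildcard, DO_DARTEL and DATASET_DESCRIPTION are module-level globals the function body assumes):

if __name__ == '__main__':
    results = main('/path/to/openfmri_dataset', '/path/to/output',
                   exclusions=None, dataset_id='ds000xxx', n_jobs=4)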
Example #16
"""
Synopsis: Run pypreprocess using dataset-specific configuration file given
at command line.
Author: DOHMATOB Elvis Dopgima <*****@*****.**> <*****@*****.**>

"""

import sys
import matplotlib
matplotlib.use('Agg')
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

if __name__ == "__main__":
    # sanitize command-line usage
    if len(sys.argv) < 2:
        print "\r\nUsage: python %s </path/to/preproc/job.ini>\r\n" % (
            sys.argv[0])
        print(
            "Example:\r\npython %s scripts/HCP_tfMRI_MOTOR_preproc"
            ".ini\r\n") % sys.argv[0]
        sys.exit(1)

    # consume config file and run pypreprocess back-end
    do_subjects_preproc(sys.argv[1])
def run_dmri_pipeline(subject_session, do_topup=True, do_edc=True):
    subject, session = subject_session.split('/')
    data_dir = os.path.join(source_dir, subject_session, 'dwi')
    write_dir = os.path.join('/neurospin/ibc/derivatives', subject_session)
    dwi_dir = os.path.join(write_dir, 'dwi')
    
    # cache directory for intermediate results (used by both topup and
    # the eddy-current correction below)
    mem = Memory('/neurospin/tmp/bthirion/cache_dir')

    # Apply topup to the images
    if do_topup:
        imgs = sorted(glob.glob('%s/*.nii.gz' % data_dir))
        se_maps = [
            os.path.join(source_dir, subject_session, 'fmap',
                         '%s_%s_dir-1_epi.nii.gz' % (subject, session)),
            os.path.join(source_dir, subject_session, 'fmap',
                         '%s_%s_dir-0_epi.nii.gz' % (subject, session))]
        
        fsl_topup(se_maps, imgs, mem, write_dir, 'dwi')

    # Then proceed with eddy-current correction
    # get the images
    imgs = sorted(glob.glob(os.path.join(dwi_dir, 'dc*run*.nii.gz')))
    out = os.path.join(dwi_dir, 'dc%s_%s_dwi.nii.gz' %
                       (subject, session))
    img = concat_images(imgs, out)

    # get the bvals/bvec
    file_bvals = sorted(glob.glob('%s/*.bval' % data_dir))
    bvals = np.concatenate([np.loadtxt(fbval) for fbval in sorted(file_bvals)])
    bvals_file = os.path.join(dwi_dir, 'dc%s_%s_dwi.bval' % (subject, session)) 
    np.savetxt(bvals_file, bvals)
    file_bvecs = sorted(glob.glob('%s/*.bvec' % data_dir))
    bvecs = np.hstack([np.loadtxt(fbvec) for fbvec in sorted(file_bvecs)])
    bvecs_file = os.path.join(dwi_dir, 'dc%s_%s_dwi.bvec' % (subject, session))
    np.savetxt(bvecs_file, bvecs)
    
    if do_edc:
        eddy_current_correction(out, bvals_file, bvecs_file, dwi_dir, mem)

    ############################################################################
    # Proceed with registration to anatomical image
    from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc
    ini_file = adapt_ini_file("ini_files/IBC_preproc_dwi.ini", subject, session)
    subject_data = do_subjects_preproc(ini_file,
                                       dataset_dir=write_dir)[0]
    
    ############################################################################
    # do the tractography
    subject, session = subject_session.split('/')

    # concatenate dmri files into one
    img = nib.load(glob.glob(os.path.join(dwi_dir, 'eddc*.nii*'))[-1])
    
    # load the data
    gtab = gradient_table(bvals, bvecs, b0_threshold=10)
    # Create a brain mask
    # Anatomical mask
    from nilearn.image import math_img, resample_to_img, threshold_img
    anat_mask = math_img(" img1 + img2 ",
                   img1=subject_data.mwgm, img2=subject_data.mwwm)
    anat_mask = resample_to_img(anat_mask, img)
    anat_mask = math_img(" img > .5", img=anat_mask) 
    anat_mask.to_filename(os.path.join(dwi_dir, 'anat_mask.nii.gz'))
    mask = anat_mask.get_data()
    tractography(img, gtab, mask, dwi_dir)
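A usage sketch for run_dmri_pipeline (the subject/session label is hypothetical; source_dir and the other globals are assumed to be defined at module level, as the function body requires):

if __name__ == '__main__':
    run_dmri_pipeline('sub-01/ses-01', do_topup=True)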
Example #18
def preproc_abide_institute(institute_id, abide_data_dir, abide_output_dir,
                            do_dartel=True,
                            do_report=True,
                            n_jobs=-1,
                            ):
    """Preprocesses a given ABIDE institute

    """

    # set institute output dir
    institute_output_dir = os.path.join(abide_output_dir, institute_id)
    if not os.path.exists(institute_output_dir):
        os.makedirs(institute_output_dir)

    # set subject id wildcard for globbing institute subjects
    subject_id_wildcard = "%s_*/%s_*" % (institute_id, institute_id)

    # glob for subject ids
    subject_ids = [os.path.basename(x)
                   for x in glob.glob(os.path.join(abide_data_dir,
                                                   subject_id_wildcard))]

    # sort the ids
    subject_ids.sort()

    ignored_subject_ids = []

    # generator that produces subject data
    def subject_factory():
        for subject_id in subject_ids:
            subject_data = SubjectData()
            subject_data.subject_id = subject_id

            try:
                subject_data.func = glob.glob(
                    os.path.join(
                        abide_data_dir,
                        "%s/%s/scans/rest*/resources/NIfTI/files/rest.nii" % (
                            subject_id, subject_id)))[0]
            except IndexError:
                ignored_because = "no rest data found"
                print("Ignoring subject %s (%s)" % (subject_id,)
                                                    ignored_because)
                ignored_subject_ids.append((subject_id, ignored_because))
                continue

            try:
                subject_data.anat = glob.glob(
                    os.path.join(
                        abide_data_dir,
                        "%s/%s/scans/anat/resources/NIfTI/files/mprage.nii" % (
                            subject_id, subject_id)))[0]
            except IndexError:
                if do_dartel:
                    # can't do DARTEL under such conditions
                    continue

                try:
                    subject_data.hires = glob.glob(
                        os.path.join(
                            abide_data_dir,
                            ("%s/%s/scans/hires/resources/NIfTI/"
                             "files/hires.nii") % (subject_id, subject_id)))[0]
                except IndexError:
                    ignored_because = "no anat/hires data found"
                    print("Ignoring subject %s (%s)" % (subject_id,)
                                                        ignored_because)
                    ignored_subject_ids.append((subject_id, ignored_because))
                    continue

            subject_data.output_dir = os.path.join(institute_output_dir,
                                                   subject_id)

            # yield data for this subject
            yield subject_data

    # do preprocessing proper
    report_filename = os.path.join(institute_output_dir,
                                   "_report.html")
    do_subjects_preproc(
        subject_factory(),
        dataset_id=institute_id,
        output_dir=institute_output_dir,
        do_report=do_report,
        do_dartel=do_dartel,
        dataset_description="%s" % DATASET_DESCRIPTION.replace(
            "%s",
            institute_id),
        report_filename=report_filename,
        do_shutdown_reloaders=True,)

    for subject_id, ignored_because in ignored_subject_ids:
        print("Ignored %s because %s" % (subject_id, ignored_because))
        subject_data.session_id = "haxby2001"

        # set func
        subject_data.func = [x for x in haxby_data.func if subject_id in x]

        assert len(subject_data.func) == 1
        subject_data.func = subject_data.func[0]

        # set anat
        subject_data.anat = [x for x in haxby_data.anat if subject_id in x]
        assert len(subject_data.anat) == 1
        subject_data.anat = subject_data.anat[0]

        # set subject output directory
        subject_data.output_dir = os.path.join(OUTPUT_DIR,
                                               subject_data.subject_id)

        yield subject_data

# do preprocessing proper
results = do_subjects_preproc(
    subject_factory(),
    output_dir=OUTPUT_DIR,
    dataset_id="HAXBY 2001",
    do_realign=False,
    do_coreg=False,
    do_dartel=DO_DARTEL,
    do_cv_tc=False,
    dataset_description=DATASET_DESCRIPTION,
    )
Example #20
subject_data.func = _subject_data.func
subject_data.anat = _subject_data.anat

output_dir = os.path.join(_subject_data.data_dir, "pypreprocess_output")
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
subject_data.output_dir = os.path.join(
    output_dir, subject_data.subject_id)



"""preprocess the data"""
results = do_subjects_preproc(
    [subject_data],
    output_dir=output_dir,
    dataset_id="FSL FEEDS single-subject",
    dataset_description=DATASET_DESCRIPTION,
    do_shutdown_reloaders=False,
    )

"""collect preprocessed data"""
fmri_files = results[0]['func']
anat_file = results[0]['anat']

"""specify contrasts"""
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
I = np.eye(len(names))
for i in range(2):
    contrasts['%s' % names[2 * i]] = I[2 * i]
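The contrast vectors built above would typically be fed to a GLM fit. A sketch using the nistats-era FirstLevelModel (an assumption, since these snippets predate that API; fmri_files and design_matrix come from the code above):

from nistats.first_level_model import FirstLevelModel

glm = FirstLevelModel().fit(fmri_files, design_matrices=[design_matrix])
z_maps = {name: glm.compute_contrast(con, output_type='z_score')
          for name, con in contrasts.items()}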
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 13:48:51 2015

@author: [email protected]
"""


from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

jobfile = 'pipeline_1.ini'

# preprocess the data
results = do_subjects_preproc(jobfile)
"""Include pypreprocess for fMRI data preprocessing."""
import os
import sys
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

config = sys.argv[-1]
_, extension = os.path.splitext(config)

if extension != '.ini':
    print('%s is not an .ini file' % config)
else:
    # preprocess the data
    results = do_subjects_preproc(config)
        # set func
        subject_data.func = [x for x in haxby_data.func if subject_id in x]

        assert len(subject_data.func) == 1
        subject_data.func = subject_data.func[0]

        # set anat
        subject_data.anat = [x for x in haxby_data.anat if subject_id in x]
        assert len(subject_data.anat) == 1
        subject_data.anat = subject_data.anat[0]

        # set subject output directory
        subject_data.output_dir = os.path.join(OUTPUT_DIR,
                                               subject_data.subject_id)

        yield subject_data


# do preprocessing proper
results = do_subjects_preproc(
    subject_factory(),
    output_dir=OUTPUT_DIR,
    dataset_id="HAXBY 2001",
    realign=False,
    coregister=False,
    dartel=DARTEL,
    cv_tc=False,
    dataset_description=DATASET_DESCRIPTION,
)
Example #26
duration = epoch_duration * np.ones(len(conditions))
onset = np.linspace(0, (len(conditions) - 1) * epoch_duration, len(conditions))
paradigm = pd.DataFrame({
    'onset': onset,
    'duration': duration,
    'name': conditions
})

hfcut = 2 * 2 * epoch_duration
fd = open(sd.func[0].split(".")[0] + "_onset.txt", "w")
for c, o, d in zip(conditions, onset, duration):
    fd.write("%s %s %s\r\n" % (c, o, d))
fd.close()

# preprocess the data
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]

# construct design matrix
nscans = len(subject_data.func[0])
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
drift_model = 'Cosine'
hrf_model = 'spm + derivative'
design_matrix = make_design_matrix(frametimes,
                                   paradigm,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=hfcut)

# plot and save design matrix
ax = plot_design_matrix(design_matrix)
ax.set_position([.05, .25, .9, .65])
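The comment above says "plot and save", while the code only plots; a one-line save sketch (the filename is hypothetical):

ax.get_figure().savefig('design_matrix.png', dpi=200, bbox_inches='tight')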
        subject_data = SubjectData()
        subject_data.subject_id = os.path.basename(os.path.dirname(
                os.path.dirname(scans)))
        subject_data.func = os.path.join(scans,
                                         "rest/resources/NIfTI/files/rest.nii")
        subject_data.anat = os.path.join(
            scans, "anat/resources/NIfTI/files/mprage.nii")
        subject_data.output_dir = os.path.join(ABIDE_OUTPUT_DIR,
                                               subject_data.subject_id)

        yield subject_data

# run preproc pipeline
do_subjects_preproc(_abide_factory(), fwhm=[8, 8, 8],
                    output_dir=ABIDE_OUTPUT_DIR,
                    dataset_id='ABIDE',
                    # do_report=False,
                    # do_dartel=True
                    )

if 0x0:
    for (with_anat, do_segment, do_normalize, fwhm,
         hard_link_output) in itertools.product([False, True], [False, True],
                                                [False, True],
                                                [0, 8, [8, 8, 8]],
                                                [False, True]):
        # load spm auditory data
        sd = fetch_spm_auditory_data(
            os.path.join(os.environ['HOME'], 'CODE/datasets/spm_auditory'))
        subject_data1 = SubjectData(func=[sd.func],
                                    anat=sd.anat if with_anat else None)
        subject_data1.output_dir = "/tmp/kimbo/sub001/"
Example #29

# standard imports
import sys
import os

# import API for preprocessing business
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

# input data-grabber for the NYU resting-state data
from pypreprocess.datasets import fetch_nyu_rest

# file containing configuration for preprocessing the data
this_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
jobfile = os.path.join(this_dir, "nyu_rest_preproc.ini")

# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "nyu_rest")

# fetch NYU rest data
fetch_nyu_rest(data_dir=dataset_dir)

# preprocess the data
results = do_subjects_preproc(jobfile,
                              dataset_dir=os.path.join(dataset_dir,
                                                       "nyu_rest"))
assert len(results) == 1
        if len(fmri_file) > 0:
            delete_scans_fmri(fmri_file[0])


###
def clean_dirs_adni():
    """ Remove all processed data
    """
    fmri_paths = sorted(glob.glob(os.path.join(BASE_DIR, 's*')))
    for fmri_path in fmri_paths:
        fmri_file = glob.glob(os.path.join(fmri_path, 'func', 'z*.nii'))
        for f in fmri_file:
            os.remove(f)
            print(f, 'removed')


"""
# Delete first 3 scans
clean_dirs_adni()
delete_scans_adni()
"""

jobfile = 'preprocess_sample_config.ini'
dataset_dir = BASE_DIR

# sourcing FSL
os.system('source /etc/fsl/4.1/fsl.sh')

# preprocess the data
results = do_subjects_preproc(jobfile, dataset_dir=BASE_DIR)
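Note that source invoked through os.system runs in a child shell and cannot modify this Python process's environment. A sketch of one workaround, running the FSL setup and a dependent command in the same shell (the echo command is a placeholder):

import subprocess

# run fsl.sh and the command that needs it in a single bash invocation,
# so the environment exported by fsl.sh is visible to that command
subprocess.call('source /etc/fsl/4.1/fsl.sh && echo "FSL ready"',
                shell=True, executable='/bin/bash')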
        subject_data.anat = os.path.join(
            data_dir, subject, session, 'anat',
            '%s_%s_acq-highres_T1w.nii' % (subject, session))
        subject_data.func = []
        subject_data.output_dir = os.path.join(data_dir, subject, session,
                                               'anat', 'dartel')
        # yield data for this subject
        yield subject_data


# do preprocessing proper
report_filename = os.path.join(output_dir, '_report.html')

do_subjects_preproc(
    subject_factory(),
    dataset_id='ibc',
    output_dir=output_dir,
    do_report=True,
    do_dartel=True,
    dataset_description="ibc",
    report_filename=report_filename,
    do_shutdown_reloaders=True,
)

# Create mean images for masking and display
wanats = sorted(
    glob.glob(
        os.path.join(data_dir, 'sub-*', 'ses-*', 'anat', 'dartel',
                     'w*_ses-*_acq-highres_T1w.nii.gz')))
template = mean_img(wanats)
template.to_filename(os.path.join(output_dir, 'highres_T1avg.nii.gz'))

mgms = sorted(
    glob.glob(