Example #1
"""
:Author: DOHMATOB Elvis Dopgima
:Synopsis: single_subject_pipeline.py demo

"""

import os
from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.purepython_preproc_utils import do_subject_preproc

# fetch data
sd = fetch_spm_auditory_data(os.path.join(os.path.abspath('.'),
                                          "spm_auditory"))
sd.output_dir = "/tmp/sub001"
sd.func = [sd.func]  # wrap in a list: one item per session

# preproc data
do_subject_preproc(sd.__dict__,
                   concat=False,     # don't pre-concatenate volumes into 4D
                   coregister=True,  # coregister func -> anat
                   stc=True,         # slice-timing correction
                   cv_tc=True,       # CV-of-time-course diagnostics
                   realign=True,     # motion correction
                   report=True)      # generate HTML report
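
# NOTE (an aside, not part of the original demo): do_subject_preproc also
# returns the preprocessed subject data -- the last example on this page
# reuses its return value -- so the call above could capture the result,
# e.g. `preproc = do_subject_preproc(sd.__dict__, ...)`, for downstream use.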
Example #2

# NOTE: this function is excerpted from a larger multi-run (OpenfMRI-style)
# script; the imports below are a sketch of what it needs (the nipy imports
# reflect the old nipy API this code was written against). `make_paradigm`
# is a helper defined in the full script and `output_dir` a module-level
# variable there -- both are assumed here.
import os
import numpy as np
import nibabel
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.glm import FMRILinearModel
from pypreprocess.subject_data import SubjectData
from pypreprocess.purepython_preproc_utils import do_subject_preproc
from pypreprocess.nipype_preproc_spm_utils import (
    do_subject_preproc as nipype_do_subject_preproc)
from pypreprocess.reporting.base_reporter import ProgressReport, pretty_time
from pypreprocess.reporting.glm_reporter import generate_subject_stats_report


def _preprocess_and_analysis_subject(subject_data,
                                     do_normalize=False,
                                     fwhm=0.,
                                     slicer='z',
                                     cut_coords=6,
                                     threshold=3.,
                                     cluster_th=15):
    """
    Preprocesses the subject and then fits (mass-univariate) GLM thereupon.

    """

    # sanitize run_ids: some runs are garbage and must be skipped, e.g.
    # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii
    run_ids = list(range(9))
    if subject_data['subject_id'] == "Sub14":
        run_ids = [0] + list(range(2, 9))
        subject_data['func'] = (subject_data['func'][:1]
                                + subject_data['func'][2:])
        subject_data['session_id'] = (subject_data['session_id'][:1]
                                      + subject_data['session_id'][2:])

    # sanitize subject output dir
    if 'output_dir' not in subject_data:
        subject_data['output_dir'] = os.path.join(
            output_dir, subject_data['subject_id'])

    # preprocess the data
    subject_data = do_subject_preproc(SubjectData(**subject_data),
                                      do_realign=True,
                                      do_coreg=True,
                                      do_report=False,
                                      do_cv_tc=False
                                      )
    assert subject_data.anat is not None

    # spatial normalization (optional)
    if do_normalize:
        subject_data = nipype_do_subject_preproc(
            subject_data,
            do_realign=False,
            do_coreg=False,
            do_segment=True,
            do_normalize=True,
            func_write_voxel_sizes=[3, 3, 3],
            anat_write_voxel_sizes=[2, 2, 2],
            fwhm=fwhm,
            hardlink_output=False,
            do_report=False
            )

    # chronometry
    stats_start_time = pretty_time()

    # to-be-merged lists, one item per run
    paradigms = []
    frametimes_list = []
    design_matrices = []  # one design matrix per run
    list_of_contrast_dicts = []  # one contrast dict per run
    n_scans = []
    for run_id in run_ids:
        _n_scans = len(subject_data.func[run_id])
        n_scans.append(_n_scans)

        # make paradigm
        paradigm = make_paradigm(subject_data.timing[run_id])

        # make design matrix
        tr = 2.
        drift_model = 'Cosine'
        hrf_model = 'Canonical With Derivative'
        hfcut = 128.
        frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            add_regs=np.loadtxt(
                subject_data.realignment_parameters[run_id]),
            add_reg_names=[
                'Translation along x axis',
                'Translation along y axis',
                'Translation along z axis',
                'Rotation along x axis',
                'Rotation along y axis',
                'Rotation along z axis'
                ]
            )

        # import matplotlib.pyplot as plt
        # design_matrix.show()
        # plt.show()

        paradigms.append(paradigm)
        design_matrices.append(design_matrix)
        frametimes_list.append(frametimes)

        # specify contrasts: with the 'Canonical With Derivative' HRF model,
        # condition columns come in (canonical, derivative) pairs, hence the
        # stride of 2 below
        contrasts = {}
        n_columns = len(design_matrix.names)
        for i in range(paradigm.n_conditions):
            contrasts[design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

        # more interesting contrasts"""
        contrasts['Famous-Unfamiliar'] = contrasts[
            'Famous'] - contrasts['Unfamiliar']
        contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar']
        contrasts['Famous-Scrambled'] = contrasts[
            'Famous'] - contrasts['Scrambled']
        contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled']
        contrasts['Unfamiliar-Scrambled'] = contrasts[
            'Unfamiliar'] - contrasts['Scrambled']
        contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled']

        list_of_contrast_dicts.append(contrasts)

    # important maps
    z_maps = {}
    effects_maps = {}

    # fit GLM
    print("\r\nFitting a GLM (this takes time) ...")
    fmri_glm = FMRILinearModel([nibabel.concat_images(sess_func)
                                for sess_func in subject_data.func],
                               [design_matrix.matrix
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
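    # (a brief gloss of nipy's FMRILinearModel.fit, added for clarity:
    # model='ar1' fits a first-order autoregressive noise model, and
    # do_scaling=True scales each session's data by its grand mean)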

    print "... done.\r\n"

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")

    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    print "... done.\r\n"

    # replicate contrasts across runs: FMRILinearModel expects one contrast
    # vector per session
    contrasts = dict((cid, [contrast_dict[cid]
                            for contrast_dict in list_of_contrast_dicts])
                     for cid in contrasts)

    # compute effects
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        z_map, eff_map = fmri_glm.contrast(
            contrast_val,
            con_id=contrast_id,
            output_z=True,
            output_stat=False,
            output_effects=True,
            output_variance=False
            )

        # store stat maps to disk
        for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
            map_dir = os.path.join(
                subject_data.output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path

            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # keep a single copy of each contrast vector (identical across runs)
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.items())

    # do stats report
    stats_report_filename = os.path.join(getattr(subject_data,
                                                 'reports_output_dir',
                                                 subject_data.output_dir),
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=threshold,
        cluster_th=cluster_th,
        slicer=slicer,
        cut_coords=cut_coords,
        anat=nibabel.load(subject_data.anat).get_data(),
        anat_affine=nibabel.load(subject_data.anat).get_affine(),
        design_matrices=design_matrices,
        subject_id=subject_data.subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_data.subject_id,

        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        drift_model=drift_model,
        hrf_model=hrf_model,
        paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id].__dict__)
                      for run_id in run_ids),
        frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id])
                        for run_id in run_ids),
        # fwhm=fwhm
        )

    ProgressReport().finish_dir(subject_data.output_dir)
    print "\r\nStatistic report written to %s\r\n" % stats_report_filename

    return contrasts, effects_maps, z_maps, mask_path
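
# A hypothetical usage sketch for the function above (the paths, ids and
# fwhm value are illustrative, not from the original script):
# subject_data = {'subject_id': 'Sub01',
#                 'func': ['/path/to/Sub01/run_%02i.nii' % (i + 1)
#                          for i in range(9)],
#                 'session_id': ['Run_%02i' % (i + 1) for i in range(9)],
#                 'anat': '/path/to/Sub01/anat.nii',
#                 'timing': [...],  # per-run timing info for make_paradigm
#                 'output_dir': '/tmp/Sub01'}
# contrasts, effects_maps, z_maps, mask_path = \
#     _preprocess_and_analysis_subject(subject_data, fwhm=8.)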
"""
Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr
Synopsis: single_subject_pipeline.py demo
"""

from pypreprocess.datasets import fetch_spm_multimodal_fmri
from pypreprocess.purepython_preproc_utils import do_subject_preproc

# fetch data
sd = fetch_spm_multimodal_fmri()
sd.output_dir = "/tmp/sub001"
sd.func = [sd.func1, sd.func2]  # one item per session (two sessions)

# preproc data
do_subject_preproc(sd.__dict__,
                   concat=False,      # don't pre-concatenate volumes into 4D
                   coregister=True,   # coregister func -> anat
                   stc=True,          # slice-timing correction
                   tsdiffana=True,    # time-series diagnostics
                   realign=True,      # motion correction
                   report=True,       # generate HTML report
                   reslice=True)      # reslice realigned images
"""
Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr
Synopsis: single_subject_pipeline.py demo
"""

from pypreprocess.datasets import fetch_spm_multimodal_fmri
from pypreprocess.purepython_preproc_utils import do_subject_preproc

# fetch data
sd = fetch_spm_multimodal_fmri()
sd.output_dir = "/tmp/sub001"
sd.func = [sd.func1, sd.func2]

# preproc data
do_subject_preproc(sd.__dict__, concat=False, coregister=True, stc=True,
                   tsdiffana=True, realign=True, report=True, reslice=True)
if __name__ == "__main__":

    for subject in subjects:
        subject_dir = os.path.join(work_dir, subject)
        t1_dir = os.path.join(subject_dir, 't1')
        fmri_dir = os.path.join(subject_dir, 'fmri')
        anat_image = glob.glob(os.path.join(t1_dir, 'anat*.nii'))[0]
        fmri_images = glob.glob(os.path.join(fmri_dir,
                                             'visualcategs/visu*.nii'))
        fmri_images += glob.glob(os.path.join(fmri_dir,
                                              'audiosentence/audio*.nii'))
        fmri_images += glob.glob(os.path.join(fmri_dir,
                                              'localizer/loc*.nii'))
        
        preproc_dict = {
            'n_sessions': len(fmri_images),
            'func': fmri_images,
            'anat': anat_image,
            'subject_id': subject,
            'output_dir': fmri_dir
        }

        # older-API equivalent of the call below, kept for reference:
        # do_subject_preproc(preproc_dict, concat=False, do_coreg=True,
        #                    do_stc=False, do_cv_tc=True, do_realign=True,
        #                    do_report=True)
        do_subject_preproc(preproc_dict,
                           coregister=True,
                           cv_tc=False,
                           realign=True,
                           report=True)
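
# The loop above processes subjects serially; a hedged sketch of a parallel
# variant with joblib (assuming subjects are independent, and using a
# hypothetical helper `make_preproc_dict` that wraps the glob logic above):
# from joblib import Parallel, delayed
# Parallel(n_jobs=4)(
#     delayed(do_subject_preproc)(make_preproc_dict(subject),
#                                 coregister=True, realign=True, report=True)
#     for subject in subjects)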
Example #5

import os
from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.purepython_preproc_utils import do_subject_preproc
import nibabel

# fetch data
sd = fetch_spm_auditory_data(os.path.join(os.path.abspath('.'),
                                          "spm_auditory"))

# preproc data, exercising each `write_output_images` mode and both
# pre-concatenation choices
for flag in range(3):
    for pre_concat in [True, False]:
        # pack data into a dict, the format understood by the pipeline
        subject_data = {
            'n_sessions': 1,  # number of sessions
            'func': [sd.func],  # functional (BOLD) images, 1 item/session
            'anat': sd.anat,  # anatomical (structural) image
            'subject_id': 'sub001',
            'output_dir': os.path.abspath('spm_auditory_preproc')
            }
        if pre_concat:
            # concatenate each session's 3D volumes into a single 4D image
            subject_data['func'] = [
                nibabel.concat_images(sess_func, check_affines=False)
                for sess_func in subject_data['func']]
        do_subject_preproc(subject_data,
                           stc=True,
                           fwhm=[8] * 3,
                           write_output_images=flag)
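
# What pre-concatenation does, in a minimal sketch (variable names are
# illustrative): it turns a session's list of 3D volumes into one 4D image.
# img_4d = nibabel.concat_images(sd.func, check_affines=False)
# print(img_4d.shape)  # (x, y, z, n_scans)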
Example #6

"""
:Author: DOHMATOB Elvis Dopgima
:Synopsis: single_subject_pipeline.py demo

"""

import os
from pypreprocess.datasets import fetch_spm_auditory_data
from pypreprocess.purepython_preproc_utils import do_subject_preproc
from pypreprocess._spike.pipeline_comparisons import execute_spm_auditory_glm

# fetch data
sd = fetch_spm_auditory_data(os.path.join(os.environ['HOME'],
                                          "CODE/datasets/spm_auditory"))

# pack data into a dict, the format understood by the pipeline
subject_data = {'n_sessions': 1,  # number of sessions
                'func': [sd.func],  # functional (BOLD) images 1 item/session
                'anat': sd.anat,  # anatomical (structural) image
                'subject_id': 'sub001',
                'output_dir': os.path.abspath('spm_auditory_preproc')
                }

# preproc data
subject_data = do_subject_preproc(subject_data,
                                  do_stc=True,
                                  fwhm=[8] * 3
                                  )

# run glm on data
execute_spm_auditory_glm(subject_data)