Exemple #1
0
def first_level(subject_dic):
    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4
    drift_model = 'blank'
    hrf_model = 'canonical'  # hemodynamic reponse function
    hfcut = 128.
    n_scans = 128

    # make design matrices
    mask_images = []
    design_matrices = []
    fmri_files = subject_dic['func']

    for x in xrange(len(fmri_files)):
        paradigm = paradigm_contrasts.localizer_paradigm()

        # build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            hfcut=hfcut,
        )
        design_matrices.append(design_matrix)

    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    #create output directory
    subject_session_output_dir = os.path.join(subject_dic['output_dir'],
                                              'res_stats')

    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Fit GLM
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(
        fmri_files,
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    print "Saving mask image %s" % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    mask_images.append(mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * 1,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir,
                '%s%s.nii.gz' % (subject_dic['subject_id'], contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # do stats report
    anat_img = nibabel.load(subject_dic['anat'])
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")

    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img.get_data(),
        anat_affine=anat_img.get_affine(),
        design_matrices=design_matrix,
        subject_id="sub001",
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_dic['session_id'],

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
    )

    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps
Exemple #2
0
# do stats report
# Load the subject's anatomical image as the background for report figures.
anat_img = nibabel.load(subject_data.anat)
stats_report_filename = os.path.join(subject_data.output_dir, "reports",
                                     "report_stats.html")
# Generate the per-subject HTML statistics report (thresholded z-maps on
# the anatomy, plus design-matrix and paradigm details).
# NOTE(review): ``design_matrices`` is given a single design matrix here
# while other call sites pass a list -- confirm the report generator
# accepts both forms.
# NOTE(review): ``subject_id`` is hard-coded to "sub001" although
# ``subject_data.subject_id`` is available (and used in the title) --
# verify this is intended.
generate_subject_stats_report(
    stats_report_filename,
    contrasts,
    z_maps,
    fmri_glm.mask,
    threshold=2.3,
    cluster_th=15,
    anat=anat_img.get_data(),
    anat_affine=anat_img.get_affine(),
    design_matrices=design_matrix,
    subject_id="sub001",
    start_time=stats_start_time,
    title="GLM for subject %s" % subject_data.subject_id,

    # additional ``kwargs`` for more informative report
    paradigm=paradigm.__dict__,
    TR=tr,
    n_scans=n_scans,
    hfcut=hfcut,
    frametimes=frametimes,
    drift_model=drift_model,
    hrf_model=hrf_model,
)

# Close the pending progress-report pages for this output directory.
ProgressReport().finish_dir(subject_data.output_dir)
print "Statistic report written to %s\r\n" % stats_report_filename
Exemple #3
0
# Write the HTML statistics report under <output_dir>/reports.
reports_dir = os.path.join(subject_data.output_dir, "reports")
stats_report_filename = os.path.join(reports_dir, "report_stats.html")
# Keep only the contrasts for which a z-map was actually computed.
contrasts = dict((contrast_id, contrasts[contrast_id])
                 for contrast_id in z_maps.keys())
generate_subject_stats_report(
    stats_report_filename,
    contrasts,
    z_maps,
    fmri_glm.masker_.mask_img_,
    design_matrices=[design_matrix],
    subject_id=subject_data.subject_id,
    anat=anat_file,
    cluster_th=50,  # we're only interested in this 'large' clusters
    start_time=stats_start_time,

    # additional ``kwargs`` for more informative report
    paradigm=paradigm,
    TR=TR,
    n_scans=n_scans,
    hfcut=hfcut,
    frametimes=frametimes,
    drift_model=drift_model,
    hrf_model=hrf_model,
    slicer='z'
    )

# shutdown main report page
# NOTE(review): this uses a bare ``output_dir`` rather than
# ``subject_data.output_dir`` -- confirm the intended directory.
ProgressReport().finish_dir(output_dir)

print "\r\nStatistic report written to %s\r\n" % stats_report_filename
Exemple #4
0
def first_level(subject_id):
    '''
    Launch the first-level analysis for one subject.

    ROOTDIR is an environment variable locating the data; it needs to be
    set up by the user.
    See: https://github.com/neurospin/pypreprocess/
    Usage: python first_level.py

    Keyword arguments:
    subject_id -- Name of the subject

    Returns a dict mapping each contrast id to the path of its z-map.
    '''

    # Configure paths: raw BIDS data in, processed stats out
    data_dir = os.path.join(os.environ["ROOTDIR"], "dataset", "bids_dataset",
                            subject_id)
    output_dir = os.path.join(os.environ["ROOTDIR"], "processed_data",
                              subject_id)
    subject_session_output_dir = os.path.join(output_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4  # repetition time (seconds)
    drift_model = 'blank'
    #hrf_model = 'canonical'  # hemodynamic reponse function
    hrf_model = 'spm'  # hemodynamic reponse function
    hfcut = 128.  # high-pass filter cutoff (seconds)
    n_scans = 128

    # Preparation of paradigm: read the events file of the localizer task
    events_file = glob.glob(
        os.path.join(data_dir, 'func/*_task-standartloc_events.tsv'))[0]
    print events_file
    paradigm = paradigm_contrasts.localizer_paradigm(events_file)

    # Build design matrix sampled at the scan acquisition times
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    design_matrix = make_design_matrix(
        frametimes,
        paradigm,
        hrf_model=hrf_model,
        drift_model=drift_model,
        period_cut=hfcut,
    )

    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # Fit GLM on the preprocessed (warped/realigned) functional run
    fmri_file = glob.glob(
        os.path.join(output_dir, 'func/wra*_task-standartloc_bold.nii.gz'))[0]
    print 'Fitting a GLM (this takes time)...'
    #    fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(fmri_files[0],
    #                           [design_matrix for design_matrix in design_matrices]
    #                           )
    #    fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(fmri_file,
    #                               [design_matrix for design_matrix in design_matrices]
    #                               )
    fmri_glm = FirstLevelGLM(noise_model='ar1',
                             standardize=False).fit(fmri_file, design_matrix)

    # Save computed mask
    # NOTE(review): ``mask_images`` is populated but never used afterwards.
    mask_images = []
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    nibabel.save(fmri_glm.masker_.mask_img_, mask_path)
    mask_images.append(mask_path)

    # Compute contrasts and write the resulting maps to disk
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.transform(
            [contrast_val] * 1,
            contrast_name=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # Store stat maps to disk, one sub-directory per map type
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    '%s%s.nii.gz' % (subject_id, contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # Do stats report on the normalized anatomical image
    anat_file = glob.glob(os.path.join(output_dir, 'anat/w*_T1w.nii.gz'))[0]
    anat_img = nibabel.load(anat_file)
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    # NOTE(review): ``anat`` receives the image object here, while other
    # call sites pass ``anat_img.get_data()`` -- confirm both are accepted.
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.masker_.mask_img_,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img,
        anat_affine=anat_img.get_affine(),
        design_matrices=[design_matrix],
        paradigm=paradigm,
        subject_id=subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_id,
        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
    )

    # Close the pending progress-report pages and return the z-map paths.
    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps
def run_suject_level1_glm(
        subject_data,
        readout_time=.01392,  # seconds
        tr=.72,
        dc=True,
        hrf_model="Canonical with Derivative",
        drift_model="Cosine",
        hfcut=100,
        regress_motion=True,
        slicer='ortho',
        cut_coords=None,
        threshold=3.,
        cluster_th=15,
        normalize=True,
        fwhm=0.,
        protocol="MOTOR",
        func_write_voxel_sizes=None,
        anat_write_voxel_sizes=None,
        **other_preproc_kwargs):
    """
    Function to do preproc + analysis for a single HCP subject (task fMRI).

    Reads the FSL design files of the two phase-encoding directions
    ('LR' and 'RL'), builds one design matrix per direction, fits a
    fixed-effect GLM over both, writes z- and effects-maps to disk and,
    if ``normalize`` is set, warps the maps, mask and anatomy into MNI
    space with SPM.

    Parameters
    ----------
    subject_data : object
        Subject description; attributes used here include ``data_dir``,
        ``output_dir``, ``subject_id``, ``func``, ``anat`` and (when
        ``normalize``) ``parameter_file`` and ``anat_output_dir``.
    protocol : str
        HCP task protocol name (e.g. "MOTOR"); selects the design files.
    normalize : bool
        Whether to warp results from native to MNI space.

    Returns
    -------
    subject_data with mask / effects_maps (and z_maps when not
    normalizing) attributes set, or None if a design file is missing.
    """

    # NOTE(review): ``add_regs_files`` is never reassigned, so the motion
    # regressors below are effectively disabled -- confirm intent.
    add_regs_files = None
    n_motion_regressions = 6
    subject_data.n_sessions = 2

    subject_data.tmp_output_dir = os.path.join(subject_data.output_dir, "tmp")
    if not os.path.exists(subject_data.tmp_output_dir):
        os.makedirs(subject_data.tmp_output_dir)

    if not os.path.exists(subject_data.output_dir):
        os.makedirs(subject_data.output_dir)

    # joblib-style cache for the expensive GLM step below
    mem = Memory(os.path.join(subject_data.output_dir, "cache_dir"),
                 verbose=100)

    # glob design files (.fsf), one per phase-encoding direction
    subject_data.design_files = [
        os.path.join(subject_data.data_dir,
                     ("MNINonLinear/Results/tfMRI_%s_%s/"
                      "tfMRI_%s_%s_hp200_s4_level1.fsf") %
                     (protocol, direction, protocol, direction))
        for direction in ['LR', 'RL']
    ]

    assert len(subject_data.design_files) == 2
    for df in subject_data.design_files:
        assert os.path.isfile(df), df

    # Dead code: 0x0 is falsy, so distortion correction is disabled.
    if 0x0:
        subject_data = _do_fmri_distortion_correction(
            subject_data,
            dc=dc,
            fwhm=fwhm,
            readout_time=readout_time,
            **other_preproc_kwargs)

    # chronometry
    stats_start_time = pretty_time()

    # merged lists, one entry per phase-encoding direction
    paradigms = []
    frametimes_list = []
    design_matrices = []
    # fmri_files = []
    n_scans = []
    # for direction, direction_index in zip(['LR', 'RL'], xrange(2)):
    for sess in xrange(subject_data.n_sessions):
        direction = ['LR', 'RL'][sess]
        # glob the design file
        # design_file = os.path.join(# _subject_data_dir, "tfMRI_%s_%s" % (
        # protocol, direction),
        design_file = subject_data.design_files[sess]
        #                    "tfMRI_%s_%s_hp200_s4_level1.fsf" % (
        # protocol, direction))
        if not os.path.isfile(design_file):
            print "Can't find design file %s; skipping subject %s" % (
                design_file, subject_data.subject_id)
            return

        # read the experimental setup from the FSL design file
        print "Reading experimental setup from %s ..." % design_file
        fsl_condition_ids, timing_files, fsl_contrast_ids, contrast_values = \
            read_fsl_design_file(design_file)
        print "... done.\r\n"

        # fix timing filenames: point them at this direction's EVs folder
        timing_files = [
            tf.replace("EVs", "tfMRI_%s_%s/EVs" % (protocol, direction))
            for tf in timing_files
        ]

        # make design matrix
        print "Constructing design matrix for direction %s ..." % direction
        _n_scans = nibabel.load(subject_data.func[sess]).shape[-1]
        n_scans.append(_n_scans)
        # motion regressor file for this session (None while
        # add_regs_files is None, see above)
        add_regs_file = add_regs_files[
            sess] if not add_regs_files is None else None
        design_matrix, paradigm, frametimes = make_dmtx_from_timing_files(
            timing_files,
            fsl_condition_ids,
            n_scans=_n_scans,
            tr=tr,
            hrf_model=hrf_model,
            drift_model=drift_model,
            hfcut=hfcut,
            add_regs_file=add_regs_file,
            add_reg_names=[
                'Translation along x axis', 'Translation along yaxis',
                'Translation along z axis', 'Rotation along x axis',
                'Rotation along y axis', 'Rotation along z axis',
                'Differential Translation along x axis',
                'Differential Translation along yaxis',
                'Differential Translation along z axis',
                'Differential Rotation along x axis',
                'Differential Rotation along y axis',
                'Differential Rotation along z axis'
            ][:n_motion_regressions] if not add_regs_files is None else None,
        )

        print "... done."
        paradigms.append(paradigm)
        frametimes_list.append(frametimes)
        design_matrices.append(design_matrix)

        # convert contrasts to dict
        contrasts = dict((
            contrast_id,
            # append zeros to end of contrast to match design
            np.hstack((
                contrast_value,
                np.zeros(len(design_matrix.names) - len(contrast_value)))))
                         for contrast_id, contrast_value in zip(
                             fsl_contrast_ids, contrast_values))

        # more interesting contrasts for the MOTOR protocol
        # (RH/LH = right/left hand, RF/LF = right/left foot)
        if protocol == 'MOTOR':
            contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH']
            contrasts['LH-RH'] = -contrasts['RH-LH']
            contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF']
            contrasts['LF-RF'] = -contrasts['RF-LF']
            contrasts['H'] = contrasts['RH'] + contrasts['LH']
            contrasts['F'] = contrasts['RF'] + contrasts['LF']
            # NOTE(review): subtracting (RF - LF) here differs from the
            # ``F`` definition (RF + LF) above -- confirm 'H-F' is not
            # meant to be H - F = RH + LH - (RF + LF).
            contrasts['H-F'] = contrasts['RH'] + contrasts['LH'] - (
                contrasts['RF'] - contrasts['LF'])
            contrasts['F-H'] = -contrasts['H-F']

        # keep only the differential ("-") contrasts
        contrasts = dict((k, v) for k, v in contrasts.iteritems() if "-" in k)

    # replicate contrasts across sessions
    contrasts = dict((cid, [cval] * 2) for cid, cval in contrasts.iteritems())

    # NOTE(review): duplicated assignment ``cache_dir = cache_dir = ...``
    # is harmless but looks like a leftover.
    cache_dir = cache_dir = os.path.join(subject_data.output_dir, 'cache_dir')
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    nipype_mem = NipypeMemory(base_dir=cache_dir)

    # Dead code: 0x0 is falsy, so smoothing is disabled.
    if 0x0:
        if np.sum(fwhm) > 0.:
            subject_data.func = nipype_mem.cache(spm.Smooth)(
                in_files=subject_data.func,
                fwhm=fwhm,
                ignore_exception=False,
            ).outputs.smoothed_files

    # fit GLM; wrapped in a closure so ``mem.cache`` can key the result
    # on (func, anat) -- the args themselves are only printed
    def tortoise(*args):
        print args
        print(
            'Fitting a "Fixed Effect" GLM for merging LR and RL '
            'phase-encoding directions for subject %s ...' %
            (subject_data.subject_id))
        fmri_glm = FMRILinearModel(
            subject_data.func,
            [design_matrix.matrix for design_matrix in design_matrices],
            mask='compute')
        fmri_glm.fit(do_scaling=True, model='ar1')
        print "... done.\r\n"

        # save computed mask
        mask_path = os.path.join(subject_data.output_dir, "mask.nii")
        print "Saving mask image to %s ..." % mask_path
        nibabel.save(fmri_glm.mask, mask_path)
        print "... done.\r\n"

        # compute z- and effects-maps for every contrast
        z_maps = {}
        effects_maps = {}
        map_dirs = {}
        for contrast_id, contrast_val in contrasts.iteritems():
            print "\tcontrast id: %s" % contrast_id
            z_map, eff_map = fmri_glm.contrast(contrast_val,
                                               con_id=contrast_id,
                                               output_z=True,
                                               output_effects=True)

            # store stat maps to disk, one sub-directory per map type
            for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
                map_dir = os.path.join(subject_data.output_dir,
                                       '%s_maps' % map_type)
                map_dirs[map_type] = map_dir
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(map_dir,
                                        '%s_%s.nii' % (map_type, contrast_id))
                print "\t\tWriting %s ..." % map_path

                nibabel.save(out_map, map_path)

                # collect zmaps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path

                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path

        return effects_maps, z_maps, mask_path, map_dirs

    # compute native-space maps and mask (cached on func + anat)
    effects_maps, z_maps, mask_path, map_dirs = mem.cache(tortoise)(
        subject_data.func, subject_data.anat)

    # do stats report -- dead code: 0x0 is falsy, report is disabled
    if 0x0:
        anat_img = nibabel.load(subject_data.anat)
        stats_report_filename = os.path.join(subject_data.output_dir,
                                             "reports", "report_stats.html")
        generate_subject_stats_report(
            stats_report_filename,
            contrasts,
            z_maps,
            nibabel.load(mask_path),
            anat=anat_img.get_data(),
            anat_affine=anat_img.get_affine(),
            threshold=threshold,
            cluster_th=cluster_th,
            slicer=slicer,
            cut_coords=cut_coords,
            design_matrices=design_matrices,
            subject_id=subject_data.subject_id,
            start_time=stats_start_time,
            title="GLM for subject %s" % subject_data.subject_id,

            # additional ``kwargs`` for more informative report
            TR=tr,
            n_scans=n_scans,
            hfcut=hfcut,
            drift_model=drift_model,
            hrf_model=hrf_model,
            paradigm={
                'LR': paradigms[0].__dict__,
                'RL': paradigms[1].__dict__
            },
            frametimes={
                'LR': frametimes_list[0],
                'RL': frametimes_list[1]
            },
            fwhm=fwhm)

        ProgressReport().finish_dir(subject_data.output_dir)
        print "\r\nStatistic report written to %s\r\n" % stats_report_filename

    # remove repeated contrasts (undo the per-session duplication above)
    # and persist them as JSON for later inspection
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems())
    import json
    json.dump(
        dict((k, list(v)) for k, v in contrasts.iteritems()),
        open(os.path.join(subject_data.tmp_output_dir, "contrasts.json"), "w"))
    subject_data.contrasts = contrasts

    if normalize:
        assert hasattr(subject_data, "parameter_file")

        subject_data.native_effects_maps = effects_maps
        subject_data.native_z_maps = z_maps
        subject_data.native_mask_path = mask_path

        # warp effects maps and mask from native to standard space (MNI)
        apply_to_files = [
            v for _, v in subject_data.native_effects_maps.iteritems()
        ] + [subject_data.native_mask_path]
        tmp = nipype_mem.cache(spm.Normalize)(
            parameter_file=getattr(subject_data, "parameter_file"),
            apply_to_files=apply_to_files,
            write_bounding_box=[[-78, -112, -50], [78, 76, 85]],
            write_voxel_sizes=func_write_voxel_sizes,
            write_wrap=[0, 0, 0],
            write_interp=1,
            jobtype='write',
            ignore_exception=False,
        ).outputs.normalized_files

        # the mask was appended last, so it is tmp[-1]
        subject_data.mask = hard_link(tmp[-1], subject_data.output_dir)
        subject_data.effects_maps = dict(
            zip(effects_maps.keys(), hard_link(tmp[:-1], map_dirs["effects"])))

        # warp anat image
        subject_data.anat = hard_link(
            nipype_mem.cache(spm.Normalize)(
                parameter_file=getattr(subject_data, "parameter_file"),
                apply_to_files=subject_data.anat,
                write_bounding_box=[[-78, -112, -50], [78, 76, 85]],
                write_voxel_sizes=anat_write_voxel_sizes,
                write_wrap=[0, 0, 0],
                write_interp=1,
                jobtype='write',
                ignore_exception=False,
            ).outputs.normalized_files, subject_data.anat_output_dir)
    else:
        # keep native-space results
        subject_data.mask = mask_path
        subject_data.effects_maps = effects_maps
        subject_data.z_maps = z_maps

    return subject_data
Exemple #6
0
def first_level(subject_dic,
                additional_regressors=None,
                compcorr=False,
                smooth=None,
                surface=False,
                mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
                 exhaustive description of an individual acquisition
    additional_regressors: dict or None,
                 additional regressors provided as an already sampled
                 design_matrix
                 dictionary keys are session_ids
    compcorr: Bool, optional,
              whether confound estimation and removal should be done or not
    smooth: float or None, optional,
            how much the data should spatially smoothed during masking
    surface: Bool, optional,
             whether to run the GLM on surface (gifti) data instead of
             volume data
    mask_img: Niimg-like or None, optional,
              brain mask; computed from the functional data when None
              (volume analysis only)
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    hfcut = subject_dic['hfcut']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']

    # compute a brain mask from the functional data if none was supplied
    if not surface and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    # default: no extra regressors for any session
    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']
        ])

    # one GLM per session
    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):

        paradigm_id = _session_id_to_task_id([session_id])[0]

        # number of scans: gifti darrays for surface data, 4th volume
        # dimension otherwise
        if surface:
            from nibabel.gifti import read
            n_scans = np.array(
                [darrays.data for darrays in read(fmri_path).darrays]).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        # the 'audio' task has a sparse-sampling scheme: scans are only
        # acquired on a subset of each cycle
        if paradigm_id == 'audio':
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) +\
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if surface:
            compcorr = False  # XXX Fixme

        # confound regressors: CompCor components + motion, or motion only
        if compcorr:
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        # guess the onset file location from the fmri path when missing
        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('non-existant onset file. proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, paradigm_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            # stack the user-supplied regressors in front of the confounds
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))

        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            period_cut=hfcut,
            add_regs=add_regs,
            add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(paradigm_id, names)

        # per-session output directory (surface vs volume results)
        if surface:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_surf_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir, 'design_matrix.npz'),
                 design_matrix=design_matrix)

        if surface:
            run_surface_glm(design_matrix, contrasts, fmri_path,
                            subject_session_output_dir)
        else:
            z_maps = run_glm(design_matrix,
                             contrasts,
                             fmri_path,
                             mask_img,
                             subject_dic,
                             subject_session_output_dir,
                             tr=tr,
                             smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(subject_session_output_dir,
                                                 'report_stats.html')

            generate_subject_stats_report(
                stats_report_filename,
                contrasts,
                z_maps,
                mask_img,
                threshold=3.,
                cluster_th=15,
                anat=anat_img,
                anat_affine=anat_img.affine,
                design_matrices=[design_matrix],
                subject_id=subject_dic['subject_id'],
                start_time=start_time,
                title="GLM for subject %s" % session_id,
                # additional ``kwargs`` for more informative report
                TR=tr,
                n_scans=n_scans,
                hfcut=hfcut,
                frametimes=frametimes,
                drift_model=drift_model,
                hrf_model=hrf_model,
            )
    # close the progress report for the last session's output directory
    if not surface:
        ProgressReport().finish_dir(subject_session_output_dir)
        print("Statistic report written to %s\r\n" % stats_report_filename)
Exemple #7
0
def _preprocess_and_analysis_subject(subject_data,
                                     slicer='z',
                                     cut_coords=6,
                                     threshold=3.,
                                     cluster_th=15,
                                     **preproc_params):
    """
    Preprocesses the subject and then fits (mass-univariate) GLM thereup.

    """

    # sanitize run_ids:
    # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage,

    # for example
    run_ids = range(9)
    if subject_data['subject_id'] == "Sub14":
        run_ids = [0] + range(2, 9)
        subject_data['func'] = [subject_data['func'][0]
                                ] + subject_data['func'][2:]
        subject_data['session_id'] = [subject_data['session_id'][0]
                                      ] + subject_data['session_id'][2:]

    # sanitize subject output dir
    if not 'output_dir' in subject_data:
        subject_data['output_dir'] = os.path.join(output_dir,
                                                  subject_data['subject_id'])

    # preprocess the data
    subject_data = do_subject_preproc(subject_data, **preproc_params)
    # chronometry
    stats_start_time = pretty_time()

    # to-be merged lists, one item per run
    paradigms = []
    frametimes_list = []
    design_matrices = []  # one
    list_of_contrast_dicts = []  # one dict per run
    n_scans = []
    for run_id in run_ids:
        _n_scans = len(subject_data.func[run_id])
        n_scans.append(_n_scans)

        # make paradigm
        paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id])

        # make design matrix
        tr = 2.
        drift_model = 'Cosine'
        hrf_model = 'Canonical With Derivative'
        hfcut = 128.
        frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm,
            hrf_model=hrf_model,
            drift_model=drift_model,
            hfcut=hfcut,
            add_regs=np.loadtxt(
                getattr(subject_data, 'realignment_parameters')[run_id]),
            add_reg_names=[
                'Translation along x axis', 'Translation along yaxis',
                'Translation along z axis', 'Rotation along x axis',
                'Rotation along y axis', 'Rotation along z axis'
            ])

        # import matplotlib.pyplot as plt
        # design_matrix.show()
        # plt.show()

        paradigms.append(paradigm)
        design_matrices.append(design_matrix)
        frametimes_list.append(frametimes)
        n_scans.append(_n_scans)

        # specify contrasts
        contrasts = {}
        n_columns = len(design_matrix.names)
        for i in xrange(paradigm.n_conditions):
            contrasts['%s' %
                      design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

        # more interesting contrasts"""
        contrasts['Famous-Unfamiliar'] = contrasts['Famous'] - contrasts[
            'Unfamiliar']
        contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar']
        contrasts[
            'Famous-Scrambled'] = contrasts['Famous'] - contrasts['Scrambled']
        contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled']
        contrasts['Unfamiliar-Scrambled'] = contrasts[
            'Unfamiliar'] - contrasts['Scrambled']
        contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled']

        list_of_contrast_dicts.append(contrasts)

    # importat maps
    z_maps = {}
    effects_maps = {}

    # fit GLM
    print('\r\nFitting a GLM (this takes time) ..')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func) for sess_func in subject_data.func],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    print "... done.\r\n"

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")

    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    print "... done.\r\n"

    # replicate contrasts across runs
    contrasts = dict(
        (cid, [contrasts[cid] for contrasts in list_of_contrast_dicts])
        for cid, cval in contrasts.iteritems())

    # compute effects
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, eff_map = fmri_glm.contrast(contrast_val,
                                           con_id=contrast_id,
                                           output_z=True,
                                           output_stat=False,
                                           output_effects=True,
                                           output_variance=False)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
            map_dir = os.path.join(subject_data.output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path

            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # remove repeated contrasts
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems())

    # do stats report
    stats_report_filename = os.path.join(
        getattr(subject_data, 'reports_output_dir', subject_data.output_dir),
        "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=threshold,
        cluster_th=cluster_th,
        slicer=slicer,
        cut_coords=cut_coords,
        design_matrices=design_matrices,
        subject_id=subject_data.subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_data.subject_id,

        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        drift_model=drift_model,
        hrf_model=hrf_model,
        paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id])
                      for run_id in run_ids),
        frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id])
                        for run_id in run_ids),
        # fwhm=fwhm
    )

    ProgressReport().finish_dir(subject_data.output_dir)
    print "\r\nStatistic report written to %s\r\n" % stats_report_filename

    return contrasts, effects_maps, z_maps, mask_path