Example #1
def report_slm_oasis():  # pragma: no cover
    n_subjects = 5  # more subjects requires more memory
    oasis_dataset = nilearn.datasets.fetch_oasis_vbm(n_subjects=n_subjects)
    # Resample the images, since this mask has a different resolution
    mask_img = resample_to_img(
        nilearn.datasets.fetch_icbm152_brain_gm_mask(),
        oasis_dataset.gray_matter_maps[0],
        interpolation='nearest',
    )
    design_matrix = _make_design_matrix_slm_oasis(oasis_dataset, n_subjects)
    second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask=mask_img)
    second_level_model.fit(oasis_dataset.gray_matter_maps,
                           design_matrix=design_matrix)

    contrast = [[1, 0, 0], [0, 1, 0]]
    report = make_glm_report(
        model=second_level_model,
        contrasts=contrast,
        bg_img=nilearn.datasets.fetch_icbm152_2009()['t1'],
        height_control=None,
    )
    output_filename = 'generated_report_slm_oasis.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
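
The helper _make_design_matrix_slm_oasis is not shown in this example. A minimal
sketch of what it might do, assuming the OASIS ext_vars record exposes 'age' and
'mf' (sex) fields as in the standard nilearn OASIS example; three columns, matching
the contrast [[1, 0, 0], [0, 1, 0]] used above:

import numpy as np
import pandas as pd


def _make_design_matrix_slm_oasis(oasis_dataset, n_subjects):
    # Hypothetical reconstruction: an age + sex + intercept design built
    # from the OASIS external variables (field names are assumptions).
    age = oasis_dataset.ext_vars['age'].astype(float)
    sex = (oasis_dataset.ext_vars['mf'] == b'F').astype(float)
    intercept = np.ones(n_subjects)
    return pd.DataFrame(np.vstack((age, sex, intercept)).T,
                        columns=['age', 'sex', 'intercept'])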
def test_second_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, 'intercept')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # smoke test for different contrasts in fixed effects
        model.compute_contrast(c1)
        z_image = model.compute_contrast(c1, output_type='z_score')
        stat_image = model.compute_contrast(c1, output_type='stat')
        p_image = model.compute_contrast(c1, output_type='p_value')
        effect_image = model.compute_contrast(c1, output_type='effect_size')
        variance_image = model.compute_contrast(c1,
                                                output_type='effect_variance')

        # Test output_type='all', and verify images are equivalent
        all_images = model.compute_contrast(c1, output_type='all')
        assert_array_equal(all_images['z_score'].get_data(),
                           z_image.get_data())
        assert_array_equal(all_images['stat'].get_data(),
                           stat_image.get_data())
        assert_array_equal(all_images['p_value'].get_data(),
                           p_image.get_data())
        assert_array_equal(all_images['effect_size'].get_data(),
                           effect_image.get_data())
        assert_array_equal(all_images['effect_variance'].get_data(),
                           variance_image.get_data())

        # formula should work (passing variable name directly)
        model.compute_contrast('intercept')
        # or simply pass nothing
        model.compute_contrast()
        # passing null contrast should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, '')
        assert_raises(ValueError, model.compute_contrast, c1, None, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, None, '')
        # check that passing no explicit contrast when the design
        # matrix has more than one column raises an error
        X = pd.DataFrame(np.random.rand(4, 2), columns=['r1', 'r2'])
        model = model.fit(Y, design_matrix=X)
        assert_raises(ValueError, model.compute_contrast, None)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
def test_second_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, 'intercept')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # smoke test for different contrasts in fixed effects
        model.compute_contrast(c1)
        z_image = model.compute_contrast(c1, output_type='z_score')
        stat_image = model.compute_contrast(c1, output_type='stat')
        p_image = model.compute_contrast(c1, output_type='p_value')
        effect_image = model.compute_contrast(c1, output_type='effect_size')
        variance_image = \
            model.compute_contrast(c1, output_type='effect_variance')

        # Test output_type='all', and verify images are equivalent
        all_images = model.compute_contrast(c1, output_type='all')
        assert_array_equal(all_images['z_score'].get_data(),
                           z_image.get_data())
        assert_array_equal(all_images['stat'].get_data(),
                           stat_image.get_data())
        assert_array_equal(all_images['p_value'].get_data(),
                           p_image.get_data())
        assert_array_equal(all_images['effect_size'].get_data(),
                           effect_image.get_data())
        assert_array_equal(all_images['effect_variance'].get_data(),
                           variance_image.get_data())

        # formula should work (passing variable name directly)
        model.compute_contrast('intercept')
        # or simply pass nothing
        model.compute_contrast()
        # passing null contrast should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, '')
        assert_raises(ValueError, model.compute_contrast, c1, None, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, None, '')
        # check that passing no explicit contrast when the design
        # matrix has more than one column raises an error
        X = pd.DataFrame(np.random.rand(4, 2), columns=['r1', 'r2'])
        model = model.fit(Y, design_matrix=X)
        assert_raises(ValueError, model.compute_contrast, None)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
def fit_second_level(models1, contrasts, atlas, dim, split, write_dir):
    mem = Memory(location=expanduser('cache'))
    model = SecondLevelModel(n_jobs=1, memory=mem, memory_level=1)
    model.fit(models1)

    for contrast in contrasts:
        for output_type in ['z_score', 'effect_size']:
            img = model.compute_contrast(first_level_contrast=contrast,
                                         output_type=output_type)
            img.to_filename(
                join(
                    write_dir, f'{contrast}_{output_type}_{atlas}'
                    f'_{dim}_{split}.nii.gz'))
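
A hypothetical call, assuming the first argument is a list of already-fitted
FirstLevelModel objects that all define the contrast names passed in; the
atlas/dim/split arguments only label the output files:

# Hypothetical usage; every name below is a placeholder.
fit_second_level(first_level_models,
                 contrasts=['language - string'],
                 atlas='schaefer', dim=128, split=0,
                 write_dir='/tmp/group_maps')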
def test_second_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # ols case
        model = SecondLevelModel(mask=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, 'intercept')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # smoke test for different contrasts in fixed effects
        model.compute_contrast(c1)
        model.compute_contrast(c1, output_type='z_score')
        model.compute_contrast(c1, output_type='stat')
        model.compute_contrast(c1, output_type='p_value')
        model.compute_contrast(c1, output_type='effect_size')
        model.compute_contrast(c1, output_type='effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('intercept')
        # or simply pass nothing
        model.compute_contrast()
        # passing null contrast should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, '')
        assert_raises(ValueError, model.compute_contrast, c1, None, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, None, '')
Example #6
def create_one_sample_t_test(name, maps, output_dir, smoothing_fwhm=6.0):
    if not op.isdir(output_dir):
        os.makedirs(output_dir)  # assumes "import os"; os.path has no mkdir
    model = SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
    design_matrix = pd.DataFrame([1] * len(maps), columns=['intercept'])
    model = model.fit(maps, design_matrix=design_matrix)
    z_map = model.compute_contrast(output_type='z_score')
    nib.save(z_map, op.join(output_dir, "{}_group_zmap.nii.gz".format(name)))
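
A hypothetical call, where the maps would be one contrast image per subject
(file paths or Nifti1Image objects):

# Hypothetical usage; the file names and output directory are placeholders.
subject_maps = ['sub-01_faces_zmap.nii.gz',
                'sub-02_faces_zmap.nii.gz',
                'sub-03_faces_zmap.nii.gz']
create_one_sample_t_test('faces', subject_maps, 'group_results',
                         smoothing_fwhm=6.0)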
Example #7
def test_second_level_model_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # ols case
        model = SecondLevelModel(mask=mask)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4)
        model = model.fit(Y, design_matrix=X)
        labels1 = model.labels_
        results1 = model.results_
        labels2, results2 = run_glm(model.masker_.transform(Y), X, 'ols')
        assert_almost_equal(labels1, labels2, decimal=1)
        assert_equal(len(results1), len(results2))
Example #8
def test_slm_reporting():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        model = SecondLevelModel()
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        c1 = np.eye(len(model.design_matrix_.columns))[0]
        report_slm = glmr.make_glm_report(model, c1)
        # catches & raises UnicodeEncodeError in HTMLDocument.get_iframe()
        report_iframe = report_slm.get_iframe()
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del Y, FUNCFILE, func_img, model
Example #9
def test_second_level_model_contrast_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask=mask, memory='nilearn_cache')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1 = np.eye(ncol)[0, :]
        # test memory caching for compute_contrast
        model.compute_contrast(c1, output_type='z_score')
        # or simply pass nothing
        model.compute_contrast()
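
The string 'nilearn_cache' above is turned into a joblib Memory object internally.
An equivalent, more explicit setup (a sketch; joblib is already a dependency of
nistats/nilearn):

from joblib import Memory

# Explicit cache object instead of a directory name string.
cache = Memory('nilearn_cache', verbose=0)
model = SecondLevelModel(mask=mask, memory=cache, memory_level=1)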
def do_contrast(con, ROOTDIR, MODEL):
    """ Input: con is the name of the contrast """
    confile = f'{con}_con.nii.gz'
    spmfile = f'{con}_zmap.nii.gz'
    cmaps = glob.glob(op.join(ROOTDIR, 'sub*', MODEL, confile))
    smaps = glob.glob(op.join(ROOTDIR, 'sub*', MODEL, spmfile))
    cmaps.sort()
    smaps.sort()

    fig, axes = plt.subplots(nrows=5, ncols=4)
    for cidx, tmap in enumerate(smaps):
        plotting.plot_glass_brain(tmap,
                                  colorbar=True,
                                  threshold=3.1,
                                  title=f'{cidx:02d}',
                                  axes=axes[int(cidx / 4),
                                            int(cidx % 4)],
                                  plot_abs=False,
                                  display_mode='z')
    fig.suptitle(f'contrast {con}')
    #pdf.savefig(fig)

    second_level_input = cmaps
    design_matrix = pd.DataFrame([1] * len(second_level_input),
                                 columns=['intercept'])

    second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
    second_level_model = second_level_model.fit(second_level_input,
                                                design_matrix=design_matrix)

    z_map = second_level_model.compute_contrast(output_type='z_score')
    nib.save(z_map, f'group_{con}.nii.gz')
    p_val = 0.001
    p001_unc = norm.isf(p_val)
    display = plotting.plot_glass_brain(
        z_map,
        threshold=p001_unc,
        colorbar=True,
        display_mode='lzry',
        plot_abs=False,
        title=f'group contrasts {con} (unc p<0.001)')
    display.savefig(f'group_{con}.png')
    #pdf.savefig()
    display.close()
def test_second_level_model_contrast_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask, memory='nilearn_cache')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1 = np.eye(ncol)[0, :]
        # test memory caching for compute_contrast
        model.compute_contrast(c1, output_type='z_score')
        # or simply pass nothing
        model.compute_contrast()
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example #12
def test_high_level_glm_with_paths():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, [])
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        c1 = np.eye(len(model.design_matrix_.columns))[0]
        z_image = model.compute_contrast(c1, output_type='z_score')
        assert_true(isinstance(z_image, Nifti1Image))
        assert_array_equal(z_image.affine, load(mask).affine)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del Y, FUNCFILE, func_img, model
def test_high_level_glm_with_paths():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, [])
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        c1 = np.eye(len(model.design_matrix_.columns))[0]
        z_image = model.compute_contrast(c1, output_type='z_score')
        assert_true(isinstance(z_image, Nifti1Image))
        assert_array_equal(z_image.affine, load(mask).affine)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del Y, FUNCFILE, func_img, model
Example #14
def create_one_sample_t_test(name, maps, output_dir, smoothing_fwhm=8.0):
    if not op.isdir(output_dir):
        os.makedirs(output_dir)  # assumes "import os"; os.path has no mkdir

    model = SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
    design_matrix = pd.DataFrame([1] * len(maps), columns=['intercept'])
    model = model.fit(maps, design_matrix=design_matrix)
    z_map = model.compute_contrast(output_type='z_score')
    nibabel.save(z_map, op.join(output_dir,
                                "{}_group_zmap.nii.gz".format(name)))

    p_val = 0.001
    z_th = norm.isf(p_val)
    z_th = 3.1
    display = plotting.plot_glass_brain(z_map,
                                        threshold=z_th,
                                        colorbar=True,
                                        plot_abs=False,
                                        display_mode='lzry',
                                        title=name)
    display.savefig(op.join(output_dir, "{}_group_zmap".format(name)))
def anova(db, masker):
    """perform a big ANOVA of brain activation with three factors:
    acquisition, subject, contrast"""
    df = db[(db.acquisition == 'ap') | (db.acquisition == 'pa')]

    # make the design matrix
    subject_dmtx, subject_ = design(df.subject)
    contrast_dmtx, contrast_ = design(df.contrast)
    acq_dmtx, acq_ = design(df.acquisition)
    dmtx = np.hstack(
        (subject_dmtx[:, :-1], contrast_dmtx[:, :-1], acq_dmtx[:, :-1],
         np.ones((len(df), 1))))
    labels = np.hstack(
        (subject_[:-1], contrast_[:-1], acq_[:-1], ['intercept']))
    design_matrix = pd.DataFrame(dmtx, columns=labels)
    _, singular, _ = np.linalg.svd(design_matrix.values, 0)
    dof_subject = len(subject_) - 1
    dof_contrast = len(contrast_) - 1
    dof_acq = len(acq_) - 1

    # fit the model
    from nistats.second_level_model import SecondLevelModel
    second_level_model = SecondLevelModel(mask=masker.mask_img_)
    second_level_model = second_level_model.fit(list(df.path.values),
                                                design_matrix=design_matrix)
    subject_map = second_level_model.compute_contrast(
        np.eye(len(labels))[:dof_subject], output_type='z_score')
    contrast_map = second_level_model.compute_contrast(
        np.eye(len(labels))[dof_subject:dof_subject + dof_contrast],
        output_type='z_score')
    acq_map = second_level_model.compute_contrast(
        np.eye(len(labels))[-1 - dof_acq:-1], output_type='z_score')
    subject_map = math_img('img * (img > -8.2095)', img=subject_map)
    contrast_map = math_img('img * (img > -8.2095)', img=contrast_map)
    acq_map = math_img('img * (img > -8.2095)', img=acq_map)
    return design_matrix, subject_map, contrast_map, acq_map
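
The design() helper used above is not part of the snippet. A minimal sketch of a
dummy-coding helper consistent with how its two return values are used (an
indicator matrix and the array of factor levels):

import pandas as pd


def design(factor):
    # Hypothetical reconstruction: one indicator column per factor level,
    # returned together with the level names.
    dummies = pd.get_dummies(factor)
    return dummies.values.astype(float), dummies.columns.values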
def test_second_level_model_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        model = model.fit(Y, design_matrix=X)
        model.compute_contrast()
        labels1 = model.labels_
        results1 = model.results_

        labels2, results2 = run_glm(
            model.masker_.transform(Y), X.values, 'ols')
        assert_almost_equal(labels1, labels2, decimal=1)
        assert_equal(len(results1), len(results2))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example #17
def test_second_level_model_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        model = model.fit(Y, design_matrix=X)
        model.compute_contrast()
        labels1 = model.labels_
        results1 = model.results_

        labels2, results2 = run_glm(model.masker_.transform(Y), X.values,
                                    'ols')
        assert_almost_equal(labels1, labels2, decimal=1)
        assert_equal(len(results1), len(results2))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example #18
def create_one_sample_t_test(name,
                             maps,
                             output_dir,
                             smoothing_fwhm=None,
                             vmax=None,
                             design_matrix=None,
                             p_val=0.001,
                             fdr=0.01,
                             loc=0,
                             scale=1,
                             fwhm=6):
    """ Do a one sample t-test over the maps.
    """
    print('##### ', name, ' #####')
    model = SecondLevelModel(smoothing_fwhm=smoothing_fwhm, n_jobs=-1)
    if design_matrix is None:
        design_matrix = pd.DataFrame([1] * len(maps), columns=['intercept'])
    model = model.fit(maps, design_matrix=design_matrix)
    z_map = model.compute_contrast(output_type='z_score')
    z_th = norm.isf(p_val, loc=loc, scale=scale)  # 3.09

    # apply fdr to zmap
    thresholded_zmap, th = map_threshold(stat_img=z_map,
                                         alpha=fdr,
                                         height_control='fdr',
                                         cluster_threshold=0,
                                         two_sided=False)
    print(z_th, th)
    # effect size-map
    eff_map = model.compute_contrast(output_type='effect_size')

    thr = np.abs(thresholded_zmap.get_data())

    return z_map, eff_map, new_img_like(eff_map, (thr > z_th)), (thr > z_th)
def compute_second_level(model1, model2, FWHM=None):
    """
    Compute the group-level significance of the paired difference between two
    models
    """
    # # compute and save the difference of two models
    compute_model_difference(model1, model2,
                             FWHM)  # compute with the FirstLevelAnalysis code

    # redefine the mask, without smoothing
    masker = compute_global_masker(rootdir)

    # use those different files as input for group-level analysis
    second_level_input = get_fmri_files(
        os.path.join(rootdir, first_dir, "diff"), f'{model1}-{model2}')

    # prepare second level analysis (one sample t-test)
    design_matrix = pd.DataFrame([1] * len(second_level_input),
                                 columns=['intercept'])
    second_level_model = SecondLevelModel(masker)
    second_level_model = second_level_model.fit(second_level_input,
                                                design_matrix=design_matrix)

    # estimate the contrast
    z_map = second_level_model.compute_contrast(output_type='z_score')
    # save to disk
    nib.save(
        z_map,
        os.path.join(rootdir, second_dir, f"GroupLevel_{model1}-{model2}"))

    # Get the map of positive values only
    z_val = masker.transform(z_map)
    z_val_pos = [val if val > 0 else 0 for val in z_val[0]]
    z_map_pos = masker.inverse_transform(z_val_pos)

    return z_map, z_map_pos
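
A hypothetical call; compute_model_difference, compute_global_masker and
get_fmri_files come from the surrounding project and are not shown, so the model
names below are placeholders:

# Hypothetical usage comparing two first-level model variants.
z_map, z_map_pos = compute_second_level('modelA', 'modelB', FWHM=5)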
############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
second_level_input = data['cmaps']
design_matrix = pd.DataFrame([1] * len(second_level_input),
                             columns=['intercept'])

############################################################################
# Model specification and fit
from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input,
                                            design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(output_type='z_score')

###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from scipy.stats import norm
p_val = 0.001
p001_unc = norm.isf(p_val)
display = plotting.plot_glass_brain(
    z_map, threshold=p001_unc, colorbar=True, display_mode='z', plot_abs=False,
    title='group left-right button press (unc p<0.001)')
Example #21
print("Actual number of subjects after quality check: %d" % n_samples)

############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
design_matrix = pd.DataFrame(np.hstack((tested_var, np.ones_like(tested_var))),
                             columns=['fluency', 'intercept'])

###########################################################################
# Fit of the second-level model
from nistats.second_level_model import SecondLevelModel
model = SecondLevelModel(smoothing_fwhm=5.0)
model.fit(contrast_map_filenames, design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = model.compute_contrast('fluency', output_type='z_score')

###########################################################################
# We compute the fdr-corrected p = 0.05 threshold for these data
from nistats.thresholding import map_threshold
_, threshold = map_threshold(z_map, alpha=.05, height_control='fdr')

###########################################################################
# Let us plot the second level contrast at the computed thresholds
from nilearn import plotting
plotting.plot_stat_map(
############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
design_matrix = pd.DataFrame(
    np.hstack((tested_var, np.ones_like(tested_var))),
    columns=['fluency', 'intercept'])

###########################################################################
# Fit of the second-level model
from nistats.second_level_model import SecondLevelModel
model = SecondLevelModel(smoothing_fwhm=5.0)
model.fit(contrast_map_filenames, design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = model.compute_contrast('fluency', output_type='z_score')

###########################################################################
# We compute the fdr-corrected p = 0.05 threshold for these data
from nistats.thresholding import map_threshold
_, threshold = map_threshold(z_map, alpha=.05, height_control='fdr')

###########################################################################
# Let us plot the second level contrast at the computed thresholds
from nilearn import plotting
plotting.plot_stat_map(
Example #23
                             columns=['age', 'sex', 'intercept'])

#############################################################################
# Plot the design matrix
from nistats.reporting import plot_design_matrix
ax = plot_design_matrix(design_matrix)
ax.set_title('Second level design matrix', fontsize=12)
ax.set_ylabel('maps')

##########################################################################
# Specify and fit the second-level model. When loading the data, we
# smooth a little bit to improve statistical behavior.

from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask=mask_img)
second_level_model.fit(gray_matter_map_filenames, design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast at an FDR-corrected p < 0.05 and plot it.
# First compute the threshold.
from nistats.thresholding import map_threshold
_, threshold = map_threshold(z_map, level=.05, height_control='fdr')
print('The FDR=.05-corrected threshold is: %.3g' % threshold)
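
The comment above says the thresholded contrast is plotted, but the example is cut
off before that step; a minimal sketch, assuming nilearn.plotting is available and
using the threshold just computed (the title text is illustrative):

from nilearn import plotting

# Plot the age-effect contrast at the FDR-corrected threshold.
plotting.plot_stat_map(z_map, threshold=threshold,
                       title='age effect (FDR < .05)')
plotting.show()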

###########################################################################
Example #24
plotting.show()

#########################################################################
# Second level model estimation
# -----------------------------
# We just have to provide the list of fitted FirstLevelModel objects
# to the SecondLevelModel object for estimation. We can do this because
# all subjects share a similar design matrix (same variables reflected in
# column names)
from nistats.second_level_model import SecondLevelModel
second_level_input = models

#########################################################################
# Note that we apply a smoothing of 8mm.
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input)

#########################################################################
# Computing contrasts at the second level is as simple as at the first level.
# Since we are not providing confounds, we are performing a one-sample test
# at the second level with the images determined by the specified first level
# contrast.
zmap = second_level_model.compute_contrast(
    first_level_contrast='language-string')

#########################################################################
# The group level contrast reveals a left lateralized fronto-temporal
# language network
plotting.plot_glass_brain(zmap, colorbar=True, threshold=p001_unc,
                          title='Group language network (unc p<0.001)',
                          plot_abs=False, display_mode='x')
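
p001_unc is not defined in this snippet; it is presumably the z-threshold for an
uncorrected p < 0.001, computed as in the earlier examples:

# Assumed definition of the threshold used above (uncorrected p < 0.001).
from scipy.stats import norm
p001_unc = norm.isf(0.001)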

write_dir = ''
sorted_contrasts = {}  # expected to map each task name to its list of contrasts

for task in sorted_contrasts.keys():
    task_dir = os.path.join(write_dir, task)
    if not os.path.exists(task_dir):
        os.mkdir(task_dir)
    contrasts = sorted_contrasts[task]
    n_contrasts = len(contrasts)
    # First do the random effects glass brain figure
    for i, contrast in enumerate(contrasts):
        contrast_mask = (db.contrast.values == contrast)
        dmtx = pd.DataFrame(np.ones(np.sum(contrast_mask)))
        glm.fit(list(db.path[contrast_mask].values), design_matrix=dmtx)
        grp_stat = glm.compute_contrast([1], stat_type='t', output_type='z_score')
        plotting.plot_glass_brain(
                grp_stat, display_mode='z', title=BETTER_NAMES[contrast],
                threshold=3., vmax=8, plot_abs=False, black_bg=True,
                output_file='/tmp/rfx_%s.png' % contrast)
        plt.figure(figsize=(7, 2 * n_contrasts + 1), facecolor='k', edgecolor='k')
        delta = (4 * n_contrasts - 1.) / (4 * n_contrasts ** 2)
        for i, contrast in enumerate(contrasts):
            ax = plt.axes([0., 1 - (i + 1) * delta, 1., delta], axisbg='k')
            ax.imshow(mpimg.imread('/tmp/rfx_%s.png' % contrast))
            plt.axis('off')        
        ax =  plt.axes([0.02, 0.0, .8, 1./ (8 * n_contrasts)],
                       axisbg='k')
        _draw_colorbar(ax, vmax=8, offset=3., orientation='horizontal', fontsize=14)        
        ax =  plt.axes([0.84, .01, .15, 1./ (8 * n_contrasts)], axisbg='k')
Example #26
def main(sourcedata, derivatives, subject, session, tmp_dir):

    sourcedata_layout = BIDSLayout(sourcedata)
    sourcedata_df = sourcedata_layout.as_data_frame()
    events = sourcedata_df[(sourcedata_df['type'] == 'events')
                           & (sourcedata_df['subject'] == subject) &
                           (sourcedata_df['session'] == session)]

    derivatives_layout = BIDSLayout(
        os.path.join(derivatives, 'spynoza_mc_mutualinfo'))
    derivatives_df = derivatives_layout.as_data_frame()
    bold = derivatives_df[(derivatives_df['type'] == 'preproc')
                          & (derivatives_df['subject'] == subject) &
                          (derivatives_df['session'] == session)]

    confounds = derivatives_df[(derivatives_df['type'] == 'confounds')
                               & (derivatives_df['subject'] == subject) &
                               (derivatives_df['session'] == session)]

    print(derivatives_df.type.unique())

    mask = derivatives_layout.get(subject=subject,
                                  session=session,
                                  type='mask',
                                  return_type='file')[0]

    df = events.merge(bold,
                      on=['subject', 'session', 'run'],
                      suffixes=('_events', '_bold'))

    confounds = confounds.rename(columns={'path': 'confounds'})
    df = df.merge(confounds[['subject', 'session', 'run', 'confounds']])

    models = []
    for ix, row in df.iterrows():

        results_dir = os.path.join(derivatives, 'modelfitting', 'glm4',
                                   'sub-{}'.format(row['subject']))
        if 'session' in row:
            results_dir = os.path.join(results_dir,
                                       'ses-{}'.format(row['session']))

        os.makedirs(results_dir, exist_ok=True)

        confounds = pd.read_table(row.confounds).fillna(method='bfill')

        print('Fitting {}'.format(row['path_bold']))
        model = FirstLevelModel(t_r=4, mask=mask)
        paradigm = pd.read_table(row['path_events'])
        model.fit(row['path_bold'], paradigm, confounds=confounds)

        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='z_score')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_zmap.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='effect_size')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_psc.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        models.append(model)

    second_level_model = SecondLevelModel(mask=mask)
    second_level_model.fit(models)

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='effect_size')
    left_right_group.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_left_over_right_effect_size.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))
Example #27
def main(derivatives, ds):

    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3)  # Remove 4

    models = []

    for subject in subjects:
        print('subject {}'.format(subject))
        runs = ['{:02d}'.format(i) for i in range(1, 4)]
        if ds == 'ds-01':
            if subject == '06':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]
        elif ds == 'ds-02':
            if subject == '07':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]

        include = [
            u'dvars', u'framewise_displacement', u'a_comp_cor_00',
            u'a_comp_cor_01', u'a_comp_cor_02', u'a_comp_cor_03',
            u'a_comp_cor_04', u'a_comp_cor_05', u'cosine00', u'cosine01',
            u'cosine02', u'cosine03', u'cosine04', u'cosine05', u'cosine06',
            u'cosine07', u'cosine08', u'cosine09', u'cosine10', u'cosine11',
            u'cosine12', u'cosine13', u'cosine14', u'cosine15', u'trans_x',
            u'trans_y', u'trans_z', u'rot_x', u'rot_y', u'rot_z'
        ]

        images = []
        confounds = []
        behavior = []
        masks = []

        for run in runs:
            masks.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
                ).format(**locals()))

            images.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
                ).format(**locals()))
            confounds.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                        'sub-{subject}_task-randomdotmotion_run-{run}_desc-confounds_regressors.tsv'
                    ).format(**locals())))

            behavior.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'event_files',
                        'sub-{subject}_task-randomdotmotion_run-{run}_events.tsv'
                    ).format(**locals())))
            behavior[-1]['duration'] = None

        confounds = [c[include].fillna(method='bfill') for c in confounds]

        model = FirstLevelModel(
            t_r=3,
            mask=masks[0],
            drift_model=None,  # Already done by fmriprep
            smoothing_fwhm=5.0,
            hrf_model='spm + derivative',
            n_jobs=10,
            subject_label=subject)

        model.fit(images, behavior, confounds)

        print(model.design_matrices_[0].columns)

        difficulty = model.compute_contrast('hard - easy',
                                            output_type='z_score')
        left_right_cue = model.compute_contrast('cue_left - cue_right',
                                                output_type='z_score')
        left_right_response = model.compute_contrast(
            'response_left - response_right', output_type='z_score')
        error = model.compute_contrast('error', output_type='z_score')
        cue = model.compute_contrast('cue_left + cue_right - 2 * cue_neutral')

        template = op.join(derivatives, ds, 'glm', 'individual_zmaps',
                           'sub-{subject}_desc-{contrast}_contrast.nii.gz')

        difficulty.to_filename(
            template.format(subject=subject, contrast='difficulty'))
        left_right_cue.to_filename(
            template.format(subject=subject, contrast='left_right_cue'))
        left_right_response.to_filename(
            template.format(subject=subject, contrast='left_right_response'))
        error.to_filename(template.format(subject=subject, contrast='error'))
        cue.to_filename(template.format(subject=subject, contrast='cue'))

        models.append(model)

    mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    model2 = SecondLevelModel(mask)

    model2.fit(models)

    difficulty = model2.compute_contrast(first_level_contrast='hard - easy',
                                         output_type='z_score')
    left_right_cue = model2.compute_contrast(
        first_level_contrast='cue_left - cue_right', output_type='z_score')
    left_right_response = model2.compute_contrast(
        first_level_contrast='response_left - response_right',
        output_type='z_score')
    error = model2.compute_contrast(first_level_contrast='error',
                                    output_type='z_score')
    cue = model2.compute_contrast(
        first_level_contrast='cue_left + cue_right - 2 * cue_neutral',
        output_type='z_score')

    template = op.join(derivatives, ds, 'glm',
                       'sub-{subject}_desc-{contrast}_contrast.nii.gz')
    difficulty.to_filename(
        template.format(subject='group', contrast='difficulty'))
    left_right_cue.to_filename(
        template.format(subject='group', contrast='left_right_cue'))
    left_right_response.to_filename(
        template.format(subject='group', contrast='left_right_response'))
    error.to_filename(template.format(subject='group', contrast='error'))
    cue.to_filename(template.format(subject='group', contrast='cue'))
Example #28
    confounds = confounds.groupby('dataset').transform(
        lambda x: (x - x.mean()) / x.std())

    confounds['subject_label'] = confounds.apply(
        lambda row: '{}.{}'.format(row.name[0], row.name[1]), 1)
    confounds['ds'] = confounds.index.get_level_values('dataset').map(
        {'ds-01': 0, 'ds-02': 1})

    confounds = confounds.reset_index(drop=True)

    model2 = SecondLevelModel(mask)
    model2.fit(models, confounds=confounds)

    glm_dir = op.join(derivatives, 'both', 'modelfitting', 'glm_5',
                      'shift-{}'.format(shift))

    if not op.exists(glm_dir):
        os.makedirs(glm_dir)

    keys = [
        'difficulty', 'cue', 'cue_left_right', 'error_difficulty',
        'error_bias', 'left_minus_right'
    ]
    first_level_contrasts = [
        'hard - easy', 'cue_left - cue_right',
        'cue_left + cue_right - 2 * cue_neutral', 'error', 'error',
        'response_left - response_right'
Example #29
def main(derivatives, ds):

    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3)  # Remove 4

    models = []

    for subject in subjects:
        print('subject {}'.format(subject))
        runs = ['{:02d}'.format(i) for i in range(1, 4)]
        if ds == 'ds-01':
            if subject == '06':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]
        elif ds == 'ds-02':
            if subject == '07':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]

        include = [
            u'dvars', u'framewise_displacement', u'a_comp_cor_00',
            u'a_comp_cor_01', u'a_comp_cor_02', u'a_comp_cor_03',
            u'a_comp_cor_04', u'a_comp_cor_05', u'cosine00', u'cosine01',
            u'cosine02', u'cosine03', u'cosine04', u'cosine05', u'cosine06',
            u'cosine07', u'cosine08', u'cosine09', u'cosine10', u'cosine11',
            u'cosine12', u'cosine13', u'cosine14', u'cosine15', u'trans_x',
            u'trans_y', u'trans_z', u'rot_x', u'rot_y', u'rot_z'
        ]

        images = []
        confounds = []
        behavior = []
        masks = []

        for run in runs:
            masks.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
                ).format(**locals()))

            images.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
                ).format(**locals()))
            confounds.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                        'sub-{subject}_task-randomdotmotion_run-{run}_desc-confounds_regressors.tsv'
                    ).format(**locals())))

            behavior.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'event_files',
                        'sub-{subject}_task-randomdotmotion_run-{run}_events.tsv'
                    ).format(**locals())))
            behavior[-1]['duration'] = None

        confounds = [c[include].fillna(method='bfill') for c in confounds]

        model = FirstLevelModel(
            t_r=3,
            mask=masks[0],
            drift_model=None,  # Already done by fmriprep
            smoothing_fwhm=5.0,
            hrf_model='spm + derivative',
            n_jobs=10,
            subject_label='{}.{}'.format(ds, subject))

        model.fit(images, behavior, confounds)

        models.append(model)

    mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

    confounds = pd.read_pickle(
        op.join(derivatives, 'all_subjectwise_parameters.pkl'))
    confounds = confounds[['ddm difficulty_effect', 'ddm z_cue_regressor']]
    confounds = confounds.groupby('dataset').transform(
        lambda x: (x - x.mean()) / x.std())

    confounds['subject_label'] = confounds.apply(
        lambda row: '{}.{}'.format(row.name[0], row.name[1]), 1)
    #confounds['ds'] = confounds.index.get_level_values('dataset').map({'ds-01':0, 'ds-02':1})

    confounds = confounds.reset_index(drop=True)

    model2 = SecondLevelModel(mask)
    model2.fit(models, confounds=confounds)

    glm_dir = op.join(derivatives, ds, 'modelfitting', 'glm_3')

    if not op.exists(glm_dir):
        os.makedirs(glm_dir)

    keys = ['difficulty', 'cue', 'cue_left_right', 'error']
    first_level_contrasts = [
        'hard - easy', 'cue_left - cue_right',
        'cue_left + cue_right - 2 * cue_neutral', 'error'
    ]
    second_level_contrasts = [
        'ddm difficulty_effect', 'ddm z_cue_regressor', 'ddm z_cue_regressor',
        'ddm difficulty_effect'
    ]

    for key, fl, sl in zip(keys, first_level_contrasts,
                           second_level_contrasts):

        for sl_ in ['intercept', sl]:
            contrast = model2.compute_contrast(first_level_contrast=fl,
                                               second_level_contrast=sl_,
                                               output_type='z_score')
            contrast.to_filename(
                op.join(glm_dir, '{}_{}_zmap.nii.gz'.format(key, sl_)))
############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
second_level_input = data['cmaps']
design_matrix = pd.DataFrame([1] * len(second_level_input),
                             columns=['intercept'])

############################################################################
# Model specification and fit
from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input,
                                            design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(output_type='z_score')

###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from scipy.stats import norm
p_val = 0.001
p001_uncorrected = norm.isf(p_val)

from nistats.thresholding import cluster_level_inference
proportion_true_discoveries_img = cluster_level_inference(z_map,
                                                          threshold=[3, 4, 5],
        img = resample_to_img(filelist[i], template)
    else:
        img = load_img(filelist[i])
    imgs.append(math_img('img * np.sqrt(%d)' % nvol, img=img))

# Create mask (containing only voxels with values in at least 80% of the images)
img_concat = concat_imgs(imgs)
mask = np.sum(img_concat.get_data()!=0, axis=-1)>=(img_concat.shape[-1] * 0.8)
mask = binary_fill_holes(
        binary_dilation(binary_erosion(mask, iterations=2), iterations=2))
group_mask = new_img_like(img_concat, mask.astype('int'), copy_header=True)

# Create 2nd-level model
design_matrix = pd.get_dummies(method_id)
second_level_model = SecondLevelModel(n_jobs=-1, mask=group_mask)
second_level_model = second_level_model.fit(imgs, design_matrix=design_matrix)

# Compute contrasts, save nifti and plot glass brain
weights = [ [1, 0, 0, 0, 0], [1,-1, 0, 0, 0], [1, 0,-1, 0, 0], [1, 0, 0,-1, 0], [1, 0, 0, 0,-1],
           [-1, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1,-1, 0, 0], [0, 1, 0,-1, 0], [0, 1, 0, 0,-1],
           [-1, 0, 1, 0, 0], [0,-1, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1,-1, 0], [0, 0, 1, 0,-1],
           [-1, 0, 0, 1, 0], [0,-1, 0, 1, 0], [0, 0,-1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1,-1],
           [-1, 0, 0, 0, 1], [0,-1, 0, 0, 1], [0, 0,-1, 0, 1], [0, 0, 0,-1, 1], [0, 0, 0, 0, 1]]

for i, w in enumerate(weights):

        # Estimate contrast
        z_map = second_level_model.compute_contrast(
                second_level_contrast=w, second_level_stat_type='t',
                output_type='z_score')
        #z_map.to_filename(res_path + '/tsnr/tsnr_%04d.nii.gz' % (i + 1))
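
The comment before the weights list also mentions plotting a glass brain, but that
step is cut off in this snippet. A minimal sketch, assuming nilearn.plotting is
available and applied to the last computed z_map:

from nilearn import plotting

# Illustrative only: the threshold of 3.1 mirrors the value used in other
# examples above, not a value from this snippet.
plotting.plot_glass_brain(z_map, colorbar=True, threshold=3.1,
                          plot_abs=False, title='contrast %02d' % (i + 1))
plotting.show()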
Example #32
                             columns=['age', 'sex', 'intercept'])

#############################################################################
# Plot the design matrix
from nistats.reporting import plot_design_matrix
ax = plot_design_matrix(design_matrix)
ax.set_title('Second level design matrix', fontsize=12)
ax.set_ylabel('maps')

##########################################################################
# Specify and fit the second-level model. When loading the data, we
# smooth a little bit to improve statistical behavior.

from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask=mask_img)
second_level_model.fit(gray_matter_map_filenames,
                       design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple: we can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast at an FDR-corrected p < 0.05 and plot it.
# First compute the threshold.
from nistats.thresholding import map_threshold
_, threshold = map_threshold(
    z_map, level=.05, height_control='fdr')
print('The FDR=.05-corrected threshold is: %.3g' % threshold)
Example #33
def main(sourcedata, derivatives, subject, session, tmp_dir):

    sourcedata_layout = BIDSLayout(sourcedata)
    sourcedata_df = sourcedata_layout.as_data_frame()
    events = sourcedata_df[(sourcedata_df['type'] == 'events')
                           & (sourcedata_df['subject'] == subject) &
                           (sourcedata_df['session'] == session)]

    derivatives_layout = BIDSLayout(os.path.join(derivatives, 'spynoza'))
    derivatives_df = derivatives_layout.as_data_frame()
    bold = derivatives_df[(derivatives_df['type'] == 'preproc')
                          & (derivatives_df['subject'] == subject) &
                          (derivatives_df['session'] == session)]

    mask = derivatives_layout.get(subject=subject,
                                  session=session,
                                  type='mask',
                                  return_type='file')[0]

    mask = image.math_img('(im > .5).astype(int)', im=mask)
    print(mask)

    df = events.merge(bold,
                      on=['subject', 'session', 'run'],
                      suffixes=('_events', '_bold'))

    models = []
    for ix, row in df.iterrows():

        results_dir = os.path.join(derivatives, 'modelfitting',
                                   'sub-{}'.format(row['subject']))
        if 'session' in row:
            results_dir = os.path.join(results_dir,
                                       'ses-{}'.format(row['session']))

        os.makedirs(results_dir, exist_ok=True)

        print('Fitting {}'.format(row['path_bold']))
        model = FirstLevelModel(t_r=4, mask=mask)
        paradigm = pd.read_table(row['path_events'])
        paradigm_short = paradigm.copy()
        paradigm_short['duration'] = 1
        paradigm_short['trial_type'] = paradigm_short['trial_type'].map(
            lambda x: '{}_instant'.format(x))
        paradigm = pd.concat((paradigm, paradigm_short))
        model.fit(row['path_bold'], paradigm)

        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='z_score')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_zmap.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='effect_size')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_psc.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))

        eye_l_instant = model.compute_contrast('eye_L_instant',
                                               output_type='z_score')
        eye_l_instant.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_eye_l_instant_zmap.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        eye_l_instant = model.compute_contrast('eye_L_instant',
                                               output_type='effect_size')
        eye_l_instant.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_eye_l_instant_effect_size.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))

        eye_r_instant = model.compute_contrast('eye_R_instant',
                                               output_type='z_score')
        eye_r_instant.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_eye_r_instant_zmap.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        eye_r_instant = model.compute_contrast('eye_R_instant',
                                               output_type='effect_size')
        eye_r_instant.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_eye_R_instant_effect_size.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))

        models.append(model)

    second_level_model = SecondLevelModel(mask=mask)
    second_level_model.fit(models)

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='effect_size')
    left_right_group.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_left_over_right_effect_size.nii.gz'.format(
                row['subject'], row['session'])))
Example #34
0
def main(sourcedata, derivatives, subject, session, tmp_dir):

    sourcedata_layout = BIDSLayout(sourcedata)
    sourcedata_df = sourcedata_layout.as_data_frame()
    events = sourcedata_df[(sourcedata_df['suffix'] == 'events')
                           & (sourcedata_df['subject'] == subject) &
                           (sourcedata_df['session'] == session)]

    derivatives_layout = BIDSLayout(os.path.join(derivatives), validate=False)
    derivatives_df = derivatives_layout.as_data_frame()
    bold = derivatives_df[(derivatives_df['suffix'] == 'preproc')
                          & (derivatives_df['subject'] == subject) &
                          (derivatives_df['session'] == session)]

    confounds = derivatives_df[(derivatives_df['suffix'] == 'confounds')
                               & (derivatives_df['subject'] == subject) &
                               (derivatives_df['session'] == session)]

    compcor = derivatives_df[(derivatives_df['suffix'] == 'compcor')
                             & (derivatives_df['subject'] == subject) &
                             (derivatives_df['session'] == session)]

    mask = derivatives_layout.get(subject=subject,
                                  session=session,
                                  suffix='mask',
                                  return_type='file')[0]

    df = events.merge(bold,
                      on=['subject', 'session', 'run'],
                      suffixes=('_events', '_bold'))

    confounds = confounds.rename(columns={'path': 'confounds'})
    df = df.merge(confounds[['subject', 'session', 'run', 'confounds']])

    compcor = compcor.rename(columns={'path': 'compcor'})
    df = df.merge(compcor[['subject', 'session', 'run', 'compcor']])

    df.sort_values('run', inplace=True)

    print(df.iloc[0])

    models = []
    for ix, row in df.iterrows():

        results_dir = os.path.join(derivatives, 'modelfitting', 'glm8',
                                   'sub-{}'.format(row['subject']))
        if 'session' in row:
            results_dir = os.path.join(results_dir,
                                       'ses-{}'.format(row['session']))

        results_dir = op.join(results_dir, 'func')

        os.makedirs(results_dir, exist_ok=True)

        confounds = pd.read_table(row.confounds).fillna(method='bfill')
        compcor = pd.read_table(row.compcor).fillna(method='bfill')

        confounds = pd.concat((confounds, compcor), axis=1)
        confounds -= confounds.mean()
        confounds /= confounds.std()

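        # Reduce the standardised confound and CompCor regressors to six
        # principal components to keep the design matrix compact.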
        pca = decomposition.PCA(n_components=6)
        confounds_trans = pd.DataFrame(
            pca.fit_transform(confounds),
            columns=['pca_{}'.format(i) for i in range(6)])

        print('Fitting {}'.format(row['path_bold']))
        model = FirstLevelModel(t_r=4,
                                signal_scaling=False,
                                subject_label=int(row['run']),
                                mask_img=mask)
        paradigm = pd.read_table(row['path_events'])
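        # Build two parametric regressors: 'stimulation' (all events,
        # modulation 1) and 'eye' (eye_L coded +1, eye_R coded -1).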
        paradigm_ = paradigm.copy()
        paradigm['trial_type'] = 'stimulation'
        paradigm['modulation'] = 1
        paradigm_['modulation'] = paradigm_.trial_type.map({
            'eye_L': 1,
            'eye_R': -1
        })
        paradigm_['trial_type'] = 'eye'
        paradigm = pd.concat((paradigm, paradigm_), ignore_index=True)

        model.fit(row['path_bold'], paradigm, confounds=confounds_trans)

        row['run'] = int(row['run'])
        row = dict(row)

        left_right = model.compute_contrast('eye', output_type='z_score')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{subject}_ses-{session}_task-{task_events}_run-{run:02d}_left_over_right_zmap.nii.gz'
                .format(**row)))

        left_right = model.compute_contrast('eye', output_type='effect_size')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{subject}_ses-{session}_task-{task_events}_run-{run:02d}_left_over_right_psc.nii.gz'
                .format(**row)))

        stimulation = model.compute_contrast('stimulation',
                                             output_type='effect_size')
        stimulation.to_filename(
            os.path.join(
                results_dir,
                'sub-{subject}_ses-{session}_task-{task_events}_run-{run:02d}_stimulation_psc.nii.gz'
                .format(**row)))

        stimulation = model.compute_contrast('stimulation',
                                             output_type='z_score')
        stimulation.to_filename(
            os.path.join(
                results_dir,
                'sub-{subject}_ses-{session}_task-{task_events}_run-{run:02d}_stimulation_zmap.nii.gz'
                .format(**row)))

        models.append(model)

    second_level_model = SecondLevelModel(mask_img=mask)
    second_level_model.fit(models)

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye', output_type='effect_size')
    left_right_group.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_left_over_right_effect_size.nii.gz'.format(
                row['subject'], row['session'])))

    stimulation_group = second_level_model.compute_contrast(
        first_level_contrast='stimulation', output_type='z_score')
    stimulation_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_stimulation_zmap.nii.gz'.format(
                row['subject'], row['session'])))

    stimulation_group = second_level_model.compute_contrast(
        first_level_contrast='stimulation', output_type='effect_size')
    stimulation_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_stimulation_effect_size.nii.gz'.format(
                row['subject'], row['session'])))
Example #35
0
                              axes=axes[int(midx / 5),
                                        int(midx % 5)],
                              plot_abs=False,
                              display_mode='x')
fig.suptitle('subjects z_map language network (unc p<0.001)')
plt.show()

#########################################################################
# Second level model estimation
# -----------------------------
# We just have to provide the list of fitted FirstLevelModel objects
# to the SecondLevelModel object for estimation. We can do this since
# all subjects share the same design matrix.
second_level_input = models
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input)

#########################################################################
# Computing contrasts at the second level is as simple as at the first level.
# Since we are not providing confounds, we are performing a one-sample test
# at the second level on the images determined by the specified first-level
# contrast.
zmap = second_level_model.compute_contrast(
    first_level_contrast='language-string')
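
#########################################################################
# For reference, the implicit one-sample design can be written out
# explicitly. This is a minimal sketch, not part of the original example;
# it assumes pandas is available and uses illustrative variable names. An
# intercept-only design matrix passed to ``fit`` yields the same
# one-sample test as above.
import pandas as pd

one_sample_design = pd.DataFrame([1] * len(second_level_input),
                                 columns=['intercept'])
explicit_model = SecondLevelModel(smoothing_fwhm=8.0).fit(
    second_level_input, design_matrix=one_sample_design)
zmap_one_sample = explicit_model.compute_contrast(
    second_level_contrast='intercept',
    first_level_contrast='language-string')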

#########################################################################
# The group-level contrast of the language network is mostly left
# lateralized, as expected.
plotting.plot_glass_brain(zmap,
                          colorbar=True,
                          threshold=norm.isf(0.001),
Example #36
0
                       verbose=100,
                       n_jobs=1,
                       minimize_memory=True)

for con in ['left_hand-right_hand']:  # ['incongruent-congruent', 'incorrect-correct']

    if con == 'incorrect-correct':
        models = []
        for model in fl_models:
            if 'incorrect' in model.design_matrices_[0]:
                models.append(model)
    else:
        models = fl_models

    slm.fit(models)

    print("Computing %s contrast ..." % con)
    zmap = slm.compute_contrast(second_level_contrast=None,
                                first_level_contrast=con,
                                second_level_stat_type='t',
                                output_type='z_score')

    zmap_thr = map_threshold(stat_img=zmap,
                             mask_img=None,
                             level=0.05,
                             height_control='fdr',
                             cluster_threshold=0)[0]
    zmap.to_filename(op.join(sl_dir, 'zmap_%s.nii.gz' % con))
    zmap_thr.to_filename(op.join(sl_dir, 'zmap_thr_%s.nii.gz' % con))
del fname_atts['sub']
del fname_atts['task']
fname_atts['con'] = "face"
fname_atts['val2'] = "z"
fname_atts['correction'] = "none"
if 'extra' in fname_atts.keys():
    fname_atts.move_to_end('extra')

# setup model
model = SecondLevelModel(mask=mni_mask_dil_img, smoothing_fwhm=5.0)
design_matrix = pd.DataFrame([1] * len(data_fnames), columns=['intercept'])
con_name = 'intercept'
print("Using design matrix: ")
print(design_matrix)
# save z map
model = model.fit(data_fnames, design_matrix=design_matrix)
z_map = model.compute_contrast(con_name, output_type='z_score')
out_fname = os.path.join(out_dir, make_bids_str(fname_atts))
nib.save(z_map, out_fname)
print(out_fname + ' saved.')
threshold = 3.1  # corresponds to p < .001, uncorrected
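# For reference (not part of the original script): 3.1 sits just above the
# one-sided p < .001 cutoff, since scipy.stats.norm.isf(0.001) is ~3.09:
#     from scipy.stats import norm
#     norm.isf(0.001)  # -> 3.0902...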
display = plotting.plot_glass_brain(z_map,
                                    threshold=threshold,
                                    colorbar=True,
                                    plot_abs=False,
                                    output_file=out_fname,
                                    title='z map')
# save p map
p_val = model.compute_contrast(con_name, output_type='p_value')
fname_atts['val2'] = "p"
out_fname = os.path.join(out_dir, make_bids_str(fname_atts))