Example #1
0
def test_high_level_glm_with_paths():
    """Fit a two-session GLM from files written on disk and check the z-map."""
    session_shapes = ((7, 8, 7, 15), (7, 8, 7, 14))
    n_regressors = 3
    with InTemporaryDirectory():
        mask_path, func_paths, design_paths = write_fake_fmri_data(
            session_shapes, n_regressors)
        model = FirstLevelGLM(mask=None).fit(func_paths, design_paths)
        # Same single contrast vector, replicated once per session.
        contrast = np.eye(n_regressors)[1]
        z_map, = model.transform([contrast] * 2)
        # The output image must share the affine of the mask on disk.
        assert_array_equal(z_map.get_affine(), load(mask_path).get_affine())
        assert_true(z_map.get_data().std() < 3.)
        # Drop references to memory-mapped files so the temporary
        # directory can be removed on Windows.
        del z_map, func_paths, model
Example #2
0
def test_high_level_glm_with_paths():
    """Check that a multi-session FirstLevelGLM accepts file-path inputs."""
    # New API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelGLM(mask=None).fit(
            fmri_files, design_files)
        # One contrast vector per session (two sessions here).
        z_image, = multi_session_model.transform([np.eye(rk)[1]] * 2)
        # The z-map must inherit the affine of the mask written to disk.
        assert_array_equal(z_image.get_affine(), load(mask_file).get_affine())
        assert_true(z_image.get_data().std() < 3.)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory
        del z_image, fmri_files, multi_session_model
Example #3
0
def test_high_level_glm_one_session():
    """Single-session fit: mask is computed when absent; z-map is an image."""
    session_shapes = [(7, 8, 9, 15)]
    n_regressors = 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(
        session_shapes, n_regressors)

    # With no mask given, the fitted masker must have computed one.
    model = FirstLevelGLM(mask=None).fit(fmri_data[0], design_matrices[0])
    assert_true(isinstance(model.masker_.mask_img_, Nifti1Image))

    # With an explicit mask, transform must still yield a Nifti1Image.
    model = FirstLevelGLM(mask=mask).fit(fmri_data[0], design_matrices[0])
    z_map, = model.transform(np.eye(n_regressors)[:1])
    assert_true(isinstance(z_map, Nifti1Image))
Example #4
0
def test_high_level_glm_one_session():
    """Single-session fit; check mask computation and transform output type."""
    # New API
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)

    # No mask given: the fitted masker must have computed one.
    single_session_model = FirstLevelGLM(mask=None).fit(
        fmri_data[0], design_matrices[0])
    assert_true(isinstance(single_session_model.masker_.mask_img_,
                           Nifti1Image))

    # Explicit mask: transform must return a Nifti1Image z-map.
    single_session_model = FirstLevelGLM(mask=mask).fit(
        fmri_data[0], design_matrices[0])
    z1, = single_session_model.transform(np.eye(rk)[:1])
    assert_true(isinstance(z1, Nifti1Image))
Example #5
0
def test_high_level_glm_null_contrasts():
    """A session whose contrast is all zeros must not affect the statistic."""
    session_shapes = ((7, 8, 7, 15), (7, 8, 7, 19))
    n_regressors = 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(
        session_shapes, n_regressors)

    contrast = np.eye(n_regressors)[:1]
    null_contrast = np.zeros((1, n_regressors))

    # Same model fitted on both sessions vs. on the first session only.
    both_sessions = FirstLevelGLM(mask=None).fit(fmri_data, design_matrices)
    first_session = FirstLevelGLM(mask=None).fit(
        fmri_data[0], design_matrices[0])

    stat_both, = both_sessions.transform([contrast, null_contrast],
                                         output_z=False, output_stat=True)
    stat_first, = first_session.transform([contrast],
                                          output_z=False, output_stat=True)
    # The null contrast in session 2 must leave the statistic unchanged.
    np.testing.assert_almost_equal(stat_both.get_data(),
                                   stat_first.get_data())
Example #6
0
def test_high_level_glm_with_data():
    """Fit a two-session GLM and sanity-check the output maps."""
    session_shapes = ((7, 8, 7, 15), (7, 8, 7, 16))
    n_regressors = 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(
        session_shapes, n_regressors)

    # Without an explicit mask: every voxel of the computed mask should
    # receive a z-value, and the values should stay in a plausible range.
    model = FirstLevelGLM(mask=None).fit(fmri_data, design_matrices)
    n_voxels = model.masker_.mask_img_.get_data().sum()
    z_map, = model.transform([np.eye(n_regressors)[1]] * 2)
    assert_equal(np.sum(z_map.get_data() != 0), n_voxels)
    assert_true(z_map.get_data().std() < 3.)

    # With an explicit mask: the z-map is zero exactly outside the mask,
    # and the variance map is positive inside it.
    model = FirstLevelGLM(mask=mask).fit(fmri_data, design_matrices)
    two_row_contrast = np.eye(n_regressors)[:2]
    z_map, effect_map, variance_map = model.transform(
        [two_row_contrast] * 2, output_effects=True, output_variance=True)
    assert_array_equal(z_map.get_data() == 0., load(mask).get_data() == 0.)
    in_mask = load(mask).get_data() > 0
    assert_true((variance_map.get_data()[in_mask, 0] > .001).all())
Example #7
0
def test_high_level_glm_null_contrasts():
    """An all-zero contrast for one session must not change the statistic."""
    # test that contrast computation is resilient to 0 values.
    # new API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)

    multi_session_model = FirstLevelGLM(mask=None).fit(fmri_data,
                                                       design_matrices)
    single_session_model = FirstLevelGLM(mask=None).fit(
        fmri_data[0], design_matrices[0])
    # Two-session model with a null contrast for the second session ...
    z1, = multi_session_model.transform(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_z=False, output_stat=True)
    # ... must match the single-session model using the contrast alone.
    z2, = single_session_model.transform([np.eye(rk)[:1]],
                                         output_z=False,
                                         output_stat=True)
    np.testing.assert_almost_equal(z1.get_data(), z2.get_data())
Example #8
0
def test_fmri_inputs():
    """fit() must accept images or paths, as single items, lists or tuples."""
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        n_scans = func_img.shape[-1]
        design = pd.DataFrame(np.ones((n_scans, 1)), columns=[''])
        design_path = 'design.csv'
        design.to_csv(design_path)
        # Try every combination of in-memory vs. on-disk functional data
        # and design matrix.
        for func_input in func_img, FUNCFILE:
            for design_input in design, design_path:
                FirstLevelGLM().fit(func_input, design_input)
                FirstLevelGLM(mask=None).fit([func_input], design_input)
                FirstLevelGLM(mask=mask).fit(func_input, [design_input])
                FirstLevelGLM(mask=mask).fit([func_input], [design_input])
                FirstLevelGLM(mask=mask).fit(
                    [func_input, func_input], [design_input, design_input])
                FirstLevelGLM(mask=None).fit(
                    (func_input, func_input), (design_input, design_input))
                # Mismatched list/scalar argument pairs must be rejected.
                assert_raises(ValueError, FirstLevelGLM(mask=None).fit,
                              [func_input, func_input], design_input)
                assert_raises(ValueError, FirstLevelGLM(mask=None).fit,
                              func_input, [design_input, design_input])
Example #9
0
def test_high_level_glm_with_data():
    """Fit a two-session GLM and sanity-check z, effect and variance maps."""
    # New API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)

    multi_session_model = FirstLevelGLM(mask=None).fit(fmri_data,
                                                       design_matrices)
    n_voxels = multi_session_model.masker_.mask_img_.get_data().sum()
    z_image, = multi_session_model.transform([np.eye(rk)[1]] * 2)
    # Every voxel inside the computed mask must receive a z-value.
    assert_equal(np.sum(z_image.get_data() != 0), n_voxels)
    assert_true(z_image.get_data().std() < 3.)

    # with mask
    multi_session_model = FirstLevelGLM(mask=mask).fit(fmri_data,
                                                       design_matrices)
    z_image, effect_image, variance_image = multi_session_model.transform(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    # Zero outside the mask, positive variance inside it.
    assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
    assert_true(
        (variance_image.get_data()[load(mask).get_data() > 0, 0] > .001).all())

# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# Data and analysis parameters: two runs of the FIAC dataset together
# with their pre-computed design matrices.
data = datasets.fetch_fiac_first_level()
fmri_img = [data['func1'], data['func2']]
mean_img_ = mean_img(fmri_img[0])
design_files = [data['design_matrix1'], data['design_matrix2']]
# Each design file is an .npz archive whose 'X' entry holds the matrix.
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

# GLM specification: use the dataset's mask and an AR(1) noise model.
fmri_glm = FirstLevelGLM(data['mask'], standardize=False, noise_model='ar1')

# GLM fitting
fmri_glm.fit(fmri_img, design_matrices)

# compute fixed effects of the two runs and compute related images
n_columns = design_matrices[0].shape[1]
def pad_vector(contrast_, n_columns):
    """Append zeros to *contrast_* so that its length equals *n_columns*."""
    n_missing = n_columns - len(contrast_)
    return np.hstack((contrast_, np.zeros(n_missing)))

contrasts = {'SStSSp_minus_DStDSp': pad_vector([1, 0, 0, -1], n_columns),
            'DStDSp_minus_SStSSp': pad_vector([-1, 0, 0, 1], n_columns),
            'DSt_minus_SSt': pad_vector([-1, -1, 1, 1], n_columns),
            'DSp_minus_SSp': pad_vector([-1, 1, -1, 1], n_columns),
            'DSt_minus_SSt_for_DSp': pad_vector([0, -1, 0, 1], n_columns),
            'DSp_minus_SSp_for_DSt': pad_vector([0, 0, -1, 1], n_columns),
from nistats.glm import FirstLevelGLM
from nistats import datasets


# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# Data and analysis parameters: two FIAC runs and their design matrices,
# all referenced by file name.
data = datasets.fetch_fiac_first_level()
fmri_files = [data['func1'], data['func2']]
design_files = [data['design_matrix1'], data['design_matrix2']]

# Load all the data into a common GLM
multi_session_model = FirstLevelGLM(data['mask'], standardize=False,
                                    noise_model='ar1')

# GLM fitting
multi_session_model.fit(fmri_files, design_files)

def make_fiac_contrasts(n_columns):
    """ Specify some contrasts for the FIAC experiment"""
    contrast = {}
    # the design matrices of both runs comprise 13 columns
    # the first 5 columns of the design matrices correspond to the following
    # conditions: ['SSt-SSp', 'SSt-DSp', 'DSt-SSp', 'DSt-DSp', 'FirstSt']

    def _pad_vector(contrast_, n_columns):
        return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))

    contrast['SStSSp_minus_DStDSp'] = _pad_vector([1, 0, 0, -1], n_columns)
                          normalize_y=normalize_y, verbose=True,
                          optimize=optimize,
                          n_restarts_optimizer=n_restarts_optimizer,
                          zeros_extremes=zeros_extremes, f_mean=f_hrf)

        (hx, hy, hrf_var, resid_norm_sq, sigma_sq_resid) = gp.fit(ys, paradigm)

        print 'residual norm square = ', resid_norm_sq

        # Testing with a GLM
        mask_img = nb.Nifti1Image(np.ones((2, 2, 2)), affine=np.eye(4))
        masker = NiftiMasker(mask_img=mask_img)
        masker.fit()
        ys2 = np.ones((2, 2, 2, ys.shape[0])) * ys[np.newaxis, np.newaxis, np.newaxis, :]
        niimgs = nb.Nifti1Image(ys2, affine=np.eye(4))
        glm = FirstLevelGLM(mask=mask_img, t_r=t_r, standardize=True, noise_model='ols')
        glm.fit(niimgs, dm)
        norm_resid = (np.linalg.norm(glm.results_[0][0].resid, axis=0)**2).mean()
        ys_pred_glm = glm.results_[0][0].predicted[:, 0]

        # Predict GP
        # XXX: Do we need to predict for GLM???
        ys_pred, matrix, betas, resid = gp.predict(ys, paradigm)

        corr_gp = np.corrcoef(ys_pred, ys)[1, 0]
        corr_glm = np.corrcoef(ys_pred_glm, ys)[1, 0]

        print "corr glm: %s, corr gp: %s" % (corr_glm, corr_gp)

        data = {}
        data['ys'] = ys
    threshold = 0.7
    seed = 42

    mask_img = nb.Nifti1Image(np.ones((n_x, n_y, n_z)), affine=np.eye(4))
    masker = NiftiMasker(mask_img=mask_img)
    masker.fit()

    fmri, paradigm, design, masks = generate_fmri(
        n_x=n_x, n_y=n_y, n_z=n_y, modulation=None, n_events=n_events,
        event_types=event_types, n_blank_events=n_blank_events,
        event_spacing=event_spacing, t_r=t_r, smoothing_fwhm=smoothing_fwhm,
        sigma=sigma, sigma_noise=sigma_noise, threshold=threshold, seed=seed)

    niimgs = nb.Nifti1Image(fmri, affine=np.eye(4))
    # Testing with a GLM
    glm = FirstLevelGLM(mask=mask_img, t_r=t_r, standardize=True,
                        noise_model='ols')
    glm.fit(niimgs, design)

    contrast_matrix = np.eye(design.shape[1])
    contrasts = dict([(column, contrast_matrix[i])
                      for i, column in enumerate(design.columns)])

    z_maps = {}
    for condition_id in event_types:
        z_maps[condition_id] = glm.transform(contrasts[condition_id],
                                             contrast_name=condition_id,
                                             output_z=True, output_stat=False,
                                             output_effects=False,
                                             output_variance=False)

    fig, axx = plt.subplots(nrows=len(event_types), ncols=2, figsize=(8, 8))
Example #14
0
                                   paradigm,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=period_cut)

# specify contrasts
# One elementary contrast per design-matrix column.
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

# Specify one interesting contrast
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelGLM(noise_model='ar1',
                         standardize=False).fit([fmri_img], design_matrix)

print("Computing contrasts ..")
output_dir = 'results'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # Request all four output maps (z, stat, effects, variance) at once.
    z_map, t_map, eff_map, var_map = fmri_glm.transform(
        contrasts[contrast_id],
        contrast_name=contrast_id,
        output_z=True,
        output_stat=True,
        output_effects=True,
        output_variance=True)
Example #15
0
paradigm_file = data.paradigm
fmri_img = data.epi_img

### Design matrix ########################################

# The paradigm file is space-separated with no header row:
# session / condition name / onset time.
paradigm = pd.read_csv(paradigm_file, sep=' ', header=None, index_col=None)
paradigm.columns = ['session', 'name', 'onset']
design_matrix = make_design_matrix(frame_times,
                                   paradigm,
                                   hrf_model='canonical with derivative',
                                   drift_model="cosine",
                                   period_cut=128)

### Perform a GLM analysis ########################################

fmri_glm = FirstLevelGLM().fit(fmri_img, design_matrix)

### Estimate contrasts #########################################

# Specify the contrasts
# Start from one elementary contrast per design-matrix column.
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

# Combine the elementary condition contrasts into higher-level ones.
contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
    contrasts["calculaudio"] + contrasts["phraseaudio"]
contrasts["video"] = contrasts["clicDvideo"] + contrasts["clicGvideo"] + \
    contrasts["calculvideo"] + contrasts["phrasevideo"]
contrasts["computation"] = contrasts["calculaudio"] + contrasts["calculvideo"]
contrasts["sentences"] = contrasts["phraseaudio"] + contrasts["phrasevideo"]
# specify contrasts
# One elementary contrast per design-matrix column.
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
# more interesting contrasts
contrasts = {
    'faces-scrambled': contrasts['faces'] - contrasts['scrambled'],
    'scrambled-faces': -contrasts['faces'] + contrasts['scrambled'],
    # Two contrast rows stacked into a single matrix.
    'effects_of_interest': np.vstack((contrasts['faces'],
                                      contrasts['scrambled']))
    }

# fit GLM
print('Fitting a GLM')
fmri_glm = FirstLevelGLM(standardize=False).fit(fmri_img, design_matrices)

# compute contrast maps
print('Computing contrasts')
from nilearn import plotting

for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # One copy of the contrast per session (two sessions); z-map only.
    z_map, = fmri_glm.transform(
        [contrast_val] * 2, contrast_name=contrast_id, output_z=True)
    plotting.plot_stat_map(
        z_map, bg_img=mean_image, threshold=3.0, display_mode='z',
        cut_coords=3, black_bg=True, title=contrast_id)

plotting.show()
Example #17
0
hrf_model = 'glover + derivative'
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model=hrf_model, drift_model=drift_model,
    period_cut=period_cut)

# specify contrasts
# One elementary contrast per design-matrix column.
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

# Specify one interesting contrast
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(
    [fmri_img], design_matrix)

print("Computing contrasts ..")
output_dir = 'results'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # Request all four output maps (z, stat, effects, variance) at once.
    z_map, t_map, eff_map, var_map = fmri_glm.transform(
        contrasts[contrast_id], contrast_name=contrast_id, output_z=True,
        output_stat=True, output_effects=True, output_variance=True)

    # store stat maps to disk
    for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                              [z_map, t_map, eff_map, var_map]):
data = datasets.fetch_localizer_first_level()
paradigm_file = data.paradigm
fmri_img = data.epi_img

### Design matrix ########################################

# The paradigm file is space-separated with no header row:
# session / condition name / onset time.
paradigm = pd.read_csv(paradigm_file, sep=' ', header=None, index_col=None)
paradigm.columns = ['session', 'name', 'onset']
n_conditions = len(paradigm.name.unique())
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model='glover + derivative',
    drift_model='cosine', period_cut=128)

### Perform a GLM analysis ########################################

fmri_glm = FirstLevelGLM().fit(fmri_img, design_matrix)

### Estimate contrasts #########################################

# Specify the contrasts
# Start from one elementary contrast per design-matrix column.
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

# Combine the elementary condition contrasts into higher-level ones.
contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
    contrasts["calculaudio"] + contrasts["phraseaudio"]
contrasts["video"] = contrasts["clicDvideo"] + contrasts["clicGvideo"] + \
    contrasts["calculvideo"] + contrasts["phrasevideo"]
contrasts["computation"] = contrasts["calculaudio"] + contrasts["calculvideo"]
contrasts["sentences"] = contrasts["phraseaudio"] + contrasts["phrasevideo"]
            # GLM using HRF with a different peak
            hrf_est = _gamma_difference_hrf(1., oversampling=1./dt, time_length=hrf_length + dt,
                                          onset=0., delay=hrf_peak, undershoot=hrf_ushoot,
                                          dispersion=1., u_dispersion=1., ratio=0.167)
            f_hrf_est = interp1d(x_0, hrf_est)

            _, design, _, _ = generate_spikes_time_series(
                n_events=n_events, n_blank_events=n_blank_events,
                event_spacing=event_spacing, t_r=t_r, event_types=event_types,
                return_jitter=True, jitter_min=jitter_min, jitter_max=jitter_max,
                period_cut=period_cut, drift_order=drift_order, time_offset=10,
                modulation=None, seed=seed, f_hrf=f_hrf_est, hrf_length=hrf_length)

            # Testing with a GLM
            glm = FirstLevelGLM(mask=mask_img, t_r=t_r, standardize=True,
                                noise_model='ols')
            glm.fit(niimgs, design)
            #print 'n_timepoints, n_voxels: ', glm.results_[0][0].norm_resid.shape
            #print glm.results_[0][0].resid
            #print glm.results_[0][0].logL
            snr = np.linalg.norm(fmri, axis=3) / sigma_noise
            snr_db = 20 * (np.log10(np.linalg.norm(fmri, axis=3) / sigma_noise))
            print 'sigma_noise = ', sigma_noise
            print 'SNR = ', snr.mean()
            print 'SNR = ', snr_db.mean(), ' dB'

            print glm.results_[0][0].norm_resid.mean()
            norm_resid[isim, iest] = (np.linalg.norm(glm.results_[0][0].resid, axis=0)**2).mean()


    if not op.exists(fig_folder): os.makedirs(fig_folder)
from nistats import datasets

# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# Data and analysis parameters: two runs of the FIAC dataset together
# with their pre-computed design matrices.
data = datasets.fetch_fiac_first_level()
fmri_img = [data['func1'], data['func2']]
mean_img_ = mean_img(fmri_img[0])
design_files = [data['design_matrix1'], data['design_matrix2']]
# Each design file is an .npz archive whose 'X' entry holds the matrix.
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

# GLM specification: use the dataset's mask and an AR(1) noise model.
fmri_glm = FirstLevelGLM(data['mask'], standardize=False, noise_model='ar1')

# GLM fitting
fmri_glm.fit(fmri_img, design_matrices)

# compute fixed effects of the two runs and compute related images
n_columns = design_matrices[0].shape[1]


def pad_vector(contrast_, n_columns):
    """Zero-pad *contrast_* on the right so that it has *n_columns* entries."""
    padding = np.zeros(n_columns - len(contrast_))
    return np.hstack((contrast_, padding))


contrasts = {
    'SStSSp_minus_DStDSp': pad_vector([1, 0, 0, -1], n_columns),
    'DStDSp_minus_SStSSp': pad_vector([-1, 0, 0, 1], n_columns),