Example #1
# Glass-brain view of the z-map, thresholded at p < .001 uncorrected (display parameters assumed)
plotting.plot_glass_brain(z_map, colorbar=True, threshold=norm.isf(0.001),
                          plot_abs=False, display_mode='ortho')
plt.show()

from nilearn.plotting import plot_img_comparison
plot_img_comparison([z_map], [fsl_z_map],
                    model.masker_,
                    ref_label='Nilearn',
                    src_label='FSL')
plt.show()

#############################################################################
# Simple statistical report of thresholded contrast
# -----------------------------------------------------
# We display the contrast matrix, the thresholded z-map on a glass brain,
# and a table with cluster information.
from nilearn.plotting import plot_contrast_matrix
plot_contrast_matrix('StopSuccess - Go', design_matrix)
plotting.plot_glass_brain(z_map,
                          colorbar=True,
                          threshold=norm.isf(0.001),
                          plot_abs=False,
                          display_mode='z',
                          figure=plt.figure(figsize=(4, 4)))
plt.show()

###############################################################################
# We can get a LaTeX table from a pandas DataFrame for display and publication
# purposes
from nilearn.reporting import get_clusters_table
print(get_clusters_table(z_map, norm.isf(0.001), 10).to_latex())
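
###############################################################################
# The table returned by get_clusters_table is a plain pandas DataFrame, so it
# can also be saved to disk, for instance as a CSV file (the file name below
# is only an example).
table = get_clusters_table(z_map, norm.isf(0.001), 10)
table.to_csv('stop_success_minus_go_clusters.csv', index=False)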

#########################################################################
    }
    return contrasts


#########################################################################
# Let's look at these computed contrasts:
#
# * 'left - right button press': probes motor activity in left versus right button presses
# * 'horizontal-vertical': probes the differential activity in viewing a horizontal vs vertical checkerboard
# * 'audio - visual': probes the difference in activity between listening to some content and reading the same type of content (instructions, stories)
# * 'computation - sentences': looks at the activity when performing a mental computation task versus simply reading sentences.
#
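#########################################################################
# As a reminder of the pattern used inside make_localizer_contrasts, here
# is a minimal sketch (illustrative, not the exact definition): one
# elementary contrast per design-matrix column, which are then added or
# subtracted to form the comparisons listed above.
import numpy as np

contrast_matrix = np.eye(design_matrix.shape[1])
elementary_contrasts = {
    column: contrast_matrix[i]
    for i, column in enumerate(design_matrix.columns)
}
# e.g. elementary_contrasts['audio'] - elementary_contrasts['video']
# (column names here are illustrative)

#########################################################################
# Now compute the full set of contrasts from the design matrix.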
contrasts = make_localizer_contrasts(design_matrix)
from nilearn.plotting import plot_contrast_matrix
for key, values in contrasts.items():
    plot_contrast_matrix(values, design_matrix=design_matrix)
    plt.suptitle(key)

plt.show()

#########################################################################
# A first contrast estimation and plotting
# ----------------------------------------
#
# As this step will be repeated several times, we encapsulate contrast
# estimation and plotting in a function that we call when needed.
#
from nilearn import plotting


def plot_contrast(first_level_model):
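    # A minimal sketch of the body, assuming the make_localizer_contrasts()
    # helper and the matplotlib/nilearn imports used elsewhere in this example.
    design_matrix = first_level_model.design_matrices_[0]
    contrasts = make_localizer_contrasts(design_matrix)
    plt.figure(figsize=(11, 3))
    for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
        ax = plt.subplot(1, len(contrasts), 1 + index)
        z_map = first_level_model.compute_contrast(contrast_val,
                                                   output_type='z_score')
        plotting.plot_stat_map(z_map, display_mode='z', threshold=3.0,
                               title=contrast_id, axes=ax)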
Example #3
contrasts = {
    'left-right': (contrasts['visual_left_hand_button_press'] +
                   contrasts['audio_left_hand_button_press'] -
                   contrasts['visual_right_hand_button_press'] -
                   contrasts['audio_right_hand_button_press']),
    'H-V': (contrasts['horizontal_checkerboard'] -
            contrasts['vertical_checkerboard']),
    'audio-video': (contrasts['audio'] - contrasts['video']),
    'sentences-computation': (contrasts['sentences'] -
                              contrasts['computation'])
}

#########################################################################
# Let's take a look at one of these contrasts, 'left-right'.
plot_contrast_matrix(contrasts['left-right'], design_matrix)

#########################################################################
# Take a breath.
#
# We can now proceed to estimating the contrasts and displaying them.

import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map

fig = plt.figure(figsize=(11, 3))
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    ax = plt.subplot(1, len(contrasts), 1 + index)
    z_map = first_level_model.compute_contrast(contrast_val,
                                               output_type='z_score')
    # Axial view, thresholded at |z| > 3 (display parameters chosen for illustration)
    plot_stat_map(z_map, display_mode='z', threshold=3.0,
                  title=contrast_id, axes=ax)
plt.show()
Example #4
conditions = {   # one weight per design-matrix column; 'active' and 'rest'
                 # are assumed to be the first two columns
    'active': array([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
    'rest':   array([0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
}

###############################################################################
# We can then compare the two conditions 'active' and 'rest' by
# defining the corresponding contrast:

active_minus_rest = conditions['active'] - conditions['rest']

###############################################################################
# Let's look at it: plot the coefficients of the contrast, indexed by
# the names of the columns of the design matrix.

from nilearn.plotting import plot_contrast_matrix
plot_contrast_matrix(active_minus_rest, design_matrix=design_matrix)

###############################################################################
# Below, we compute the estimated effect. It is expressed in BOLD signal
# units, but carries no statistical guarantee, because it does not take
# the associated variance into account.

eff_map = fmri_glm.compute_contrast(active_minus_rest,
                                    output_type='effect_size')
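
###############################################################################
# As a side note, the variance that this effect map leaves out can be obtained
# from the same call, by asking for the 'effect_variance' output type.

variance_map = fmri_glm.compute_contrast(active_minus_rest,
                                         output_type='effect_variance')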

###############################################################################
# In order to assess statistical significance, we form a t-statistic and
# convert it directly to the z-scale: the values are rescaled so that,
# across voxels, they would follow a standard Gaussian distribution
# (mean=0, variance=1) if there were no effect in the data.
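
###############################################################################
# In code, this is again a call to compute_contrast, now asking for a z-score
# output (a minimal sketch using the variables defined above):

z_map = fmri_glm.compute_contrast(active_minus_rest,
                                  output_type='z_score')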