# Show the z-map thresholded at an arbitrary z > 3 first, then at
# statistically principled thresholds (FPR control, then Bonferroni).
plot_stat_map(z_map, bg_img=mean_img, threshold=3.0,
              display_mode='z', cut_coords=3, black_bg=True,
              title='Active minus Rest (Z>3)')
plt.show()

###############################################################################
# Statistical significance testing. One should worry about the
# statistical validity of the procedure: here we used an arbitrary
# threshold of 3.0 but the threshold should provide some guarantees on
# the risk of false detections (aka type-1 errors in statistics).
# One suggestion is to control the false positive rate (fpr, denoted by
# alpha) at a certain level, e.g. 0.001: this means that there is 0.1% chance
# of declaring an inactive voxel, active.
from nilearn.stats import map_threshold

_, threshold = map_threshold(z_map, alpha=.001, height_control='fpr')
print('Uncorrected p<0.001 threshold: %.3f' % threshold)
plot_stat_map(z_map, bg_img=mean_img, threshold=threshold,
              display_mode='z', cut_coords=3, black_bg=True,
              title='Active minus Rest (p<0.001)')
plt.show()

###############################################################################
# The problem is that with this you expect 0.001 * n_voxels to show up
# while they're not active --- tens to hundreds of voxels. A more
# conservative solution is to control the family wise error rate,
# i.e. the probability of making at least one false detection, say at
# 5%. For that we use the so-called Bonferroni correction.
_, threshold = map_threshold(z_map, alpha=.05, height_control='bonferroni')
print('Bonferroni-corrected, p<0.05 threshold: %.3f' % threshold)
from nilearn.stats.second_level_model import SecondLevelModel

# Specify and fit the second-level model: maps are smoothed at 2 mm FWHM
# and restricted to the provided mask before estimation.
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask_img=mask_img)
second_level_model.fit(gray_matter_map_filenames,
                       design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple. We can just provide a contrast
# vector over the columns of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast at FDR-corrected p < 0.05
# and plot it.
from nilearn import plotting
from nilearn.stats import map_threshold

_, threshold = map_threshold(z_map, alpha=.05, height_control='fdr')
print('The FDR=.05-corrected threshold is: %.3g' % threshold)

display = plotting.plot_stat_map(
    z_map, threshold=threshold, colorbar=True, display_mode='z',
    cut_coords=[-4, 26],
    title='age effect on grey matter density (FDR = .05)')
plotting.show()

###########################################################################
# We can also study the effect of sex by computing the contrast,
# thresholding it and plotting the resulting map.
def test_all_resolution_inference():
    """Check cluster_level_inference and two-sided map_threshold.

    An 8-voxel block of high values is planted in a volume whose sorted
    values otherwise follow a standard normal distribution; the
    thresholded maps should recover exactly that block.
    """
    shape = (9, 10, 11)
    p = np.prod(shape)
    # Data whose sorted values follow a standard normal distribution.
    data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
    alpha = .001
    data[2:4, 5:7, 6:8] = 5.  # plant an 8-voxel active cluster
    stat_img = nib.Nifti1Image(data, np.eye(4))
    mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4))

    # test 1: standard case
    th_map = cluster_level_inference(stat_img, threshold=3, alpha=.05)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 2: high threshold
    th_map = cluster_level_inference(stat_img, threshold=6, alpha=.05)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 0

    # test 3: list of thresholds
    th_map = cluster_level_inference(stat_img, threshold=[3, 6], alpha=.05)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 4: one single voxel
    data[3, 6, 7] = 10
    stat_img_ = nib.Nifti1Image(data, np.eye(4))
    th_map = cluster_level_inference(stat_img_, threshold=7, alpha=.05)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 1

    # test 5: aberrant alpha
    with pytest.raises(ValueError):
        cluster_level_inference(stat_img, threshold=3, alpha=2)
    with pytest.raises(ValueError):
        cluster_level_inference(stat_img, threshold=3, alpha=-1)

    # test 6: with mask_img
    th_map = cluster_level_inference(stat_img, mask_img=mask_img,
                                     threshold=3, alpha=.05)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 7: verbose mode (smoke test only, result unchecked)
    th_map = cluster_level_inference(stat_img, threshold=3, alpha=.05,
                                     verbose=True)

    # test 8: one-sided test
    th_map, z_th = map_threshold(stat_img, mask_img, alpha,
                                 height_control='fpr',
                                 cluster_threshold=10, two_sided=False)
    assert z_th == norm.isf(.001)

    # test 9: two-sided fdr threshold + bonferroni
    data[0:2, 0:2, 6:8] = -5.  # add a matching 8-voxel deactivation
    stat_img = nib.Nifti1Image(data, np.eye(4))
    for control in ['fdr', 'bonferroni']:
        # two-sided (default): both tails survive
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5)
        vals = get_data(th_map)
        assert np.sum(vals > 0) == 8
        assert np.sum(vals < 0) == 8
        # one-sided: only the positive tail survives
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5, two_sided=False)
        vals = get_data(th_map)
        assert np.sum(vals > 0) == 8
        assert np.sum(vals < 0) == 0
def test_map_threshold():
    """Check map_threshold under the various height/cluster controls."""
    shape = (9, 10, 11)
    p = np.prod(shape)
    # Data whose sorted values follow a standard normal distribution.
    data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
    alpha = .001
    data[2:4, 5:7, 6:8] = 5.  # plant an 8-voxel active cluster
    stat_img = nib.Nifti1Image(data, np.eye(4))
    mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4))

    # test 1
    th_map, _ = map_threshold(stat_img, mask_img, alpha,
                              height_control='fpr', cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 2: excessive cluster forming threshold
    th_map, _ = map_threshold(stat_img, mask_img, threshold=100,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 0

    # test 3: excessive size threshold
    th_map, z_th = map_threshold(stat_img, mask_img, alpha,
                                 height_control='fpr', cluster_threshold=10)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 0
    assert z_th == norm.isf(.0005)

    # test 4: fdr threshold + bonferroni
    for control in ['fdr', 'bonferroni']:
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5)
        vals = get_data(th_map)
        assert np.sum(vals > 0) == 8

    # test 5: direct threshold
    th_map, _ = map_threshold(stat_img, mask_img, threshold=4.0,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 6: without mask
    th_map, _ = map_threshold(stat_img, None, threshold=4.0,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 7: without a map
    th_map, threshold = map_threshold(None, None, threshold=3.0,
                                      height_control=None,
                                      cluster_threshold=0)
    assert threshold == 3.0
    assert th_map is None

    th_map, threshold = map_threshold(None, None, alpha=0.05,
                                      height_control='fpr',
                                      cluster_threshold=0)
    assert threshold > 1.64
    assert th_map is None

    # fdr/bonferroni need the data to compute a threshold
    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='fdr')
    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='bonferroni')

    # test 8: wrong procedure
    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='plop')
# Next, we specify and estimate the model. from nilearn.stats.second_level_model import SecondLevelModel second_level_model = SecondLevelModel().fit(cmap_filenames, design_matrix=design_matrix) ######################################################################### # Compute the only possible contrast: the one-sample test. Since there # is only one possible contrast, we don't need to specify it in detail. z_map = second_level_model.compute_contrast(output_type='z_score') ######################################################################### # Threshold the resulting map: # false positive rate < .001, cluster size > 10 voxels. from nilearn.stats import map_threshold thresholded_map1, threshold1 = map_threshold(z_map, alpha=.001, height_control='fpr', cluster_threshold=10) ######################################################################### # Now use FDR <.05 (False Discovery Rate) and no cluster-level threshold. thresholded_map2, threshold2 = map_threshold(z_map, alpha=.05, height_control='fdr') print('The FDR=.05 threshold is %.3g' % threshold2) ######################################################################### # Now use FWER <.05 (Family-Wise Error Rate) and no cluster-level # threshold. As the data has not been intensively smoothed, we can # use a simple Bonferroni correction. thresholded_map3, threshold3 = map_threshold(z_map, alpha=.05,