def test_second_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, 'intercept')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # smoke test for different contrasts in fixed effects
        model.compute_contrast(c1)
        z_image = model.compute_contrast(c1, output_type='z_score')
        stat_image = model.compute_contrast(c1, output_type='stat')
        p_image = model.compute_contrast(c1, output_type='p_value')
        effect_image = model.compute_contrast(c1, output_type='effect_size')
        variance_image = \
            model.compute_contrast(c1, output_type='effect_variance')
        # Test output_type='all', and verify images are equivalent
        all_images = model.compute_contrast(c1, output_type='all')
        assert_array_equal(get_data(all_images['z_score']),
                           get_data(z_image))
        assert_array_equal(get_data(all_images['stat']),
                           get_data(stat_image))
        assert_array_equal(get_data(all_images['p_value']),
                           get_data(p_image))
        assert_array_equal(get_data(all_images['effect_size']),
                           get_data(effect_image))
        assert_array_equal(get_data(all_images['effect_variance']),
                           get_data(variance_image))
        # formula should work (passing variable name directly)
        model.compute_contrast('intercept')
        # or simply pass nothing
        model.compute_contrast()
        # passing null contrast should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, '')
        assert_raises(ValueError, model.compute_contrast, c1, None, [])
        assert_raises(ValueError, model.compute_contrast, c1, None, None, '')
        # check that passing no explicit contrast when the design
        # matrix has more than one column raises an error
        X = pd.DataFrame(np.random.rand(4, 2), columns=['r1', 'r2'])
        model = model.fit(Y, design_matrix=X)
        assert_raises(ValueError, model.compute_contrast, None)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
def _prepare_downloaded_spm_auditory_data(subject_dir):
    """ Uncompresses downloaded spm_auditory dataset and organizes
    the data into appropriate directories.

    Parameters
    ----------
    subject_dir: string
        Path to subject's data directory.

    Returns
    -------
    _subject_data: skl.Bunch object
        Scikit-Learn Bunch object containing data of a single subject
        from the SPM Auditory dataset.

    """
    subject_data = {}
    for file_name in SPM_AUDITORY_DATA_FILES:
        file_path = os.path.join(subject_dir, file_name)
        if os.path.exists(file_path):
            subject_data[file_name] = file_path
        else:
            print('%s missing from filelist!' % file_name)
            return None

    _subject_data = {}
    _subject_data['func'] = sorted(
        [subject_data[x] for x in subject_data.keys()
         if re.match('^fM00223_0\d\d\.img$',  # noqa:W605
                     os.path.basename(x))])

    # volumes in this dataset have shape (64, 64, 64, 1);
    # drop the singleton fourth dimension to make them 3D
    for x in _subject_data['func']:
        vol = nib.load(x)
        if len(vol.shape) == 4:
            vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0],
                                  vol.affine)
            nib.save(vol, x)

    _subject_data['anat'] = [subject_data[x] for x in subject_data.keys()
                             if re.match('^sM00223_002\.img$',  # noqa:W605
                                         os.path.basename(x))][0]

    # ... same thing for anat
    vol = nib.load(_subject_data['anat'])
    if len(vol.shape) == 4:
        vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0],
                              vol.affine)
        nib.save(vol, _subject_data['anat'])

    return Bunch(**_subject_data)
def test_high_level_glm_null_contrasts():
    # test that contrast computation is resilient to 0 values.
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = _generate_fake_fmri_data(shapes, rk)

    multi_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices)
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_type='stat')
    z2 = single_session_model.compute_contrast(np.eye(rk)[:1],
                                               output_type='stat')
    np.testing.assert_almost_equal(get_data(z1), get_data(z2))
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = get_data(func_img).shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times, events,
                                                drift_model='polynomial',
                                                drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
def test_high_level_non_parametric_inference_with_paths():
    with InTemporaryDirectory():
        n_perm = 100
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        c1 = np.eye(len(X.columns))[0]
        neg_log_pvals_img = non_parametric_inference(Y, design_matrix=X,
                                                     second_level_contrast=c1,
                                                     mask=mask, n_perm=n_perm)
        neg_log_pvals = get_data(neg_log_pvals_img)

        assert isinstance(neg_log_pvals_img, Nifti1Image)
        assert_array_equal(neg_log_pvals_img.affine, load(mask).affine)

        assert np.all(neg_log_pvals <= -np.log10(1.0 / (n_perm + 1)))
        assert np.all(0 <= neg_log_pvals)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory
        del X, Y, FUNCFILE, func_img, neg_log_pvals_img
def test_high_level_glm_with_paths():
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = _write_fake_fmri_data(shapes,
                                                                    rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_files, design_matrices=design_files)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert get_data(z_image).std() < 3.
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, multi_session_model
def test_non_parametric_inference_permutation_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        neg_log_pvals_img = non_parametric_inference(Y, design_matrix=X,
                                                     mask=mask, n_perm=100)
        assert_equal(get_data(neg_log_pvals_img).shape, shapes[0][:3])
        del func_img, FUNCFILE, neg_log_pvals_img, X, Y
def test_high_level_glm_with_data():
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = get_data(multi_session_model.masker_.mask_img_).sum()
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert np.sum(get_data(z_image) != 0) == n_voxels
        assert get_data(z_image).std() < 3.

        # with mask
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        z_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='z_score')
        p_value = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='p_value')
        stat_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='stat')
        effect_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_size')
        variance_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_variance')
        assert_array_equal(get_data(z_image) == 0.,
                           get_data(load(mask)) == 0.)
        assert (get_data(variance_image)[get_data(load(mask)) > 0]
                > .001).all()

        all_images = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='all')
        assert_array_equal(get_data(all_images['z_score']),
                           get_data(z_image))
        assert_array_equal(get_data(all_images['p_value']),
                           get_data(p_value))
        assert_array_equal(get_data(all_images['stat']),
                           get_data(stat_image))
        assert_array_equal(get_data(all_images['effect_size']),
                           get_data(effect_image))
        assert_array_equal(get_data(all_images['effect_variance']),
                           get_data(variance_image))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images, design_matrices, effect_image, fmri_data, mask,
             multi_session_model, n_voxels, p_value, rk, shapes, stat_image,
             variance_image, z_image)
                       threshold=threshold, colorbar=True,
                       title='Group-level association between motor activity \n'
                             'and reading fluency (fdr=0.05)')

plotting.show()

##########################################################################
# Computing the (corrected) p-values with a parametric test, to compare
# with the non-parametric test
from nilearn.image import math_img
from nilearn.input_data import NiftiMasker
from nistats.utils import get_data

p_val = model.compute_contrast('fluency', output_type='p_value')
n_voxels = np.sum(get_data(model.masker_.mask_img_))
# Correcting the p-values for multiple testing and taking negative logarithm
neg_log_pval = math_img("-np.log10(np.minimum(1, img * {}))".format(
    str(n_voxels)),
    img=p_val)

###########################################################################
# Let us plot the (corrected) negative log p-values for the parametric test
cut_coords = [38, -17, -3]
# Since we are plotting negative log p-values and using a threshold equal
# to 1, it corresponds to corrected p-values lower than 10%, meaning that
# there is less than a 10% probability of making a single false discovery
# (90% chance that we make no false discoveries at all).
# This threshold is much more conservative than the previous one.
threshold = 1
title = ('Group-level association between motor activity and reading: \n'
def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
                       min_distance=8.):
    """Creates a pandas DataFrame with the cluster statistics of an image.

    Parameters
    ----------
    stat_img : Niimg-like object,
       Statistical image (presumably in z- or p-scale).

    stat_threshold: `float`
        Cluster forming threshold in same scale as `stat_img` (either a
        p-value or z-scale value).

    cluster_threshold : `int` or `None`, optional
        Cluster size threshold, in voxels.

    min_distance: `float`, optional
        Minimum distance between subpeaks in mm. Default is 8 mm.

    Returns
    -------
    df : `pandas.DataFrame`
        Table with peaks and subpeaks from thresholded `stat_img`. For binary
        clusters (clusters with >1 voxel containing only one value), the
        table reports the center of mass of the cluster, rather than any
        peaks/subpeaks.

    """
    cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
    stat_map = get_data(stat_img)
    conn_mat = np.zeros((3, 3, 3), int)  # 6-connectivity, aka NN1 or "faces"
    conn_mat[1, 1, :] = 1
    conn_mat[1, :, 1] = 1
    conn_mat[:, 1, 1] = 1
    voxel_size = np.prod(stat_img.header.get_zooms())

    # Binarize using CDT
    binarized = stat_map > stat_threshold
    binarized = binarized.astype(int)

    # If the stat threshold is too high simply return an empty dataframe
    if np.sum(binarized) == 0:
        warnings.warn('Attention: No clusters with stat higher than %f' %
                      stat_threshold)
        return pd.DataFrame(columns=cols)

    # Extract connected components above cluster size threshold
    label_map = ndimage.measurements.label(binarized, conn_mat)[0]
    clust_ids = sorted(list(np.unique(label_map)[1:]))
    for c_val in clust_ids:
        if cluster_threshold is not None and np.sum(
                label_map == c_val) < cluster_threshold:
            stat_map[label_map == c_val] = 0
            binarized[label_map == c_val] = 0

    # If the cluster threshold is too high simply return an empty dataframe
    # this checks for stats higher than threshold after small clusters
    # were removed from stat_map
    if np.sum(stat_map > stat_threshold) == 0:
        warnings.warn('Attention: No clusters with more than %d voxels' %
                      cluster_threshold)
        return pd.DataFrame(columns=cols)

    # Now re-label and create table
    label_map = ndimage.measurements.label(binarized, conn_mat)[0]
    clust_ids = sorted(list(np.unique(label_map)[1:]))
    peak_vals = np.array(
        [np.max(stat_map * (label_map == c)) for c in clust_ids])
    clust_ids = [clust_ids[c] for c in
                 (-peak_vals).argsort()]  # Sort by descending max value

    rows = []
    for c_id, c_val in enumerate(clust_ids):
        cluster_mask = label_map == c_val
        masked_data = stat_map * cluster_mask

        cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)

        # Get peaks, subpeaks and associated statistics
        subpeak_ijk, subpeak_vals = _local_max(masked_data, stat_img.affine,
                                               min_distance=min_distance)
        subpeak_xyz = np.asarray(
            coord_transform(subpeak_ijk[:, 0], subpeak_ijk[:, 1],
                            subpeak_ijk[:, 2], stat_img.affine)).tolist()
        subpeak_xyz = np.array(subpeak_xyz).T

        # Only report peak and, at most, top 3 subpeaks.
        n_subpeaks = np.min((len(subpeak_vals), 4))
        for subpeak in range(n_subpeaks):
            if subpeak == 0:
                row = [c_id + 1, subpeak_xyz[subpeak, 0],
                       subpeak_xyz[subpeak, 1], subpeak_xyz[subpeak, 2],
                       subpeak_vals[subpeak], cluster_size_mm]
            else:
                # Subpeak naming convention is cluster num + letter
                # (1a, 1b, etc.)
                sp_id = '{0}{1}'.format(c_id + 1,
                                        ascii_lowercase[subpeak - 1])
                row = [sp_id, subpeak_xyz[subpeak, 0],
                       subpeak_xyz[subpeak, 1], subpeak_xyz[subpeak, 2],
                       subpeak_vals[subpeak], '']
            rows += [row]
    df = pd.DataFrame(columns=cols, data=rows)
    return df
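For reference, a minimal usage sketch of `get_clusters_table` on a toy image (not from the source file; the toy blob, thresholds, and the `nistats.reporting` import path are assumptions):

# Hypothetical usage sketch: tabulate the peak of a single synthetic blob.
# Assumes get_clusters_table is importable from nistats.reporting.
import nibabel as nib
import numpy as np
from nistats.reporting import get_clusters_table

data = np.zeros((9, 10, 11))
data[2:5, 5:8, 6:9] = 6.  # one 3 x 3 x 3 "activation" blob of z = 6
z_map = nib.Nifti1Image(data, np.eye(4))

table = get_clusters_table(z_map, stat_threshold=3.1, cluster_threshold=10)
print(table)  # expect a single cluster of 27 voxels with peak stat 6.0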
def map_threshold(stat_img=None, mask_img=None, alpha=.001, threshold=3.,
                  height_control='fpr', cluster_threshold=0, two_sided=True):
    """ Compute the required threshold level and return the thresholded map

    Parameters
    ----------
    stat_img : Niimg-like object or None, optional
       statistical image (presumably in z scale).
       Whenever height_control is 'fpr' or None, stat_img=None is acceptable.
       If it is 'fdr' or 'bonferroni', an error is raised if stat_img is None.

    mask_img : Niimg-like object, optional,
        mask image

    alpha: float or list, optional
        number controlling the thresholding (either a p-value or q-value).
        Its actual meaning depends on the height_control parameter.
        This function translates alpha to a z-scale threshold.

    threshold: float, optional
       desired threshold in z-scale.
       This is used only if height_control is None.

    height_control: string or None, optional
        false positive control meaning of cluster forming
        threshold: 'fpr', 'fdr', 'bonferroni' or None

    cluster_threshold: float, optional
        cluster size threshold. In the returned thresholded map,
        sets of connected voxels (`clusters`) with size smaller
        than this number will be removed.

    two_sided: Bool, optional,
        Whether the thresholding should yield both positive and negative
        part of the maps. In that case, alpha is corrected by a factor of 2.
        Defaults to True.

    Returns
    -------
    thresholded_map : Nifti1Image,
        the stat_map thresholded at the prescribed voxel- and cluster-level

    threshold: float,
        the voxel-level threshold actually used

    Note
    ----
    If the input image is not z-scaled (i.e. is not some z-transformed
    statistic), the computed threshold is not rigorous and likely meaningless
    """
    height_control_methods = ['fpr', 'fdr', 'bonferroni',
                              'all-resolution-inference', None]
    if height_control not in height_control_methods:
        raise ValueError(
            "height control should be one of {0}".format(
                height_control_methods))

    # if two-sided, correct alpha by a factor of 2
    alpha_ = alpha / 2 if two_sided else alpha

    # if height_control is 'fpr' or None, we don't need to look at the data
    # to compute the threshold
    if height_control == 'fpr':
        threshold = norm.isf(alpha_)

    # In this case, and if stat_img is None, we return
    if stat_img is None:
        if height_control in ['fpr', None]:
            return None, threshold
        else:
            raise ValueError('Map_threshold requires stat_img not to be None '
                             'when the height_control procedure '
                             'is "bonferroni" or "fdr"')

    if mask_img is None:
        masker = NiftiMasker(mask_strategy='background').fit(stat_img)
    else:
        masker = NiftiMasker(mask_img=mask_img).fit()
    stats = np.ravel(masker.transform(stat_img))
    n_voxels = np.size(stats)

    # Thresholding
    if two_sided:
        # replace stats by their absolute value after storing the sign
        sign = np.sign(stats)
        stats = np.abs(stats)

    if height_control == 'fdr':
        threshold = fdr_threshold(stats, alpha_)
    elif height_control == 'bonferroni':
        threshold = norm.isf(alpha_ / n_voxels)
    stats *= (stats > threshold)

    if two_sided:
        stats *= sign

    # embed it back to 3D grid
    stat_map = get_data(masker.inverse_transform(stats))

    # Extract connected components above threshold
    label_map, n_labels = label(np.abs(stat_map) > threshold)
    labels = label_map[get_data(masker.mask_img_) > 0]

    for label_ in range(1, n_labels + 1):
        if np.sum(labels == label_) < cluster_threshold:
            stats[labels == label_] = 0

    return masker.inverse_transform(stats), threshold
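A minimal usage sketch of `map_threshold`, mirroring the synthetic data used in the tests below (illustrative only; the `nistats.thresholding` import path is assumed):

# Hypothetical sketch: FDR-control a toy z-map at q < .05 and recover both
# the thresholded image and the voxel-level z threshold that was applied.
import nibabel as nib
import numpy as np
from scipy.stats import norm
from nistats.thresholding import map_threshold

shape = (9, 10, 11)
p = np.prod(shape)
data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
data[2:4, 5:7, 6:8] = 5.  # plant a small supra-threshold cluster
stat_img = nib.Nifti1Image(data, np.eye(4))
mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4))

thresholded_map, z_threshold = map_threshold(
    stat_img, mask_img, alpha=.05, height_control='fdr', cluster_threshold=0)
print(z_threshold)  # z cutoff chosen by the FDR procedure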
def cluster_level_inference(stat_img, mask_img=None,
                            threshold=3., alpha=.05, verbose=False):
    """ Report the proportion of active voxels for all clusters
    defined by the input threshold.

    Parameters
    ----------
    stat_img : Niimg-like object,
       statistical image (presumably in z scale)

    mask_img : Niimg-like object, optional,
        mask image

    threshold: float or list of floats, optional
       cluster-forming threshold in z-scale.

    alpha: float or list, optional
        level of control on the true positive rate, aka true discovery
        proportion

    verbose: bool, optional
        verbosity mode

    Returns
    -------
    proportion_true_discoveries_img: Nifti1Image,
        the statistical map that gives, within each cluster, the guaranteed
        proportion of true discoveries

    Note
    ----
    This implements the method described in:

    Rosenblatt JD, Finos L, Weeda WD, Solari A, Goeman JJ. All-Resolutions
    Inference for brain imaging. Neuroimage. 2018 Nov 1;181:786-796. doi:
    10.1016/j.neuroimage.2018.07.060
    """
    if not isinstance(threshold, list):
        threshold = [threshold]

    if mask_img is None:
        masker = NiftiMasker(mask_strategy='background').fit(stat_img)
    else:
        masker = NiftiMasker(mask_img=mask_img).fit()
    stats = np.ravel(masker.transform(stat_img))
    hommel_value = _compute_hommel_value(stats, alpha, verbose=verbose)

    # embed it back to 3D grid
    stat_map = get_data(masker.inverse_transform(stats))

    # Extract connected components above threshold
    proportion_true_discoveries_img = math_img('0. * img', img=stat_img)
    proportion_true_discoveries = masker.transform(
        proportion_true_discoveries_img).ravel()

    for threshold_ in sorted(threshold):
        label_map, n_labels = label(stat_map > threshold_)
        labels = label_map[get_data(masker.mask_img_) > 0]

        for label_ in range(1, n_labels + 1):
            # get the z-vals in the cluster
            cluster_vals = stats[labels == label_]
            proportion = _true_positive_fraction(cluster_vals, hommel_value,
                                                 alpha)
            proportion_true_discoveries[labels == label_] = proportion

    proportion_true_discoveries_img = masker.inverse_transform(
        proportion_true_discoveries)
    return proportion_true_discoveries_img
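A minimal usage sketch of `cluster_level_inference`, again mirroring the toy data used in the tests below (illustrative; the `nistats.thresholding` import path is assumed):

# Hypothetical sketch: for each cluster above z = 3, estimate the guaranteed
# proportion of truly active voxels (All-Resolutions Inference).
import nibabel as nib
import numpy as np
from scipy.stats import norm
from nistats.thresholding import cluster_level_inference

shape = (9, 10, 11)
p = np.prod(shape)
data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
data[2:4, 5:7, 6:8] = 5.
stat_img = nib.Nifti1Image(data, np.eye(4))

proportion_img = cluster_level_inference(stat_img, threshold=3, alpha=.05)
# voxels in surviving clusters carry that cluster's true discovery
# proportion; all other voxels are 0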
def test_map_threshold():
    shape = (9, 10, 11)
    p = np.prod(shape)
    data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
    alpha = .001
    data[2:4, 5:7, 6:8] = 5.
    stat_img = nib.Nifti1Image(data, np.eye(4))
    mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4))

    # test 1
    th_map, _ = map_threshold(stat_img, mask_img, alpha,
                              height_control='fpr', cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 2: excessive cluster forming threshold
    th_map, _ = map_threshold(stat_img, mask_img, threshold=100,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 0

    # test 3: excessive size threshold
    th_map, z_th = map_threshold(stat_img, mask_img, alpha,
                                 height_control='fpr', cluster_threshold=10)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 0
    assert z_th == norm.isf(.0005)

    # test 4: fdr threshold + bonferroni
    for control in ['fdr', 'bonferroni']:
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5)
        vals = get_data(th_map)
        assert np.sum(vals > 0) == 8

    # test 5: direct threshold
    th_map, _ = map_threshold(stat_img, mask_img, threshold=4.0,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 6: without mask
    th_map, _ = map_threshold(stat_img, None, threshold=4.0,
                              height_control=None, cluster_threshold=0)
    vals = get_data(th_map)
    assert np.sum(vals > 0) == 8

    # test 7 without a map
    th_map, threshold = map_threshold(None, None, threshold=3.0,
                                      height_control=None,
                                      cluster_threshold=0)
    assert threshold == 3.0
    assert th_map == None  # noqa:E711

    th_map, threshold = map_threshold(None, None, alpha=0.05,
                                      height_control='fpr',
                                      cluster_threshold=0)
    assert (threshold > 1.64)
    assert th_map == None  # noqa:E711

    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='fdr')
    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='bonferroni')

    # test 8 wrong procedure
    with pytest.raises(ValueError):
        map_threshold(None, None, alpha=0.05, height_control='plop')
def test_all_resolution_inference():
    shape = (9, 10, 11)
    p = np.prod(shape)
    data = norm.isf(np.linspace(1. / p, 1. - 1. / p, p)).reshape(shape)
    alpha = .001
    data[2:4, 5:7, 6:8] = 5.
    stat_img = nib.Nifti1Image(data, np.eye(4))
    mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4))

    # test 1: standard case
    th_map = cluster_level_inference(stat_img, threshold=3, alpha=.05)
    vals = th_map.get_data()
    assert np.sum(vals > 0) == 8

    # test 2: high threshold
    th_map = cluster_level_inference(stat_img, threshold=6, alpha=.05)
    vals = th_map.get_data()
    assert np.sum(vals > 0) == 0

    # test 3: list of thresholds
    th_map = cluster_level_inference(stat_img, threshold=[3, 6], alpha=.05)
    vals = th_map.get_data()
    assert np.sum(vals > 0) == 8

    # test 4: one single voxel
    data[3, 6, 7] = 10
    stat_img_ = nib.Nifti1Image(data, np.eye(4))
    th_map = cluster_level_inference(stat_img_, threshold=7, alpha=.05)
    vals = th_map.get_data()
    assert np.sum(vals > 0) == 1

    # test 5: aberrant alpha
    with pytest.raises(ValueError):
        cluster_level_inference(stat_img, threshold=3, alpha=2)
    with pytest.raises(ValueError):
        cluster_level_inference(stat_img, threshold=3, alpha=-1)

    # test 6 with mask_img
    th_map = cluster_level_inference(stat_img, mask_img=mask_img,
                                     threshold=3, alpha=.05)
    vals = th_map.get_data()
    assert np.sum(vals > 0) == 8

    # test 7 verbose mode
    th_map = cluster_level_inference(stat_img, threshold=3, alpha=.05,
                                     verbose=True)

    # test 9: one-sided test
    th_map, z_th = map_threshold(stat_img, mask_img, alpha,
                                 height_control='fpr', cluster_threshold=10,
                                 two_sided=False)
    assert_equal(z_th, norm.isf(.001))

    # test 10: two-sided fdr threshold + bonferroni
    data[0:2, 0:2, 6:8] = -5.
    stat_img = nib.Nifti1Image(data, np.eye(4))
    for control in ['fdr', 'bonferroni']:
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5)
        vals = get_data(th_map)
        assert_equal(np.sum(vals > 0), 8)
        assert_equal(np.sum(vals < 0), 8)
        th_map, _ = map_threshold(stat_img, mask_img, alpha=.05,
                                  height_control=control,
                                  cluster_threshold=5, two_sided=False)
        vals = get_data(th_map)
        assert_equal(np.sum(vals > 0), 8)
        assert_equal(np.sum(vals < 0), 0)
                       title='group left-right button press (unc p<0.001)')
plotting.show()

###########################################################################
# As expected, we find the motor cortex.

##########################################################################
# Next, we compute the (corrected) p-values with a parametric test to
# compare them with the results from a nonparametric test.
import numpy as np
from nilearn.image import math_img
from nilearn.input_data import NiftiMasker
from nistats.utils import get_data

p_val = second_level_model.compute_contrast(output_type='p_value')
n_voxels = np.sum(get_data(second_level_model.masker_.mask_img_))
# Correcting the p-values for multiple testing and taking negative logarithm
neg_log_pval = math_img("-np.log10(np.minimum(1, img * {}))".format(
    str(n_voxels)),
    img=p_val)

###########################################################################
# Let us plot the (corrected) negative log p-values for the parametric test.
cut_coords = [0]
# Since we are plotting negative log p-values and using a threshold equal
# to 1, it corresponds to corrected p-values lower than 10%, meaning that
# there is less than 10% probability to make a single false discovery
# (90% chance that we make no false discovery at all).
# This threshold is much more conservative than the previous one.
threshold = 1
title = ('Group left-right button press: \n'
         'parametric test (FWER < 10%)')
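###########################################################################
# The snippet above stops just before the display step. A plausible
# continuation (not part of the original file) would show the corrected map
# with nilearn's ``plotting.plot_stat_map``, thresholded at -log10(p) > 1.
from nilearn import plotting

plotting.plot_stat_map(neg_log_pval, colorbar=True, cut_coords=cut_coords,
                       display_mode='z', threshold=threshold, title=title)
plotting.show()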