def __init__(
    self,
    xyz,
    n_iters=10000,
    n_cores=1,
    kernel_transformer=ALEKernel,
    **kwargs,
):
    if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
        LGR.warning(
            f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
            f"for the {type(self).__name__} algorithm. "
            "Expect suboptimal performance and beware bugs."
        )

    # Add kernel transformer attribute and process keyword arguments
    super().__init__(kernel_transformer=kernel_transformer, **kwargs)

    if not isinstance(xyz, np.ndarray):
        raise TypeError(f"Parameter 'xyz' must be a numpy.ndarray, not a {type(xyz)}")
    elif xyz.ndim != 2:
        raise ValueError(f"Parameter 'xyz' must be a 2D array, but has {xyz.ndim} dimensions")
    elif xyz.shape[1] != 3:
        raise ValueError(f"Parameter 'xyz' must have 3 columns, but has shape {xyz.shape}")

    self.xyz = xyz
    self.n_iters = n_iters
    self.n_cores = _check_ncores(n_cores)
    # memory_limit needs to exist to trigger the use_memmap decorator, but it will also be
    # used if a Dataset with pre-generated MA maps is provided.
    self.memory_limit = "100mb"
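# --- Illustrative sketch (not part of the library) ---------------------------
# The ``xyz`` validation above accepts only an (N, 3) ndarray of stereotactic
# coordinates in mm. A minimal standalone demonstration, assuming a hypothetical
# ``validate_xyz`` helper that mirrors the checks in __init__:

import numpy as np


def validate_xyz(xyz):
    """Mirror the 'xyz' checks from __init__ (hypothetical helper)."""
    if not isinstance(xyz, np.ndarray):
        raise TypeError(f"Parameter 'xyz' must be a numpy.ndarray, not a {type(xyz)}")
    if xyz.ndim != 2:
        raise ValueError(f"Parameter 'xyz' must be a 2D array, but has {xyz.ndim} dimensions")
    if xyz.shape[1] != 3:
        raise ValueError(f"Parameter 'xyz' must have 3 columns, but has shape {xyz.shape}")
    return xyz


validate_xyz(np.array([[0.0, -52.0, 26.0], [4.0, 38.0, -8.0]]))  # OK: shape (2, 3)
# validate_xyz([[0, -52, 26]])  would raise TypeError (list, not ndarray)
# validate_xyz(np.zeros((3, 2))) would raise ValueError (2 columns, not 3)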
def __init__(
    self,
    target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
    voxel_thresh=None,
    n_cores=1,
):
    self.target_image = target_image
    self.voxel_thresh = voxel_thresh
    self.n_cores = _check_ncores(n_cores)
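# A hedged usage sketch for the constructor above, in the same doctest style as
# this module's other Examples. ``Diagnostics`` stands in for whichever class
# owns this __init__, and ``cresult`` for an FWE-corrected CBMA MetaResult:
#
# >>> diag = Diagnostics(voxel_thresh=None, n_cores=1)
# >>> diag.target_image
# 'z_desc-size_level-cluster_corr-FWE_method-montecarlo'
# >>> target_map = cresult.get_map(diag.target_image)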
def _compute_null_montecarlo(self, n_iters, n_cores):
    """Compute uncorrected null distribution using Monte Carlo method.

    Parameters
    ----------
    n_iters : int
        Number of permutations.
    n_cores : int
        Number of cores to use.

    Notes
    -----
    This method adds two entries to the null_distributions_ dict attribute:
    "histweights_corr-none_method-montecarlo" and
    "histweights_level-voxel_corr-fwe_method-montecarlo".
    """
    null_ijk = np.vstack(np.where(self.masker.mask_img.get_fdata())).T

    n_cores = _check_ncores(n_cores)

    rand_idx = np.random.choice(
        null_ijk.shape[0],
        size=(self.inputs_["coordinates"].shape[0], n_iters),
    )
    rand_ijk = null_ijk[rand_idx, :]
    rand_xyz = vox2mm(rand_ijk, self.masker.mask_img.affine)
    iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)
    iter_df = self.inputs_["coordinates"].copy()

    with tqdm_joblib(tqdm(total=n_iters)):
        perm_histograms = Parallel(n_jobs=n_cores)(
            delayed(self._compute_null_montecarlo_permutation)(
                iter_xyzs[i_iter], iter_df=iter_df
            )
            for i_iter in range(n_iters)
        )

    perm_histograms = np.vstack(perm_histograms)
    self.null_distributions_["histweights_corr-none_method-montecarlo"] = np.sum(
        perm_histograms, axis=0
    )

    fwe_voxel_max = np.apply_along_axis(_get_last_bin, 1, perm_histograms)
    histweights = np.zeros(perm_histograms.shape[1], dtype=perm_histograms.dtype)
    for perm in fwe_voxel_max:
        histweights[perm] += 1

    self.null_distributions_[
        "histweights_level-voxel_corr-fwe_method-montecarlo"
    ] = histweights
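# --- Illustrative sketch (not part of the library) ---------------------------
# How the two null histograms above relate, in plain numpy. Each permutation
# contributes one histogram row; summing rows gives the uncorrected null, while
# the index of each row's last nonzero bin (that permutation's maximum
# statistic) accumulates into the voxel-level FWE null. All names here are
# hypothetical stand-ins.

import numpy as np

rng = np.random.default_rng(0)
n_iters, n_bins = 100, 50

# Fake per-permutation histograms of summary-statistic values across voxels.
perm_histograms = rng.poisson(lam=5, size=(n_iters, n_bins))

# Uncorrected null: pool all permutations' histograms.
histweights_uncorrected = perm_histograms.sum(axis=0)


def _last_nonzero_bin(row):
    """Index of the last nonzero bin, i.e., the row's maximum statistic."""
    nonzero = np.flatnonzero(row)
    return nonzero[-1] if nonzero.size else 0


# Voxel-level FWE null: histogram of each permutation's maximum statistic.
fwe_voxel_max = np.apply_along_axis(_last_nonzero_bin, 1, perm_histograms)
histweights_vfwe = np.bincount(fwe_voxel_max, minlength=n_bins)
assert histweights_vfwe.sum() == n_iters  # one maximum per permutation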
def __init__(self, kernel_transformer=ALEKernel, n_iters=10000, n_cores=1, **kwargs):
    if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
        LGR.warning(
            f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
            f"for the {type(self).__name__} algorithm. "
            "Expect suboptimal performance and beware bugs."
        )

    # Add kernel transformer attribute and process keyword arguments
    super().__init__(kernel_transformer=kernel_transformer, **kwargs)

    self.dataset1 = None
    self.dataset2 = None
    self.n_iters = n_iters
    self.n_cores = _check_ncores(n_cores)
    # memory_limit needs to exist to trigger the use_memmap decorator, but it will also be
    # used if a Dataset with pre-generated MA maps is provided.
    self.memory_limit = "100mb"
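# A hedged usage sketch for the pairwise (subtraction-style) constructor above,
# following this module's doctest Examples convention. ``Subtraction`` stands in
# for the owning class, and ``dset1``/``dset2`` for two coordinate Datasets:
#
# >>> sub = Subtraction(n_iters=10000, n_cores=2)
# >>> result = sub.fit(dset1, dset2)  # populates self.dataset1 / self.dataset2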
def __init__(
    self,
    kernel_transformer=ALEKernel,
    null_method="approximate",
    n_iters=10000,
    n_cores=1,
    **kwargs,
):
    if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
        LGR.warning(
            f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
            f"for the {type(self).__name__} algorithm. "
            "Expect suboptimal performance and beware bugs."
        )

    # Add kernel transformer attribute and process keyword arguments
    super().__init__(kernel_transformer=kernel_transformer, **kwargs)

    self.null_method = null_method
    self.n_iters = n_iters
    self.n_cores = _check_ncores(n_cores)
    self.dataset = None
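# A hedged usage sketch for the constructor above, in the module's doctest
# style. ``Estimator`` stands in for the owning class and ``dset`` for a
# coordinate Dataset. "approximate" builds the null distribution from
# histograms of MA values, while "montecarlo" builds it from random-coordinate
# permutations (as in _compute_null_montecarlo above):
#
# >>> meta = Estimator(null_method="approximate")  # fast, histogram-based
# >>> meta_mc = Estimator(null_method="montecarlo", n_iters=10000, n_cores=2)
# >>> result = meta.fit(dset)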
def correct_fwe_montecarlo(self, result, n_iters=10000, n_cores=1):
    """Perform FWE correction using the max-value permutation method.

    .. versionchanged:: 0.0.8

        * [FIX] Remove single-dimensional entries of each array of returns (:obj:`dict`).

    .. versionadded:: 0.0.4

    Only call this method from within a Corrector.

    Parameters
    ----------
    result : :obj:`~nimare.results.MetaResult`
        Result object from a PermutedOLS meta-analysis.
    n_iters : :obj:`int`, optional
        The number of iterations to run in estimating the null distribution.
        Default is 10000.
    n_cores : :obj:`int`, optional
        Number of cores to use for parallelization.
        If <=0, defaults to using all available cores.
        Default is 1.

    Returns
    -------
    images : :obj:`dict`
        Dictionary of 1D arrays corresponding to masked images generated by the correction
        procedure. The following arrays are generated by this method:
        'logp_level-voxel' and 'z_level-voxel'.

    See Also
    --------
    nimare.correct.FWECorrector : The Corrector from which to call this method.
    nilearn.mass_univariate.permuted_ols : The function used for this IBMA.

    Examples
    --------
    >>> meta = PermutedOLS()
    >>> result = meta.fit(dset)
    >>> corrector = FWECorrector(method='montecarlo', n_iters=5, n_cores=1)
    >>> cresult = corrector.transform(result)
    """
    n_cores = _check_ncores(n_cores)

    log_p_map, t_map, _ = permuted_ols(
        self.parameters_["tested_vars"],
        self.inputs_["z_maps"],
        confounding_vars=self.parameters_["confounding_vars"],
        model_intercept=False,  # modeled by tested_vars
        n_perm=n_iters,
        two_sided_test=self.two_sided,
        random_state=42,
        n_jobs=n_cores,
        verbose=0,
    )

    # Fill complete maps
    p_map = np.power(10.0, -log_p_map)

    # Convert p to z, preserving signs
    sign = np.sign(t_map)
    sign[sign == 0] = 1
    z_map = p_to_z(p_map, tail="two") * sign
    images = {
        "logp_level-voxel": _boolean_unmask(
            log_p_map.squeeze(), self.inputs_["aggressive_mask"]
        ),
        "z_level-voxel": _boolean_unmask(z_map.squeeze(), self.inputs_["aggressive_mask"]),
    }
    return images
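# --- Illustrative sketch (not part of the library) ---------------------------
# The sign-preservation step above in plain scipy: a two-sided p-value maps to
# a positive z via the normal inverse survival function, and the t-statistic's
# sign is reapplied so negative effects get negative z-values. This assumes
# p_to_z's two-tailed convention is z = norm.isf(p / 2).

import numpy as np
from scipy import stats

p_map = np.array([0.04, 0.04, 1.0])
t_map = np.array([2.1, -2.1, 0.0])

sign = np.sign(t_map)
sign[sign == 0] = 1  # treat exactly-zero t-statistics as positive

z_map = stats.norm.isf(p_map / 2) * sign
# z_map is approximately [2.05, -2.05, 0.0]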
def correct_fwe_montecarlo(
    self,
    result,
    voxel_thresh=0.001,
    n_iters=10000,
    n_cores=1,
    vfwe_only=False,
):
    """Perform FWE correction using the max-value permutation method.

    Only call this method from within a Corrector.

    .. versionchanged:: 0.0.12

        * Fix the ``vfwe_only`` option.

    .. versionchanged:: 0.0.11

        * Rename ``*_level-cluster`` maps to ``*_desc-size_level-cluster``.
        * Add new ``*_desc-mass_level-cluster`` maps that use cluster mass-based inference.

    Parameters
    ----------
    result : :obj:`~nimare.results.MetaResult`
        Result object from a CBMA meta-analysis.
    voxel_thresh : :obj:`float`, optional
        Cluster-defining p-value threshold. Default is 0.001.
    n_iters : :obj:`int`, optional
        Number of iterations to build the voxel-level, cluster-size, and cluster-mass FWE
        null distributions. Default is 10000.
    n_cores : :obj:`int`, optional
        Number of cores to use for parallelization.
        If <=0, defaults to using all available cores. Default is 1.
    vfwe_only : :obj:`bool`, optional
        If True, only calculate the voxel-level FWE-corrected maps. Voxel-level correction
        can be performed very quickly if the Estimator's ``null_method`` was "montecarlo".
        Default is False.

    Returns
    -------
    images : :obj:`dict`
        Dictionary of 1D arrays corresponding to masked images generated by the correction
        procedure. The following arrays are generated by this method:

        -   ``logp_desc-size_level-cluster``: Cluster-level FWE-corrected ``-log10(p)`` map
            based on cluster size. This was previously simply called "logp_level-cluster".
            This array is **not** generated if ``vfwe_only`` is ``True``.
        -   ``logp_desc-mass_level-cluster``: Cluster-level FWE-corrected ``-log10(p)`` map
            based on cluster mass. According to :footcite:t:`bullmore1999global` and
            :footcite:t:`zhang2009cluster`, cluster mass-based inference is more powerful
            than cluster size.
            This array is **not** generated if ``vfwe_only`` is ``True``.
        -   ``logp_level-voxel``: Voxel-level FWE-corrected ``-log10(p)`` map.
            Voxel-level correction is generally more conservative than cluster-level
            correction, so it is only recommended for very large meta-analyses
            (i.e., hundreds of studies), per :footcite:t:`eickhoff2016behavior`.

    Notes
    -----
    If ``vfwe_only`` is ``False``, this method adds three new keys to the
    ``null_distributions_`` attribute:

    -   ``values_level-voxel_corr-fwe_method-montecarlo``: The maximum summary statistic
        value from each Monte Carlo iteration. An array of shape (n_iters,).
    -   ``values_desc-size_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
        size from each Monte Carlo iteration. An array of shape (n_iters,).
    -   ``values_desc-mass_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
        mass from each Monte Carlo iteration. An array of shape (n_iters,).

    See Also
    --------
    nimare.correct.FWECorrector : The Corrector from which to call this method.

    References
    ----------
    .. footbibliography::

    Examples
    --------
    >>> meta = MKDADensity()
    >>> result = meta.fit(dset)
    >>> corrector = FWECorrector(method='montecarlo', voxel_thresh=0.01, n_iters=5, n_cores=1)
    >>> cresult = corrector.transform(result)
    """
    stat_values = result.get_map("stat", return_type="array")

    if vfwe_only and (self.null_method == "montecarlo"):
        LGR.info("Using precalculated histogram for voxel-level FWE correction.")

        # Determine p- and z-values from stat values and null distribution.
        p_vfwe_values = nullhist_to_p(
            stat_values,
            self.null_distributions_["histweights_level-voxel_corr-fwe_method-montecarlo"],
            self.null_distributions_["histogram_bins"],
        )
    else:
        if vfwe_only:
            LGR.warning(
                "In order to run this method with the 'vfwe_only' option, "
                "the Estimator must use the 'montecarlo' null_method. "
                "Running permutations from scratch."
            )

        null_xyz = vox2mm(
            np.vstack(np.where(self.masker.mask_img.get_fdata())).T,
            self.masker.mask_img.affine,
        )

        n_cores = _check_ncores(n_cores)

        # Identify summary statistic corresponding to intensity threshold
        ss_thresh = self._p_to_summarystat(voxel_thresh)

        rand_idx = np.random.choice(
            null_xyz.shape[0],
            size=(self.inputs_["coordinates"].shape[0], n_iters),
        )
        rand_xyz = null_xyz[rand_idx, :]
        iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)
        iter_df = self.inputs_["coordinates"].copy()

        # Define connectivity matrix for cluster labeling
        conn = ndimage.generate_binary_structure(3, 2)

        with tqdm_joblib(tqdm(total=n_iters)):
            perm_results = Parallel(n_jobs=n_cores)(
                delayed(self._correct_fwe_montecarlo_permutation)(
                    iter_xyzs[i_iter],
                    iter_df=iter_df,
                    conn=conn,
                    voxel_thresh=ss_thresh,
                    vfwe_only=vfwe_only,
                )
                for i_iter in range(n_iters)
            )

        fwe_voxel_max, fwe_cluster_size_max, fwe_cluster_mass_max = zip(*perm_results)

        if not vfwe_only:
            # Cluster-level FWE
            # Extract the summary statistics in voxel-wise (3D) form, threshold, and
            # cluster-label
            thresh_stat_values = self.masker.inverse_transform(stat_values).get_fdata()
            thresh_stat_values[thresh_stat_values <= ss_thresh] = 0
            labeled_matrix, _ = ndimage.label(thresh_stat_values, conn)

            cluster_labels, idx, cluster_sizes = np.unique(
                labeled_matrix,
                return_inverse=True,
                return_counts=True,
            )
            assert cluster_labels[0] == 0

            # Cluster mass-based inference
            cluster_masses = np.zeros(cluster_labels.shape)
            for i_val in cluster_labels:
                if i_val == 0:
                    # Label 0 is the background, not a cluster; leave its mass at zero.
                    continue

                cluster_mass = np.sum(thresh_stat_values[labeled_matrix == i_val] - ss_thresh)
                cluster_masses[i_val] = cluster_mass

            p_cmfwe_vals = null_to_p(cluster_masses, fwe_cluster_mass_max, "upper")
            p_cmfwe_map = p_cmfwe_vals[np.reshape(idx, labeled_matrix.shape)]

            p_cmfwe_values = np.squeeze(
                self.masker.transform(
                    nib.Nifti1Image(p_cmfwe_map, self.masker.mask_img.affine)
                )
            )
            logp_cmfwe_values = -np.log10(p_cmfwe_values)
            logp_cmfwe_values[np.isinf(logp_cmfwe_values)] = -np.log10(np.finfo(float).eps)
            z_cmfwe_values = p_to_z(p_cmfwe_values, tail="one")

            # Cluster size-based inference
            cluster_sizes[0] = 0  # replace background's "cluster size" with zero
            p_csfwe_vals = null_to_p(cluster_sizes, fwe_cluster_size_max, "upper")
            p_csfwe_map = p_csfwe_vals[np.reshape(idx, labeled_matrix.shape)]

            p_csfwe_values = np.squeeze(
                self.masker.transform(
                    nib.Nifti1Image(p_csfwe_map, self.masker.mask_img.affine)
                )
            )
            logp_csfwe_values = -np.log10(p_csfwe_values)
            logp_csfwe_values[np.isinf(logp_csfwe_values)] = -np.log10(np.finfo(float).eps)
            z_csfwe_values = p_to_z(p_csfwe_values, tail="one")

            self.null_distributions_[
                "values_desc-size_level-cluster_corr-fwe_method-montecarlo"
            ] = fwe_cluster_size_max
            self.null_distributions_[
                "values_desc-mass_level-cluster_corr-fwe_method-montecarlo"
            ] = fwe_cluster_mass_max

        # Voxel-level FWE
        LGR.info("Using null distribution for voxel-level FWE correction.")
        p_vfwe_values = null_to_p(stat_values, fwe_voxel_max, tail="upper")
        self.null_distributions_[
            "values_level-voxel_corr-fwe_method-montecarlo"
        ] = fwe_voxel_max

    z_vfwe_values = p_to_z(p_vfwe_values, tail="one")
    logp_vfwe_values = -np.log10(p_vfwe_values)
    logp_vfwe_values[np.isinf(logp_vfwe_values)] = -np.log10(np.finfo(float).eps)

    # Return unthresholded value images
    images = {
        "logp_level-voxel": logp_vfwe_values,
        "z_level-voxel": z_vfwe_values,
    }
    if not vfwe_only:
        images.update(
            {
                "logp_desc-size_level-cluster": logp_csfwe_values,
                "z_desc-size_level-cluster": z_csfwe_values,
                "logp_desc-mass_level-cluster": logp_cmfwe_values,
                "z_desc-mass_level-cluster": z_cmfwe_values,
            }
        )
    return images
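# --- Illustrative sketch (not part of the library) ---------------------------
# The cluster-size FWE logic above in miniature: threshold a statistic map,
# label the surviving clusters with a 26-connectivity structure, and compare
# each observed cluster size against the null distribution of per-permutation
# maximum cluster sizes. The toy map and null are hypothetical; the real code
# uses null_to_p, which additionally handles tails and edge cases.

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)

stat_map = rng.normal(size=(10, 10, 10))
thresh_map = np.where(stat_map > 2.0, stat_map, 0)

conn = ndimage.generate_binary_structure(3, 2)  # 26-connectivity
labeled, n_clusters = ndimage.label(thresh_map, conn)
_, sizes = np.unique(labeled, return_counts=True)
sizes[0] = 0  # label 0 is the background, not a cluster

# Toy null of per-permutation maximum cluster sizes.
fwe_cluster_size_max = rng.integers(1, 10, size=1000)

# FWE-corrected p for each cluster: fraction of null maxima >= observed size.
p_fwe = np.array([(fwe_cluster_size_max >= s).mean() for s in sizes])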