Example #1
    def _scale_to_p(self, stat_values, scale_values):
        """Compute p- and z-values.

        Parameters
        ----------
        stat_values : (V) array
            ALE values.
        scale_values : (I x V) array
            Permutation ALE values.

        Returns
        -------
        p_values : (V) array
        z_values : (V) array

        Notes
        -----
        This method also uses the "histogram_bins" element in the null_distributions_ attribute.
        """
        n_voxels = stat_values.shape[0]

        # I know that joblib probably preserves order of outputs, but I'm paranoid, so we track
        # the iteration as well and sort the resulting p-value array based on that.
        with tqdm_joblib(tqdm(total=n_voxels)):
            p_values, voxel_idx = zip(*Parallel(n_jobs=self.n_cores)(
                delayed(self._scale_to_p_voxel)(i_voxel, stat_values[i_voxel],
                                                scale_values[:, i_voxel])
                for i_voxel in range(n_voxels)))
        # Convert to an array and sort the p-values array based on the voxel index.
        p_values = np.array(p_values)[np.argsort(voxel_idx)]

        z_values = p_to_z(p_values, tail="one")
        return p_values, z_values
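The conversion on the final lines hinges on p_to_z with a one-tailed p-value. As a point of reference, here is a minimal, self-contained sketch of that mapping using scipy's normal survival function; the helper name p_to_z_sketch is invented for illustration and is not NiMARE's implementation.

import numpy as np
from scipy import stats

def p_to_z_sketch(p, tail="one"):
    """Toy stand-in for p_to_z: map p-values to z-scores via the normal survival function."""
    p = np.clip(np.asarray(p, dtype=float), 1e-300, 1.0)  # avoid infinite z at p == 0
    if tail == "one":
        return stats.norm.isf(p)      # one-tailed: z such that P(Z > z) = p
    if tail == "two":
        return stats.norm.isf(p / 2)  # two-tailed: split the p-value across both tails
    raise ValueError("tail must be 'one' or 'two'")

print(p_to_z_sketch([0.05, 0.001], tail="one"))  # approximately [1.64, 3.09]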
Example #2
    def _generate_secondary_maps(self, result, corr_maps):
        # Generates corrected versions of z and log-p maps if they exist
        p = corr_maps["p"]
        if "z" in result.maps:
            corr_maps["z"] = p_to_z(p) * np.sign(result.maps["z"])
        if "logp" in result.maps:
            corr_maps["logp"] = -np.log10(p)
        return corr_maps
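For context, the snippet below reproduces the sign-preserving idea from _generate_secondary_maps on toy arrays, with plain dicts standing in for the MetaResult maps. The one-tailed normal survival function is an assumed stand-in for p_to_z, whose default tail may differ, so treat this as a sketch rather than the library's exact behavior.

import numpy as np
from scipy import stats

# Toy inputs: corrected p-values plus the original signed z map.
result_maps = {"z": np.array([2.5, -1.8, 0.3])}   # stand-in for result.maps
corr_maps = {"p": np.array([0.01, 0.04, 0.60])}

# Magnitude comes from the corrected p-values; the sign is carried over from the original z map.
corr_maps["z"] = stats.norm.isf(corr_maps["p"]) * np.sign(result_maps["z"])
corr_maps["logp"] = -np.log10(corr_maps["p"])
print(corr_maps["z"], corr_maps["logp"])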
Example #3
    def correct_fwe_montecarlo(self, result, n_iters=10000, n_cores=1):
        """Perform FWE correction using the max-value permutation method.

        .. versionchanged:: 0.0.8

            * [FIX] Remove single-dimensional entries of each array of returns (:obj:`dict`).

        .. versionadded:: 0.0.4

        Only call this method from within a Corrector.

        Parameters
        ----------
        result : :obj:`~nimare.results.MetaResult`
            Result object from an ALE meta-analysis.
        n_iters : :obj:`int`, optional
            The number of iterations to run in estimating the null distribution.
            Default is 10000.
        n_cores : :obj:`int`, optional
            Number of cores to use for parallelization.
            If <=0, defaults to using all available cores. Default is 1.

        Returns
        -------
        images : :obj:`dict`
            Dictionary of 1D arrays corresponding to masked images generated by
            the correction procedure. The following arrays are generated by
            this method: 'logp_level-voxel' and 'z_level-voxel'.

        See Also
        --------
        nimare.correct.FWECorrector : The Corrector from which to call this method.
        nilearn.mass_univariate.permuted_ols : The function used for this IBMA.

        Examples
        --------
        >>> meta = PermutedOLS()
        >>> result = meta.fit(dset)
        >>> corrector = FWECorrector(method='montecarlo',
        ...                          n_iters=5, n_cores=1)
        >>> cresult = corrector.transform(result)
        """
        n_cores = _check_ncores(n_cores)

        log_p_map, t_map, _ = permuted_ols(
            self.parameters_["tested_vars"],
            self.inputs_["z_maps"],
            confounding_vars=self.parameters_["confounding_vars"],
            model_intercept=False,  # modeled by tested_vars
            n_perm=n_iters,
            two_sided_test=self.two_sided,
            random_state=42,
            n_jobs=n_cores,
            verbose=0,
        )

        # Fill complete maps
        p_map = np.power(10.0, -log_p_map)

        # Convert p to z, preserving signs
        sign = np.sign(t_map)
        sign[sign == 0] = 1
        z_map = p_to_z(p_map, tail="two") * sign
        images = {
            "logp_level-voxel": _boolean_unmask(
                log_p_map.squeeze(), self.inputs_["aggressive_mask"]
            ),
            "z_level-voxel": _boolean_unmask(
                z_map.squeeze(), self.inputs_["aggressive_mask"]
            ),
        }
        return images
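The post-processing above back-transforms the -log10(p) values returned by permuted_ols and restores the sign of the t statistics. The toy snippet below illustrates that step on plain arrays; the scipy call is an assumed stand-in for p_to_z with tail="two".

import numpy as np
from scipy import stats

# Toy permuted_ols-style outputs for three voxels: -log10(p) values and t statistics.
log_p_map = np.array([3.0, 1.3, 0.05])
t_map = np.array([4.1, -2.2, 0.0])

p_map = np.power(10.0, -log_p_map)        # back-transform -log10(p) to p

sign = np.sign(t_map)
sign[sign == 0] = 1                       # treat exactly-zero statistics as positive
z_map = stats.norm.isf(p_map / 2) * sign  # two-tailed p to signed z (stand-in for p_to_z)
print(p_map, z_map)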
Example #4
def neurosynth_decode(
    coordinates,
    annotations,
    ids,
    ids2=None,
    feature_group=None,
    features=None,
    frequency_threshold=0.001,
    prior=0.5,
    u=0.05,
    correction="fdr_bh",
):
    """Perform discrete functional decoding according to Neurosynth's meta-analytic method.

    This does not employ correlations between unthresholded maps, which are the
    method of choice for decoding within Neurosynth and NeuroVault.
    Metadata (i.e., feature labels) for studies within the selected sample
    (`ids`) are compared to the unselected studies remaining in the database
    (`dataset`).

    Neurosynth was described in :footcite:t:`yarkoni2011large`.

    Parameters
    ----------
    coordinates : :class:`pandas.DataFrame`
        DataFrame containing coordinates. Must include a column named 'id' and
        must have a separate row for each reported peak coordinate for each
        study (i.e., there are multiple rows per ID).
        IDs from ``coordinates`` must match those from ``annotations``.
    annotations : :class:`pandas.DataFrame`
        DataFrame containing labels. Must include a column named 'id' and each
        row must correspond to a study. Other columns may correspond to
        individual labels.
        IDs from ``annotations`` must match those from ``coordinates``.
    ids : :obj:`list`
        Subset of studies in coordinates/annotations dataframes indicating
        target for decoding. Examples include studies reporting at least one
        peak in an ROI, or studies selected from a clustering analysis.
    ids2 : :obj:`list` or None, optional
        Second subset of studies, representing "unselected" studies. If None,
        then all studies in coordinates/annotations dataframes **not** in
        ``ids`` will be used.
    feature_group : :obj:`str` or None, optional
        Feature group name used to limit the labels used for decoding.
        Default is None, which uses all feature groups available.
    features : :obj:`list`, optional
        List of features in dataset annotations to use for decoding.
        Default is None, which uses all features available.
    frequency_threshold : :obj:`float`, optional
        Threshold to apply to dataset annotations. Values greater than or
        equal to the threshold are assigned as label+, while values below
        the threshold are considered label-. Default is 0.001.
    prior : :obj:`float`, optional
        Uniform prior probability of each label being active in a study in
        the absence of evidence (labels or selection) from the study.
        Default is 0.5 (50%).
    u : :obj:`float`, optional
        Alpha level for multiple comparisons correction. Default is 0.05.
    correction : {None, "bh", "by", "bonferroni"}, optional
        Multiple comparisons correction method to apply.
        Default is 'bh' (Benjamini-Hochberg FDR correction).

    Returns
    -------
    out_df : :class:`pandas.DataFrame`
        Table with each label and the following values associated with each
        label: 'pForward', 'zForward', 'probForward', 'pReverse', 'zReverse',
        and 'probReverse'.

    See Also
    --------
    :class:`~nimare.decode.discrete.NeurosynthDecoder`: The associated class for this method.
    :func:`~nimare.decode.continuous.CorrelationDecoder`: The correlation-based decoding
        method employed in Neurosynth and NeuroVault.

    References
    ----------
    .. footbibliography::
    """
    dataset_ids = sorted(list(set(coordinates["id"].values)))
    if ids2 is None:
        unselected = sorted(list(set(dataset_ids) - set(ids)))
    else:
        unselected = ids2[:]

    # Binarize with frequency threshold
    features_df = annotations.set_index("id", drop=True)
    if features is None:
        # Default to all annotation columns (assumed to be features) when no list is given
        features = features_df.columns.tolist()

    features_df = features_df[features].ge(frequency_threshold)

    sel_array = features_df.loc[ids].values
    unsel_array = features_df.loc[unselected].values

    n_selected = len(ids)
    n_unselected = len(unselected)

    n_selected_term = np.sum(sel_array, axis=0)
    n_unselected_term = np.sum(unsel_array, axis=0)

    n_selected_noterm = n_selected - n_selected_term
    n_unselected_noterm = n_unselected - n_unselected_term

    n_term = n_selected_term + n_unselected_term
    n_noterm = n_selected_noterm + n_unselected_noterm

    p_term = n_term / (n_term + n_noterm)

    p_selected_g_term = n_selected_term / n_term
    p_selected_g_noterm = n_selected_noterm / n_noterm

    # Recompute conditions with empirically derived prior (or inputted one)
    if prior is None:
        # if this is used, p_term_g_selected_prior = p_selected (regardless of term)
        prior = p_term

    # Significance testing
    # One-way chi-square test for consistency of term frequency across terms
    chi2_fi = one_way(n_selected_term, n_term)
    p_fi = special.chdtrc(1, chi2_fi)
    sign_fi = np.sign(n_selected_term - np.mean(n_selected_term)).ravel()  # pylint: disable=no-member

    # Two-way chi-square test for specificity of activation
    cells = np.array([
        [n_selected_term, n_selected_noterm],  # pylint: disable=no-member
        [n_unselected_term, n_unselected_noterm],
    ]).T
    chi2_ri = two_way(cells)
    p_ri = special.chdtrc(1, chi2_ri)
    sign_ri = np.sign(p_selected_g_term - p_selected_g_noterm).ravel()  # pylint: disable=no-member

    # Multiple comparisons correction across terms. Separately done for FI and RI.
    if correction in ("bh", "by"):
        p_corr_fi = fdr(p_fi, alpha=u, method=correction)
        p_corr_ri = fdr(p_ri, alpha=u, method=correction)
    elif correction == "bonferroni":
        p_corr_fi = bonferroni(p_fi)
        p_corr_ri = bonferroni(p_ri)
    else:
        p_corr_fi = p_fi
        p_corr_ri = p_ri

    # Compute z-values
    z_corr_fi = p_to_z(p_corr_fi, "two") * sign_fi
    z_corr_ri = p_to_z(p_corr_ri, "two") * sign_ri

    # Effect size
    # est. prob. of brain state described by term finding activation in ROI
    p_selected_g_term_g_prior = prior * p_selected_g_term + (
        1 - prior) * p_selected_g_noterm

    # est. prob. of activation in ROI reflecting brain state described by term
    p_term_g_selected_g_prior = p_selected_g_term * prior / p_selected_g_term_g_prior

    arr = np.array([
        p_corr_fi,
        z_corr_fi,
        p_selected_g_term_g_prior,  # pylint: disable=no-member
        p_corr_ri,
        z_corr_ri,
        p_term_g_selected_g_prior,
    ]).T

    out_df = pd.DataFrame(
        data=arr,
        index=features,
        columns=[
            "pForward", "zForward", "probForward", "pReverse", "zReverse",
            "probReverse"
        ],
    )
    out_df.index.name = "Term"
    return out_df
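The per-label arithmetic above reduces to a 2x2 contingency table plus a Bayes-rule step. The following self-contained sketch works through one label; scipy's chi2_contingency stands in for NiMARE's two_way helper, and every count is invented for the demo.

import numpy as np
from scipy import stats

# Invented counts for a single label.
n_selected, n_unselected = 50, 950            # studies in the selection vs. the rest
n_selected_term, n_unselected_term = 30, 190  # of those, studies tagged with the label

n_selected_noterm = n_selected - n_selected_term
n_unselected_noterm = n_unselected - n_unselected_term

# Reverse-inference (specificity) test: 2x2 chi-square on selection membership x label.
cells = np.array([[n_selected_term, n_selected_noterm],
                  [n_unselected_term, n_unselected_noterm]])
chi2, p_ri, _, _ = stats.chi2_contingency(cells, correction=False)

# Posterior probability of the label given selection, under a uniform prior of 0.5.
prior = 0.5
p_selected_g_term = n_selected_term / (n_selected_term + n_unselected_term)
p_selected_g_noterm = n_selected_noterm / (n_selected_noterm + n_unselected_noterm)
p_selected_g_term_g_prior = prior * p_selected_g_term + (1 - prior) * p_selected_g_noterm
p_term_g_selected_g_prior = p_selected_g_term * prior / p_selected_g_term_g_prior
print(round(p_ri, 4), round(p_term_g_selected_g_prior, 3))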
Example #5
def brainmap_decode(
    coordinates,
    annotations,
    ids,
    ids2=None,
    features=None,
    frequency_threshold=0.001,
    u=0.05,
    correction="fdr_bh",
):
    """Perform image-to-text decoding for discrete inputs according to the BrainMap method.

    This method was described in :footcite:t:`amft2015definition`.

    Parameters
    ----------
    coordinates : :class:`pandas.DataFrame`
        DataFrame containing coordinates. Must include a column named 'id' and
        must have a separate row for each reported peak coordinate for each
        study (i.e., there are multiple rows per ID).
        IDs from ``coordinates`` must match those from ``annotations``.
    annotations : :class:`pandas.DataFrame`
        DataFrame containing labels. Must include a column named 'id' and each
        row must correspond to a study. Other columns may correspond to
        individual labels.
        IDs from ``annotations`` must match those from ``coordinates``.
    ids : :obj:`list`
        Subset of studies in coordinates/annotations dataframes indicating
        target for decoding. Examples include studies reporting at least one
        peak in an ROI, or studies selected from a clustering analysis.
    ids2 : :obj:`list` or None, optional
        Second subset of studies, representing "unselected" studies. If None,
        then all studies in coordinates/annotations dataframes **not** in
        ``ids`` will be used.
    features : :obj:`list`, optional
        List of features in dataset annotations to use for decoding.
        Default is None, which uses all features available.
    frequency_threshold : :obj:`float`, optional
        Threshold to apply to dataset annotations. Values greater than or
        equal to the threshold are assigned as label+, while values below
        the threshold are considered label-. Default is 0.001.
    u : :obj:`float`, optional
        Alpha level for multiple comparisons correction. Default is 0.05.
    correction : {None, "bh", "by", "bonferroni"}, optional
        Multiple comparisons correction method to apply.
        Default is 'bh' (Benjamini-Hochberg FDR correction).

    Returns
    -------
    out_df : :class:`pandas.DataFrame`
        Table with each label and the following values associated with each
        label: 'pForward', 'zForward', 'likelihoodForward', 'pReverse',
        'zReverse', and 'probReverse'.

    See Also
    --------
    :class:`~nimare.decode.discrete.BrainMapDecoder`: The associated class for this method.

    References
    ----------
    .. footbibliography::
    """
    dataset_ids = sorted(list(set(coordinates["id"].values)))
    if ids2 is None:
        unselected = sorted(list(set(dataset_ids) - set(ids)))
    else:
        unselected = ids2[:]

    # Binarize with frequency threshold
    features_df = annotations.set_index("id", drop=True)
    if features is None:
        # Default to all annotation columns (assumed to be features) when no list is given
        features = features_df.columns.tolist()

    features_df = features_df[features].ge(frequency_threshold)

    sel_array = features_df.loc[ids].values
    unsel_array = features_df.loc[unselected].values

    n_selected = len(ids)
    n_unselected = len(unselected)

    # the number of times any term is used (e.g., if one experiment uses
    # two terms, that counts twice). Why though?
    n_exps_across_terms = np.sum(np.sum(features_df))

    n_selected_term = np.sum(sel_array, axis=0)
    n_unselected_term = np.sum(unsel_array, axis=0)

    n_selected_noterm = n_selected - n_selected_term
    n_unselected_noterm = n_unselected - n_unselected_term

    n_term = n_selected_term + n_unselected_term
    p_term = n_term / n_exps_across_terms

    n_foci_in_database = coordinates.shape[0]
    p_selected = n_selected / n_foci_in_database

    # I hope there's a way to do this without the for loop
    n_term_foci = np.zeros(len(features))
    n_noterm_foci = np.zeros(len(features))
    for i, term in enumerate(features):
        term_ids = features_df.loc[features_df[term] == 1].index.values
        noterm_ids = features_df.loc[features_df[term] == 0].index.values
        n_term_foci[i] = coordinates["id"].isin(term_ids).sum()
        n_noterm_foci[i] = coordinates["id"].isin(noterm_ids).sum()

    p_selected_g_term = n_selected_term / n_term_foci  # probForward
    l_selected_g_term = p_selected_g_term / p_selected  # likelihoodForward
    p_selected_g_noterm = n_selected_noterm / n_noterm_foci

    p_term_g_selected = p_selected_g_term * p_term / p_selected  # probReverse
    p_term_g_selected = p_term_g_selected / np.nansum(
        p_term_g_selected)  # Normalize

    # Significance testing
    # Forward inference significance is determined with a binomial distribution
    p_fi = 1 - binom.cdf(k=n_selected_term, n=n_term_foci, p=p_selected)
    sign_fi = np.sign(n_selected_term - np.mean(n_selected_term)).ravel()  # pylint: disable=no-member

    # Two-way chi-square test for specificity of activation
    cells = np.array([
        [n_selected_term, n_selected_noterm],  # pylint: disable=no-member
        [n_unselected_term, n_unselected_noterm],
    ]).T
    chi2_ri = two_way(cells)
    p_ri = special.chdtrc(1, chi2_ri)
    sign_ri = np.sign(p_selected_g_term - p_selected_g_noterm).ravel()  # pylint: disable=no-member

    # Ignore rare features
    p_fi[n_selected_term < 5] = 1.0
    p_ri[n_selected_term < 5] = 1.0

    # Multiple comparisons correction across features. Separately done for FI and RI.
    if correction in ("bh", "by"):
        p_corr_fi = fdr(p_fi, alpha=u, method=correction)
        p_corr_ri = fdr(p_ri, alpha=u, method=correction)
    elif correction == "bonferroni":
        p_corr_fi = bonferroni(p_fi)
        p_corr_ri = bonferroni(p_ri)
    else:
        p_corr_fi = p_fi
        p_corr_ri = p_ri

    # Compute z-values
    z_corr_fi = p_to_z(p_corr_fi, "two") * sign_fi
    z_corr_ri = p_to_z(p_corr_ri, "two") * sign_ri

    # Effect size
    arr = np.array([
        p_corr_fi,
        z_corr_fi,
        l_selected_g_term,  # pylint: disable=no-member
        p_corr_ri,
        z_corr_ri,
        p_term_g_selected,
    ]).T

    out_df = pd.DataFrame(
        data=arr,
        index=features,
        columns=[
            "pForward",
            "zForward",
            "likelihoodForward",
            "pReverse",
            "zReverse",
            "probReverse",
        ],
    )
    out_df.index.name = "Term"
    return out_df
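The forward-inference test in brainmap_decode compares label-related counts against the base rate of foci falling in the selection, using a binomial distribution. A toy, self-contained rendering of that test (with invented counts and a simplified likelihood step) might look like this:

import numpy as np
from scipy.stats import binom

# Invented counts for a single label.
n_term_foci = 400       # foci reported by studies tagged with the label
p_selected = 0.05       # base rate: proportion of all database foci in the selection
n_selected_term = 35    # label-tagged studies contributing to the selection

# Forward inference: is the label over-represented in the selection relative to the base rate?
p_fi = 1 - binom.cdf(k=n_selected_term, n=n_term_foci, p=p_selected)

# A simplified likelihood ratio in the spirit of 'likelihoodForward'.
likelihood = (n_selected_term / n_term_foci) / p_selected
print(round(p_fi, 5), round(likelihood, 2))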
Example #6
    def _fit(self, dataset1, dataset2):
        self.dataset1 = dataset1
        self.dataset2 = dataset2
        self.masker = self.masker or dataset1.masker

        ma_maps1 = self._collect_ma_maps(
            maps_key="ma_maps1",
            coords_key="coordinates1",
            fname_idx=0,
        )
        ma_maps2 = self._collect_ma_maps(
            maps_key="ma_maps2",
            coords_key="coordinates2",
            fname_idx=1,
        )

        n_grp1, n_voxels = ma_maps1.shape

        # Get ALE values for the two groups and difference scores
        grp1_ale_values = 1.0 - np.prod(1.0 - ma_maps1, axis=0)
        grp2_ale_values = 1.0 - np.prod(1.0 - ma_maps2, axis=0)
        diff_ale_values = grp1_ale_values - grp2_ale_values
        del grp1_ale_values, grp2_ale_values

        # Combine the MA maps into a single array to draw from for null distribution
        ma_arr = np.vstack((ma_maps1, ma_maps2))

        if isinstance(ma_maps1, np.memmap):
            LGR.debug(f"Closing memmap at {ma_maps1.filename}")
            ma_maps1._mmap.close()

        if isinstance(ma_maps2, np.memmap):
            LGR.debug(f"Closing memmap at {ma_maps2.filename}")
            ma_maps2._mmap.close()

        del ma_maps1, ma_maps2

        # Calculate null distribution for each voxel based on group-assignment randomization
        # Use a memmapped 2D array
        iter_diff_values = np.memmap(
            self.memmap_filenames[2],
            dtype=ma_arr.dtype,
            mode="w+",
            shape=(self.n_iters, n_voxels),
        )

        with tqdm_joblib(tqdm(total=self.n_iters)):
            Parallel(n_jobs=self.n_cores)(
                delayed(self._run_permutation)(i_iter, n_grp1, ma_arr, iter_diff_values)
                for i_iter in range(self.n_iters)
            )

        # Determine p-values based on voxel-wise null distributions
        # I know that joblib probably preserves order of outputs, but I'm paranoid, so we track
        # the iteration as well and sort the resulting p-value array based on that.
        with tqdm_joblib(tqdm(total=n_voxels)):
            p_values, voxel_idx = zip(*Parallel(n_jobs=self.n_cores)(
                delayed(self._alediff_to_p_voxel)(
                    i_voxel,
                    diff_ale_values[i_voxel],
                    iter_diff_values[:, i_voxel],
                ) for i_voxel in range(n_voxels)))
        # Convert to an array and sort the p-values array based on the voxel index.
        p_values = np.array(p_values)[np.argsort(voxel_idx)]

        diff_signs = np.sign(diff_ale_values -
                             np.median(iter_diff_values, axis=0))

        if isinstance(iter_diff_values, np.memmap):
            LGR.debug(f"Closing memmap at {iter_diff_values.filename}")
            iter_diff_values._mmap.close()

        del iter_diff_values

        z_arr = p_to_z(p_values, tail="two") * diff_signs
        logp_arr = -np.log10(p_values)

        images = {
            "stat_desc-group1MinusGroup2": diff_ale_values,
            "p_desc-group1MinusGroup2": p_values,
            "z_desc-group1MinusGroup2": z_arr,
            "logp_desc-group1MinusGroup2": logp_arr,
        }
        return images
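The heart of this subtraction analysis is a group-assignment randomization null for the voxel-wise ALE difference. A compact, self-contained sketch of that idea on synthetic modeled-activation maps is shown below; the final p-value step is a plain empirical two-tailed comparison rather than NiMARE's null_to_p, and all array sizes are arbitrary.

import numpy as np

rng = np.random.default_rng(0)

# Synthetic modeled-activation (MA) maps: 8 studies in group 1, 10 in group 2, 500 voxels.
ma_maps1 = rng.uniform(0, 0.2, size=(8, 500))
ma_maps2 = rng.uniform(0, 0.2, size=(10, 500))

def ale(ma):
    # ALE statistic: probability that at least one study activates each voxel.
    return 1.0 - np.prod(1.0 - ma, axis=0)

diff_obs = ale(ma_maps1) - ale(ma_maps2)

# Null distribution of the difference via group-assignment randomization.
ma_arr = np.vstack((ma_maps1, ma_maps2))
n_grp1, n_iters = ma_maps1.shape[0], 200
null_diffs = np.empty((n_iters, ma_arr.shape[1]))
for i_iter in range(n_iters):
    perm = rng.permutation(ma_arr.shape[0])
    null_diffs[i_iter] = ale(ma_arr[perm[:n_grp1]]) - ale(ma_arr[perm[n_grp1:]])

# Simple empirical two-tailed p-values from the voxel-wise null distributions.
p_values = np.mean(np.abs(null_diffs) >= np.abs(diff_obs), axis=0)
print(p_values.min(), p_values.max())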
Example #7
    def correct_fwe_montecarlo(
        self,
        result,
        voxel_thresh=0.001,
        n_iters=10000,
        n_cores=1,
        vfwe_only=False,
    ):
        """Perform FWE correction using the max-value permutation method.

        Only call this method from within a Corrector.

        .. versionchanged:: 0.0.12

            * Fix the ``vfwe_only`` option.

        .. versionchanged:: 0.0.11

            * Rename ``*_level-cluster`` maps to ``*_desc-size_level-cluster``.
            * Add new ``*_desc-mass_level-cluster`` maps that use cluster mass-based inference.

        Parameters
        ----------
        result : :obj:`~nimare.results.MetaResult`
            Result object from a CBMA meta-analysis.
        voxel_thresh : :obj:`float`, optional
            Cluster-defining p-value threshold. Default is 0.001.
        n_iters : :obj:`int`, optional
            Number of iterations to build the voxel-level, cluster-size, and cluster-mass FWE
            null distributions. Default is 10000.
        n_cores : :obj:`int`, optional
            Number of cores to use for parallelization.
            If <=0, defaults to using all available cores. Default is 1.
        vfwe_only : :obj:`bool`, optional
            If True, only calculate the voxel-level FWE-corrected maps. Voxel-level correction
            can be performed very quickly if the Estimator's ``null_method`` was "montecarlo".
            Default is False.

        Returns
        -------
        images : :obj:`dict`
            Dictionary of 1D arrays corresponding to masked images generated by
            the correction procedure. The following arrays are generated by
            this method:

            -   ``logp_desc-size_level-cluster``: Cluster-level FWE-corrected ``-log10(p)`` map
                based on cluster size. This was previously simply called "logp_level-cluster".
                This array is **not** generated if ``vfwe_only`` is ``True``.
            -   ``logp_desc-mass_level-cluster``: Cluster-level FWE-corrected ``-log10(p)`` map
                based on cluster mass. According to :footcite:t:`bullmore1999global` and
                :footcite:t:`zhang2009cluster`, cluster mass-based inference is more powerful than
                cluster size.
                This array is **not** generated if ``vfwe_only`` is ``True``.
            -   ``logp_level-voxel``: Voxel-level FWE-corrected ``-log10(p)`` map.
                Voxel-level correction is generally more conservative than cluster-level
                correction, so it is only recommended for very large meta-analyses
                (i.e., hundreds of studies), per :footcite:t:`eickhoff2016behavior`.

        Notes
        -----
        If ``vfwe_only`` is ``False``, this method adds three new keys to the
        ``null_distributions_`` attribute:

            -   ``values_level-voxel_corr-fwe_method-montecarlo``: The maximum summary statistic
                value from each Monte Carlo iteration. An array of shape (n_iters,).
            -   ``values_desc-size_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
                size from each Monte Carlo iteration. An array of shape (n_iters,).
            -   ``values_desc-mass_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
                mass from each Monte Carlo iteration. An array of shape (n_iters,).

        See Also
        --------
        nimare.correct.FWECorrector : The Corrector from which to call this method.

        References
        ----------
        .. footbibliography::

        Examples
        --------
        >>> meta = MKDADensity()
        >>> result = meta.fit(dset)
        >>> corrector = FWECorrector(method='montecarlo', voxel_thresh=0.01,
        ...                          n_iters=5, n_cores=1)
        >>> cresult = corrector.transform(result)
        """
        stat_values = result.get_map("stat", return_type="array")

        if vfwe_only and (self.null_method == "montecarlo"):
            LGR.info(
                "Using precalculated histogram for voxel-level FWE correction."
            )

            # Determine p- and z-values from stat values and null distribution.
            p_vfwe_values = nullhist_to_p(
                stat_values,
                self.null_distributions_[
                    "histweights_level-voxel_corr-fwe_method-montecarlo"],
                self.null_distributions_["histogram_bins"],
            )

        else:
            if vfwe_only:
                LGR.warning(
                    "In order to run this method with the 'vfwe_only' option, "
                    "the Estimator must use the 'montecarlo' null_method. "
                    "Running permutations from scratch.")

            null_xyz = vox2mm(
                np.vstack(np.where(self.masker.mask_img.get_fdata())).T,
                self.masker.mask_img.affine,
            )

            n_cores = _check_ncores(n_cores)

            # Identify summary statistic corresponding to intensity threshold
            ss_thresh = self._p_to_summarystat(voxel_thresh)

            rand_idx = np.random.choice(
                null_xyz.shape[0],
                size=(self.inputs_["coordinates"].shape[0], n_iters),
            )
            rand_xyz = null_xyz[rand_idx, :]
            iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)
            iter_df = self.inputs_["coordinates"].copy()

            # Define connectivity matrix for cluster labeling
            conn = ndimage.generate_binary_structure(3, 2)

            with tqdm_joblib(tqdm(total=n_iters)):
                perm_results = Parallel(n_jobs=n_cores)(
                    delayed(self._correct_fwe_montecarlo_permutation)(
                        iter_xyzs[i_iter],
                        iter_df=iter_df,
                        conn=conn,
                        voxel_thresh=ss_thresh,
                        vfwe_only=vfwe_only,
                    )
                    for i_iter in range(n_iters)
                )

            fwe_voxel_max, fwe_cluster_size_max, fwe_cluster_mass_max = zip(*perm_results)

            if not vfwe_only:
                # Cluster-level FWE
                # Extract the summary statistics in voxel-wise (3D) form, threshold, and
                # cluster-label
                thresh_stat_values = self.masker.inverse_transform(stat_values).get_fdata()
                thresh_stat_values[thresh_stat_values <= ss_thresh] = 0
                labeled_matrix, _ = ndimage.label(thresh_stat_values, conn)

                cluster_labels, idx, cluster_sizes = np.unique(
                    labeled_matrix,
                    return_inverse=True,
                    return_counts=True,
                )
                assert cluster_labels[0] == 0

                # Cluster mass-based inference
                cluster_masses = np.zeros(cluster_labels.shape)
                for i_val in cluster_labels:
                    if i_val == 0:
                        cluster_masses[i_val] = 0
                        continue

                    cluster_mass = np.sum(thresh_stat_values[labeled_matrix == i_val] - ss_thresh)
                    cluster_masses[i_val] = cluster_mass

                p_cmfwe_vals = null_to_p(cluster_masses, fwe_cluster_mass_max, "upper")
                p_cmfwe_map = p_cmfwe_vals[np.reshape(idx, labeled_matrix.shape)]

                p_cmfwe_values = np.squeeze(
                    self.masker.transform(
                        nib.Nifti1Image(p_cmfwe_map, self.masker.mask_img.affine)
                    )
                )
                logp_cmfwe_values = -np.log10(p_cmfwe_values)
                logp_cmfwe_values[np.isinf(logp_cmfwe_values)] = -np.log10(np.finfo(float).eps)
                z_cmfwe_values = p_to_z(p_cmfwe_values, tail="one")

                # Cluster size-based inference
                cluster_sizes[0] = 0  # replace background's "cluster size" with zeros
                p_csfwe_vals = null_to_p(cluster_sizes, fwe_cluster_size_max, "upper")
                p_csfwe_map = p_csfwe_vals[np.reshape(idx, labeled_matrix.shape)]

                p_csfwe_values = np.squeeze(
                    self.masker.transform(
                        nib.Nifti1Image(p_csfwe_map, self.masker.mask_img.affine)
                    )
                )
                logp_csfwe_values = -np.log10(p_csfwe_values)
                logp_csfwe_values[np.isinf(logp_csfwe_values)] = -np.log10(np.finfo(float).eps)
                z_csfwe_values = p_to_z(p_csfwe_values, tail="one")

                self.null_distributions_[
                    "values_desc-size_level-cluster_corr-fwe_method-montecarlo"] = fwe_cluster_size_max
                self.null_distributions_[
                    "values_desc-mass_level-cluster_corr-fwe_method-montecarlo"] = fwe_cluster_mass_max

            # Voxel-level FWE
            LGR.info("Using null distribution for voxel-level FWE correction.")
            p_vfwe_values = null_to_p(stat_values, fwe_voxel_max, tail="upper")
            self.null_distributions_[
                "values_level-voxel_corr-fwe_method-montecarlo"] = fwe_voxel_max

        z_vfwe_values = p_to_z(p_vfwe_values, tail="one")
        logp_vfwe_values = -np.log10(p_vfwe_values)
        logp_vfwe_values[np.isinf(logp_vfwe_values)] = -np.log10(
            np.finfo(float).eps)

        if vfwe_only:
            # Return unthresholded value images
            images = {
                "logp_level-voxel": logp_vfwe_values,
                "z_level-voxel": z_vfwe_values,
            }

        else:
            # Return unthresholded value images
            images = {
                "logp_level-voxel": logp_vfwe_values,
                "z_level-voxel": z_vfwe_values,
                "logp_desc-size_level-cluster": logp_csfwe_values,
                "z_desc-size_level-cluster": z_csfwe_values,
                "logp_desc-mass_level-cluster": logp_cmfwe_values,
                "z_desc-mass_level-cluster": z_cmfwe_values,
            }

        return images
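Stripping away masking, clustering, and parallelization, the voxel-level part of this max-value permutation approach reduces to comparing each observed statistic against the distribution of per-iteration maxima. The sketch below fakes the null maps with normal noise instead of coordinate permutations, purely to illustrate the correction step.

import numpy as np

rng = np.random.default_rng(0)

# Toy statistic map: 1000 voxels of null noise with five truly elevated voxels.
stat_values = rng.normal(0, 1, size=1000)
stat_values[:5] += 5.0

# Max-value permutation null: keep only the maximum statistic from each null map.
n_iters = 500
fwe_voxel_max = rng.normal(0, 1, size=(n_iters, 1000)).max(axis=1)

# Voxel-level FWE-corrected p: proportion of null maxima at or above each observed value.
p_vfwe = np.array([(fwe_voxel_max >= stat).mean() for stat in stat_values])
print((p_vfwe < 0.05).sum())  # roughly the five injected voxels survive correction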
Example #8
    def _summarystat_to_p(self, stat_values, null_method="approximate"):
        """Compute p- and z-values from summary statistics (e.g., ALE scores).

        Uses either histograms from "approximate" null or null distribution from "montecarlo" null.

        Parameters
        ----------
        stat_values : 1D array_like
            Array of summary statistic values from estimator.
        null_method : {"approximate", "montecarlo"}, optional
            Whether to use approximate null or montecarlo null.
            Default is "approximate".

        Returns
        -------
        p_values, z_values : 1D array
            P- and Z-values for statistic values.
            Same shape as stat_values.
        """
        if null_method.startswith("approximate"):
            assert "histogram_bins" in self.null_distributions_.keys()
            assert "histweights_corr-none_method-approximate" in self.null_distributions_.keys(
            )

            p_values = nullhist_to_p(
                stat_values,
                self.null_distributions_[
                    "histweights_corr-none_method-approximate"],
                self.null_distributions_["histogram_bins"],
            )

        elif null_method == "montecarlo":
            assert "histogram_bins" in self.null_distributions_.keys()
            assert "histweights_corr-none_method-montecarlo" in self.null_distributions_.keys(
            )

            p_values = nullhist_to_p(
                stat_values,
                self.
                null_distributions_["histweights_corr-none_method-montecarlo"],
                self.null_distributions_["histogram_bins"],
            )

        elif null_method == "reduced_montecarlo":
            assert "values_corr-none_method-reducedMontecarlo" in self.null_distributions_.keys(
            )

            p_values = null_to_p(
                stat_values,
                self.null_distributions_[
                    "values_corr-none_method-reducedMontecarlo"],
                tail="upper",
            )

        else:
            raise ValueError(
                "Argument 'null_method' must be one of: 'approximate', 'montecarlo', "
                "'reduced_montecarlo'."
            )

        z_values = p_to_z(p_values, tail="one")
        return p_values, z_values
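nullhist_to_p is used here to read an upper-tail p-value off a precomputed null histogram. The following self-contained sketch shows one way such a lookup can work (nullhist_to_p_sketch is a simplified, hypothetical stand-in, not NiMARE's function), followed by the same one-tailed p-to-z conversion used above.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)

# Build a null histogram from simulated summary statistics.
null_stats = rng.chisquare(df=2, size=100_000)
histogram_bins = np.linspace(0, 30, 301)
histweights, _ = np.histogram(null_stats, bins=histogram_bins)

def nullhist_to_p_sketch(stat_values, weights, bin_edges):
    """Toy upper-tail p-value lookup from a binned null distribution."""
    weights = weights / weights.sum()
    # sf[j] approximates P(null >= bin_edges[j]); append 0 for values beyond the last edge.
    sf = np.r_[np.cumsum(weights[::-1])[::-1], 0.0]
    idx = np.searchsorted(bin_edges, np.asarray(stat_values), side="left")
    return np.clip(sf[np.clip(idx, 0, len(sf) - 1)], np.finfo(float).eps, 1.0)

p_values = nullhist_to_p_sketch([0.5, 5.0, 12.0], histweights, histogram_bins)
print(p_values, stats.norm.isf(p_values))  # p-values and their one-tailed z equivalents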