Code Example #1
File: macm.py | Project: NBCLab/cALE
def macm_workflow(ns_data_dir, output_dir, prefix, mask_fn):

    # download neurosynth dataset if necessary
    dataset_file = op.join(ns_data_dir, 'neurosynth_dataset.pkl.gz')

    if not op.isfile(dataset_file):
        if not op.isdir(ns_data_dir):
            os.mkdir(ns_data_dir)
        download(ns_data_dir, unpack=True)
        # Convert Neurosynth database to NiMARE dataset file
        dset = convert_neurosynth_to_dataset(
            op.join(ns_data_dir, 'database.txt'),
            op.join(ns_data_dir, 'features.txt'))
        dset.save(dataset_file)

    dset = Dataset.load(dataset_file)
    mask_ids = dset.get_studies_by_mask(mask_fn)
    maskdset = dset.slice(mask_ids)
    nonmask_ids = sorted(list(set(dset.ids) - set(mask_ids)))
    nonmaskdset = dset.slice(nonmask_ids)

    ale = ALE(kernel__fwhm=15)
    ale.fit(maskdset)

    corr = FWECorrector(method='permutation',
                        n_iters=10,
                        n_cores=-1,
                        voxel_thresh=0.001)
    cres = corr.transform(ale.results)
    cres.save_maps(output_dir=output_dir, prefix=prefix)
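The snippet above omits its imports; it presumably relies on os, os.path (as op), and NiMARE's Dataset, ALE, FWECorrector, and convert_neurosynth_to_dataset, plus a download helper for the Neurosynth data. A minimal, hypothetical invocation might look like the sketch below; every path is a placeholder rather than a value from the source project.

# Hypothetical invocation of the workflow above; all paths are placeholders,
# not values taken from the NBCLab/cALE project.
macm_workflow(
    ns_data_dir="/data/neurosynth",
    output_dir="/results/macm",
    prefix="left_amygdala",
    mask_fn="/masks/left_amygdala.nii.gz",
)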
Code Example #2
def test_KDA_fwe_1core(testdata_cbma):
    """Smoke test for KDA with montecarlo null and FWE correction."""
    meta = KDA(null_method="montecarlo", n_iters=10)
    res = meta.fit(testdata_cbma)
    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert res.get_map("p", return_type="array").dtype == np.float64
    assert isinstance(cres, nimare.results.MetaResult)
    assert (
        cres.get_map("logp_level-voxel_corr-FWE_method-montecarlo", return_type="array").dtype
        == np.float64
    )
    assert (
        cres.get_map(
            "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="array"
        ).dtype
        == np.float64
    )
    assert (
        cres.get_map(
            "logp_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="array"
        ).dtype
        == np.float64
    )
Code Example #3
File: test_cbma_ale.py | Project: julio-a-yanes/NiMARE
def test_ale_subtraction():
    """
    Smoke test for ALESubtraction
    """
    meta1 = ale.ALE()
    res1 = meta1.fit(pytest.cbma_testdata1)

    meta2 = ale.ALE()
    res2 = meta2.fit(pytest.cbma_testdata1)

    corr = FWECorrector(method='permutation',
                        voxel_thresh=0.001,
                        n_iters=5,
                        n_cores=1)
    cres1 = corr.transform(res1)
    cres2 = corr.transform(res2)

    sub_meta = ale.ALESubtraction()
    sub_meta.fit(
        meta1,
        meta2,
        image1=cres1.get_map('logp_level-cluster_corr-FWE_method-permutation',
                             return_type='image'),
        image2=cres2.get_map('logp_level-cluster_corr-FWE_method-permutation',
                             return_type='image'))
    assert isinstance(sub_meta.results, nimare.base.MetaResult)
Code Example #4
def test_MKDAChi2_fwe_2core(testdata_cbma):
    """Smoke test for MKDAChi2."""
    meta = MKDAChi2()
    res = meta.fit(testdata_cbma, testdata_cbma)
    assert isinstance(res, nimare.results.MetaResult)
    corr_2core = FWECorrector(method="montecarlo", n_iters=5, n_cores=2)
    cres_2core = corr_2core.transform(res)
    assert isinstance(cres_2core, nimare.results.MetaResult)
Code Example #5
def test_mkda_chi2_fwe_1core(testdata_cbma):
    """
    Smoke test for MKDAChi2
    """
    meta = MKDAChi2()
    res = meta.fit(testdata_cbma, testdata_cbma)
    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)
Code Example #6
def test_kda_density():
    """
    Smoke test for KDA
    """
    meta = mkda.KDA()
    res = meta.fit(pytest.cbma_testdata1)
    corr = FWECorrector(method='permutation', n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.base.MetaResult)
    assert isinstance(cres, nimare.base.MetaResult)
Code Example #7
def test_mkda_chi2_fwe():
    """
    Smoke test for MKDAChi2
    """
    meta = mkda.MKDAChi2()
    res = meta.fit(pytest.cbma_testdata1, pytest.cbma_testdata2)
    corr = FWECorrector(method='permutation', n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.base.MetaResult)
    assert isinstance(cres, nimare.base.MetaResult)
Code Example #8
File: test_meta_ibma.py | Project: nicholst/NiMARE
def test_PermutedOLS(testdata_ibma):
    """
    Smoke test for PermutedOLS with FWE correction.
    """
    meta = ibma.PermutedOLS(two_sided=True)
    meta.fit(testdata_ibma)
    assert isinstance(meta.results, nimare.base.MetaResult)
    corr = FWECorrector(method="montecarlo", n_iters=100, n_cores=1)
    cres = corr.transform(meta.results)
    assert isinstance(cres, nimare.base.MetaResult)
Code Example #9
File: test_meta_mkda.py | Project: koudyk/NiMARE
def test_kda_density_fwe_1core(testdata_cbma):
    """
    Smoke test for KDA
    """
    meta = mkda.KDA()
    res = meta.fit(testdata_cbma)
    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)
Code Example #10
def test_MKDADensity(testdata_cbma):
    """Smoke test for MKDADensity."""
    meta = MKDADensity(null_method="montecarlo", n_iters=10)
    res = meta.fit(testdata_cbma)
    corr = FWECorrector(method="montecarlo",
                        voxel_thresh=0.001,
                        n_iters=5,
                        n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)
Code Example #11
def test_mkda_density_analytic_null(testdata_cbma_full):
    """
    Smoke test for MKDADensity
    """
    meta = MKDADensity(null="analytic")
    res = meta.fit(testdata_cbma_full)
    corr = FWECorrector(method="montecarlo",
                        voxel_thresh=0.001,
                        n_iters=1,
                        n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)
Code Example #12
def test_kda_density_analytic_null(testdata_cbma):
    """
    Smoke test for KDA with analytical null and FWE correction.
    """
    meta = KDA(null_method="analytic")
    res = meta.fit(testdata_cbma)
    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert res.get_map("p", return_type="array").dtype == np.float64
    assert isinstance(cres, nimare.results.MetaResult)
    assert (cres.get_map("logp_level-voxel_corr-FWE_method-montecarlo",
                         return_type="array").dtype == np.float64)
Code Example #13
def test_MKDAChi2_fwe_1core(testdata_cbma):
    """Smoke test for MKDAChi2."""
    meta = MKDAChi2()
    res = meta.fit(testdata_cbma, testdata_cbma)
    valid_methods = FWECorrector.inspect(res)
    assert "montecarlo" in valid_methods

    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)
    assert ("values_desc-pFgA_level-voxel_corr-fwe_method-montecarlo"
            in cres.estimator.null_distributions_.keys())
    assert ("values_desc-pAgF_level-voxel_corr-fwe_method-montecarlo"
            in cres.estimator.null_distributions_.keys())
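Outside of the test suite, the same inspect pattern can be used to list which correction methods a fitted result supports before constructing a Corrector. A minimal sketch, assuming res is a MetaResult from a fitted CBMA estimator as in the test above:

from nimare.correct import FDRCorrector, FWECorrector

# List the correction methods reported as valid for this result
# (assumes `res` is a fitted MetaResult, as in the test above).
print(FWECorrector.inspect(res))
# FDRCorrector is assumed to expose the same classmethod.
print(FDRCorrector.inspect(res))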
Code Example #14
def test_MKDADensity_approximate_null(testdata_cbma_full, caplog):
    """Smoke test for MKDADensity with the "approximate" null_method."""
    meta = MKDADensity(null="approximate")
    res = meta.fit(testdata_cbma_full)
    corr = FWECorrector(method="montecarlo",
                        voxel_thresh=0.001,
                        n_iters=5,
                        n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)

    # Check that vfwe_only falls back to running permutations from scratch
    # (with a warning), since the approximate null cannot be reused
    corr2 = FWECorrector(
        method="montecarlo",
        voxel_thresh=0.001,
        n_iters=5,
        n_cores=1,
        vfwe_only=True,
    )
    with caplog.at_level(logging.WARNING):
        cres2 = corr2.transform(res)

    assert "Running permutations from scratch." in caplog.text

    assert isinstance(cres2, nimare.results.MetaResult)
    assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres2.maps
    assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" not in cres2.maps
Code Example #15
File: macm.py | Project: NBCLab/niconn
def macm(prefix=None, mask=None, output_dir=None, ns_data_dir=None):

    if mask is None or not op.isfile(mask):
        raise Exception('A valid mask is required for input!')

    if ns_data_dir is None:
        raise Exception(
            'A valid directory is required for downloading Neurosynth data!')

    if prefix is None:
        # Derive the prefix from the mask filename; note that str.strip()
        # removes characters, not a suffix, so remove the extension explicitly.
        prefix = op.basename(mask)
        if prefix.endswith('.nii.gz'):
            prefix = prefix[:-len('.nii.gz')]

    if output_dir is None:
        output_dir = op.dirname(op.abspath(mask))

    dataset_file = op.join(ns_data_dir, 'neurosynth_dataset.pkl.gz')

    # download neurosynth dataset if necessary
    if not op.isfile(dataset_file):
        from datasets.neurosynth import neurosynth_download
        neurosynth_download(ns_data_dir)

    dset = Dataset.load(dataset_file)
    mask_ids = dset.get_studies_by_mask(mask)
    maskdset = dset.slice(mask_ids)
    nonmask_ids = sorted(list(set(dset.ids) - set(mask_ids)))
    nonmaskdset = dset.slice(nonmask_ids)

    ale = ALE(kernel__fwhm=15)
    ale.fit(maskdset)

    corr = FWECorrector(method='montecarlo',
                        n_iters=5000,
                        n_cores=-1,
                        voxel_thresh=0.001)
    cres = corr.transform(ale.results)
    cres.save_maps(output_dir=output_dir, prefix=prefix)
Code Example #16
def test_MKDADensity_approximate_null(testdata_cbma_full):
    """Smoke test for MKDADensity with the "approximate" null_method."""
    meta = MKDADensity(null="approximate")
    res = meta.fit(testdata_cbma_full)
    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)

    # Check that the vfwe_only option does not work
    corr2 = FWECorrector(
        method="montecarlo",
        voxel_thresh=0.001,
        n_iters=5,
        n_cores=1,
        vfwe_only=True,
    )
    with pytest.raises(ValueError):
        corr2.transform(res)
Code Example #17
File: test_cbma_ale.py | Project: julio-a-yanes/NiMARE
def test_ale():
    """
    Smoke test for ALE
    """
    meta = ale.ALE()
    res = meta.fit(pytest.cbma_testdata1)
    assert isinstance(res, nimare.base.MetaResult)
    corr = FWECorrector(method='bonferroni')
    cres = corr.transform(res)
    assert isinstance(cres, nimare.base.MetaResult)
    corr = FWECorrector(method='permutation',
                        voxel_thresh=0.001,
                        n_iters=5,
                        n_cores=1)
    cres = corr.transform(meta.results)
    assert isinstance(cres, nimare.base.MetaResult)
Code Example #18
def test_MKDADensity_montecarlo_null(testdata_cbma):
    """Smoke test for MKDADensity with the "montecarlo" null_method."""
    meta = MKDADensity(null_method="montecarlo", n_iters=10)
    res = meta.fit(testdata_cbma)
    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=1)
    cres = corr.transform(res)
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(cres, nimare.results.MetaResult)

    # Check that the vfwe_only option works
    corr2 = FWECorrector(
        method="montecarlo",
        voxel_thresh=0.001,
        n_iters=5,
        n_cores=1,
        vfwe_only=True,
    )
    cres2 = corr2.transform(res)
    assert isinstance(cres2, nimare.results.MetaResult)
    assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres2.maps
    assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" not in cres2.maps
Code Example #19
def test_ALE_montecarlo_null_unit(testdata_cbma, tmp_path_factory):
    """Unit test for ALE with an montecarlo null_method.

    This test is run with low-memory kernel transformation as well.
    """
    tmpdir = tmp_path_factory.mktemp("test_ALE_montecarlo_null_unit")
    out_file = os.path.join(tmpdir, "file.pkl.gz")

    meta = ale.ALE(null_method="montecarlo",
                   n_iters=10,
                   kernel__memory_limit="1gb")
    res = meta.fit(testdata_cbma)
    assert "stat" in res.maps.keys()
    assert "p" in res.maps.keys()
    assert "z" in res.maps.keys()
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
    assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
    res2 = res.copy()
    assert res2 != res
    assert isinstance(res, nimare.results.MetaResult)

    # Test saving/loading
    meta.save(out_file, compress=True)
    assert os.path.isfile(out_file)
    meta2 = ale.ALE.load(out_file, compressed=True)
    assert isinstance(meta2, ale.ALE)
    with pytest.raises(pickle.UnpicklingError):
        ale.ALE.load(out_file, compressed=False)

    meta.save(out_file, compress=False)
    assert os.path.isfile(out_file)
    meta2 = ale.ALE.load(out_file, compressed=False)
    assert isinstance(meta2, ale.ALE)
    with pytest.raises(OSError):
        ale.ALE.load(out_file, compressed=True)

    # Test MCC methods
    # Monte Carlo FWE
    corr = FWECorrector(method="montecarlo",
                        voxel_thresh=0.001,
                        n_iters=5,
                        n_cores=-1)
    cres = corr.transform(res)
    assert isinstance(cres, nimare.results.MetaResult)
    assert "z_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys(
    )
    assert "z_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys(
    )
    assert "z_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys(
    )
    assert "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys(
    )
    assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert isinstance(
        cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo",
                     return_type="image"),
        nib.Nifti1Image,
    )
    assert isinstance(
        cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo",
                     return_type="array"),
        np.ndarray,
    )
    assert isinstance(
        cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo",
                     return_type="image"),
        nib.Nifti1Image,
    )
    assert isinstance(
        cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo",
                     return_type="array"),
        np.ndarray,
    )

    # Check that the updated null distribution is in the corrected MetaResult's Estimator.
    assert ("values_desc-mass_level-cluster_corr-fwe_method-montecarlo"
            in cres.estimator.null_distributions_.keys())
    # The updated null distribution should *not* be in the original Estimator, nor in the
    # uncorrected MetaResult's Estimator.
    assert ("values_desc-mass_level-cluster_corr-fwe_method-montecarlo"
            not in meta.null_distributions_.keys())
    assert ("values_desc-mass_level-cluster_corr-fwe_method-montecarlo"
            not in res.estimator.null_distributions_.keys())

    # Bonferroni FWE
    corr = FWECorrector(method="bonferroni")
    cres = corr.transform(res)
    assert isinstance(cres, nimare.results.MetaResult)
    assert isinstance(
        cres.get_map("z_corr-FWE_method-bonferroni", return_type="image"),
        nib.Nifti1Image)
    assert isinstance(
        cres.get_map("z_corr-FWE_method-bonferroni", return_type="array"),
        np.ndarray)

    # FDR
    corr = FDRCorrector(method="indep", alpha=0.05)
    cres = corr.transform(res)
    assert isinstance(cres, nimare.results.MetaResult)
    assert isinstance(
        cres.get_map("z_corr-FDR_method-indep", return_type="image"),
        nib.Nifti1Image)
    assert isinstance(
        cres.get_map("z_corr-FDR_method-indep", return_type="array"),
        np.ndarray)
Code Example #20
        pytest.param(kernel.MKDAKernel, id="mkda_kernel"),
        pytest.param(kernel.KDAKernel, id="kda_kernel"),
    ],
)
def kern(request):
    """Define kernel transformers for tests."""
    return request.param


##########################################
# multiple comparison correctors (testing)
##########################################
@pytest.fixture(
    scope="session",
    params=[
        pytest.param(FWECorrector(method="bonferroni"), id="fwe_bonferroni"),
        pytest.param(
            FWECorrector(method="montecarlo",
                         voxel_thresh=ALPHA,
                         n_iters=100,
                         n_cores=N_CORES),
            id="fwe_montecarlo",
        ),
        pytest.param(FDRCorrector(method="indep", alpha=ALPHA),
                     id="fdr_indep"),
        pytest.param(FDRCorrector(method="negcorr", alpha=ALPHA),
                     id="fdr_negcorr"),
    ],
)
def corr(request):
    """Define multiple comparisons correctors for tests."""
    return request.param
Code Example #21
File: test_meta_ale.py | Project: NBCLab/NiMARE
def test_ALE_approximate_null_unit(testdata_cbma, tmp_path_factory):
    """Unit test for ALE with approximate null_method."""
    tmpdir = tmp_path_factory.mktemp("test_ALE_approximate_null_unit")
    out_file = os.path.join(tmpdir, "file.pkl.gz")

    meta = ale.ALE(null_method="approximate")
    res = meta.fit(testdata_cbma)
    assert "stat" in res.maps.keys()
    assert "p" in res.maps.keys()
    assert "z" in res.maps.keys()
    assert isinstance(res, nimare.results.MetaResult)
    assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
    assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
    res2 = res.copy()
    assert res2 != res
    assert isinstance(res, nimare.results.MetaResult)

    # Test saving/loading
    meta.save(out_file, compress=True)
    assert os.path.isfile(out_file)
    meta2 = ale.ALE.load(out_file, compressed=True)
    assert isinstance(meta2, ale.ALE)
    with pytest.raises(pickle.UnpicklingError):
        ale.ALE.load(out_file, compressed=False)

    meta.save(out_file, compress=False)
    assert os.path.isfile(out_file)
    meta2 = ale.ALE.load(out_file, compressed=False)
    assert isinstance(meta2, ale.ALE)
    with pytest.raises(OSError):
        ale.ALE.load(out_file, compressed=True)

    # Test MCC methods
    # Monte Carlo FWE
    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=-1)
    cres = corr.transform(meta.results)
    assert isinstance(cres, nimare.results.MetaResult)
    assert "z_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert "z_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert "logp_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
    assert isinstance(
        cres.get_map("z_level-cluster_corr-FWE_method-montecarlo", return_type="image"),
        nib.Nifti1Image,
    )
    assert isinstance(
        cres.get_map("z_level-cluster_corr-FWE_method-montecarlo", return_type="array"), np.ndarray
    )

    # Bonferroni FWE
    corr = FWECorrector(method="bonferroni")
    cres = corr.transform(res)
    assert isinstance(cres, nimare.results.MetaResult)
    assert isinstance(
        cres.get_map("z_corr-FWE_method-bonferroni", return_type="image"), nib.Nifti1Image
    )
    assert isinstance(
        cres.get_map("z_corr-FWE_method-bonferroni", return_type="array"), np.ndarray
    )

    # FDR
    corr = FDRCorrector(method="indep", alpha=0.05)
    cres = corr.transform(meta.results)
    assert isinstance(cres, nimare.results.MetaResult)
    assert isinstance(
        cres.get_map("z_corr-FDR_method-indep", return_type="image"), nib.Nifti1Image
    )
    assert isinstance(cres.get_map("z_corr-FDR_method-indep", return_type="array"), np.ndarray)
Code Example #22
###############################################################################
# Individual group ALEs
# -----------------------------------------------------------------------------
# Computing separate ALE analyses for each group is not strictly necessary for
# performing the subtraction analysis but will help the experimenter to appreciate the
# similarities and differences between the groups.
from nimare.correct import FWECorrector
from nimare.meta.cbma import ALE

ale = ALE(null_method="approximate")
knowledge_results = ale.fit(knowledge_dset)
related_results = ale.fit(related_dset)

corr = FWECorrector(method="montecarlo",
                    voxel_thresh=0.001,
                    n_iters=100,
                    n_cores=2)
knowledge_corrected_results = corr.transform(knowledge_results)
related_corrected_results = corr.transform(related_results)

fig, axes = plt.subplots(figsize=(12, 10), nrows=2)
knowledge_img = knowledge_corrected_results.get_map(
    "z_desc-size_level-cluster_corr-FWE_method-montecarlo")
plot_stat_map(
    knowledge_img,
    cut_coords=4,
    display_mode="z",
    title="Semantic knowledge",
    threshold=2.326,  # cluster-level p < .01, one-tailed
    cmap="RdBu_r",
    vmax=4,
Code Example #23
# Multiple comparisons correction in coordinate-based meta-analyses
# -----------------------------------------------------------------------------
# .. tip::
#   For more information about multiple comparisons correction and CBMA in NiMARE,
#   see :ref:`multiple comparisons correction`.
from nimare.meta.cbma.ale import ALE

# First, we need to fit the Estimator to the Dataset.
meta = ALE(null_method="approximate")
results = meta.fit(dset)

# We can check which FWE correction methods are available for the ALE Estimator
# with the ``inspect`` class method.
from nimare.correct import FWECorrector

print(FWECorrector.inspect(results))

###############################################################################
# Apply the Corrector to the MetaResult
# =============================================================================
# Now that we know what FWE correction methods are available, we can use one.
#
# The "montecarlo" method is a special one that is implemented within the
# Estimator, rather than in the Corrector.
corr = FWECorrector(method="montecarlo", n_iters=50, n_cores=2)
cres = corr.transform(results)

DISTS_TO_PLOT = [
    "values_desc-size_level-cluster_corr-fwe_method-montecarlo",
    "values_desc-mass_level-cluster_corr-fwe_method-montecarlo",
    "values_level-voxel_corr-fwe_method-montecarlo",
Code Example #24
def ale_sleuth_workflow(
    sleuth_file,
    sleuth_file2=None,
    output_dir=None,
    prefix=None,
    n_iters=10000,
    v_thr=0.001,
    fwhm=None,
    n_cores=1,
):
    """Perform ALE meta-analysis from Sleuth text file."""
    LGR.info("Loading coordinates...")

    if fwhm:
        fwhm_str = f"of {fwhm} mm"
    else:
        fwhm_str = "determined by sample size"

    if not sleuth_file2:
        dset = convert_sleuth_to_dataset(sleuth_file, target="ale_2mm")
        n_subs = dset.get_metadata(field="sample_sizes")
        n_subs = np.sum(n_subs)

        boilerplate = """
An activation likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro,
2002; Eickhoff, Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012)
meta-analysis was performed using NiMARE. The input dataset included {n_foci}
foci from {n_subs} participants across {n_exps} studies/experiments.

Modeled activation maps were generated for each study/experiment by convolving
each focus with a Gaussian kernel {fwhm_str}.
For voxels with overlapping kernels, the maximum value was retained.
The modeled activation maps were rendered in MNI 152 space (Fonov et al., 2009;
Fonov et al., 2011) at 2x2x2mm resolution. A map of ALE values was then
computed for the sample as the union of modeled activation values across
studies/experiments. Voxelwise statistical significance was determined based on
an analytically derived null distribution using the method described in
Eickhoff, Bzdok, Laird, Kurth, & Fox (2012), prior to multiple comparisons
correction.

-> If the cluster-level FWE-corrected results were used, include the following:
A cluster-forming threshold of p < {unc} was used to perform cluster-level FWE
correction. {n_iters} iterations were performed to estimate a null distribution
of cluster sizes, in which the locations of coordinates were randomly drawn
from a gray matter template and the maximum cluster size was recorded after
applying an uncorrected cluster-forming threshold of p < {unc}. The negative
log-transformed p-value for each cluster in the thresholded map was determined
based on the cluster sizes.

-> If voxel-level FWE-corrected results were used, include the following:
Voxel-level FWE-correction was performed. {n_iters} iterations were performed
to estimate a null distribution of ALE values, in which the locations of
coordinates were randomly drawn from a gray matter template and the maximum
ALE value was recorded.

References
----------
- Eickhoff, S. B., Bzdok, D., Laird, A. R., Kurth, F., & Fox, P. T. (2012).
Activation likelihood estimation meta-analysis revisited. NeuroImage,
59(3), 2349-2361.
- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C.,
Collins, D. L., & Brain Development Cooperative Group. (2011).
Unbiased average age-appropriate atlases for pediatric studies.
Neuroimage, 54(1), 313-327.
- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L.
(2009). Unbiased nonlinear average age-appropriate brain templates from birth
to adulthood. NeuroImage, (47), S102.
- Turkeltaub, P. E., Eden, G. F., Jones, K. M., & Zeffiro, T. A. (2002).
Meta-analysis of the functional neuroanatomy of single-word reading: method
and validation. NeuroImage, 16(3 Pt 1), 765-780.
- Turkeltaub, P. E., Eickhoff, S. B., Laird, A. R., Fox, M., Wiener, M.,
& Fox, P. (2012). Minimizing within-experiment and within-group effects in
Activation Likelihood Estimation meta-analyses. Human Brain Mapping,
33(1), 1-13.
        """

        ale = ALE(kernel__fwhm=fwhm)

        LGR.info("Performing meta-analysis...")
        results = ale.fit(dset)
        corr = FWECorrector(method="montecarlo",
                            n_iters=n_iters,
                            voxel_thresh=v_thr,
                            n_cores=n_cores)
        cres = corr.transform(results)
        fcounter = FocusCounter(
            target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
            voxel_thresh=None,
        )
        count_df, _ = fcounter.transform(cres)

        boilerplate = boilerplate.format(
            n_exps=len(dset.ids),
            n_subs=n_subs,
            n_foci=dset.coordinates.shape[0],
            unc=v_thr,
            n_iters=n_iters,
            fwhm_str=fwhm_str,
        )
    else:
        dset1 = convert_sleuth_to_dataset(sleuth_file, target="ale_2mm")
        dset2 = convert_sleuth_to_dataset(sleuth_file2, target="ale_2mm")
        n_subs1 = dset1.get_metadata(field="sample_sizes")
        n_subs1 = np.sum(n_subs1)
        n_subs2 = dset2.get_metadata(field="sample_sizes")
        n_subs2 = np.sum(n_subs2)

        boilerplate = """
Activation likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro,
2002; Eickhoff, Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012)
meta-analyses were performed using NiMARE for each of two datasets.
The first input dataset included {n_foci1} foci from {n_subs1} participants
across {n_exps1} studies/experiments. The second input dataset included
{n_foci2} foci from {n_subs2} participants across {n_exps2} studies/experiments.

Foci were convolved with Gaussian kernels {fwhm_str},
implemented on the MNI 152 template (Fonov et al., 2009; Fonov et al., 2011)
at 2x2x2mm resolution.

-> If the cluster-level FWE-corrected results were used, include the following:
A cluster-forming threshold of p < {unc} was used to perform cluster-level FWE
correction. {n_iters} iterations were performed to estimate a null distribution
of cluster sizes, in which the locations of coordinates were randomly drawn
from a gray matter template and the maximum cluster size was recorded after
applying an uncorrected cluster-forming threshold of p < {unc}. The negative
log-transformed p-value for each cluster in the thresholded map was determined
based on the cluster sizes.

-> If voxel-level FWE-corrected results were used, include the following:
Voxel-level FWE-correction was performed. {n_iters} iterations were performed
to estimate a null distribution of ALE values, in which the locations of
coordinates were randomly drawn from a gray matter template and the maximum
ALE value was recorded.

Following dataset-specific ALE meta-analyses, a subtraction analysis was
performed to compare the two datasets according to the procedure from Laird
et al. (2005). {n_iters} iterations were performed.

References
----------
- Turkeltaub, P. E., Eden, G. F., Jones, K. M., & Zeffiro, T. A. (2002).
Meta-analysis of the functional neuroanatomy of single-word reading: method
and validation. NeuroImage, 16(3 Pt 1), 765-780.
- Eickhoff, S. B., Bzdok, D., Laird, A. R., Kurth, F., & Fox, P. T. (2012).
Activation likelihood estimation meta-analysis revisited. NeuroImage,
59(3), 2349-2361.
- Turkeltaub, P. E., Eickhoff, S. B., Laird, A. R., Fox, M., Wiener, M.,
& Fox, P. (2012). Minimizing within-experiment and within-group effects in
Activation Likelihood Estimation meta-analyses. Human Brain Mapping,
33(1), 1-13.
- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C.,
Collins, D. L., & Brain Development Cooperative Group. (2011).
Unbiased average age-appropriate atlases for pediatric studies.
Neuroimage, 54(1), 313-327.
- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L.
(2009). Unbiased nonlinear average age-appropriate brain templates from birth
to adulthood. NeuroImage, (47), S102.
- Laird, A. R., Fox, P. M., Price, C. J., Glahn, D. C., Uecker, A. M.,
Lancaster, J. L., ... & Fox, P. T. (2005). ALE meta-analysis: Controlling the
false discovery rate and performing statistical contrasts. Human brain mapping,
25(1), 155-164.
        """

        ale1 = ALE(kernel__fwhm=fwhm)
        ale2 = ALE(kernel__fwhm=fwhm)

        LGR.info("Performing meta-analyses...")
        res1 = ale1.fit(dset1)
        res2 = ale2.fit(dset2)
        corr = FWECorrector(method="montecarlo",
                            n_iters=n_iters,
                            voxel_thresh=v_thr,
                            n_cores=n_cores)
        cres1 = corr.transform(res1)
        fcounter = FocusCounter(
            target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
            voxel_thresh=None,
        )
        count_df1, _ = fcounter.transform(cres1)

        cres2 = corr.transform(res2)
        count_df2, _ = fcounter.transform(cres2)

        sub = ALESubtraction(n_iters=n_iters, kernel__fwhm=fwhm)
        sres = sub.fit(dset1, dset2)

        boilerplate = boilerplate.format(
            n_exps1=len(dset1.ids),
            n_subs1=n_subs1,
            n_foci1=dset1.coordinates.shape[0],
            n_exps2=len(dset2.ids),
            n_subs2=n_subs2,
            n_foci2=dset2.coordinates.shape[0],
            unc=v_thr,
            n_iters=n_iters,
            fwhm_str=fwhm_str,
        )

    if output_dir is None:
        output_dir = os.path.abspath(os.path.dirname(sleuth_file))
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    if prefix is None:
        base = os.path.basename(sleuth_file)
        prefix, _ = os.path.splitext(base)
        prefix += "_"
    elif not prefix.endswith("_"):
        prefix = prefix + "_"

    LGR.info("Saving output maps...")
    if not sleuth_file2:
        cres.save_maps(output_dir=output_dir, prefix=prefix)
        count_df.to_csv(os.path.join(output_dir, prefix + "_clust.tsv"),
                        index=False,
                        sep="\t")
        copyfile(sleuth_file,
                 os.path.join(output_dir, prefix + "input_coordinates.txt"))
    else:
        prefix1 = os.path.splitext(os.path.basename(sleuth_file))[0] + "_"
        prefix2 = os.path.splitext(os.path.basename(sleuth_file2))[0] + "_"
        prefix3 = prefix + "subtraction_"
        cres1.save_maps(output_dir=output_dir, prefix=prefix1)
        count_df1.to_csv(os.path.join(output_dir, prefix1 + "_clust.tsv"),
                         index=False,
                         sep="\t")
        cres2.save_maps(output_dir=output_dir, prefix=prefix2)
        count_df2.to_csv(os.path.join(output_dir, prefix2 + "_clust.tsv"),
                         index=False,
                         sep="\t")
        sres.save_maps(output_dir=output_dir, prefix=prefix3)
        copyfile(
            sleuth_file,
            os.path.join(output_dir, prefix + "group1_input_coordinates.txt"))
        copyfile(
            sleuth_file2,
            os.path.join(output_dir, prefix + "group2_input_coordinates.txt"))

    LGR.info("Workflow completed.")
    LGR.info(boilerplate)
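For reference, a hypothetical call to this workflow for a two-group subtraction analysis might look like the sketch below; the Sleuth text files, output directory, and core count are placeholders, not values from the source project.

# Hypothetical invocation; file names and settings are placeholders.
ale_sleuth_workflow(
    "group1_foci.txt",
    sleuth_file2="group2_foci.txt",
    output_dir="ale_results",
    n_iters=10000,
    v_thr=0.001,
    n_cores=2,
)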
Code Example #25
def macm_workflow(dataset_file,
                  mask_file,
                  output_dir=None,
                  prefix=None,
                  n_iters=10000,
                  v_thr=0.001,
                  n_cores=1):
    """Perform MACM with ALE algorithm."""
    LGR.info("Loading coordinates...")
    dset = Dataset(dataset_file)
    sel_ids = dset.get_studies_by_mask(mask_file)
    sel_dset = dset.slice(sel_ids)

    # override sample size
    n_subs_db = dset.coordinates.drop_duplicates("id")["n"].astype(
        float).astype(int).sum()
    n_subs_sel = sel_dset.coordinates.drop_duplicates("id")["n"].astype(
        float).astype(int).sum()
    LGR.info(f"{len(sel_ids)} studies selected out of {len(dset.ids)}.")

    boilerplate = """
Meta-analytic connectivity modeling (MACM; Laird et al., 2009; Robinson et al.,
2009; Eickhoff et al., 2010) analysis was performed with the activation
likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro, 2002; Eickhoff,
Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012) meta-analysis
algorithm using NiMARE. The input dataset included {n_foci_db}
foci from {n_subs_db} participants across {n_exps_db} studies/experiments, from
which studies/experiments were selected for analysis if they had at least one
focus inside the target mask. The resulting sample included {n_foci_sel}
foci from {n_subs_sel} participants across {n_exps_sel} studies/experiments.

Modeled activation maps were generated for each study/experiment by convolving
each focus with a Gaussian kernel determined by the study/experiment's sample
size. For voxels with overlapping kernels, the maximum value was retained.
The modeled activation maps were rendered in MNI 152 space (Fonov et al., 2009;
Fonov et al., 2011) at 2x2x2mm resolution. A map of ALE values was then
computed for the sample as the union of modeled activation values across
studies/experiments. Voxelwise statistical significance was determined based on
an analytically derived null distribution using the method described in
Eickhoff, Bzdok, Laird, Kurth, & Fox (2012), prior to multiple comparisons
correction.

-> If the cluster-level FWE-corrected results were used, include the following:
A cluster-forming threshold of p < {unc} was used to perform cluster-level FWE
correction. {n_iters} iterations were performed to estimate a null distribution
of cluster sizes, in which the locations of coordinates were randomly drawn
from a gray matter template and the maximum cluster size was recorded after
applying an uncorrected cluster-forming threshold of p < {unc}. The negative
log-transformed p-value for each cluster in the thresholded map was determined
based on the cluster sizes.

-> If voxel-level FWE-corrected results were used, include the following:
Voxel-level FWE-correction was performed. {n_iters} iterations were performed
to estimate a null distribution of ALE values, in which the locations of
coordinates were randomly drawn from a gray matter template and the maximum
ALE value was recorded.

References
----------
- Eickhoff, S. B., Bzdok, D., Laird, A. R., Kurth, F., & Fox, P. T. (2012).
Activation likelihood estimation meta-analysis revisited. NeuroImage,
59(3), 2349–2361.
- Eickhoff, S. B., Jbabdi, S., Caspers, S., Laird, A. R., Fox, P. T., Zilles,
K., & Behrens, T. E. (2010). Anatomical and functional connectivity of
cytoarchitectonic areas within the human parietal operculum. Journal of
Neuroscience, 30(18), 6409-6421.
- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C.,
Collins, D. L., & Brain Development Cooperative Group. (2011).
Unbiased average age-appropriate atlases for pediatric studies.
Neuroimage, 54(1), 313-327.
- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L.
(2009). Unbiased nonlinear average age-appropriate brain templates from birth
to adulthood. NeuroImage, (47), S102.
- Laird, A. R., Eickhoff, S. B., Li, K., Robin, D. A., Glahn, D. C., &
Fox, P. T. (2009). Investigating the functional heterogeneity of the default
mode network using coordinate-based meta-analytic modeling. The Journal of
Neuroscience: The Official Journal of the Society for Neuroscience, 29(46),
14496–14505.
- Robinson, J. L., Laird, A. R., Glahn, D. C., Lovallo, W. R., & Fox, P. T.
(2009). Metaanalytic connectivity modeling: Delineating the functional
connectivity of the human amygdala. Human Brain Mapping, 31(2), 173-184.
- Turkeltaub, P. E., Eden, G. F., Jones, K. M., & Zeffiro, T. A. (2002).
Meta-analysis of the functional neuroanatomy of single-word reading: method
and validation. NeuroImage, 16(3 Pt 1), 765–780.
- Turkeltaub, P. E., Eickhoff, S. B., Laird, A. R., Fox, M., Wiener, M.,
& Fox, P. (2012). Minimizing within-experiment and within-group effects in
Activation Likelihood Estimation meta-analyses. Human Brain Mapping,
33(1), 1–13.
    """

    LGR.info("Performing meta-analysis...")
    ale = ALE()
    results = ale.fit(sel_dset)  # analyze only the studies with foci in the mask
    corr = FWECorrector(method="montecarlo",
                        n_iters=n_iters,
                        voxel_thresh=v_thr,
                        n_cores=n_cores)
    cres = corr.transform(results)

    boilerplate = boilerplate.format(
        n_exps_db=len(dset.ids),
        n_subs_db=n_subs_db,
        n_foci_db=dset.coordinates.shape[0],
        n_exps_sel=len(sel_dset.ids),
        n_subs_sel=n_subs_sel,
        n_foci_sel=sel_dset.coordinates.shape[0],
        unc=v_thr,
        n_iters=n_iters,
    )

    if output_dir is None:
        output_dir = os.path.abspath(os.path.dirname(dataset_file))
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    if prefix is None:
        base = os.path.basename(dataset_file)
        prefix, _ = os.path.splitext(base)
        prefix += "_"

    LGR.info("Saving output maps...")
    cres.save_maps(output_dir=output_dir, prefix=prefix)
    copyfile(dataset_file,
             os.path.join(output_dir, prefix + "input_dataset.json"))
    LGR.info("Workflow completed.")
    LGR.info(boilerplate)
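A hypothetical call to this MACM workflow; the NiMARE Dataset JSON and the binary ROI mask below are placeholders.

# Hypothetical invocation; both file paths are placeholders.
macm_workflow(
    "neurosynth_dataset.json",   # NiMARE Dataset file (JSON)
    "amygdala_roi.nii.gz",       # binary ROI mask in the same space
    output_dir="macm_results",
    n_iters=10000,
    n_cores=2,
)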
Code Example #26
File: plot_ibma.py | Project: nicholst/NiMARE
###############################################################################
# Permuted OLS
# -----------------------------------------------------------------------------
meta = ibma.PermutedOLS(two_sided=True)
meta.fit(dset)
plot_stat_map(meta.results.get_map("z"),
              cut_coords=[0, 0, -8],
              draw_cross=False,
              cmap="RdBu_r")

###############################################################################
# Permuted OLS with FWE Correction
# -----------------------------------------------------------------------------
meta = ibma.PermutedOLS(two_sided=True)
meta.fit(dset)
corrector = FWECorrector(method="montecarlo", n_iters=100, n_cores=1)
cresult = corrector.transform(meta.results)
plot_stat_map(
    cresult.get_map("z_level-voxel_corr-FWE_method-montecarlo"),
    cut_coords=[0, 0, -8],
    draw_cross=False,
    cmap="RdBu_r",
)

###############################################################################
# Weighted Least Squares
# -----------------------------------------------------------------------------
meta = ibma.WeightedLeastSquares(tau2=0)
meta.fit(dset)
plot_stat_map(meta.results.get_map("z"),
              cut_coords=[0, 0, -8],
Code Example #27
###############################################################################
# Select studies with *no* reported coordinates in the ROI
# -----------------------------------------------------------------------------
no_roi_ids = list(set(dset.ids).difference(roi_ids))
dset_unsel = dset.slice(no_roi_ids)
print(f"{len(no_roi_ids)}/{len(dset.ids)} studies report zero coordinates in the ROI")


###############################################################################
# MKDA Chi2 with FWE correction
# -----------------------------------------------------------------------------
mkda = MKDAChi2(kernel__r=10)
results = mkda.fit(dset_sel, dset_unsel)

corr = FWECorrector(method="montecarlo", n_iters=10000)
cres = corr.transform(results)

# We want the "specificity" map (2-way chi-square between sel and unsel)
plotting.plot_stat_map(
    cres.get_map("z_desc-consistency_level-voxel_corr-FWE_method-montecarlo"),
    threshold=3.09,
    draw_cross=False,
    cmap="RdBu_r",
)

###############################################################################
# SCALE
# -----------------------------------------------------------------------------
# Another good option for a MACM analysis is the SCALE algorithm, which was
# designed specifically for MACM. Unfortunately, SCALE does not support
Code Example #28
dset = convert_sleuth_to_dataset(sleuth_file)
dset2 = convert_sleuth_to_dataset(sleuth_file2)

###############################################################################
# Individual group ALEs
# --------------------------------------------------
# Computing separate ALE analyses for each group is not strictly necessary for
# performing the subtraction analysis but will help to appreciate the
# similarities and differences between the groups.
ale = ALE(null_method="approximate")
res = ale.fit(dset)
res2 = ale.fit(dset2)

corr = FWECorrector(method="montecarlo",
                    voxel_thresh=0.001,
                    n_iters=100,
                    n_cores=1)
cres = corr.transform(res)
cres2 = corr.transform(res2)

img = cres.get_map("z_level-cluster_corr-FWE_method-montecarlo")
plot_stat_map(
    img,
    cut_coords=4,
    display_mode="z",
    title="Semantic knowledge",
    threshold=2.326,  # cluster-level p < .01, one-tailed
    cmap="RdBu_r",
    vmax=4,
)