Example #1
###############################################################################
# Download data
# -----------------------------------------------------------------------------
from nimare.extract import download_nidm_pain

dset_dir = download_nidm_pain()

###############################################################################
# Load Dataset
# -----------------------------------------------------------------------------
import os

from nimare.dataset import Dataset
from nimare.utils import get_resource_path

dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)
dset.update_path(dset_dir)

mask_img = dset.masker.mask_img

###############################################################################
# .. _corrector-cbma-example:
#
# Multiple comparisons correction in coordinate-based meta-analyses
# -----------------------------------------------------------------------------
# .. tip::
#   For more information about multiple comparisons correction and CBMA in
#   NiMARE,
#   see :ref:`multiple comparisons correction`.
from nimare.meta.cbma.ale import ALE

# First, we need to fit the Estimator to the Dataset.
ale = ALE()
results = ale.fit(dset)
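
###############################################################################
# Apply a Corrector to the MetaResult
# -----------------------------------------------------------------------------
# A short sketch of the correction step this section introduces, using the
# FWECorrector that appears in later examples in this collection; the small
# n_iters value is for illustration only.
from nimare.correct import FWECorrector

corr = FWECorrector(method="montecarlo", n_iters=10, n_cores=1)
cres = corr.transform(results)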
Example #2
###############################################################################
# Start with the necessary imports
# -----------------------------------------------------------------------------
import os

from nilearn.plotting import plot_glass_brain

from nimare.dataset import Dataset
from nimare.meta.kernel import Peaks2MapsKernel
from nimare.utils import get_resource_path

###############################################################################
# Load Dataset
# -----------------------------------------------------------------------------
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)

###############################################################################
# Run peaks2maps
# -----------------------------------------------------------------------------
k = Peaks2MapsKernel()
imgs = k.transform(dset, return_type="image")

###############################################################################
# Plot modeled activation maps
# -----------------------------------------------------------------------------
for img in imgs:
    display = plot_glass_brain(img,
                               display_mode="lyrz",
                               plot_abs=False,
                               colorbar=True)
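
###############################################################################
# Kernels can also return arrays
# -----------------------------------------------------------------------------
# A brief sketch: ``transform`` also accepts return_type="array" (as used in
# other examples in this collection), returning the modeled activation values
# as a NumPy array instead of a list of images.
ma_arr = k.transform(dset, return_type="array")
print(ma_arr.shape)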
Example #3
# Imports required by this workflow function
import logging
import os
import pathlib
from shutil import copyfile

from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.meta.cbma.ale import ALE

LGR = logging.getLogger(__name__)

def macm_workflow(dataset_file,
                  mask_file,
                  output_dir=None,
                  prefix=None,
                  n_iters=10000,
                  v_thr=0.001,
                  n_cores=1):
    """Perform MACM with ALE algorithm."""
    LGR.info("Loading coordinates...")
    dset = Dataset(dataset_file)
    sel_ids = dset.get_studies_by_mask(mask_file)
    sel_dset = dset.slice(sel_ids)

    # compute total sample sizes for the full and selected datasets
    n_subs_db = dset.coordinates.drop_duplicates("id")["n"].astype(
        float).astype(int).sum()
    n_subs_sel = sel_dset.coordinates.drop_duplicates("id")["n"].astype(
        float).astype(int).sum()
    LGR.info(f"{len(sel_ids)} studies selected out of {len(dset.ids)}.")

    boilerplate = """
Meta-analytic connectivity modeling (MACM; Laird et al., 2009; Robinson et al.,
2009; Eickhoff et al., 2010) analysis was performed with the activation
likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro, 2002; Eickhoff,
Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012) meta-analysis
algorithm using NiMARE. The input dataset included {n_foci_db}
foci from {n_subs_db} participants across {n_exps_db} studies/experiments, from
which studies/experiments were selected for analysis if they had at least one
focus inside the target mask. The resulting sample included {n_foci_sel}
foci from {n_subs_sel} participants across {n_exps_sel} studies/experiments.

Modeled activation maps were generated for each study/experiment by convolving
each focus with a Gaussian kernel determined by the study/experiment's sample
size. For voxels with overlapping kernels, the maximum value was retained.
The modeled activation maps were rendered in MNI 152 space (Fonov et al., 2009;
Fonov et al., 2011) at 2x2x2mm resolution. A map of ALE values was then
computed for the sample as the union of modeled activation values across
studies/experiments. Voxelwise statistical significance was determined based on
an analytically derived null distribution using the method described in
Eickhoff, Bzdok, Laird, Kurth, & Fox (2012), prior to multiple comparisons
correction.

-> If the cluster-level FWE-corrected results were used, include the following:
A cluster-forming threshold of p < {unc} was used to perform cluster-level FWE
correction. {n_iters} iterations were performed to estimate a null distribution
of cluster sizes, in which the locations of coordinates were randomly drawn
from a gray matter template and the maximum cluster size was recorded after
applying an uncorrected cluster-forming threshold of p < {unc}. The negative
log-transformed p-value for each cluster in the thresholded map was determined
based on the cluster sizes.

-> If voxel-level FWE-corrected results were used, include the following:
Voxel-level FWE-correction was performed and results were thresholded at
p < {fwe}. {n_iters} iterations were performed to estimate a null
distribution of ALE values, in which the locations of coordinates were randomly
drawn from a gray matter template and the maximum ALE value was recorded.

References
----------
- Eickhoff, S. B., Bzdok, D., Laird, A. R., Kurth, F., & Fox, P. T. (2012).
Activation likelihood estimation meta-analysis revisited. NeuroImage,
59(3), 2349–2361.
- Eickhoff, S. B., Jbabdi, S., Caspers, S., Laird, A. R., Fox, P. T., Zilles,
K., & Behrens, T. E. (2010). Anatomical and functional connectivity of
cytoarchitectonic areas within the human parietal operculum. Journal of
Neuroscience, 30(18), 6409-6421.
- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C.,
Collins, D. L., & Brain Development Cooperative Group. (2011).
Unbiased average age-appropriate atlases for pediatric studies.
Neuroimage, 54(1), 313-327.
- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L.
(2009). Unbiased nonlinear average age-appropriate brain templates from birth
to adulthood. NeuroImage, (47), S102.
- Laird, A. R., Eickhoff, S. B., Li, K., Robin, D. A., Glahn, D. C., &
Fox, P. T. (2009). Investigating the functional heterogeneity of the default
mode network using coordinate-based meta-analytic modeling. The Journal of
Neuroscience: The Official Journal of the Society for Neuroscience, 29(46),
14496–14505.
- Robinson, J. L., Laird, A. R., Glahn, D. C., Lovallo, W. R., & Fox, P. T.
(2009). Metaanalytic connectivity modeling: Delineating the functional
connectivity of the human amygdala. Human Brain Mapping, 31(2), 173-184.
- Turkeltaub, P. E., Eden, G. F., Jones, K. M., & Zeffiro, T. A. (2002).
Meta-analysis of the functional neuroanatomy of single-word reading: method
and validation. NeuroImage, 16(3 Pt 1), 765–780.
- Turkeltaub, P. E., Eickhoff, S. B., Laird, A. R., Fox, M., Wiener, M.,
& Fox, P. (2012). Minimizing within-experiment and within-group effects in
Activation Likelihood Estimation meta-analyses. Human Brain Mapping,
33(1), 1–13.
    """

    LGR.info("Performing meta-analysis...")
    ale = ALE()
    results = ale.fit(sel_dset)
    corr = FWECorrector(method="montecarlo",
                        n_iters=n_iters,
                        voxel_thresh=v_thr,
                        n_cores=n_cores)
    cres = corr.transform(results)

    boilerplate = boilerplate.format(
        n_exps_db=len(dset.ids),
        n_subs_db=n_subs_db,
        n_foci_db=dset.coordinates.shape[0],
        n_exps_sel=len(sel_dset.ids),
        n_subs_sel=n_subs_sel,
        n_foci_sel=sel_dset.coordinates.shape[0],
        unc=v_thr,
        n_iters=n_iters,
    )

    if output_dir is None:
        output_dir = os.path.abspath(os.path.dirname(dataset_file))
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    if prefix is None:
        base = os.path.basename(dataset_file)
        prefix, _ = os.path.splitext(base)
        prefix += "_"

    LGR.info("Saving output maps...")
    cres.save_maps(output_dir=output_dir, prefix=prefix)
    copyfile(dataset_file,
             os.path.join(output_dir, prefix + "input_dataset.json"))
    LGR.info("Workflow completed.")
    LGR.info(boilerplate)
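
# A hypothetical invocation of the workflow above; the dataset and mask file
# names are illustrative assumptions, not part of the original workflow.
if __name__ == "__main__":
    macm_workflow(
        "nidm_pain_dset.json",
        "amygdala_roi.nii.gz",
        output_dir="macm_results",
        n_iters=1000,
    )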
Example #4
import os

from nimare.dataset import Dataset
from nimare.extract import download_nidm_pain
from nimare.meta.cbma import ALE
from nimare.transforms import ImagesToCoordinates, ImageTransformer
from nimare.utils import get_resource_path

###############################################################################
# Download data
# -----------------------------------------------------------------------------
dset_dir = download_nidm_pain()

###############################################################################
# Load Dataset
# -----------------------------------------------------------------------------
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)
dset.update_path(dset_dir)

# ImagesToCoordinates works on z or p statistical maps, so create z maps first
z_transformer = ImageTransformer(target="z")
dset = z_transformer.transform(dset)

study_no_images = "pain_02.nidm-1"
# delete images for study
dset.images = dset.images.query(f"id != '{study_no_images}'")

study_no_coordinates = "pain_03.nidm-1"

# delete coordinates for study
dset.coordinates = dset.coordinates.query(f"id != '{study_no_coordinates}'")
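
###############################################################################
# Generate coordinates from statistical images
# -----------------------------------------------------------------------------
# A sketch of the step the ``ImagesToCoordinates`` import above points toward;
# the merge_strategy value is an assumption for illustration.
coord_gen = ImagesToCoordinates(merge_strategy="fill")
dset = coord_gen.transform(dset)
dset.coordinates.head()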
Example #5
import os

import nibabel as nib
import numpy as np
from nilearn.plotting import plot_roi

from nimare.dataset import Dataset
from nimare.decode import discrete
from nimare.utils import get_resource_path

###############################################################################
# Load dataset with abstracts
# -----------------------------------------------------------------------------
# We'll load a small dataset composed only of studies in Neurosynth with
# Angela Laird as a coauthor, for the sake of speed.
dset = Dataset(
    os.path.join(get_resource_path(), "neurosynth_laird_studies.json"))
dset.annotations.head(5)

###############################################################################
# Create a region of interest
# -----------------------------------------------------------------------------

# First we'll make an ROI
arr = np.zeros(dset.masker.mask_img.shape, int)
arr[65:75, 50:60, 50:60] = 1
mask_img = nib.Nifti1Image(arr, dset.masker.mask_img.affine)
plot_roi(mask_img, draw_cross=False)

# Get studies with voxels in the mask
ids = dset.get_studies_by_mask(mask_img)
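
###############################################################################
# Decode the ROI with a discrete decoder
# -----------------------------------------------------------------------------
# A sketch of how the selected study IDs would feed the ``discrete`` module
# imported above; the decoder choice and its defaults are assumptions.
decoder = discrete.NeurosynthDecoder()
decoder.fit(dset)
decoded_df = decoder.transform(ids=ids)
decoded_df.head()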
Example #6
import logging
import os
import pathlib
from shutil import copyfile

import numpy as np

from nimare.dataset import Dataset
from nimare.io import convert_sleuth_to_dataset
from nimare.meta.cbma.ale import SCALE
from nimare.utils import vox2mm

LGR = logging.getLogger(__name__)

def scale_workflow(
    dataset_file,
    baseline=None,
    output_dir=None,
    prefix=None,
    n_iters=2500,
    v_thr=0.001,
    n_cores=1,
):
    """Perform SCALE meta-analysis from Sleuth text file or NiMARE json file.

    Warnings
    --------
    This method is not yet implemented.
    """
    if dataset_file.endswith(".json"):
        dset = Dataset(dataset_file, target="mni152_2mm")
    elif dataset_file.endswith(".txt"):
        dset = convert_sleuth_to_dataset(dataset_file, target="mni152_2mm")
    else:
        dset = Dataset.load(dataset_file)

    boilerplate = """
A specific coactivation likelihood estimation (SCALE; Langner et al., 2014)
meta-analysis was performed using NiMARE. The input dataset included {n}
studies/experiments.

Voxel-specific null distributions were generated using base rates from {bl}
with {n_iters} iterations. Results were thresholded at p < {thr}.

References
----------
- Langner, R., Rottschy, C., Laird, A. R., Fox, P. T., & Eickhoff, S. B. (2014).
Meta-analytic connectivity modeling revisited: controlling for activation base
rates. NeuroImage, 99, 559-570.
    """
    boilerplate = boilerplate.format(
        n=len(dset.ids),
        thr=v_thr,
        bl=baseline if baseline else "a gray matter template",
        n_iters=n_iters,
    )

    # At the moment, the baseline file should be an n_coords X 3 list of matrix
    # indices matching the dataset template, where the base rate for a given
    # voxel is reflected by the number of times that voxel appears in the array
    if not baseline:
        xyz = vox2mm(
            np.vstack(np.where(dset.masker.mask_img.get_fdata())).T,
            dset.masker.mask_img.affine,
        )
    else:
        xyz = np.loadtxt(baseline)

    estimator = SCALE(xyz=xyz, n_iters=n_iters, n_cores=n_cores)
    results = estimator.fit(dset)

    if output_dir is None:
        output_dir = os.path.dirname(dataset_file)
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    if prefix is None:
        base = os.path.basename(dataset_file)
        prefix, _ = os.path.splitext(base)
        prefix += "_"
    elif not prefix.endswith("_"):
        prefix = prefix + "_"

    results.save_maps(output_dir=output_dir, prefix=prefix)
    copyfile(dataset_file,
             os.path.join(output_dir, prefix + "input_coordinates.txt"))

    LGR.info("Workflow completed.")
    LGR.info(boilerplate)
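
# A hypothetical invocation of the workflow above; the Sleuth file name is an
# illustrative assumption.
if __name__ == "__main__":
    scale_workflow("sleuth_coordinates.txt", n_iters=500)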
Example #7
"""
Download the Cognitive Atlas and extract CogAt terms from text.
"""
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from nimare import annotate, extract
from nimare.dataset import Dataset
from nimare.utils import get_resource_path

###############################################################################
# Load dataset with abstracts
# -----------------------------------------------------------------------------
dset = Dataset(
    os.path.join(get_resource_path(), "neurosynth_laird_studies.json"))

###############################################################################
# Download Cognitive Atlas
# -----------------------------------------------------------------------------
cogatlas = extract.download_cognitive_atlas(data_dir=get_resource_path(),
                                            overwrite=False)
id_df = pd.read_csv(cogatlas["ids"])
rel_df = pd.read_csv(cogatlas["relationships"])

###############################################################################
# ID DataFrame
id_df.head()

###############################################################################
# Relationships DataFrame
rel_df.head()
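
###############################################################################
# Extract Cognitive Atlas terms from the abstracts
# -----------------------------------------------------------------------------
# A sketch of the extraction step the ``annotate`` import above points toward;
# this follows the nimare.annotate.cogat module, but treat the exact signature
# as an assumption.
counts_df, rep_text_df = annotate.cogat.extract_cogat(
    dset.texts, id_df, text_column="abstract"
)
counts_df.head()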
Example #8
import os
import os.path as op
import pickle
import shutil

import nibabel as nib
import numpy as np
from mapalign import embed
from nilearn import datasets, masking, surface
from nilearn.connectome import ConnectivityMeasure
from nilearn.datasets import fetch_surf_fsaverage
from nilearn.input_data import NiftiLabelsMasker

from nimare.dataset import Dataset
from nimare.io import convert_sleuth_to_dataset
from nimare.meta.kernel import ALEKernel, Peaks2MapsKernel
from nimare.utils import vox2mm

# Project-local helpers assumed to be defined elsewhere:
# build_macms, calculate_affinity, insert, neurosynth_download, plot_surfaces

def main(
    workdir,
    outdir,
    atlas,
    kernel,
    sparsity,
    affinity,
    approach,
    gradients,
    subcort,
    neurosynth,
    neurosynth_file,
    sleuth_file,
    nimare_dataset,
    roi_mask,
    term,
    topic,
):
    workdir = op.join(workdir, "tmp")
    if op.isdir(workdir):
        shutil.rmtree(workdir)
    os.makedirs(workdir)

    atlas_name = "atlas-{0}".format(atlas)
    kernel_name = "kernel-{0}".format(kernel)
    sparsity_name = "sparsity-{0}".format(sparsity)
    affinity_name = "affinity-{0}".format(affinity)
    approach_name = "approach-{0}".format(approach)
    gradients_name = "gradients-{0}".format(gradients)
    dset = None

    # handle neurosynth dataset, if called
    if neurosynth:
        if neurosynth_file is None:
            ns_data_dir = op.join(workdir, "neurosynth")
            dataset_file = op.join(ns_data_dir, "neurosynth_dataset.pkl.gz")
            # download neurosynth dataset if necessary
            if not op.isfile(dataset_file):
                neurosynth_download(ns_data_dir)

        else:
            dataset_file = neurosynth_file

        dset = Dataset.load(dataset_file)
        dataset_name = "dataset-neurosynth"

    # handle sleuth text file, if called
    if sleuth_file is not None:
        dset = convert_sleuth_to_dataset(sleuth_file, target="mni152_2mm")
        dataset_name = "dataset-{0}".format(op.basename(sleuth_file).split(".")[0])

    if nimare_dataset is not None:
        dset = Dataset.load(nimare_dataset)
        dataset_name = "dataset-{0}".format(op.basename(nimare_dataset).split(".")[0])

    if dset:
        # slice studies, if needed
        if roi_mask is not None:
            roi_ids = dset.get_studies_by_mask(roi_mask)
            print(
                "{}/{} studies report at least one coordinate in the "
                "ROI".format(len(roi_ids), len(dset.ids))
            )
            dset_sel = dset.slice(roi_ids)
            dset = dset_sel
            dataset_name = "dataset-neurosynth_mask-{0}".format(
                op.basename(roi_mask).split(".")[0]
            )

        if term is not None:
            labels = ["Neurosynth_TFIDF__{0}".format(term)]
            term_ids = dset.get_studies_by_label(labels=labels, label_threshold=0.1)
            print(
                "{}/{} studies report association "
                "with the term {}".format(len(term_ids), len(dset.ids), term)
            )
            dset_sel = dset.slice(term_ids)
            dset = dset_sel
            dataset_name = "dataset-neurosynth_term-{0}".format(term)

        if topic is not None:
            # the first entry is the topic-model version; the rest are topic names
            topic_version = topic[0]
            topic_names = topic[1:]
            topics = [
                "Neurosynth_{0}__{1}".format(topic_version, topic_name)
                for topic_name in topic_names
            ]
            topics_ids = []
            for topic_label in topics:
                topic_ids = dset.annotations.id[np.where(dset.annotations[topic_label])[0]].tolist()
                topics_ids.extend(topic_ids)
                print(
                    "{}/{} studies report association "
                    "with the term {}".format(len(topic_ids), len(dset.ids), topic_label)
                )
            topics_ids_unique = np.unique(topics_ids)
            print("{} unique ids".format(len(topics_ids_unique)))
            dset_sel = dset.slice(topics_ids_unique)
            dset = dset_sel
            dataset_name = "dataset-neurosynth_topic-{0}".format("_".join(topic_names))

        if (
            neurosynth
            or (sleuth_file is not None)
            or (nimare_dataset is not None)
        ):
            # set kernel for MA smoothing
            if kernel == "peaks2maps":
                print("Running peak2maps")
                k = Peaks2MapsKernel(resample_to_mask=True)
            elif kernel == "alekernel":
                print("Running alekernel")
                k = ALEKernel(fwhm=15)

            if atlas is not None:
                if atlas == "harvard-oxford":
                    print("Parcellating using the Harvard Oxford Atlas")
                    # atlas_labels = atlas.labels[1:]  # unused
                    atlas_shape = atlas.maps.shape
                    atlas_affine = atlas.maps.affine
                    atlas_data = atlas.maps.get_fdata()
                elif atlas == "aal":
                    print("Parcellating using the AAL Atlas")
                    atlas = datasets.fetch_atlas_aal()
                    # atlas_labels = atlas.labels  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "craddock-2012":
                    print("Parcellating using the Craddock-2012 Atlas")
                    atlas = datasets.fetch_atlas_craddock_2012()
                elif atlas == "destrieux-2009":
                    print("Parcellating using the Destrieux-2009 Atlas")
                    atlas = datasets.fetch_atlas_destrieux_2009(lateralized=True)
                    # atlas_labels = atlas.labels[3:]  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "msdl":
                    print("Parcellating using the MSDL Atlas")
                    atlas = datasets.fetch_atlas_msdl()
                elif atlas == "surface":
                    print("Generating surface vertices")

                if atlas != "fsaverage5" and atlas != "hcp":
                    imgs = k.transform(dset, return_type="image")

                    masker = NiftiLabelsMasker(
                        labels_img=atlas.maps, standardize=True, memory="nilearn_cache"
                    )
                    time_series = masker.fit_transform(imgs)

                else:
                    # change to array for other approach
                    imgs = k.transform(dset, return_type="image")
                    print(np.shape(imgs))

                    if atlas == "fsaverage5":
                        fsaverage = fetch_surf_fsaverage(mesh="fsaverage5")
                        pial_left = fsaverage.pial_left
                        pial_right = fsaverage.pial_right
                        medial_wall_inds_left = surface.load_surf_data(
                            "./templates/lh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_left))
                        medial_wall_inds_right = surface.load_surf_data(
                            "./templates/rh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_right))
                        sulc_left = fsaverage.sulc_left
                        sulc_right = fsaverage.sulc_right

                    elif atlas == "hcp":
                        pial_left = "./templates/S1200.L.pial_MSMAll.32k_fs_LR.surf.gii"
                        pial_right = "./templates/S1200.R.pial_MSMAll.32k_fs_LR.surf.gii"
                        medial_wall_inds_left = np.where(
                            nib.load("./templates/hcp.tmp.lh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        medial_wall_inds_right = np.where(
                            nib.load("./templates/hcp.tmp.rh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        left_verts = 32492 - len(medial_wall_inds_left)
                        sulc_left = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][0:left_verts]
                        sulc_left = np.insert(
                            sulc_left,
                            np.subtract(
                                medial_wall_inds_left, np.arange(len(medial_wall_inds_left))
                            ),
                            0,
                        )
                        sulc_right = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][left_verts:]
                        sulc_right = np.insert(
                            sulc_right,
                            np.subtract(
                                medial_wall_inds_right, np.arange(len(medial_wall_inds_right))
                            ),
                            0,
                        )

                    surf_lh = surface.vol_to_surf(
                        imgs,
                        pial_left,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surf_rh = surface.vol_to_surf(
                        imgs,
                        pial_right,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surfs = np.transpose(np.vstack((surf_lh, surf_rh)))
                    del surf_lh, surf_rh

                    # handle cortex first
                    coords_left = surface.load_surf_data(pial_left)[0]
                    coords_left = np.delete(coords_left, medial_wall_inds_left, axis=0)
                    coords_right = surface.load_surf_data(pial_right)[0]
                    coords_right = np.delete(coords_right, medial_wall_inds_right, axis=0)

                    print("Left Hemipshere Vertices")
                    surface_macms_lh, inds_discard_lh = build_macms(dset, surfs, coords_left)
                    print(np.shape(surface_macms_lh))
                    print(inds_discard_lh)

                    print("Right Hemipshere Vertices")
                    surface_macms_rh, inds_discard_rh = build_macms(dset, surfs, coords_right)
                    print(np.shape(surface_macms_rh))
                    print(len(inds_discard_rh))

                    lh_vertices_total = np.shape(surface_macms_lh)[0]
                    rh_vertices_total = np.shape(surface_macms_rh)[0]
                    time_series = np.transpose(np.vstack((surface_macms_lh, surface_macms_rh)))
                    print(np.shape(time_series))
                    del surface_macms_lh, surface_macms_rh

                    if subcort:
                        subcort_img = nib.load("templates/rois-subcortical_mni152_mask.nii.gz")
                        subcort_vox = np.asarray(np.where(subcort_img.get_fdata()))
                        subcort_mm = vox2mm(subcort_vox.T, subcort_img.affine)

                        print("Subcortical Voxels")
                        subcort_macm, inds_discard_subcort = build_macms(dset, surfs, subcort_mm)

                        num_subcort_vox = np.shape(subcort_macm)[0]
                        print(inds_discard_subcort)

                        time_series = np.hstack((time_series, np.asarray(subcort_macm).T))
                        print(np.shape(time_series))

                time_series = time_series.astype("float32")

                print("calculating correlation matrix")
                correlation = ConnectivityMeasure(kind="correlation")
                time_series = correlation.fit_transform([time_series])[0]
                print(np.shape(time_series))

                if affinity == "cosine":
                    time_series = calculate_affinity(time_series, 10 * sparsity)

            else:
                time_series = np.transpose(k.transform(dset, return_type="array"))

    print("Performing gradient analysis")

    gradients, statistics = embed.compute_diffusion_map(
        time_series, alpha=0.5, return_result=True, overwrite=True
    )
    pickle.dump(statistics, open(op.join(workdir, "statistics.p"), "wb"))

    # if subcortical included in gradient decomposition, remove gradient scores
    if subcort:
        subcort_grads = gradients[np.shape(gradients)[0] - num_subcort_vox :, :]
        subcort_grads = insert(subcort_grads, inds_discard_subcort)
        gradients = gradients[0 : np.shape(gradients)[0] - num_subcort_vox, :]

    # get left hemisphere gradient scores, and insert 0's where medial wall is
    gradients_lh = gradients[0:lh_vertices_total, :]
    if len(inds_discard_lh) > 0:
        gradients_lh = insert(gradients_lh, inds_discard_lh)
    gradients_lh = insert(gradients_lh, medial_wall_inds_left)

    # get right hemisphere gradient scores and insert 0's where medial wall is
    gradients_rh = gradients[-rh_vertices_total:, :]
    if len(inds_discard_rh) > 0:
        gradients_rh = insert(gradients_rh, inds_discard_rh)
    gradients_rh = insert(gradients_rh, medial_wall_inds_right)

    grad_dict = {
        "grads_lh": gradients_lh,
        "grads_rh": gradients_rh,
        "pial_left": pial_left,
        "sulc_left": sulc_left,
        "pial_right": pial_right,
        "sulc_right": sulc_right,
    }
    if subcort:
        grad_dict["subcort_grads"] = subcort_grads
    pickle.dump(grad_dict, open(op.join(workdir, "gradients.p"), "wb"))

    # map the gradient to the parcels
    for i in range(np.shape(gradients)[1]):
        if atlas is not None:
            if atlas == "fsaverage5" or atlas == "hcp":

                plot_surfaces(grad_dict, i, workdir)

                if subcort:
                    tmpimg = masking.unmask(subcort_grads[:, i], subcort_img)
                    nib.save(tmpimg, op.join(workdir, "gradient-{0}.nii.gz".format(i)))
            else:
                tmpimg = np.zeros(atlas_shape)
                for j, n in enumerate(np.unique(atlas_data)[1:]):
                    inds = atlas_data == n
                    tmpimg[inds] = gradients[j, i]
                # save once per gradient, after all parcels are filled
                nib.save(
                    nib.Nifti1Image(tmpimg, atlas_affine),
                    op.join(workdir, "gradient-{0}.nii.gz".format(i)),
                )
        else:
            tmpimg = np.zeros(np.prod(dset.masker.mask_img.shape))
            inds = np.ravel_multi_index(
                np.nonzero(dset.masker.mask_img.get_fdata()), dset.masker.mask_img.shape
            )
            tmpimg[inds] = gradients[:, i]
            nib.save(
                nib.Nifti1Image(
                    np.reshape(tmpimg, dset.masker.mask_img.shape), dset.masker.mask_img.affine
                ),
                op.join(workdir, "gradient-{0}.nii.gz".format(i)),
            )

            # NOTE: hardcoded local script path retained from the original workflow
            os.system(
                "python3 /Users/miriedel/Desktop/GitHub/surflay/make_figures.py "
                "-f {grad_image} --colormap jet".format(
                    grad_image=op.join(workdir, "gradient-{0}.nii.gz".format(i))
                )
            )

    output_dir = op.join(
        outdir,
        (
            f"{dataset_name}_{atlas_name}_{kernel_name}_{sparsity_name}_{gradients_name}_"
            f"{affinity_name}_{approach_name}"
        )
    )

    shutil.copytree(workdir, output_dir)

    shutil.rmtree(workdir)
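
# A hypothetical invocation of this script's entry point; every value below is
# an illustrative assumption, not from the original workflow.
if __name__ == "__main__":
    main(
        workdir="/tmp/gradient_work",
        outdir="/tmp/gradient_out",
        atlas="aal",
        kernel="alekernel",
        sparsity=0.9,
        affinity="cosine",
        approach="dm",
        gradients=3,
        subcort=False,
        neurosynth=True,
        neurosynth_file=None,
        sleuth_file=None,
        nimare_dataset=None,
        roi_mask=None,
        term="pain",
        topic=None,
    )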
Example #9
"""
KernelTransformers: a tour of available types.
"""
# sphinx_gallery_thumbnail_number = 2
import os

import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map

###############################################################################
# Load Dataset
# -----------------------------------------------------------------------------
from nimare.dataset import Dataset
from nimare.utils import get_resource_path

dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)

# First, let us reduce this Dataset to only two studies
dset = dset.slice(dset.ids[2:4])

###############################################################################
# Kernels ingest Datasets and can produce a few types of outputs
# -----------------------------------------------------------------------------
from nimare.meta.kernel import MKDAKernel

# First, the kernel should be initialized with any parameters.
kernel = MKDAKernel()

# Then, the ``transform`` method takes in the Dataset and produces the MA maps.
output = kernel.transform(dset)
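
###############################################################################
# Plot the modeled activation map for one study
# -----------------------------------------------------------------------------
# A hedged sketch (not part of the truncated original): request image output
# explicitly and plot the first study's MA map using the imports above.
ma_maps = kernel.transform(dset, return_type="image")
plot_stat_map(ma_maps[0], draw_cross=False)
plt.show()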
Example #10
# .. note::
#   The images in this example come from a set of NIDM-Results packs
#   downloaded from Neurovault collection 1425, uploaded by Dr. Camille Maumet.
#
#   Creation of the Dataset from the NIDM-Results packs was done with custom
#   code. The Results packs for collection 1425 are not completely
#   NIDM-Results-compliant, so the nidmresults library could not be used to
#   facilitate data extraction.
import os

from nilearn.plotting import plot_stat_map

from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.utils import get_resource_path

dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)

# Some of the CBMA algorithms compare two Datasets,
# so we'll split this example Dataset in half.
dset1 = dset.slice(dset.ids[:10])
dset2 = dset.slice(dset.ids[10:])

###############################################################################
# Multilevel Kernel Density Analysis
# -----------------------------------------------------------------------------
from nimare.meta.cbma.mkda import MKDADensity

meta = MKDADensity()
results = meta.fit(dset)

corr = FWECorrector(method="montecarlo", n_iters=10, n_cores=1)
cres = corr.transform(results)
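
###############################################################################
# Plot the corrected results
# -----------------------------------------------------------------------------
# A sketch of inspecting the corrected maps. Available map names depend on the
# NiMARE version, so the key below is an assumption; we print the available
# keys first.
print(cres.maps.keys())
plot_stat_map(
    cres.get_map("z_level-voxel_corr-FWE_method-montecarlo"),
    draw_cross=False,
)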
Example #11
import os

from nimare.dataset import Dataset
from nimare.extract import download_nidm_pain
from nimare.utils import get_resource_path

###############################################################################
# Datasets are stored as JSON or pkl[.gz] files
# -----------------------------------------------------------------------------
# JSON files are used to create Datasets, while generated Datasets are saved
# to, and loaded from, pkl[.gz] files.
# We use JSON files because they are easy to edit, and thus to build by hand,
# if necessary.
# We then store generated Datasets as pkl.gz files because an initialized
# Dataset is no longer just a dictionary.

# Let's start by downloading a dataset
dset_dir = download_nidm_pain()

# Now we can load and save the Dataset object
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file, target="mni152_2mm", mask=None)
dset.save("pain_dset.pkl")
dset = Dataset.load("pain_dset.pkl")
os.remove("pain_dset.pkl")  # cleanup

###############################################################################
# Much of the data in Datasets is stored as DataFrames
# -----------------------------------------------------------------------------
# The five DataFrames in Dataset are "coordinates" (reported peaks),
# "images" (statistical maps), "metadata", "texts", and "annotations" (labels).

###############################################################################
# ``Dataset.annotations`` contains labels describing studies
# `````````````````````````````````````````````````````````````````````````````
# Columns include the standard identifiers and any labels.
# The labels may be grouped together based on label source, in which case they
Example #12
import logging
import re
from pathlib import Path

import requests

from nimare.dataset import Dataset

LGR = logging.getLogger(__name__)

# Module-level constants/helpers assumed to be defined elsewhere:
# DEFAULT_MAP_TYPE_CONVERSION, _get_dataset_dir, _resolve_sample_size

def convert_neurovault_to_dataset(
    collection_ids,
    contrasts,
    img_dir=None,
    map_type_conversion=None,
    **dset_kwargs,
):
    """Convert a group of NeuroVault collections into a NiMARE Dataset.

    .. versionadded:: 0.0.8

    Parameters
    ----------
    collection_ids : :obj:`list` of :obj:`int` or :obj:`dict`
        A list of collections on neurovault specified by their id.
        The collection ids can be accessed through the neurovault API
        (i.e., https://neurovault.org/api/collections) or
        their main website (i.e., https://neurovault.org/collections).
        For example, in this URL https://neurovault.org/collections/8836/,
        `8836` is the collection id.
        collection_ids can also be a dictionary whose keys are informative
        study names and whose values are collection ids, which gives the
        collections more informative names in the dataset.
    contrasts : :obj:`dict`
        Dictionary whose keys represent the name of the contrast in
        the dataset and whose values represent a regular expression that would
        match the names represented in NeuroVault.
        For example, under the ``Name`` column in this URL
        https://neurovault.org/collections/8836/,
        a valid contrast could be "as-Animal", which will be called "animal" in the created
        dataset if the contrasts argument is ``{'animal': "as-Animal"}``.
    img_dir : :obj:`str` or None, optional
        Base path to save all the downloaded images, by default the images
        will be saved to a temporary directory with the prefix "neurovault".
    map_type_conversion : :obj:`dict` or None, optional
        Dictionary whose keys are what you expect the `map_type` name to
        be in neurovault and the values are the name of the respective
        statistic map in a nimare dataset. Default = None.
    **dset_kwargs : keyword arguments passed to Dataset
        Keyword arguments to pass in when creating the Dataset object.
        see :obj:`~nimare.dataset.Dataset` for details.

    Returns
    -------
    :obj:`~nimare.dataset.Dataset`
        Dataset object containing experiment information from neurovault.
    """
    img_dir = Path(
        _get_dataset_dir("_".join(contrasts.keys()), data_dir=img_dir))

    if map_type_conversion is None:
        map_type_conversion = DEFAULT_MAP_TYPE_CONVERSION

    if not isinstance(collection_ids, dict):
        collection_ids = {nv_coll: nv_coll for nv_coll in collection_ids}

    dataset_dict = {}
    for coll_name, nv_coll in collection_ids.items():

        nv_url = f"https://neurovault.org/api/collections/{nv_coll}/images/?format=json"
        images = requests.get(nv_url).json()
        if "Not found" in images.get("detail", ""):
            raise ValueError(
                f"Collection {nv_coll} not found. "
                "Three likely causes are (1) the collection doesn't exist, "
                "(2) the collection is private, or "
                "(3) the provided ID corresponds to an image instead of a collection."
            )

        dataset_dict[f"study-{coll_name}"] = {"contrasts": {}}
        for contrast_name, contrast_regex in contrasts.items():
            dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name] = {
                "images": {
                    "beta": None,
                    "t": None,
                    "varcope": None,
                },
                "metadata": {
                    "sample_sizes": None
                },
            }

            sample_sizes = []
            no_images = True
            for img_dict in images["results"]:
                if not (re.match(contrast_regex, img_dict["name"])
                        and img_dict["map_type"] in map_type_conversion
                        and img_dict["analysis_level"] == "group"):
                    continue

                no_images = False
                filename = img_dir / (
                    f"collection-{nv_coll}_id-{img_dict['id']}_" +
                    Path(img_dict["file"]).name)

                if not filename.exists():
                    r = requests.get(img_dict["file"])
                    with open(filename, "wb") as f:
                        f.write(r.content)

                (dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name]
                 ["images"][map_type_conversion[img_dict["map_type"]]]
                 ) = filename.as_posix()

                # aggregate sample sizes (should all be the same)
                sample_sizes.append(img_dict["number_of_subjects"])

            if no_images:
                raise ValueError(
                    f"No images were found for contrast {contrast_name}. "
                    f"Please check the contrast regular expression: {contrast_regex}"
                )
            # take the modal sample size (log a warning if there are multiple values)
            if len(set(sample_sizes)) > 1:
                sample_size = _resolve_sample_size(sample_sizes)
                LGR.warning((
                    f"Multiple sample sizes were found for neurovault collection {nv_coll} "
                    f"for contrast {contrast_name} (sample sizes: {set(sample_sizes)}); "
                    f"selecting modal sample size: {sample_size}"))
            else:
                sample_size = sample_sizes[0]
            (dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name]
             ["metadata"]["sample_sizes"]) = [sample_size]

    dataset = Dataset(dataset_dict, **dset_kwargs)

    return dataset
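
# A usage sketch based on the docstring example above (collection 8836 and the
# "as-Animal" contrast); network access to NeuroVault is required.
if __name__ == "__main__":
    dset = convert_neurovault_to_dataset(
        collection_ids=[8836],
        contrasts={"animal": "as-Animal"},
    )
    print(dset.images.head())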
Example #13
import numpy as np

from nimare.dataset import Dataset

# Private helpers assumed to be defined in this module:
# _array_like, _create_foci, _create_source

def create_coordinate_dataset(
    foci=1,
    foci_percentage="100%",
    fwhm=10,
    sample_size=30,
    n_studies=30,
    n_noise_foci=0,
    seed=None,
    space="MNI",
):
    """Generate coordinate based dataset for meta analysis.

    .. versionadded:: 0.0.4

    Parameters
    ----------
    foci : :obj:`int` or :obj:`list`
        The number of foci to be generated per study or the
        x,y,z coordinates of the ground truth foci. (Default=1)
    foci_percentage : :obj:`float` or :obj:`str`
        Percentage of studies in which the foci appear. (Default="100%")
    fwhm : :obj:`float`
        Full width at half maximum (fwhm) to define the probability
        spread of the foci. (Default=10)
    sample_size : :obj:`int` or :obj:`list`
        Either mean number of participants in each study
        or a list specifying the sample size for each
        study. If a list of two numbers and n_studies is
        not two, then the first number will represent a lower
        bound and the second number will represent an upper bound
        of a uniform sample. (Default=30)
    n_studies : :obj:`int`
        Number of studies to generate. (Default=30)
    n_noise_foci : :obj:`int`
        Number of foci considered to be noise in each study. (Default=0)
    seed : :obj:`int` or None
        Random state to reproducibly initialize random numbers.
        If seed is None, then the random state will try to be initialized
        with data from /dev/urandom (or the Windows analogue) if available
        or will initialize from the clock otherwise. (Default=None)
    space : :obj:`str`
        The template space the coordinates are reported in. (Default='MNI')

    Returns
    -------
    ground_truth_foci : :obj:`list`
        generated foci in xyz (mm) coordinates
    dataset : :class:`~nimare.dataset.Dataset`
    """
    # set random state
    rng = np.random.RandomState(seed=seed)

    # check foci argument
    if not isinstance(foci, int) and not _array_like(foci):
        raise ValueError("foci must be a positive integer or array like")

    # check foci_percentage argument
    if ((not isinstance(foci_percentage, (float, str))) or
        (isinstance(foci_percentage, str) and foci_percentage[-1] != "%")
            or (isinstance(foci_percentage, float)
                and not (0.0 <= foci_percentage <= 1.0))):
        raise ValueError(
            "foci_percentage must be a string (example '96%') or a float between 0 and 1"
        )

    # check sample_size argument
    if _array_like(sample_size) and len(sample_size) != n_studies and len(
            sample_size) != 2:
        raise ValueError(
            "sample_size must be the same length as n_studies or list of 2 items"
        )
    elif not _array_like(sample_size) and not isinstance(sample_size, int):
        raise ValueError("sample_size must be array like or integer")

    # check space argument
    if space != "MNI":
        raise NotImplementedError(
            "Only coordinates for the MNI atlas has been defined")

    # process foci_percentage argument
    if isinstance(foci_percentage, str) and foci_percentage[-1] == "%":
        foci_percentage = float(foci_percentage[:-1]) / 100

    # process sample_size argument
    if isinstance(sample_size, int):
        sample_size = [sample_size] * n_studies
    elif _array_like(sample_size) and len(sample_size) == 2 and n_studies != 2:
        sample_size_lower_limit = sample_size[0]
        sample_size_upper_limit = sample_size[1]
        sample_size = rng.randint(sample_size_lower_limit,
                                  sample_size_upper_limit,
                                  size=n_studies)

    ground_truth_foci, foci_dict = _create_foci(foci, foci_percentage, fwhm,
                                                n_studies, n_noise_foci, rng,
                                                space)

    source_dict = _create_source(foci_dict, sample_size, space)
    dataset = Dataset(source_dict)

    return ground_truth_foci, dataset
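
# A usage sketch with illustrative parameters: ten studies with one true focus
# each, and per-study sample sizes drawn uniformly between 20 and 40.
if __name__ == "__main__":
    ground_truth_foci, dataset = create_coordinate_dataset(
        foci=1,
        sample_size=[20, 40],
        n_studies=10,
        seed=1939,
    )
    print(ground_truth_foci)
    print(dataset.coordinates.head())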
Example #14
def run_meta_complete(ds_dict, ibma):
    """Fit an IBMA estimator to a dataset dictionary and return both."""
    # ds = Dataset(ds_dict, mask=gray_mask)
    ds = Dataset(ds_dict)
    print(ds.masker)
    assert ds.masker is not None
    return ibma, ibma.fit(ds)
Example #15
def run_meta(ds_dict, ibma, map_types):
    """Fit an IBMA estimator and return the requested result maps."""
    ds = Dataset(ds_dict)
    res = ibma.fit(ds)
    maps = [res.get_map(map_type) for map_type in map_types]
    return tuple(maps)
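
# A usage sketch (assumed inputs): ``ds_dict`` would be a NiMARE-format
# dataset dictionary with image paths, like the one built by the NeuroVault
# converter above; "z" is a common MetaResult map key for Fisher's method.
if __name__ == "__main__":
    from nimare.meta.ibma import Fishers

    (z_map,) = run_meta(ds_dict, Fishers(), map_types=["z"])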