Code example #1
from nilearn import datasets, plotting


def plot_activation_by_ID(identifier):
    # Fetch the t-maps of two subjects for the given contrast identifier.
    localizer_dataset = datasets.fetch_localizer_contrasts(
        [identifier],
        n_subjects=2,
        get_tmaps=True)
    # Plot the second subject's t-map on a glass brain.
    localizer_tmap_filename = localizer_dataset.tmaps[1]
    plotting.plot_glass_brain(localizer_tmap_filename, threshold=3)
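
# Usage sketch: the contrast name below is an assumption, borrowed from other
# examples in this file; any valid localizer contrast identifier works.
plot_activation_by_ID("left vs right button press")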
2. A permuted Ordinary Least Squares algorithm is run at each voxel. Data
   smoothed at 5 voxels FWHM are used.


"""
# Author: Virgile Fritsch, <*****@*****.**>, May. 2014
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols

### Load Localizer contrast ###################################################
n_samples = 94
localizer_dataset = datasets.fetch_localizer_contrasts(
    ['left button press (auditory cue)'], n_subjects=n_samples)
tested_var = localizer_dataset.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check = np.where(tested_var != 'None')[0]
n_samples = mask_quality_check.size
contrast_map_filenames = [localizer_dataset.cmaps[i]
                          for i in mask_quality_check]
tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1))
print("Actual number of subjects after quality check: %d" % n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    smoothing_fwhm=5,
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(contrast_map_filenames)
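
### Perform massively univariate analysis with permuted OLS ##################
# The listing is truncated here; this is a sketch of the step advertised in
# the docstring above, following the permuted_ols calls shown in the later
# examples of this file.
neg_log_pvals, t_scores_original_data, h0 = permuted_ols(
    tested_var, fmri_masked,
    model_intercept=True,
    n_perm=5000,  # raise to 10,000 for more precise p-values
    n_jobs=1)
neg_log_pvals_unmasked = nifti_masker.inverse_transform(neg_log_pvals)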
Code example #3
The goal of this example is to illustrate the use of the function
:func:`nilearn.image.resample_to_img` to resample an image to a template.
We use the MNI152 template as the reference for resampling a t-map image.
Function :func:`nilearn.image.resample_img` could also be used to achieve this.
"""

###############################################################################
# First we load the required datasets using the nilearn datasets module.
from nilearn.datasets import fetch_localizer_contrasts
from nilearn.datasets import load_mni152_template

template = load_mni152_template()

localizer_dataset = fetch_localizer_contrasts(["left vs right button press"],
                                              n_subjects=1,
                                              get_anats=True,
                                              get_tmaps=True)

localizer_tmap_filename = localizer_dataset.tmaps[0]
localizer_anat_filename = localizer_dataset.anats[0]

###############################################################################
# Now, the localizer t-map image can be resampled to the MNI template image.
from nilearn.image import resample_to_img

resampled_localizer_tmap = resample_to_img(localizer_tmap_filename, template)

###############################################################################
# Let's check the shape and affine have been correctly updated.
from nilearn.image import index_img
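
# The listing is truncated here; a minimal sketch of the promised check:
from nilearn.image import load_img

original_img = load_img(localizer_tmap_filename)
print("Original:  shape %s" % (original_img.shape,))
print(original_img.affine)
print("Resampled: shape %s" % (resampled_localizer_tmap.shape,))
print(resampled_localizer_tmap.affine)
print("Template:  shape %s" % (template.shape,))
print(template.affine)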
Code example #4
expected to elicit activity in the motor cortex (positive in the right
hemisphere, negative in the left hemisphere).

"""

#########################################################################
# Fetch dataset
# --------------
# We download a list of left vs right button press :term:`contrasts<contrast>`
# from a localizer dataset. Note that we fetch individual t-maps that represent
# the :term:`Bold<BOLD>` activity estimate divided by the uncertainty about this
# estimate.
from nilearn.datasets import fetch_localizer_contrasts
n_subjects = 16
data = fetch_localizer_contrasts(["left vs right button press"],
                                 n_subjects,
                                 get_tmaps=True)

###########################################################################
# Display subject t_maps
# ----------------------
# We plot a grid with all the subjects' t-maps thresholded at t = 2 for
# simple visualization purposes. The button press effect is visible in every
# subject.
from nilearn import plotting
import matplotlib.pyplot as plt
subjects = [subject_data[0] for subject_data in data['ext_vars']]
fig, axes = plt.subplots(nrows=4, ncols=4)
for cidx, tmap in enumerate(data['tmaps']):
    plotting.plot_glass_brain(tmap,
                              colorbar=False,
                              threshold=2.0,
                              title=subjects[cidx],
                              axes=axes[int(cidx / 4), int(cidx % 4)])
Code example #5
# The fragment opens mid-list; `contrasts` is reconstructed from the visible
# items (earlier items may be missing), and the imports this snippet needs
# are added here.
import os

import numpy as np
from nibabel import load
from nilearn import datasets
from nilearn.input_data import NiftiMasker

contrasts = ['auditory processing vs visual processing',
             'auditory&visual calculation vs sentences',
             'sentence reading vs checkerboard']

nifti_masker = NiftiMasker('mask_GM_forFunc.nii')
grp_mask = load(nifti_masker.mask).get_data()
affine = load(nifti_masker.mask).get_affine()

# Create the data matrix
n_contrasts, n_subjects = len(contrasts), 40
subjects = ['S%02d' % i for i in range(n_subjects)]
n_voxels = grp_mask.sum()
X = np.zeros((n_voxels, n_contrasts, n_subjects))

for nc, contrast in enumerate(contrasts):
    imgs = datasets.fetch_localizer_contrasts(
        [contrast], n_subjects=n_subjects)['cmaps']
    X[:, nc, :] = nifti_masker.fit_transform(imgs).T

# improve the mask
second_mask = (X == 0).sum(1).sum(1) < 100
grp_mask[grp_mask > 0] = second_mask
X = X[second_mask]

# write directory
write_dir = os.path.join(os.getcwd(), 'results')
if not os.path.exists(write_dir):
    os.mkdir(write_dir)

###############################################################################

# What shall we do in the present experiment?
Code example #6
1. A sequence of subject fMRI button press contrasts is downloaded.
2. A mask of the useful brain volume is computed.
3. A one-sample t-test is applied to the brain maps.

We focus on a given contrast of the localizer dataset: the motor response
to left versus right button press. Both at the individual and the group
level, this is expected to elicit activity in the motor cortex (positive in
the right hemisphere, negative in the left hemisphere).

"""

#########################################################################
# Fetch dataset
# --------------
# We download a list of left vs right button press contrasts from a
# localizer dataset. Note that we fetch individual t-maps that represent the
# BOLD activity estimate divided by the uncertainty about this estimate.
from nilearn.datasets import fetch_localizer_contrasts
n_subjects = 16
data = fetch_localizer_contrasts(["left vs right button press"], n_subjects,
                                 get_tmaps=True)

###########################################################################
# Display subject t_maps
# ----------------------
# We plot a grid with all the subjects' t-maps thresholded at t = 2 for
# simple visualization purposes. The button press effect is visible in
# every subject.
from nilearn import plotting
import matplotlib.pyplot as plt
subjects = [subject_data[0] for subject_data in data['ext_vars']]
fig, axes = plt.subplots(nrows=4, ncols=4)
for cidx, tmap in enumerate(data['tmaps']):
    plotting.plot_glass_brain(tmap, colorbar=False, threshold=2.0,
                              title=subjects[cidx],
                              axes=axes[int(cidx / 4), int(cidx % 4)])
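
#########################################################################
# Estimate second level model
# ---------------------------
# The listing is truncated here; a sketch of the one-sample t-test promised
# in the docstring above (import path assumes nilearn >= 0.7; older releases
# shipped SecondLevelModel in nistats).
import pandas as pd
from nilearn.glm.second_level import SecondLevelModel

design_matrix = pd.DataFrame([1] * n_subjects, columns=['intercept'])
second_level_model = SecondLevelModel().fit(data['cmaps'],
                                            design_matrix=design_matrix)
z_map = second_level_model.compute_contrast(output_type='z_score')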
Code example #7
   smoothed at 5 voxels FWHM are used.


"""
# Author: Virgile Fritsch, <*****@*****.**>, May. 2014
import numpy as np
import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn.image import get_data

##############################################################################
# Load Localizer contrast
n_samples = 94
localizer_dataset = datasets.fetch_localizer_contrasts(
    ['left button press (auditory cue)'], n_subjects=n_samples)

# print basic information on the dataset
print('First contrast nifti image (3D) is located at: %s' %
      localizer_dataset.cmaps[0])

tested_var = localizer_dataset.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check = np.where(tested_var != b'n/a')[0]
n_samples = mask_quality_check.size
contrast_map_filenames = [
    localizer_dataset.cmaps[i] for i in mask_quality_check
]
tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1))
print("Actual number of subjects after quality check: %d" % n_samples)
Code example #8
"""
# Author: Virgile Fritsch, <*****@*****.**>, Mar. 2014
import numpy as np
from nilearn import datasets
from scipy import linalg
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn_sandbox.mass_univariate.rpbi import randomized_parcellation_based_inference

### Load Localizer calculation contrast #######################################
n_samples = 20
# localizer_dataset = datasets.fetch_localizer_calculation_task(
#     n_subjects=n_samples)
localizer_dataset = datasets.fetch_localizer_contrasts(
    ["calculation vs sentences"],
    n_subjects=n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(localizer_dataset.cmaps)

### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float)  # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
    tested_var, fmri_masked, model_intercept=False,
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    two_sided_test=False,  # RPBI does not perform a two-sided test
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals))
Code example #9
visual areas. At the group level, such a mapping is not possible. Yet,
we may observe some significant effects in these areas.

"""

import pandas as pd
from nilearn import plotting
from nilearn.datasets import fetch_localizer_contrasts

#########################################################################
# Fetch dataset
# --------------
# We download two lists of contrast maps from a localizer dataset: one for
# the vertical checkerboard condition and one for the horizontal one.
n_subjects = 16
sample_vertical = fetch_localizer_contrasts(
    ["vertical checkerboard"], n_subjects, get_tmaps=True)
sample_horizontal = fetch_localizer_contrasts(
    ["horizontal checkerboard"], n_subjects, get_tmaps=True)

# What remains implicit here is that there is a one-to-one
# correspondence between the two samples: the first image of both
# samples comes from subject S1, the second from subject S2 etc.

############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
second_level_input = sample_vertical['cmaps'] + sample_horizontal['cmaps']

############################################################################
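# Define the design matrix and fit the model. The listing is truncated here,
# so this is a sketch (import path assumes nilearn >= 0.7): the first
# n_subjects rows of the design correspond to the vertical sample, the rest
# to the horizontal sample.
import numpy as np
from nilearn.glm.second_level import SecondLevelModel

condition_effect = np.hstack(([1] * n_subjects, [-1] * n_subjects))
design_matrix = pd.DataFrame({'vertical vs horizontal': condition_effect})
second_level_model = SecondLevelModel().fit(second_level_input,
                                            design_matrix=design_matrix)
z_map = second_level_model.compute_contrast('vertical vs horizontal',
                                            output_type='z_score')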
Code example #10
def test_fetch_localizer_contrasts():
    local_url = "file://" + datadir
    ids = np.asarray([('S%2d' % i).encode() for i in range(94)])
    ids = ids.view(dtype=[('subject_id', 'S3')])
    file_mock.add_csv('cubicwebexport.csv', ids)
    file_mock.add_csv('cubicwebexport2.csv', ids)

    # Disabled: cannot be tested without actually fetching covariates CSV file
    # All subjects
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 verbose=0)
    assert_true(dataset.anats is None)
    assert_true(dataset.tmaps is None)
    assert_true(dataset.masks is None)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 94)
    assert_equal(len(dataset.cmaps), 94)

    # 20 subjects
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 n_subjects=20,
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 verbose=0)
    assert_true(dataset.anats is None)
    assert_true(dataset.tmaps is None)
    assert_true(dataset.masks is None)
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_equal(len(dataset.cmaps), 20)
    assert_equal(dataset.ext_vars.size, 20)

    # Multiple contrasts
    dataset = datasets.fetch_localizer_contrasts(
        ["checkerboard", "horizontal checkerboard"],
        n_subjects=20,
        data_dir=tmpdir,
        verbose=0)
    assert_true(dataset.anats is None)
    assert_true(dataset.tmaps is None)
    assert_true(dataset.masks is None)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_equal(len(dataset.cmaps), 20 * 2)  # two contrasts are fetched
    assert_equal(dataset.ext_vars.size, 20)

    # get_anats=True
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 get_anats=True,
                                                 verbose=0)
    assert_true(dataset.masks is None)
    assert_true(dataset.tmaps is None)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.anats[0], _basestring))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 94)
    assert_equal(len(dataset.anats), 94)
    assert_equal(len(dataset.cmaps), 94)

    # get_masks=True
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 get_masks=True,
                                                 verbose=0)
    assert_true(dataset.anats is None)
    assert_true(dataset.tmaps is None)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_true(isinstance(dataset.masks[0], _basestring))
    assert_equal(dataset.ext_vars.size, 94)
    assert_equal(len(dataset.cmaps), 94)
    assert_equal(len(dataset.masks), 94)

    # get_tmaps=True
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 get_tmaps=True,
                                                 verbose=0)
    assert_true(dataset.anats is None)
    assert_true(dataset.masks is None)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_true(isinstance(dataset.tmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 94)
    assert_equal(len(dataset.cmaps), 94)
    assert_equal(len(dataset.tmaps), 94)

    # all get_*=True
    dataset = datasets.fetch_localizer_contrasts(["checkerboard"],
                                                 data_dir=tmpdir,
                                                 url=local_url,
                                                 get_anats=True,
                                                 get_masks=True,
                                                 get_tmaps=True,
                                                 verbose=0)

    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.anats[0], _basestring))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_true(isinstance(dataset.masks[0], _basestring))
    assert_true(isinstance(dataset.tmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 94)
    assert_equal(len(dataset.anats), 94)
    assert_equal(len(dataset.cmaps), 94)
    assert_equal(len(dataset.masks), 94)
    assert_equal(len(dataset.tmaps), 94)
Code example #12
File: script_localizer.py  Project: bthirion/fMRI_PCR
    "horizontal checkerboard", "vertical checkerboard", 'sentence listening',
    "sentence reading", "calculation (auditory cue)",
    "calculation (visual cue)", "left button press (auditory cue)",
    "left button press (visual cue)", "right button press (auditory cue)",
    "right button press (visual cue)"
]

test_set = ['left button press (auditory cue)']
ref = [contrast for contrast in contrasts if contrast not in test_set]
n_ref = len(ref)

nifti_masker = NiftiMasker('mask_GM_forFunc.nii')
affine = load(nifti_masker.mask).get_affine()

# fetch the data
ref_imgs = datasets.fetch_localizer_contrasts(ref).cmaps
n_subjects = len(ref_imgs) // n_ref  # integer division

# Create a population mask
one_contrast = [img for img in ref_imgs if 'horizontal' in img]
mask_ = compute_multi_background_mask(one_contrast)
mask_image = intersect_masks(['mask_GM_forFunc.nii', mask_])
mask = mask_image.get_data()
n_voxels = mask.sum()
save(mask_image, '/tmp/mask.nii')
nifti_masker = NiftiMasker(mask_image)

# write directory
write_dir = op.join(getcwd(), 'results')
if not op.exists(write_dir):
    mkdir(write_dir)
Code example #13
that it conveys more sensitivity.

"""
# Author: Virgile Fritsch, <*****@*****.**>, Mar. 2014
import numpy as np
from nilearn import datasets
from scipy import linalg
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn_sandbox.mass_univariate.rpbi import randomized_parcellation_based_inference

### Load Localizer calculation contrast #######################################
n_samples = 20
# localizer_dataset = datasets.fetch_localizer_calculation_task(
#     n_subjects=n_samples)
localizer_dataset = datasets.fetch_localizer_contrasts(
    ["calculation vs sentences"], n_subjects=n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(memory='nilearn_cache',
                           memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(localizer_dataset.cmaps)

### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float)  # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
    tested_var,
    fmri_masked,
    model_intercept=False,
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    two_sided_test=False,  # RPBI does not perform a two-sided test
    n_jobs=1)  # can be changed to use more CPUs
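
### Inverse transform back into brain space ###################################
# The listing stops after permuted_ols; a sketch of the usual follow-up step:
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals))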
Code example #14
We use localizer t-statistic maps from :func:`nilearn.datasets.fetch_localizer_contrasts`
as an input image.

The idea is to threshold an image to keep foreground objects, using
:func:`nilearn.image.threshold_img`, and then extract those objects with
:func:`nilearn.regions.connected_regions`.
"""

################################################################################
# Fetch the t-statistic image of a localizer contrast using the datasets
# utilities.
from nilearn import datasets

n_subjects = 3
localizer_path = datasets.fetch_localizer_contrasts(
    ['calculation (auditory cue)'], n_subjects=n_subjects, get_tmaps=True)
tmap_filename = localizer_path.tmaps[0]

################################################################################
# Threshold the t-statistic image using the threshold_img function.
from nilearn.image import threshold_img

# This function supports two thresholding strategies.
# Type 1: percentile-based thresholding (via scoreatpercentile).
threshold_percentile_img = threshold_img(tmap_filename, threshold='97%')


# Type 2: intensity-based thresholding. The threshold value must lie within
# the image intensity range, i.e. below the maximum value.
threshold_value_img = threshold_img(tmap_filename, threshold=4.)
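
################################################################################
# Extract connected regions from the thresholded image; a sketch of the step
# promised in the docstring, using nilearn.regions.connected_regions.
from nilearn.regions import connected_regions

regions_img, region_indices = connected_regions(threshold_value_img,
                                                min_region_size=1500)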
Code example #16
    print("Done")
    np.save(str(maps_file), term_maps)

######################################################################
# Obtain some example brain maps
# ------------------------------
# We load example subject-level t-maps from a localizer dataset, and also
# generate a brain map from a set of MNI coordinates.

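# Note: `np`, `encoder`, `term_maps`, `maps_file` and
# `gaussian_coord_smoothing` are defined earlier in the original script (a
# neuroquery-style encoding model) and are not part of this fragment. The
# import below is added so the visible fetch calls run in context.
from nilearn.datasets import fetch_localizer_contrasts
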
queries = {}

contrasts = ["left vs right button press", "sentence listening"]

for contrast in contrasts:
    query_map = fetch_localizer_contrasts([contrast],
                                          n_subjects=1,
                                          get_tmaps=True)["tmaps"][0]
    queries[contrast] = query_map

dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
dmn_img = gaussian_coord_smoothing(dmn_coords, encoder.get_masker())
masked_dmn = encoder.get_masker().transform(dmn_img).ravel()
queries["DMN coordinates"] = dmn_img

######################################################################
# Discover which terms have activations similar to the query map
# --------------------------------------------------------------
# Here we simply use the dot product with the absolute values of the input map

for name, query_map in queries.items():
    masked_query = encoder.get_masker().transform(query_map).ravel()
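    # The listing stops mid-loop; a sketch of the promised similarity score,
    # assuming `term_maps` (defined earlier in the original script) holds one
    # masked activation map per term (one row per term):
    similarities = term_maps.dot(np.abs(masked_query))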
Code example #18
# Source: python notebook: [Massively univariate analysis of a motor task from the Localizer dataset](https://nilearn.github.io/auto_examples/05_advanced/plot_localizer_mass_univariate_methods.html#sphx-glr-auto-examples-05-advanced-plot-localizer-mass-univariate-methods-py)

# ---------------
# setup
# ---------------
import numpy as np
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from nilearn.image import get_data

# ---------------
# load data
# ---------------
n_samples = 16

localizer_dataset_left = datasets.fetch_localizer_contrasts(
    ["left visual click"], n_subjects=n_samples)

localizer_dataset_right = datasets.fetch_localizer_contrasts(
    ["right visual click"], n_subjects=n_samples)

# ---------------
# quality control
# ---------------

tested_var_left = localizer_dataset_left.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check_left = np.where(tested_var_left != b'n/a')[0]
n_samples_left = mask_quality_check_left.size
contrast_map_filenames_left = [
    localizer_dataset_left.cmaps[i] for i in mask_quality_check_left
]
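
# The listing is truncated here; the right-hand dataset presumably receives
# the same quality check. A sketch mirroring the left-hand code above:
tested_var_right = localizer_dataset_right.ext_vars['pseudo']
mask_quality_check_right = np.where(tested_var_right != b'n/a')[0]
n_samples_right = mask_quality_check_right.size
contrast_map_filenames_right = [
    localizer_dataset_right.cmaps[i] for i in mask_quality_check_right
]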
Code example #19
             'auditory&visual calculation vs sentences',
             'sentence reading vs checkerboard']

nifti_masker = NiftiMasker('mask_GM_forFunc.nii')
grp_mask = load(nifti_masker.mask_img).get_data()
affine = load(nifti_masker.mask_img).get_affine()

# Create the data matrix
n_contrasts, n_subjects = len(contrasts), 40
subjects = ['S%02d' % i for i in range(n_subjects)]
n_voxels = grp_mask.sum()
X = np.zeros((n_voxels, n_contrasts, n_subjects))

for nc, contrast in enumerate(contrasts):
    imgs = datasets.fetch_localizer_contrasts(
        [contrast], n_subjects=n_subjects,
        data_dir='/tmp/')['cmaps']
    X[:, nc, :] = nifti_masker.fit_transform(imgs).T

# improve the mask
second_mask = (X == 0).sum(1).sum(1) < 100
grp_mask[grp_mask > 0] = second_mask
X = X[second_mask]

# write directory
write_dir = os.path.join(os.getcwd(), 'results')
if not os.path.exists(write_dir):
    os.mkdir(write_dir)

###############################################################################
Code example #20
from nilearn import datasets
from nilearn import plotting, image

###############################################################################
# Retrieve the data: haxby dataset to have EPI images and masks, and
# localizer dataset to have contrast maps

haxby_dataset = datasets.fetch_haxby(n_subjects=1)
haxby_anat_filename = haxby_dataset.anat[0]
haxby_mask_filename = haxby_dataset.mask_vt[0]
haxby_func_filename = haxby_dataset.func[0]

localizer_dataset = datasets.fetch_localizer_contrasts(
    ["left vs right button press"],
    n_subjects=2,
    get_anats=True,
    get_tmaps=True)
localizer_anat_filename = localizer_dataset.anats[1]
localizer_cmap_filename = localizer_dataset.cmaps[1]
localizer_tmap_filename = localizer_dataset.tmaps[1]

###############################################################################
# demo the different plotting functions

# Plotting statistical maps
plotting.plot_stat_map(localizer_cmap_filename, bg_img=localizer_anat_filename,
                       threshold=3, title="plot_stat_map",
                       cut_coords=(36, -27, 66))

# Plotting glass brain
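# The listing is truncated here; a sketch of the glass brain call, following
# the pattern of the plot_stat_map call above:
plotting.plot_glass_brain(localizer_tmap_filename,
                          title="plot_glass_brain",
                          threshold=3)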
Code example #22
See :ref:`plotting` for more details.
"""

from nilearn import datasets
from nilearn import plotting, image

###############################################################################
# Retrieve the data: haxby dataset to have EPI images and masks, and
# localizer dataset to have contrast maps

haxby_dataset = datasets.fetch_haxby(n_subjects=1)
haxby_anat_filename = haxby_dataset.anat[0]
haxby_mask_filename = haxby_dataset.mask_vt[0]
haxby_func_filename = haxby_dataset.func[0]

localizer_dataset = datasets.fetch_localizer_contrasts(
    ["left vs right button press"], n_subjects=2, get_anats=True)
localizer_anat_filename = localizer_dataset.anats[1]
localizer_cmap_filename = localizer_dataset.cmaps[1]

###############################################################################
# demo the different 'display_mode' options

plotting.plot_stat_map(localizer_cmap_filename,
                       display_mode='ortho',
                       cut_coords=(36, -27, 60),
                       title="display_mode='ortho', cut_coords=(36, -27, 60)")

plotting.plot_stat_map(localizer_cmap_filename,
                       display_mode='z',
                       cut_coords=5,
                       title="display_mode='z', cut_coords=5")
Code example #23
expected to elicit activity in the motor cortex (positive in the right
hemisphere, negative in the left hemisphere).

"""

#########################################################################
# Fetch dataset
# --------------
# We download a list of left vs right button press :term:`contrasts<contrast>`
# from a localizer dataset. Note that we fetch individual t-maps that represent
# the :term:`Bold<BOLD>` activity estimate divided by the uncertainty about this
# estimate.
from nilearn.datasets import fetch_localizer_contrasts
n_subjects = 16
data = fetch_localizer_contrasts(
    ["left vs right button press"], n_subjects,
    get_tmaps=True, legacy_format=False
)

###########################################################################
# Display subject t_maps
# ----------------------
# We plot a grid with all the subjects' t-maps thresholded at t = 2 for
# simple visualization purposes. The button press effect is visible in every
# subject.
from nilearn import plotting
import matplotlib.pyplot as plt
# With legacy_format=False, ext_vars is a pandas DataFrame; take the
# participant identifiers from its "participant_id" column.
subjects = data['ext_vars']['participant_id'].tolist()
fig, axes = plt.subplots(nrows=4, ncols=4)
for cidx, tmap in enumerate(data['tmaps']):
    plotting.plot_glass_brain(tmap, colorbar=False, threshold=2.0,
                              title=subjects[cidx],
                              axes=axes[int(cidx / 4), int(cidx % 4)])