# Example #1
# Let's define a `Support Vector Classifier <http://scikit-learn.org/stable/modules/svm.html>`_ (or SVC).
from sklearn.svm import SVC
svc = SVC()

###########################################################################
# Masking the data
# ...................................
# To use a scikit-learn estimator on brain images, you should first mask the
# data using a :class:`nilearn.maskers.NiftiMasker` to extract only the
# voxels inside the mask of interest, and transform 4D input fMRI data to
# 2D arrays (`shape=(n_timepoints, n_voxels)`) that estimators can work on.
from nilearn.maskers import NiftiMasker
masker = NiftiMasker(mask_img=mask_filename, runs=session_label,
                     smoothing_fwhm=4, standardize=True,
                     memory="nilearn_cache", memory_level=1)
fmri_masked = masker.fit_transform(fmri_niimgs)

###########################################################################
# Cross-validation with scikit-learn
# ...................................
# To train and test the model in a meaningful way, we use cross-validation
# with :func:`sklearn.model_selection.cross_val_score`, which computes the
# decoding score on each cross-validation fold for you.
from sklearn.model_selection import cross_val_score
# Here `cv=5` stipulates a 5-fold cross-validation
cv_scores = cross_val_score(svc, fmri_masked, conditions, cv=5)
print("SVC accuracy: {:.3f}".format(cv_scores.mean()))


###########################################################################
# Tuning cross-validation parameters
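# ...................................
# As a minimal sketch of one such adjustment (assuming the ``session_label``
# run labels used above), the folds can be made to respect the run structure
# with a leave-one-run-out scheme instead of the default K-fold:
from sklearn.model_selection import LeaveOneGroupOut
cv = LeaveOneGroupOut()
cv_scores = cross_val_score(svc, fmri_masked, conditions,
                            cv=cv, groups=session_label)
print("SVC accuracy (leave-one-run-out): {:.3f}".format(cv_scores.mean()))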
# Example #2
from nilearn.image import get_data

############################################################################
# Load Localizer contrast
n_samples = 20
localizer_dataset = datasets.fetch_localizer_calculation_task(
    n_subjects=n_samples, legacy_format=False)
tested_var = np.ones((n_samples, 1))

############################################################################
# Mask data
nifti_masker = NiftiMasker(smoothing_fwhm=5,
                           memory='nilearn_cache',
                           memory_level=1)  # cache options
cmap_filenames = localizer_dataset.cmaps
fmri_masked = nifti_masker.fit_transform(cmap_filenames)

############################################################################
# Anova (parametric F-scores)
from sklearn.feature_selection import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var,
                              center=False)  # do not remove intercept
pvals_anova *= fmri_masked.shape[1]  # Bonferroni correction across voxels
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1  # corrected p-values cannot exceed 1
neg_log_pvals_anova = -np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

############################################################################
# Visualization
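# A minimal sketch of what this step could look like: display the unmasked
# -log10 p-values with :func:`nilearn.plotting.plot_stat_map` (the threshold
# and title below are illustrative, not this example's original settings).
from nilearn import plotting
plotting.plot_stat_map(neg_log_pvals_anova_unmasked,
                       threshold=-np.log10(0.1),  # roughly p < 0.1, corrected
                       title='Parametric ANOVA (-log10 p-values)')
plotting.show()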
# Example #3
y = y[non_rest]

# Get the labels of the numerical conditions represented by the vector y
unique_conditions, order = np.unique(y, return_index=True)
# Sort the conditions by the order of appearance
unique_conditions = unique_conditions[np.argsort(order)]

##############################################################################
# Prepare the fMRI data
# ----------------------
from nilearn.maskers import NiftiMasker
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=mask_filename, standardize=True,
                           runs=session, smoothing_fwhm=4,
                           memory="nilearn_cache", memory_level=1)
X = nifti_masker.fit_transform(func_filename)

# Remove the "rest" condition
X = X[non_rest]
session = session[non_rest]

##############################################################################
# Build the decoders, using scikit-learn
# ----------------------------------------
# Here we use a Support Vector Classification, with a linear kernel,
# and a simple feature selection step

from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.pipeline import Pipeline
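# A sketch of how these pieces fit together (the number of selected voxels
# ``k`` is illustrative): ANOVA-based feature selection followed by a linear
# SVC, wrapped in one-vs-one and one-vs-rest multiclass strategies.
svc_ovo = OneVsOneClassifier(Pipeline([
    ('anova', SelectKBest(f_classif, k=500)),
    ('svc', SVC(kernel='linear')),
]))
svc_ova = OneVsRestClassifier(Pipeline([
    ('anova', SelectKBest(f_classif, k=500)),
    ('svc', SVC(kernel='linear')),
]))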
# Example #4
gm_imgs_train, gm_imgs_test, age_train, age_test = train_test_split(
    gray_matter_map_filenames, age, train_size=.6, random_state=0)

# print basic information on the dataset
print('First gray-matter anatomy image (3D) is located at: %s' %
      oasis_dataset.gray_matter_maps[0])  # 3D data
print('First white-matter anatomy image (3D) is located at: %s' %
      oasis_dataset.white_matter_maps[0])  # 3D data

#############################################################################
# Preprocess data
# ----------------
nifti_masker = NiftiMasker(standardize=False,
                           smoothing_fwhm=2,
                           memory='nilearn_cache')  # cache options
gm_maps_masked = nifti_masker.fit_transform(gm_imgs_train)

# The features with too low between-subject variance are removed using
# :class:`sklearn.feature_selection.VarianceThreshold`.
from sklearn.feature_selection import VarianceThreshold
variance_threshold = VarianceThreshold(threshold=.01)
gm_maps_thresholded = variance_threshold.fit_transform(gm_maps_masked)

# Then we convert the boolean support of the retained features back into a
# mask image, to be used in the decoding step
mask = nifti_masker.inverse_transform(variance_threshold.get_support())

############################################################################
# Prediction pipeline with ANOVA and SVR using
# :class:`nilearn.decoding.DecoderRegressor` Object
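# A minimal sketch of such a pipeline, using the variance-thresholded mask
# built above (the screening percentile and scoring choice are illustrative):
from nilearn.decoding import DecoderRegressor
decoder = DecoderRegressor(estimator='svr', mask=mask,
                           scoring='neg_mean_absolute_error',
                           screening_percentile=1,
                           n_jobs=1)
decoder.fit(gm_imgs_train, age_train)   # fit on the training images
age_pred = decoder.predict(gm_imgs_test)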
# Example #5
print('First subject functional nifti image (4D) is at: %s' %
      dataset.func[0])  # 4D data

#####################################################################
# Preprocess
from nilearn.maskers import NiftiMasker

# This is fMRI time-series data: the background has not been removed yet,
# so we need to use mask_strategy='epi' to compute the mask from the
# EPI images
masker = NiftiMasker(smoothing_fwhm=8,
                     memory='nilearn_cache',
                     memory_level=1,
                     mask_strategy='epi',
                     standardize=True)
data_masked = masker.fit_transform(func_filename)

#####################################################################
# Apply ICA

from sklearn.decomposition import FastICA
n_components = 10
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(data_masked.T).T

# Normalize estimated components, for thresholding to make sense
components_masked -= components_masked.mean(axis=0)
components_masked /= components_masked.std(axis=0)
# Threshold
import numpy as np
components_masked[np.abs(components_masked) < .8] = 0
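# To inspect the components in brain space, they can be projected back into
# a 4D image with the masker fitted above; the plotting call below is only
# an illustrative sketch.
component_img = masker.inverse_transform(components_masked)

from nilearn import plotting
from nilearn.image import index_img
plotting.plot_stat_map(index_img(component_img, 0),
                       title='First ICA component (illustrative)')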
# The NiftiMasker report allows us to see the mask before and after resampling.
# Simply hover over the report to see the mask from the original image.

import numpy as np

masker = NiftiMasker(mask_strategy='epi', target_affine=np.eye(3) * 8)
masker.fit(epi_img)
report = masker.generate_report()
report
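# In a plain script, the report can also be saved to disk and opened in a
# browser; the file name below is only an example.
report.save_as_html('masker_report.html')
report.open_in_browser()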

###############################################################################
# After mask computation: extracting time series
###############################################################################
#
# Extract time series

# trended vs detrended
trended = NiftiMasker(mask_strategy='epi')
detrended = NiftiMasker(mask_strategy='epi', detrend=True)
trended_data = trended.fit_transform(epi_img)
detrended_data = detrended.fit_transform(epi_img)

# The timeseries are numpy arrays, so we can manipulate them with numpy

print("Trended: mean %.2f, std %.2f" %
      (np.mean(trended_data), np.std(trended_data)))
print("Detrended: mean %.2f, std %.2f" %
      (np.mean(detrended_data), np.std(detrended_data)))

show()

# Run the searchlight decoding; the estimator is built on the mask image
# and cross-validation scheme defined earlier in this example
from nilearn.decoding import SearchLight
searchlight = SearchLight(mask_img,
                          verbose=1,
                          cv=cv)
searchlight.fit(fmri_img, y)

#########################################################################
# F-scores computation
# ----------------------
from nilearn.maskers import NiftiMasker

# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=mask_img,
                           runs=session,
                           standardize=True,
                           memory='nilearn_cache',
                           memory_level=1)
fmri_masked = nifti_masker.fit_transform(fmri_img)

from sklearn.feature_selection import f_classif
f_values, p_values = f_classif(fmri_masked, y)
p_values = -np.log10(p_values)
p_values[p_values > 10] = 10  # cap the -log10 p-values for display purposes
p_unmasked = get_data(nifti_masker.inverse_transform(p_values))

#########################################################################
# Visualization
# --------------
# Use the fMRI mean image as a surrogate for anatomical data
from nilearn import image
mean_fmri = image.mean_img(fmri_img)

from nilearn.plotting import plot_stat_map, plot_img, show
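# As a minimal sketch of this step, the (capped) -log10 p-values computed
# above can be displayed on top of the mean fMRI image; the display
# parameters are illustrative.
f_score_img = nifti_masker.inverse_transform(p_values)
plot_stat_map(f_score_img, bg_img=mean_fmri,
              title='F-scores (-log10 p-values)',
              colorbar=True)
show()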
from nilearn.maskers import NiftiMasker

brain_masker = NiftiMasker(smoothing_fwhm=6,
                           detrend=True,
                           standardize=True,
                           low_pass=0.1,
                           high_pass=0.01,
                           t_r=2,
                           memory='nilearn_cache',
                           memory_level=1,
                           verbose=0)

##########################################################################
# Then we extract the brain-wide voxel-wise time series while regressing
# out the confounds as before
brain_time_series = brain_masker.fit_transform(func_filename,
                                               confounds=[confound_filename])

##########################################################################
# We can now inspect the extracted time series. Note that the **seed time
# series** is an array with shape (n_volumes, 1), while the
# **brain time series** is an array with shape (n_volumes, n_voxels).

print("Seed time series shape: (%s, %s)" % seed_time_series.shape)
print("Brain time series shape: (%s, %s)" % brain_time_series.shape)

##########################################################################
# We can plot the **seed time series**.

import matplotlib.pyplot as plt

plt.plot(seed_time_series)
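# With both sets of time series in hand, a seed-to-voxel correlation map can
# be sketched as a dot product of the standardized signals, divided by the
# number of volumes (this assumes both maskers standardized the series, as
# configured above).
import numpy as np

seed_to_voxel_correlations = (np.dot(brain_time_series.T, seed_time_series) /
                              seed_time_series.shape[0])
print("Seed-to-voxel correlation shape: (%s, %s)" %
      seed_to_voxel_correlations.shape)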
# Example #9
sessions = labels['chunks']
condition_mask = conditions.isin(['face', 'house'])
conditions_encoded = conditions_encoded[condition_mask]

##############################################################################
# Mask data
mask_filename = haxby_dataset.mask
from nilearn.image import index_img
from nilearn.maskers import NiftiMasker
nifti_masker = NiftiMasker(smoothing_fwhm=8,
                           mask_img=mask_filename,
                           memory='nilearn_cache',
                           memory_level=1)  # cache options
func_filename = haxby_dataset.func[0]
func_reduced = index_img(func_filename, condition_mask)
fmri_masked = nifti_masker.fit_transform(func_reduced)

# We consider the mean image per session and per condition.
# Otherwise, the observations cannot be exchanged at random because
# a time dependence exists between observations within the same session.
n_sessions = np.unique(sessions).size
grouped_fmri_masked = np.empty((
    2 * n_sessions,  # two conditions per session
    fmri_masked.shape[1]))
grouped_conditions_encoded = np.empty((2 * n_sessions, 1))

for s in range(n_sessions):
    session_mask = sessions[condition_mask] == s
    session_house_mask = np.logical_and(session_mask,
                                        conditions[condition_mask] == 'house')
    session_face_mask = np.logical_and(session_mask,
                                       conditions[condition_mask] == 'face')