Example #1
# As a decoder, we use a Support Vector Classifier (SVC) with a linear kernel.
# We first create it by using :class:`nilearn.decoding.Decoder`.
from nilearn.decoding import Decoder

decoder = Decoder(estimator='svc', mask=mask_filename, standardize=True)

###########################################################################
# The decoder is an object that can be fit (or trained) on data with
# labels, and then used to predict labels for data without them.
#
# We first fit it on the data
decoder.fit(fmri_niimgs, conditions)

###########################################################################
# We can then predict the labels from the data
prediction = decoder.predict(fmri_niimgs)
print(prediction)

###########################################################################
# Let's measure the prediction accuracy:
print((prediction == conditions).sum() / float(len(conditions)))

###########################################################################
# This prediction accuracy score is meaningless. Why? Because the decoder was
# tested on the very data it was trained on.

###########################################################################
# Measuring prediction scores using cross-validation
# ---------------------------------------------------
#
# The proper way to measure error rates or prediction accuracy is via
# cross-validation: leaving out some data and testing on it.
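# A minimal sketch of that idea, assuming the `fmri_niimgs`, `conditions` and
# `mask_filename` variables from above; holding out the last 30 samples is
# only illustrative.
from nilearn.image import index_img

fmri_train = index_img(fmri_niimgs, slice(0, -30))
fmri_test = index_img(fmri_niimgs, slice(-30, None))
conditions_train = conditions[:-30]
conditions_test = conditions[-30:]

decoder = Decoder(estimator='svc', mask=mask_filename, standardize=True)
decoder.fit(fmri_train, conditions_train)
prediction = decoder.predict(fmri_test)
# Accuracy on data the decoder has never seen during training
print((prediction == conditions_test).sum() / float(len(conditions_test)))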
Example #2
# -------------------------------------------------
# As a complete pipeline by itself, the Decoder will perform cross-validation
# for the estimator, in this case a Support Vector Machine. We can output the
# best parameters selected for each cross-validation fold. See
# https://scikit-learn.org/stable/modules/cross_validation.html for an
# excellent explanation of how cross-validation works.
#
# First we fit the Decoder
decoder.fit(fmri_niimgs, y)
for i, (param, cv_score) in enumerate(
        zip(decoder.cv_params_['shoe']['C'], decoder.cv_scores_['shoe'])):
    print("Fold %d | Best SVM parameter: %.1f with score: %.3f" %
          (i + 1, param, cv_score))
# Output the prediction with the Decoder
y_pred = decoder.predict(fmri_niimgs)
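# The per-fold scores can also be summarised in a single number (a sketch
# reusing the 'shoe' class key from the loop above).
import numpy as np
print("Mean CV score: %.3f" % np.mean(decoder.cv_scores_['shoe']))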

###########################################################################
# Compute prediction scores with different values of screening percentile
# -----------------------------------------------------------------------
import numpy as np
from nilearn.image import index_img

screening_percentile_range = [2, 4, 8, 16, 32, 64]
cv_scores = []
val_scores = []

for sp in screening_percentile_range:
    decoder = Decoder(estimator='svc',
                      mask=mask_img,
                      smoothing_fwhm=4,
                      cv=3,
                      standardize=True,
                      # The remaining arguments and the loop body are a
                      # sketch: the original snippet is truncated here.
                      screening_percentile=sp,
                      scoring='accuracy')
    # Hold out the last 30 samples for validation (split size illustrative).
    decoder.fit(index_img(fmri_niimgs, slice(0, -30)), y[:-30])
    cv_scores.append(np.mean(decoder.cv_scores_['shoe']))
    y_pred = decoder.predict(index_img(fmri_niimgs, slice(-30, None)))
    val_scores.append(np.mean(y_pred == y[-30:]))
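# With both lists filled, a quick comparison shows which screening percentile
# gives the best cross-validated and held-out accuracy (a sketch reusing the
# variables defined above).
print("Best screening percentile by CV score: %d"
      % screening_percentile_range[np.argmax(cv_scores)])
print("Best screening percentile by validation score: %d"
      % screening_percentile_range[np.argmax(val_scores)])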
Example #3
# The Decoder selects the most relevant features with ANOVA-based screening
# (controlled by `screening_percentile`) and fits the estimator with nested
# cross-validation.
from nilearn.decoding import Decoder
# Here screening_percentile is set to 5 percent
mask_img = haxby_dataset.mask
decoder = Decoder(estimator='svc',
                  mask=mask_img,
                  smoothing_fwhm=4,
                  standardize=True,
                  screening_percentile=5,
                  scoring='accuracy')

#############################################################################
# Fit the decoder and predict
# ----------------------------
decoder.fit(func_img, conditions)
y_pred = decoder.predict(func_img)

#############################################################################
# Obtain prediction scores via cross validation
# -----------------------------------------------
# Define the cross-validation scheme used for validation. Here we use a
# LeaveOneGroupOut cross-validation on the session group, which corresponds to
# a leave-one-session-out scheme. We then pass the cross-validator object to
# the cv parameter of the Decoder. For more details please take a look at:
# <https://nilearn.github.io/stable/auto_examples/plot_decoding_tutorial.html#measuring-prediction-scores-using-cross-validation>
from sklearn.model_selection import LeaveOneGroupOut
cv = LeaveOneGroupOut()

decoder = Decoder(estimator='svc',
                  mask=mask_img,
                  standardize=True,
                  screening_percentile=5,
                  scoring='accuracy',
                  # The snippet is truncated here; the key point is passing
                  # the cross-validator defined above via the cv parameter.
                  cv=cv)
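# Fitting then proceeds as before, passing the session labels as the groups
# for the cross-validator (a sketch: `session_label` is assumed to hold one
# session identifier per sample and is not defined in this excerpt).
decoder.fit(func_img, conditions, groups=session_label)
print(decoder.cv_scores_)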
Example #4
# ---------------------------------
#
# Note that for this classification task both classes contain the same number
# of samples (the problem is balanced). We can therefore use accuracy to
# measure the performance of the decoder, which is done by setting accuracy as
# the `scoring`. A quick check of the class counts is sketched below.
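# A minimal sanity check of that balance (a sketch; `y_train` is the label
# array used for fitting below).
import numpy as np
labels, counts = np.unique(y_train, return_counts=True)
print(dict(zip(labels, counts)))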
from nilearn.decoding import Decoder
decoder = Decoder(estimator='svc', mask_strategy='background',
                  smoothing_fwhm=4, scoring='accuracy')

decoder.fit(X_train, y_train)
accuracy = np.mean(decoder.cv_scores_[b"house"]) * 100
print("Decoder cross-validation accuracy : %f%%" % accuracy)

# Testing on out-of-sample data
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("Decoder classification accuracy : %f%%" % accuracy)

######################################################################
# Visualization
# --------------

weight_img = decoder.coef_img_[b"face"]

from nilearn.image import mean_img
background_img = mean_img(func_filenames)

from nilearn.plotting import plot_stat_map, show
plot_stat_map(weight_img, background_img, cut_coords=[-52, -5],
              display_mode="yz")
show()