Example #1
import numpy as np
from nilearn.decoding import Decoder
from nilearn.input_data import NiftiMasker
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_val_score

# Chance-level baseline (a sketch: the exact strategy used in the full
# script may differ)
dummy_classifier = DummyClassifier(strategy='stratified', random_state=0)

# Loop over the anatomical masks shipped with the Haxby dataset
# (`mask_names`, `haxby_dataset`, `func_filename`, `task_mask`, `stimuli`,
# `categories`, `task_data`, `session_labels` and `cv` are defined earlier
# in the full script)
for mask_name in mask_names:
    print("Working on %s" % mask_name)
    # For decoding, standardizing is often very important
    mask_filename = haxby_dataset[mask_name][0]
    masker = NiftiMasker(mask_img=mask_filename, standardize=True)
    masked_timecourses = masker.fit_transform(func_filename)[task_mask]
    mask_scores[mask_name] = {}
    mask_chance_scores[mask_name] = {}

    for category in categories:
        print("Processing %s %s" % (mask_name, category))
        classification_target = (stimuli[task_mask] == category)
        # Specify the classifier to the decoder object.
        # With the decoder we can input the masker directly.
        # We use svc_l1 here because this is an intra-subject analysis.
        decoder = Decoder(estimator='svc_l1',
                          cv=cv,
                          mask=masker,
                          scoring='roc_auc')
        decoder.fit(task_data, classification_target, groups=session_labels)
        mask_scores[mask_name][category] = decoder.cv_scores_[1]
        print("Scores: %1.2f +- %1.2f" %
              (np.mean(mask_scores[mask_name][category]),
               np.std(mask_scores[mask_name][category])))

        mask_chance_scores[mask_name][category] = cross_val_score(
            dummy_classifier,
            masked_timecourses,
            classification_target,
            cv=cv,
            groups=session_labels,
            scoring="roc_auc",
        )
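
###########################################################################
# A short summary loop (a sketch, not part of the original excerpt): put
# each decoder score next to its chance-level baseline
for mask_name in mask_scores:
    for category in mask_scores[mask_name]:
        print("%s / %s: %.2f (chance: %.2f)" % (
            mask_name, category,
            np.mean(mask_scores[mask_name][category]),
            np.mean(mask_chance_scores[mask_name][category])))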
#############################################################################
# ANOVA pipeline with :class:`nilearn.decoding.Decoder` object
# ------------------------------------------------------------
#
# The Nilearn Decoder object aims to provide a smooth user experience by
# acting as a pipeline of several tasks: preprocessing with NiftiMasker,
# dimension reduction by selecting only relevant features with ANOVA -- a
# classical univariate feature selection based on the F-test -- and then
# decoding with different types of estimators (in this example, a Support
# Vector Machine with a linear kernel), using nested cross-validation.
from nilearn.decoding import Decoder
# Here screening_percentile is set to 5 percent
mask_img = haxby_dataset.mask
decoder = Decoder(estimator='svc',
                  mask=mask_img,
                  smoothing_fwhm=4,
                  standardize=True,
                  screening_percentile=5,
                  scoring='accuracy')

#############################################################################
# Fit the decoder and predict
# ----------------------------
decoder.fit(func_img, conditions)
y_pred = decoder.predict(func_img)

#############################################################################
# Obtain prediction scores via cross validation
# -----------------------------------------------
# Define the cross-validation scheme used for validation. Here we use
# LeaveOneGroupOut cross-validation on the session group, which corresponds
# to a leave-one-session-out scheme, and then pass the cross-validator
# object to the `cv` parameter of the decoder.
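#
# A minimal sketch of this step (`session_label`, holding the session of
# each volume, is assumed to be defined earlier in the full script):
from sklearn.model_selection import LeaveOneGroupOut

cv = LeaveOneGroupOut()
decoder = Decoder(estimator='svc',
                  mask=mask_img,
                  smoothing_fwhm=4,
                  standardize=True,
                  screening_percentile=5,
                  scoring='accuracy',
                  cv=cv)
decoder.fit(func_img, conditions, groups=session_label)
print(decoder.cv_scores_)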
Example #3
###########################################################################
# We apply the same mask to the targets
conditions = conditions[condition_mask]
# Convert to numpy array
conditions = conditions.values
print(conditions.shape)

###########################################################################
# Decoding with Support Vector Machine
# ------------------------------------
#
# As a decoder, we use a Support Vector Classification, with a linear kernel.
# We first create it by using :class:`nilearn.decoding.Decoder`.
from nilearn.decoding import Decoder

decoder = Decoder(estimator='svc', mask=mask_filename, standardize=True)

###########################################################################
# The decoder is an object that can be fit (or trained) on data with
# labels, and then used to predict labels on new data.
#
# We first fit it on the data
decoder.fit(fmri_niimgs, conditions)

###########################################################################
# We can then predict the labels from the data
prediction = decoder.predict(fmri_niimgs)
print(prediction)
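
###########################################################################
# A quick in-sample sanity check (a sketch; this is optimistic, since the
# decoder was fit on these very images)
print("In-sample accuracy: %.3f" % (prediction == conditions).mean())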

###########################################################################
# Let's measure the prediction accuracy properly, with cross-validation:
# below we compute prediction scores and the runtime for several different
# classifiers
import time

import numpy as np
from nilearn.decoding import Decoder
from sklearn.model_selection import LeaveOneGroupOut

cv = LeaveOneGroupOut()
classifiers_data = {}

# Estimator names understood by the Decoder (assumed to match the full
# example; adapt as needed)
classifiers = ['svc_l2', 'svc_l1', 'logistic_l1', 'logistic_l2',
               'ridge_classifier']

for classifier_name in sorted(classifiers):
    classifiers_data[classifier_name] = {}
    print(70 * '_')

    # By default, the decoder uses `roc_auc` as its score
    decoder = Decoder(estimator=classifier_name,
                      mask=mask_filename,
                      standardize=True,
                      cv=cv)
    t0 = time.time()
    decoder.fit(fmri_niimgs, classification_target, groups=session_labels)

    classifiers_data[classifier_name]['score'] = decoder.cv_scores_
    classifiers_data[classifier_name]['map'] = decoder.coef_img_['house']

    print("%10s: %.2fs" % (classifier_name, time.time() - t0))
    for category in categories:
        print("    %14s vs all -- AUC: %1.2f +- %1.2f" %
              (category,
               np.mean(classifiers_data[classifier_name]['score'][category]),
               np.std(classifiers_data[classifier_name]['score'][category])))
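
###########################################################################
# A possible summary figure (a sketch; assumes `matplotlib` is available
# and reuses the `categories` list from the full script)
import matplotlib.pyplot as plt

names = sorted(classifiers_data)
mean_scores = [np.mean([np.mean(classifiers_data[name]['score'][category])
                        for category in categories])
               for name in names]

plt.figure()
plt.barh(range(len(names)), mean_scores)
plt.yticks(range(len(names)), names)
plt.xlabel('Mean AUC across categories')
plt.tight_layout()
plt.show()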
Example #5
###########################################################################
# ANOVA pipeline with :class:`nilearn.decoding.Decoder` object
# ------------------------------------------------------------
#
# The Nilearn Decoder object aims to provide a smooth user experience by
# acting as a pipeline of several tasks: preprocessing with NiftiMasker,
# dimension reduction by selecting only relevant features with ANOVA -- a
# classical univariate feature selection based on the F-test -- and then
# decoding with different types of estimators (in this example, a Support
# Vector Machine with a linear kernel), using nested cross-validation.
from nilearn.decoding import Decoder
# Here screening_percentile is set to 2 percent, meaning around 800
# features will be selected with ANOVA.
decoder = Decoder(estimator='svc',
                  cv=5,
                  mask=mask_img,
                  smoothing_fwhm=4,
                  standardize=True,
                  screening_percentile=2)

###########################################################################
# Fit the Decoder and predict the responses
# ------------------------------------------
# As a complete pipeline by itself, decoder will perform cross-validation
# for the estimator, in this case Support Vector Machine. We can output the
# best parameters selected for each cross-validation fold. See
# https://scikit-learn.org/stable/modules/cross_validation.html for an
# excellent explanation of how cross-validation works.
#
# First we fit the Decoder
decoder.fit(fmri_niimgs, y)
# Output the best parameter selected on each cross-validation fold (the
# `'house'` key below is one class label from this dataset; adapt it to
# your own targets)
for i, (param, cv_score) in enumerate(
        zip(decoder.cv_params_['house']['C'], decoder.cv_scores_['house'])):
    print("Fold %d | Best SVM parameter: %.1f with score: %.3f"
          % (i + 1, param, cv_score))
Example #6
import numpy as np
from nilearn.image import index_img

func_filenames = data_files.func[0]
X_train = index_img(func_filenames, condition_mask_train)
X_test = index_img(func_filenames, condition_mask_test)
y_train = target[condition_mask_train]
y_test = target[condition_mask_test]

######################################################################
# Fit and predict with the decoder
# ---------------------------------
#
# Note that for this classification task both classes contain the same number
# of samples (the problem is balanced), so we can use accuracy to measure the
# performance of the decoder. This is done by setting `scoring` to
# 'accuracy'.
from nilearn.decoding import Decoder
decoder = Decoder(estimator='svc', mask_strategy='background',
                  smoothing_fwhm=4, scoring='accuracy')

decoder.fit(X_train, y_train)
accuracy = np.mean(decoder.cv_scores_[b"house"]) * 100
print("Decoder cross-validation accuracy : %f%%" % accuracy)

# Testing on out-of-sample data
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("Decoder classification accuracy : %f%%" % accuracy)

######################################################################
# Visualization
# --------------

weight_img = decoder.coef_img_[b"face"]
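
###########################################################################
# One way to look at these weights (a sketch; `plot_stat_map` comes from
# :mod:`nilearn.plotting`)
from nilearn.plotting import plot_stat_map, show

plot_stat_map(weight_img, title='SVM weights for face')
show()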
###########################################################################
# We now define the decoder that will operate on the GLM z-maps. Note that:
#
#     * although standardizing usually helps decoding, z-map time series do
#       not need to be rescaled to zero mean and unit variance, so we use
#       standardize=False.
#
#     * we use univariate feature selection to reduce the dimension of the
#       problem, keeping only the 5% most informative voxels.
#
#     * a cross-validation scheme is needed: here we use LeaveOneGroupOut
#       cross-validation on the sessions, which corresponds to a
#       leave-one-session-out scheme.
#
# We fit this pipeline directly on the Niimg outputs of the GLM, with the
# corresponding condition labels and session labels (for cross-validation).

import numpy as np
from nilearn.decoding import Decoder
from sklearn.model_selection import LeaveOneGroupOut

decoder = Decoder(estimator='svc',
                  mask=haxby_dataset.mask,
                  standardize=False,
                  screening_percentile=5,
                  cv=LeaveOneGroupOut())
decoder.fit(z_maps, conditions_label, groups=session_label)

# Compute the mean prediction accuracy and compare it to chance level

classification_accuracy = np.mean(list(decoder.cv_scores_.values()))
chance_level = 1. / len(np.unique(conditions_label))
print('Classification accuracy: {:.4f} / Chance level: {}'.format(
    classification_accuracy, chance_level))
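
###########################################################################
# `decoder.cv_scores_` maps each condition to its fold-wise scores, which
# is what the mean above aggregates over; a sketch of a per-condition
# breakdown:
for condition, scores in decoder.cv_scores_.items():
    print('%s: %.3f +/- %.3f' % (condition, np.mean(scores), np.std(scores)))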