Example #1
from nilearn.plotting import view_img, view_img_on_surf


def _viewer(brain, thresh, idx, percentile_threshold, surface, anatomical,
            **kwargs):
    if thresh == 0:
        thresh = 1e-6
    else:
        if percentile_threshold:
            thresh = str(thresh) + "%"
    if isinstance(idx, int):
        b = brain[idx].to_nifti()
    else:
        b = brain.to_nifti()
    if anatomical:
        bg_img = anatomical
    else:
        bg_img = "MNI152"
    cut_coords = kwargs.get("cut_coords", [0, 0, 0])

    if surface:
        return view_img_on_surf(b, threshold=thresh, **kwargs)
    else:
        return view_img(b,
                        bg_img=bg_img,
                        threshold=thresh,
                        cut_coords=cut_coords,
                        **kwargs)
Example #2
dmn = '/Users/Katie/Dropbox/Katie and Angie/2011 BrainMap ICA Maps/18-network-parcellation/comp13-bin.nii.gz'
cen = '/Users/Katie/Dropbox/Katie and Angie/2011 BrainMap ICA Maps/cen_combo.nii.gz'
sal = '/Users/Katie/Dropbox/Katie and Angie/2011 BrainMap ICA Maps/18-network-parcellation/comp4-bin.nii.gz'

# In[37]:

contrast_colors = sns.blend_palette(['#ff4c80', '#00a572'],
                                    n_colors=2,
                                    as_cmap=True)
color1 = sns.light_palette("#ff467e", as_cmap=True)
color2 = sns.light_palette("#00efd8", as_cmap=True)
color3 = sns.light_palette("#cedf3f", as_cmap=True)

# In[38]:

dmn_html = plotting.view_img_on_surf(dmn, threshold=0.9, cmap=color1)
dmn_html.save_as_html('dmn_surf.html')
cen_html = plotting.view_img_on_surf(cen, threshold=0.9, cmap=color2)
cen_html.save_as_html('cen_surf.html')
sal_html = plotting.view_img_on_surf(sal, threshold=0.9, cmap=color3)
sal_html.save_as_html('sal_surf.html')

# In[40]:

# making Figure 2 heatmaps
fig2_df = df.drop(columns=[
    'Cohort', 'GPA.PreSem', 'Phy48Grade', 'AgeOnScanDate', 'Pre_SNA', 'Pre_BK',
    'Pre_ScA', 'Pre_MA', 'Pre_STEMcomposite', 'Post_SNA', 'Post_BK',
    'Post_ScA', 'Post_MA', 'Post_STEMcomposite', 'Post_Clustering',
    'Pre_Clustering', 'Post_Efficiency', 'Pre_Efficiency', 'Pre_BK_Sq',
    'Post_BK_Sq', 'Pre_SNA_Sq', 'Pre_ScA_Sq', 'Post_ScA_Sq', 'Pre_MA_Sq',
Example #3
# Assumes beta_masked, AMBIG, GROUP, masker and T1 are defined earlier in the script.
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
from nilearn import plotting

svc_linear = SVC(kernel='linear')
svc_linear.fit(beta_masked, AMBIG)
prediction = svc_linear.predict(beta_masked)

### Unmasking & visualization
coef_img = masker.inverse_transform(svc_linear.coef_)
threshold = 1e-3
plotting.plot_stat_map(coef_img,
                       bg_img=T1,
                       threshold=threshold,
                       display_mode='z')
# Orth view
view = plotting.view_img(coef_img, bg_img=T1, threshold=threshold)
view.open_in_browser()
# Surf view
view = plotting.view_img_on_surf(coef_img, threshold=threshold)
view.open_in_browser()

### Cross validation
svc_crossval = SVC(kernel='linear')
cv = LeaveOneGroupOut()
cv_scores = cross_val_score(svc_crossval,
                            beta_masked,
                            AMBIG,
                            cv=cv,
                            groups=GROUP,
                            n_jobs=-1,
                            verbose=10)
print(cv_scores)
classification_accuracy = np.mean(cv_scores)
print(classification_accuracy)

######################################################################
plotting.plot_stat_map(
    img, cut_coords=[-18, 64], display_mode='z', threshold=3.)


######################################################################
plotting.plot_glass_brain(img, plot_abs=False, threshold=3.)


######################################################################
# Visualize projections on the cortical surface
# ---------------------------------------------

plotting.view_img_on_surf(img, threshold='95%', surf_mesh='fsaverage')


######################################################################
# Atlases
# =======

destrieux = datasets.fetch_atlas_destrieux_2009()
plotting.view_img(destrieux['maps'],
                  resampling_interpolation='nearest',
                  cmap='gist_ncar', symmetric_cmap=False, colorbar=False)
plotting.plot_roi(destrieux['maps'])


######################################################################
# Harvard-Oxford probabilistic (4D) atlas
Example #5
plt.hist(list(dict(G.degree).values()))  # degree distribution across nodes
plt.ylabel('Frequency', fontsize=18)
plt.xlabel('Degree', fontsize=18)

What if we wanted to map the degree of each node back onto the brain?

This would allow us to visualize which of the parcels had more direct pairwise connections.

To do this, we will scale each ROI in our expanded binary mask by its node degree, recast the result as a Brain_Data object, and sum across all ROIs; the `roi_to_brain` helper below does exactly this.

degree = pd.Series(dict(G.degree()))
brain_degree = roi_to_brain(degree, mask_x)
brain_degree.plot()

view_img_on_surf(brain_degree.to_nifti())

This analysis shows that the insula is one of the regions with the highest degree. This is a fairly classic [finding](https://link.springer.com/article/10.1007/s00429-010-0262-0): the insula is frequently found to be highly connected with other regions. Of course, we are only looking at one subject in a very short task (and using a completely arbitrary cutoff), so we would need to show that this survives correction in a group analysis.

## Exercises

Let's practice what we learned through a few different exercises.

### 1) Let's calculate seed-based functional connectivity using a different ROI - the right motor cortex

- Calculate functional connectivity using roi=48 with the whole brain (a minimal sketch follows below).


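A minimal sketch of one way to do this (assuming the `data` and `mask_x` objects from earlier in this tutorial, where `data` is the subject's preprocessed time series as a Brain_Data object and `mask_x` is the expanded parcellation, and that index 48 is the right motor cortex in this parcellation):

import numpy as np
import pandas as pd

roi = 48
roi_mask = mask_x[roi]

# average time series within the seed region
seed_ts = data.extract_roi(roi_mask)

# correlate the seed time series with every voxel's time series
voxelwise_r = pd.DataFrame(data.data).apply(lambda ts: np.corrcoef(seed_ts, ts)[0, 1])

# put the correlations back into brain space and view them on the surface
seed_conn = data.mean()
seed_conn.data = voxelwise_r.values
view_img_on_surf(seed_conn.to_nifti())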

### 2) Calculate a group level analysis for this connectivity analysis
- this will require running this analysis over all subjects
Example #6
                          bg_map=fsaverage.sulc_right)

# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell
view

##############################################################################

# uncomment this to open the plot in a web browser:
# view.open_in_browser()

##############################################################################
# We don't need to do the projection ourselves, we can use
# :func:`~nilearn.plotting.view_img_on_surf`:

view = plotting.view_img_on_surf(stat_img, threshold='90%')
# view.open_in_browser()

view

##############################################################################
# Impact of plot parameters on visualization
# ------------------------------------------
#
# You can specify arguments to be passed on to the function
# :func:`nilearn.surface.vol_to_surf` using `vol_to_surf_kwargs`. This allows
# fine-grained control of how the input 3D image is resampled and interpolated -
# for example if you are viewing a volumetric atlas, you would want to avoid
# averaging the labels between neighboring regions. Using nearest-neighbor
# interpolation with zero radius will achieve this.
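#
# A minimal sketch of such a call (assuming ``stat_img`` from above and a
# nilearn version recent enough to expose ``vol_to_surf_kwargs``):

view = plotting.view_img_on_surf(
    stat_img,
    threshold='95%',
    vol_to_surf_kwargs={'n_samples': 1, 'radius': 0.0, 'interpolation': 'nearest'},
)
# view.open_in_browser()
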
##############################################################################
# 3D visualization in a web browser
# ---------------------------------
# An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use
# :func:`nilearn.plotting.view_surf` or
# :func:`nilearn.plotting.view_img_on_surf` that give more interactive
# visualizations in a web browser. See :ref:`interactive-surface-plotting` for
# more details.

view = plotting.view_surf(fsaverage.infl_right,
                          texture,
                          threshold='90%',
                          bg_map=fsaverage.sulc_right)
# uncomment this to open the plot in a web browser:
# view.open_in_browser()

##############################################################################
# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell

view

Example #8
# regions that are most strongly associated with the query in the neuroimaging
# literature. It is a `Nifti1Image` which can be saved, displayed, etc.

from nilearn import plotting

print(type(response["z_map"]))
plotting.plot_stat_map(response["z_map"],
                       display_mode="z",
                       title="aphasia",
                       threshold=3.1)

######################################################################
#

# Display the map on the cortical surface:
view = plotting.view_img_on_surf(response["z_map"], threshold=3.1)
view.open_in_browser()
# (in a Jupyter notebook, we can display an inline view):
view

######################################################################
#

# Or open interactive viewer:
view = plotting.view_img(response["z_map"], threshold=3.1)
view.open_in_browser()
# (in a Jupyter notebook, we can display an inline view):
view

######################################################################
# "similar_words" is a DataFrame containing terms that are related to the
Example #9
root_dir = '/raid/data'
task = 'dj'
base_dir = root_dir + '/shared/KK_KR_JLBS/Longitudinal_Data/W1_W2/MRI/FMRI/group_analyses/' + task
files = [
    f for f in os.listdir(base_dir)
    if 'masked.nii.gz' in f and '._' not in f
]
paths = [base_dir + '/' + s for s in files]

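# export an interactive surface view (HTML) of each thresholded results map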
for i in range(len(paths)):
    img = load_img(paths[i])
    data = img.get_fdata()
    min_thr = min(abs(data[abs(data) > 0]))
    title = files[i].replace('_masked.nii.gz', '').replace('results_', '')
    view = plotting.view_img_on_surf(img, threshold=min_thr, title=title)
    out_file = base_dir + '/results_images/' + files[i].replace(
        '.nii.gz', '.html')
    view.save_as_html(out_file)

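# also render a static surface figure for each map (the output paths end in .svg)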
for i in range(len(paths)):
    img = load_img(paths[i])
    data = img.get_fdata()
    min_thr = min(abs(data[abs(data) > 0]))
    title = files[i].replace('_masked.nii.gz', '').replace('results_', '')
    out_file = base_dir + '/results_images/' + files[i].replace(
        '.nii.gz', '.svg')
    plotting.plot_img_on_surf(img,
                              views=['lateral', 'medial'],
                              hemispheres=['left', 'right'],
                              colorbar=True,
Example #10
    rsn = subject_img
    #convert to 3d image
    first_rsn = image.index_img(rsn, 0)
    print(first_rsn.shape)
    plotting.plot_roi(first_rsn)
    print("-" * 50)

#%%
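# pick one random training subject and show its first spatial map as a glass brain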
files = random.choices(
    os.listdir('E:/Dataset/trends-assessment-prediction/fMRI_train/'), k=1)
for file in files:
    subject = os.path.join(
        'E:/Dataset/trends-assessment-prediction/fMRI_train/', file)
    subject_img = load_subject(subject, mask_img)
    print("Image shape is %s" % (str(subject_img.shape)))
    num_components = subject_img.shape[-1]
    print("Detected {num_components} spatial maps".format(
        num_components=num_components))
    rsn = subject_img
    #convert to 3d image
    first_rsn = image.index_img(rsn, 0)
    print(first_rsn.shape)
    plotting.plot_glass_brain(first_rsn, display_mode='lyrz')
    print("-" * 50)

#%%
motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]
view = plotting.view_img_on_surf(first_rsn, threshold='90%')
view.open_in_browser()
view
Example #11
# - **dist_from_hyperplane_all**: how far the prediction is from the classifier hyperplane through feature space, > 0 indicates left, while < 0 indicates right.
# - **intercept**: scalar value which indicates how much to add to the prediction to get the correct class label.
# - **weight_map**: multivariate brain model
# - **mcr_all**: overall model accuracy in classifying training data

# In[12]:

print(svm_stats.keys())

# You can see that the model can perfectly discriminate between left and right using the training data. This is great, but we shouldn't get our hopes up: this model is completely overfit to the training data. To get an unbiased estimate of the accuracy, we will need to test the model on independent data.
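#
# A minimal sketch of that step (not this notebook's actual code), assuming the
# training data is a Brain_Data object named `data_train` with labels in
# `data_train.Y`, and using nltools' `cv_dict` interface for k-fold
# cross-validation:

svm_cv_stats = data_train.predict(algorithm='svm',
                                  cv_dict={'type': 'kfolds', 'n_folds': 5},
                                  plot=False,
                                  **{'kernel': 'linear'})
print(svm_cv_stats.keys())
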
#
# We can also examine the model weights more thoroughly by plotting them.  This shows the expected motor cortex representation, but notice that many other regions also contribute to the prediction.

# In[6]:

view_img_on_surf(svm_stats['weight_map'].to_nifti())

# ### Feature Selection
#
# Feature selection describes the process of deciding which features to include when training the model.  Here it is simply: which voxels should we use to train the model?
#
# There are several ways to perform feature selection.  Searchlights are a popular approach.  I personally have a preference for using parcellation schemes.
#  - Parcellations are orders of magnitude computationally less expensive than searchlights.
#  - Parcellations are easier to correct for multiple comparisons (50 vs 300k)
#  - Parcellations can include regions distributed throughout the brain (searchlights are only local)
#  - Parcellations can be integrated into a meta-model.
#
# Here we download a single 50-parcel map from a forthcoming paper on conducting automated parcellations using neurosynth.
#
#     Yarkoni, T., de la Vega, A., & Chang, L.J. (In Prep).  Fully automated meta-analytic clustering and decoding of human brain activity
#
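# A minimal sketch of the parcellation approach (not this notebook's actual
# code), assuming `data_train` is the training Brain_Data with labels in
# `data_train.Y` and `mask` is the 50-parcel map referred to above:

from nltools.mask import expand_mask

mask_x = expand_mask(mask)  # one binary mask per parcel
roi_accuracy = []
for m in mask_x:
    # restrict the features to this parcel and train a linear SVM on it
    roi_data = data_train.apply_mask(m)
    roi_data.Y = data_train.Y
    roi_stats = roi_data.predict(algorithm='svm', plot=False,
                                 **{'kernel': 'linear'})
    roi_accuracy.append(roi_stats['mcr_all'])
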
# An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use
# :func:`nilearn.plotting.view_surf` or
# :func:`nilearn.plotting.view_img_on_surf`, which give more interactive
# visualizations in a web browser. See :ref:`interactive-surface-plotting` for
# more details.

view = plotting.view_surf(fsaverage.infl_right,
                          texture,
                          threshold='90%',
                          bg_map=fsaverage.sulc_right)
# open the plot in a web browser:
view.open_in_browser()

##############################################################################
# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell

#view

##############################################################################
# We don't need to do the projection ourselves, we can use view_img_on_surf:

view = plotting.view_img_on_surf(stat_img,
                                 threshold='90%',
                                 surf_mesh='fsaverage')
view.open_in_browser()

#view

from nilearn import plotting, datasets
img = datasets.fetch_localizer_button_task()['tmap']
view = plotting.view_img_on_surf(img, threshold='90%', surf_mesh='fsaverage')
view.open_in_browser()
plotting.show()


##############################################################################
# 3D visualization in a web browser
# ---------------------------------
# An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use
# :func:`nilearn.plotting.view_surf` or
# :func:`nilearn.plotting.view_img_on_surf` that give more interactive
# visualizations in a web browser. See :ref:`interactive-surface-plotting` for
# more details.

view = plotting.view_surf(fsaverage.infl_right, texture, threshold='90%',
                          bg_map=fsaverage.sulc_right)
# uncomment this to open the plot in a web browser:
# view.open_in_browser()

##############################################################################
# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell

view

##############################################################################
# We don't need to do the projection ourselves, we can use view_img_on_surf:

view = plotting.view_img_on_surf(stat_img, threshold='90%')
# view.open_in_browser()

view
Example #14
the results of this analysis are stored in a dictionary.

- **Y**: training labels
- **yfit_all**: predicted labels
- **dist_from_hyperplane_all**: how far the prediction is from the classifier hyperplane through feature space, > 0 indicates left, while < 0 indicates right.
- **intercept**: scalar value which indicates how much to add to the prediction to get the correct class label.
- **weight_map**: multivariate brain model
- **mcr_all**: overall model accuracy in classifying training data

print(svm_stats.keys())

You can see that the model can perfectly discriminate between left and right using the training data. This is great, but we shouldn't get our hopes up: this model is completely overfit to the training data. To get an unbiased estimate of the accuracy, we will need to test the model on independent data.

We can also examine the model weights more thoroughly by plotting them.  This shows the expected motor cortex representation, but notice that many other regions also contribute to the prediction.

view_img_on_surf(svm_stats['weight_map'].to_nifti())

### Feature Selection

Feature selection describes the process of deciding which features to include when training the model.  Here it is simply: which voxels should we use to train the model?

There are several ways to perform feature selection.  Searchlights are a popular approach.  I personally have a preference for using parcellation schemes.
 - Parcellations are orders of magnitude computationally less expensive than searchlights.
 - Parcellations are easier to correct for multiple comparisons (50 vs 300k)
 - Parcellations can include regions distributed throughout the brain (searchlights are only local)
 - Parcellations can be integrated into a meta-model.
 
Here we download a single 50-parcel map from a forthcoming paper on conducting automated parcellations using neurosynth.

    Yarkoni, T., de la Vega, A., & Chang, L.J. (In Prep).  Fully automated meta-analytic clustering and decoding of human brain activity
def main():

    parser = argparse.ArgumentParser(
        description='Encoding model analysis for Algonauts 2021')
    parser.add_argument('-rd',
                        '--result_dir',
                        help='saves predicted fMRI activity',
                        default='./results',
                        type=str)
    parser.add_argument('-ad',
                        '--activation_dir',
                        help='directory containing DNN activations',
                        default='./alexnet/',
                        type=str)
    parser.add_argument(
        '-model',
        '--model',
        help='model name under which predicted fMRI activity will be saved',
        default='alexnet_devkit',
        type=str)
    parser.add_argument(
        '-l',
        '--layer',
        help=
        'layer from which activations will be used to train and predict fMRI activity',
        default='layer_5',
        type=str)
    parser.add_argument(
        '-sub',
        '--sub',
        help='subject number from which real fMRI data will be used',
        default='sub04',
        type=str)
    parser.add_argument(
        '-r',
        '--roi',
        help='brain region, from which real fMRI data will be used',
        default='EBA',
        type=str)
    parser.add_argument(
        '-m',
        '--mode',
        help=
        # %% escapes the literal % for argparse's help formatter
        'test or val; val returns mean correlation by using 10%% of training data for validation',
        default='val',
        type=str)
    parser.add_argument('-fd',
                        '--fmri_dir',
                        help='directory containing fMRI activity',
                        default='./participants_data_v2021',
                        type=str)
    parser.add_argument(
        '-v',
        '--visualize',
        help='visualize whole brain results in MNI space or not',
        default=True,
        type=bool)
    parser.add_argument(
        '-b',
        '--batch_size',
        help=
        'number of voxels to fit at one time in case of memory constraints',
        default=1000,
        type=int)
    args = vars(parser.parse_args())

    mode = args['mode']  # test or val
    sub = args['sub']
    ROI = args['roi']
    model = args['model']
    layer = args['layer']
    visualize_results = args['visualize']
    batch_size = args[
        'batch_size']  # number of voxels to fit at one time in case of memory constraints

    if torch.cuda.is_available():
        use_gpu = True
    else:
        use_gpu = False

    if ROI == "WB":
        track = "full_track"
    else:
        track = "mini_track"

    activation_dir = os.path.join(args['activation_dir'], 'pca_100')
    fmri_dir = os.path.join(args['fmri_dir'], track)

    sub_fmri_dir = os.path.join(fmri_dir, sub)
    results_dir = os.path.join(args['result_dir'], args['model'],
                               args['layer'], track, sub)
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    print("ROi is : ", ROI)

    train_activations, test_activations = get_activations(
        activation_dir, layer)
    if track == "full_track":
        fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)
    else:
        fmri_train_all = get_fmri(sub_fmri_dir, ROI)
    num_voxels = fmri_train_all.shape[1]
    if mode == 'val':
        # Here as an example we use first 900 videos as training and rest of the videos as validation
        test_activations = train_activations[900:, :]
        train_activations = train_activations[:900, :]
        fmri_train = fmri_train_all[:900, :]
        fmri_test = fmri_train_all[900:, :]
        pred_fmri = np.zeros_like(fmri_test)
        pred_fmri_save_path = os.path.join(results_dir, ROI + '_val.npy')
    else:
        fmri_train = fmri_train_all
        num_test_videos = 102
        pred_fmri = np.zeros((num_test_videos, num_voxels))
        pred_fmri_save_path = os.path.join(results_dir, ROI + '_test.npy')

    print("number of voxels is ", num_voxels)
    iter = 0
    while iter < num_voxels - batch_size:
        pred_fmri[:, iter:iter + batch_size] = predict_fmri_fast(
            train_activations,
            test_activations,
            fmri_train[:, iter:iter + batch_size],
            use_gpu=use_gpu)
        iter = iter + batch_size
        print((100 * iter) // num_voxels, " percent complete")
    # fit the remaining (last, possibly smaller) batch of voxels
    pred_fmri[:, iter:] = predict_fmri_fast(train_activations,
                                            test_activations,
                                            fmri_train[:, iter:],
                                            use_gpu=use_gpu)

    if mode == 'val':
        score = vectorized_correlation(fmri_test, pred_fmri)
        print(
            "----------------------------------------------------------------------------"
        )
        print("Mean correlation for ROI : ", ROI, "in ", sub, " is :",
              round(score.mean(), 3))

        # result visualization for whole brain (full_track)
        if track == "full_track" and visualize_results:
            visual_mask_3D = np.zeros((78, 93, 71))
            visual_mask_3D[voxel_mask == 1] = score
            brain_mask = './example.nii'
            nii_save_path = os.path.join(results_dir, ROI + '_val.nii')
            saveasnii(brain_mask, nii_save_path, visual_mask_3D)
            view = plotting.view_img_on_surf(nii_save_path, threshold=None, surf_mesh='fsaverage',\
                                            title = 'Correlation for sub' + sub, colorbar=False)
            view_save_path = os.path.join(results_dir, ROI + '_val.html')
            view.save_as_html(view_save_path)
            print("Results saved in this directory: ", results_dir)
            view.open_in_browser()

    np.save(pred_fmri_save_path, pred_fmri)

    print(
        "----------------------------------------------------------------------------"
    )
    print("ROI done : ", ROI)