Example #1
def _get_atlas():
    """Fetch small atlas for testing purposes"""
    # save to path in test directory to only download atlas once
    test_path = os.path.dirname(__file__)
    data_dir = os.path.join(test_path, 'data-dir')
    os.makedirs(data_dir, exist_ok=True)
    return fetch_atlas_aal(data_dir=data_dir)
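
A minimal usage sketch (names as in the snippet; the Bunch returned by fetch_atlas_aal exposes maps, labels, and indices):

atlas = _get_atlas()
print(atlas.maps)        # path to the AAL label image
print(atlas.labels[:3])  # first few region names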
Example #2
    def map_brain(self, average_proportions, selection_method):

        dataset = datasets.fetch_atlas_aal()
        aal_img = nib.load(dataset["maps"])
        # get_data() is deprecated in nibabel; get_fdata() returns floats,
        # so cast to int before matching the string-valued AAL indices
        aal_data = aal_img.get_fdata()

        new_data = np.zeros(aal_img.shape, dtype='>i2')
        for x in range(aal_data.shape[0]):
            for y in range(aal_data.shape[1]):
                for z in range(aal_data.shape[2]):

                    voxel_id = str(int(aal_data[x, y, z]))
                    if voxel_id in dataset.indices:

                        # find the position in indices and look up the
                        # corresponding area in labels
                        roi = dataset.labels[dataset.indices.index(voxel_id)]
                        if roi in average_proportions:
                            new_value = average_proportions[roi] * 100
                        else:
                            new_value = 0
                        new_data[x, y, z] = new_value

        aal_new = nib.Nifti1Image(new_data, aal_img.affine)
        hot = cm.get_cmap('hot_r')
        vmin = 0
        vmax = 55

        plotting.plot_roi(aal_new, cmap=hot, colorbar=True, vmin=vmin,
                          vmax=vmax,
                          output_file=self.save_dir + self.analysis_method +
                          "_brain_map_" + selection_method + "_" +
                          self.paradigm + ".png")
        plotting.show()
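
The voxel-wise triple loop above works but is slow; here is a hedged, behavior-equivalent sketch (assuming the same `dataset`, `aal_img`, and `average_proportions` objects as in the snippet) that fills the volume one region at a time with numpy instead:

import numpy as np

# map each integer AAL index to its scaled proportion (0 where missing)
values = {int(idx): average_proportions.get(lab, 0) * 100
          for idx, lab in zip(dataset.indices, dataset.labels)}
aal_ids = aal_img.get_fdata().astype(int)
new_data = np.zeros(aal_img.shape, dtype='>i2')
for region_id, value in values.items():
    new_data[aal_ids == region_id] = value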
Example #3
    def _load_4D_func(self, args):

        if args['atlas'] == 'aal':
            atlas_filename = fetch_atlas_aal().maps
        else:
            atlas_filename = fetch_atlas_harvard_oxford(
                'cort-maxprob-thr25-2mm').maps

        data = Parallel(n_jobs=args['n_cores'])(delayed(
            _parallelize_4D_func_loading)(f, atlas_filename, args['method'])
                                                for f in args['paths'])

        data = np.concatenate(data, axis=0)

        # Load the first func for some meta-data
        func = nib.load(args['paths'][0])
        self.voxel_idx.append(np.arange(data.shape[1]))
        self.affine.append(func.affine)
        self.data_shape.append(func.shape)
        feature_ids = np.ones(data.shape[1], dtype=np.uint32) * len(self.X)
        self.featureset_id.append(feature_ids)
        self.X.append(data)
Example #4
def _fetch_aal():
    """ The AAL atlas does not contain a background label.
    To make the API consistent we fix it here.
    """
    aal = datasets.fetch_atlas_aal()
    aal['labels'] = ['Background'] + aal['labels']
    return aal
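
A hedged illustration of what the fix buys: Harvard-Oxford's label list already starts with 'Background', so after this patch both atlases can be handled by the same index-0-skipping code (the assertions below are a sketch, assuming `datasets` is imported as in the snippet):

aal = _fetch_aal()
ho = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
assert aal['labels'][0] == 'Background'
assert ho['labels'][0] == 'Background'
region_names = aal['labels'][1:]  # same convention for both atlases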
Example #5
    def apply_mask(self, atlas="AAL"):

        if atlas == "AAL":
            # load atlas
            atlas_filename = datasets.fetch_atlas_aal(version="SPM12",
                                                      verbose=0).maps
        elif atlas == "multiscale":
            raise NotImplementedError()
        else:
            raise ValueError("Altas should be 'AAL' or 'multiscale'")

        # set mask
        masker = NiftiLabelsMasker(labels_img=atlas_filename,
                                   standardize=True,
                                   detrend=True,
                                   low_pass=0.08,
                                   high_pass=0.01,
                                   t_r=3.7,
                                   memory="nilearn_cache",
                                   verbose=0)

        # apply mask to data
        confounds = high_variance_confounds(self.fmri_filename,
                                            n_confounds=1,
                                            detrend=True)
        ts_hvar = masker.fit_transform(self.fmri_filename, confounds=confounds)

        return ts_hvar
Example #6
    def map_brain(self, average_proportions, selection_method, paradigm):

        self.paradigm = paradigm

        dataset = datasets.fetch_atlas_aal()
        aal_img = nib.load(dataset["maps"])
        aal_data = aal_img.get_fdata()  # get_data() is deprecated in nibabel
        roi_ids = []

        new_data_rank = np.zeros(aal_img.shape, dtype='>i2')
        new_data_proportion = np.zeros(aal_img.shape, dtype='>i2')
        for x in range(aal_data.shape[0]):
            for y in range(aal_data.shape[1]):
                for z in range(aal_data.shape[2]):

                    # get_fdata() yields floats; cast to int before matching
                    # the string-valued AAL indices
                    voxel_id = str(int(aal_data[x, y, z]))
                    if voxel_id in dataset.indices:

                        # find the position in indices and look up the
                        # corresponding area in labels
                        roi = dataset.labels[dataset.indices.index(voxel_id)]
                        if roi in average_proportions:

                            if average_proportions[roi]["accuracy"][
                                    "mean"] > 0.52:
                                if roi not in roi_ids:
                                    roi_ids.append(roi)
                                print(roi)
                                new_rank_value = average_proportions[roi][
                                    "rank"]["mean"]
                                print(new_rank_value)
                                new_proportion_value = average_proportions[
                                    roi]["percentage_of_max"]["mean"]
                            else:
                                new_rank_value = 0
                                new_proportion_value = 0
                        else:
                            new_rank_value = 0
                            new_proportion_value = 0
                        new_data_rank[x][y][z] = new_rank_value
                        new_data_proportion[x][y][z] = new_proportion_value

        print(roi_ids)
        for roi in roi_ids:
            roi_id = dataset.indices[dataset.labels.index(roi)]
            roi_map = image.math_img('img == %s' % roi_id, img=dataset.maps)
            plotting.plot_roi(roi_map, title=roi)

        aal_new = nib.Nifti1Image(new_data_rank, aal_img.affine)
        hot = cm.get_cmap('hot')
        plotting.plot_roi(aal_new,
                          cmap=hot,
                          colorbar=True,
                          output_file=self.save_dir + self.analysis_method +
                          "_brain_map_rank_" + selection_method + "_" +
                          self.paradigm + "_>.52.png")
        plotting.show()
Example #7
def coordinate_label(mni_coord, atlas='aal', thresh=None, ret_proba=False):

    if atlas == 'aal':
        atl = datasets.fetch_atlas_aal()
        atl.prob = False
    elif atlas == 'harvard_oxford':
        atl = datasets.fetch_atlas_harvard_oxford('cort-prob-2mm')
        atl.prob = True

    elif atlas == 'destrieux':
        atl = datasets.fetch_atlas_destrieux_2009()
        atl.indices = atl.labels['index']
        atl.labels = atl.labels['name']
        atl.prob = False
    else:
        raise ValueError(
            "atlas should be 'aal', 'harvard_oxford' or 'destrieux'")

    atl_map = load_img(atl.maps)
    atl_aff = atl_map.affine

    atl_labels = atl.labels
    if not atl.prob:
        atl_indices = atl.indices

    labels_out = list()

    for coord in mni_coord:

        mat_coord = np.asarray(resampling.coord_transform(
            coord[0], coord[1], coord[2], np.linalg.inv(atl_aff)),
                               dtype=int)

        if atl.prob and ret_proba:

            lab_out = get_prob_atlas_label(atl_map,
                                           atl_labels,
                                           mat_coord,
                                           thresh=thresh)

        elif atl.prob and not ret_proba:

            lab_out, _ = get_prob_atlas_label(atl_map,
                                              atl_labels,
                                              mat_coord,
                                              thresh=thresh)

        else:

            lab_out = get_atlas_label(atl_map, atl_labels, atl_indices,
                                      mat_coord)

        labels_out.append(lab_out)

    return labels_out
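
A hedged usage sketch (the coordinates are illustrative, not from the original code):

coords = [(-42, -22, 10), (46, 20, 8)]  # illustrative MNI coordinates
print(coordinate_label(coords, atlas='aal'))
print(coordinate_label(coords, atlas='harvard_oxford', thresh=25,
                       ret_proba=True))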
Example #8
def make_masker(scheme):
    '''
    Parameters
    ----------
    scheme : String
        The type of parcellation wanted.

    Returns
    -------
    masker: nilearn.input_data.NiftiLabelsMasker
        Masker of the chosen scheme.
    labels: list
        Labels of all the regions in parcellation.
    '''
    if scheme.lower() == "harvox":  # 48 regions
        dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
        atlas_filename = dataset.maps
        labels = dataset.labels[1:]  # trim off "background" label
        masker = NiftiLabelsMasker(labels_img=atlas_filename,
                                   standardize=True,
                                   high_variance_confounds=True,
                                   verbose=1)
    elif scheme.lower() == "yeo":  # 17 regions
        dataset = datasets.fetch_atlas_yeo_2011()
        masker = NiftiLabelsMasker(labels_img=dataset['thick_17'],
                                   standardize=True,
                                   high_variance_confounds=True,
                                   verbose=1)
        labels = [
            "Visual A", "Visual B", "Somatomotor A", "Somatomotor B",
            "Dorsal Attention A", "Dorsal Attention B",
            "Salience/Ventral Attention A", "Salience/Ventral Attention B",
            "Limbic A", "Limbic B", "Control C", "Control A", "Control B",
            "Temporal Parietal", "Default C", "Default A", "Default B"
        ]  # list from valerie-jzr
    elif scheme.lower() == "aal":  # 116 regions
        dataset = datasets.fetch_atlas_aal(version='SPM12')
        labels = dataset['labels']
        masker = NiftiLabelsMasker(labels_img=dataset['maps'],
                                   standardize=True,
                                   high_variance_confounds=True,
                                   verbose=1)
    elif scheme.lower() == "schaefer":
        dataset = datasets.fetch_atlas_schaefer_2018(n_rois=100,
                                                     yeo_networks=17)
        labels = dataset['labels']
        masker = NiftiLabelsMasker(labels_img=dataset['maps'],
                                   standardize=True,
                                   high_variance_confounds=True,
                                   verbose=1)
    else:
        raise ValueError(
            "scheme should be 'harvox', 'yeo', 'aal' or 'schaefer'")
    return masker, labels
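
A hedged usage sketch ('func.nii.gz' is a placeholder path):

masker, labels = make_masker("aal")
time_series = masker.fit_transform("func.nii.gz")  # (n_timepoints, 116)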
Example #9
def get_atlas(name):
    if name == "destrieux_2009":
        atlas = datasets.fetch_atlas_destrieux_2009()
        atlas_filename = atlas['maps']
    elif name == "harvard_oxford":
        atlas = datasets.fetch_atlas_harvard_oxford("cort-maxprob-thr25-2mm")
        atlas_filename = atlas['maps']
    elif name == "aal":
        atlas = datasets.fetch_atlas_aal()
        atlas_filename = atlas['maps']
    elif name == "smith_2009":
        atlas = datasets.fetch_atlas_smith_2009()
        atlas_filename = atlas['rsn70']
    else:
        raise ValueError('Atlas name unknown')
    return atlas_filename
Example #10
def get_atlas(atlas_name, verbose=False):
    """
    Get atlas from nilearn.
    Supported atlases are currently HarvardOxford and AAL.
    """

    if atlas_name == "HarvardOxford":
        dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
    elif atlas_name == "AAL":
        dataset = datasets.fetch_atlas_aal()
    else:
        raise ValueError("atlas_name should be 'HarvardOxford' or 'AAL'")

    atlas_filename = dataset.maps
    labels = dataset.labels

    if verbose:
        print('Atlas ROIs are located in nifti image (4D) at: %s' %
              atlas_filename)  # 4D data

    return atlas_filename, labels
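
A hedged usage sketch:

atlas_filename, labels = get_atlas("AAL", verbose=True)
print(len(labels))  # 116 AAL region names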
Example #11
def fetch_aal_vascular_atlas(target_affine=np.diag((5, 5, 5))):
    """ Fetch the AAL brain atlas given its resolution.

    Parameters
    ----------
    target_affine : np.array, (default=np.diag((5, 5, 5))), affine matrix for
        the produced Nifti images

    Returns
    -------
    mask_full_brain : Nifti Image, full brain mask
    atlas_rois : Nifti Image, ROIs atlas
    """
    aal_dataset = datasets.fetch_atlas_aal()
    atlas_rois_fname = aal_dataset.maps
    atlas_to_return = image.load_img(atlas_rois_fname)

    atlas_to_return = image.resample_img(atlas_to_return,
                                         target_affine,
                                         interpolation='nearest')

    brain_mask = image.binarize_img(atlas_to_return, threshold=0)

    return brain_mask, atlas_to_return
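
A hedged usage sketch at the default 5 mm resolution:

brain_mask, atlas_rois = fetch_aal_vascular_atlas()
print(atlas_rois.shape, brain_mask.shape)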
Example #12
def find_atlas(volume):
    #atlas = fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, resume=True, verbose=1)
    atlas = fetch_atlas_aal(version='SPM12',
                            data_dir=None,
                            url=None,
                            resume=True,
                            verbose=1)
    dictionary = dict(zip([int(i) for i in atlas.indices], atlas.labels))
    A = load_img(atlas.maps).affine
    trA = np.linalg.inv(A)
    atlas_map = load_img(atlas.maps).get_fdata()
    affine = [[-3, 0, 0, 78], [0, 3, 0, -112], [0, 0, 6, -50], [0, 0, 0, 1]]

    def cast(x, y, z, affine, trA):
        coord = coord_transform(x, y, z, affine)
        return np.dot(trA, [coord[0], coord[1], coord[2], 1])

    def find(x, y, z, affine, trA):
        vox_coord = cast(x, y, z, affine, trA)
        label = atlas_map[int(vox_coord[0]),
                          int(vox_coord[1]),
                          int(vox_coord[2])]
        if int(label) == 0:
            return None
        return dictionary[int(label)]

    index = np.where(volume > 0.8)
    for i in range(np.sum(volume > 0.8)):
        x, y, z = index[0][i], index[1][i], index[2][i]
        print(x, y, z, coord_transform(x, y, z, affine),
              find(x, y, z, affine, trA), volume[x, y, z])

    index = np.where(volume < -0.8)
    for i in range(np.sum(volume < -0.8)):
        x, y, z = index[0][i], index[1][i], index[2][i]
        print(x, y, z, coord_transform(x, y, z, affine),
              find(x, y, z, affine, trA), volume[x, y, z])
Example #13
#######
X_ss_beh = StandardScaler().fit_transform(X_ss_beh)

#########
# PCA the beh
#########
pca_beh = PCA(n_components=10).fit(X_ss_beh)
X_ss_beh = pca_beh.transform(X_ss_beh)


###########################################
##### EXTRACTING STRUCTURAL BRAIN DATA ####
###########################################

tmp_nii = nib.load(df_RLS.data_path[0])
atlas = ds.fetch_atlas_aal()
ratlas_nii = resample_img(
  atlas.maps, target_affine=tmp_nii.affine, interpolation='nearest')
# ratlas_nii.to_filename('debug_ratlas.nii.gz')

# extracting data MRI
FS = []
for i_nii, nii_path in enumerate(df_RLS.data_path.values):
  print(nii_path)
  nii = nib.load(nii_path)
  cur_ratlas_nii = resample_img(
    atlas.maps, target_affine=nii.affine, interpolation='nearest')
  nii_fake4D = nib.Nifti1Image(
    nii.get_fdata()[:, :, :, None], affine=nii.affine)  # get_data() is deprecated
  masker = NiftiLabelsMasker(labels_img=ratlas_nii)
  masker.fit()
Example #14
phenotypic = data_store[name].phenotypic
motion_confounds = data_store[name].motion_param
connectome_regress_confounds = None

from utils import data_info

shape, affine, _ = data_info(func_imgs[0])

###########################################################################
# Predefined Atlases
# ------------------
# Fetch the atlas
from nilearn import datasets as nidatasets

# By default the atlas version is 'SPM12'
aal = nidatasets.fetch_atlas_aal()
atlas_img = aal.maps

# Define atlases for LearnBrainRegions object as dict()
atlases = dict()
atlases['aal_spm12'] = atlas_img
###########################################################################
# Masker
# ------
# Masking the data

from nilearn import datasets

# Fetch grey matter mask from nilearn shipped with ICBM templates
gm_mask = datasets.fetch_icbm152_brain_gm_mask(threshold=0.2)
Example #15
def get_labelled_atlas(query, data_dir=None, return_labels=True):
    """Parses input query to determine which atlas to fetch and what version
    of the atlas to use (if applicable).

    Parameters
    ----------
    query : str
        Input string in the following format:
        nilearn:{atlas_name}:{atlas_parameters}. Valid values for
        `atlas_name` are: 'destrieux', 'yeo', 'aal', 'basc', 'talairach',
        and 'schaefer'. `atlas_parameters` is not available for the
        `destrieux` atlas.
    data_dir : str, optional
        Directory in which to save atlas data. By default None, which creates
        a ~/nilearn_data/ directory as per nilearn.
    return_labels : bool, optional
        Whether to return atlas labels. Default is True. Not available for the
        'basc' atlas.

    Returns
    -------
    str, list or None
        The atlas image and the accompanying labels (if provided)

    Raises
    ------
    ValueError
        Raised when the query is not formatted correctly or when no match
        is found.
    """

    # extract parameters
    params = query.split(':')
    if len(params) == 3:
        _, atlas_name, sub_param = params
    elif len(params) == 2:
        _, atlas_name = params
        sub_param = None
    else:
        raise ValueError('Incorrect atlas query string provided')

    # get atlas
    if atlas_name == 'destrieux':
        atlas = fetch_atlas_destrieux_2009(lateralized=True, data_dir=data_dir)
        img = atlas['maps']
        labels = atlas['labels']
    elif atlas_name == 'yeo':
        atlas = fetch_atlas_yeo_2011(data_dir=data_dir)
        img = atlas[sub_param]
        if '17' in sub_param:
            labels = pd.read_csv(atlas['colors_17'],
                                 sep=r'\s+')['NONE'].tolist()
        else:
            # assumes the 7-network color file parses the same way
            labels = pd.read_csv(atlas['colors_7'],
                                 sep=r'\s+')['NONE'].tolist()
    elif atlas_name == 'aal':
        version = 'SPM12' if sub_param is None else sub_param
        atlas = fetch_atlas_aal(version=version, data_dir=data_dir)
        img = atlas['maps']
        labels = atlas['labels']
    elif atlas_name == 'basc':

        version, scale = sub_param.split('-')
        atlas = fetch_atlas_basc_multiscale_2015(version=version,
                                                 data_dir=data_dir)
        img = atlas['scale{}'.format(scale.zfill(3))]
        labels = None
    elif atlas_name == 'talairach':
        atlas = fetch_atlas_talairach(level_name=sub_param, data_dir=data_dir)
        img = atlas['maps']
        labels = atlas['labels']
    elif atlas_name == 'schaefer':
        n_rois, networks, resolution = sub_param.split('-')
        # corrected version of schaefer labels until fixed in nilearn
        correct_url = ('https://raw.githubusercontent.com/ThomasYeoLab/CBIG/'
                       'v0.14.3-Update_Yeo2011_Schaefer2018_labelname/'
                       'stable_projects/brain_parcellation/'
                       'Schaefer2018_LocalGlobal/Parcellations/MNI/')
        atlas = fetch_atlas_schaefer_2018(n_rois=int(n_rois),
                                          yeo_networks=int(networks),
                                          resolution_mm=int(resolution),
                                          data_dir=data_dir,
                                          base_url=correct_url)
        img = atlas['maps']
        labels = atlas['labels']
    else:
        raise ValueError('No atlas detected. Check query string')

    if not return_labels:
        labels = None
    elif labels is not None:
        # labels may come back as a list or an array; normalize to list of str
        labels = [str(label) for label in labels]

    return img, labels
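
A hedged usage sketch of the query interface:

img, labels = get_labelled_atlas('nilearn:aal:SPM12')
print(len(labels))  # 116 region names for the AAL SPM12 atlas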
Example #16
def get_atlas_rois(atlas, roi_idx, hemisphere, res=None, path=None):
    """
    Extract ROIs from a given atlas.

    Parameters
    ----------
    atlas : str
        Atlas dataset to be downloaded through nilearn's dataset_fetch_atlas functionality.
    roi_idx: list
        List of int of the ROI(s) you want to extract from the atlas. If not sure, use get_atlas_info.
    hemisphere: list
        List of str, that is hemispheres of the ROI(s) you want to extract. Can be ['left'], ['right'] or ['left', 'right'].
    res: str
        Specific version of atlas to be downloaded. Only necessary for Harvard-Oxford and Talairach.
        Please check nilearns respective documentation at
        https://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_harvard_oxford.html or
        https://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_talairach.html
    path: str
        Path to where the extracted ROI(s) will be saved to. If None, ROI(s) will be saved in the current
        working directory.

    Returns
    -------
    list_rois: list
        A list of the extracted ROIs.

    Examples
    --------
    >>> get_atlas_rois('aal', [1, 2, 3], ['left', 'right'], path='/home/urial/Desktop')
    list_rois
    """

    if atlas == 'aal':
        atl_ds = datasets.fetch_atlas_aal()

    elif atlas == 'harvard_oxford':
        if res is None:
            raise ValueError(
                'Please provide the specific version of the Harvard-Oxford '
                'atlas you would like to use.')
        atl_ds = datasets.fetch_atlas_harvard_oxford(res)

    elif atlas == 'destrieux':
        atl_ds = datasets.fetch_atlas_destrieux_2009()

    elif atlas == 'msdl':
        atl_ds = datasets.fetch_atlas_msdl()

    elif atlas == 'talairach':
        if res is None:
            raise ValueError(
                'Please provide the level of the Talairach atlas you would '
                'like to use.')
        atl_ds = datasets.fetch_atlas_talairach(level_name=res)

    elif atlas == 'pauli_2017':
        atl_ds = datasets.fetch_atlas_pauli_2017()

    if roi_idx is None:
        raise ValueError(
            'Please provide the indices of the ROIs you want to extract.')
    elif hemisphere is None:
        raise ValueError(
            'Please provide the hemisphere(s) from which you want to '
            'extract ROIs.')

    for label in roi_idx:
        for hemi in hemisphere:
            roi_ex = Node(PickAtlas(), name='roi_ex')
            roi_ex.inputs.atlas = atl_ds.maps
            roi_ex.inputs.labels = label
            roi_ex.inputs.hemi = hemi
            if path is None:
                roi_ex.inputs.output_file = '%s_%s_%s.nii.gz' % (
                    atlas, str(label), hemi)
                roi_ex.run()
                list_rois = glob('%s_*.nii.gz' % atlas)
            elif path:
                roi_ex.inputs.output_file = opj(
                    path, '%s_%s_%s.nii.gz' % (atlas, str(label), hemi))
                roi_ex.run()
                list_rois = glob(opj(path, '%s_*.nii.gz' % atlas))

    print('The following ROIs were extracted: ')
    print('\n'.join(map(str, list_rois)))

    return list_rois
Example #17
def get_atlas_info(atlas, res=None):
    """
    Gather all information from a specified atlas, including the path to the atlas maps, as well as labels
    and their indexes.

    Parameters
    ----------
    atlas : str
        Atlas dataset to be downloaded through nilearn's dataset_fetch_atlas functionality.
    res: str
        Specific version of atlas to be downloaded. Only necessary for Harvard-Oxford and Talairach.
        Please check nilearns respective documentation at
        https://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_harvard_oxford.html or
        https://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_talairach.html

    Returns
    -------
    atlas_info_df : pandas dataframe
        A pandas dataframe containing information about the ROIs and their indexes included in a given atlas.
    atl_ds.maps : str
        Path to the atlas maps.

    Examples
    --------
    >>> get_atlas_info('aal')
    atlas_info_df
    atl_ds.maps
    """

    if atlas == 'aal':
        atl_ds = datasets.fetch_atlas_aal()

    elif atlas == 'harvard_oxford':
        if res is None:
            raise ValueError(
                'Please provide the specific version of the Harvard-Oxford '
                'atlas you would like to use.')
        atl_ds = datasets.fetch_atlas_harvard_oxford(res)

    elif atlas == 'destrieux':
        atl_ds = datasets.fetch_atlas_destrieux_2009()

    elif atlas == 'msdl':
        atl_ds = datasets.fetch_atlas_msdl()

    elif atlas == 'talairach':
        if res is None:
            raise ValueError(
                'Please provide the level of the Talairach atlas you would '
                'like to use.')
        atl_ds = datasets.fetch_atlas_talairach(level_name=res)

    elif atlas == 'pauli_2017':
        atl_ds = datasets.fetch_atlas_pauli_2017()

    index = []
    labels = []

    for ind, label in enumerate(atl_ds.labels):
        index.append(ind)
        if atlas == 'destrieux':
            labels.append(label[1])
        else:
            labels.append(label)

    atlas_info_df = pd.DataFrame({'index': index, 'label': labels})

    return atlas_info_df, atl_ds.maps
Example #18
    "Caudate_L", "Temporal_Sup_R", "Temporal_Pole_Mid_L"
]

time_series_ = numpy.zeros((len(time_series), len(brain_roi_labels_)))
atlas_coords_ = []

i = 0
for x in range(0, len(brain_roi_labels)):
    if brain_roi_labels[x] in brain_roi_labels_:
        time_series_[:, i] = time_series[:, x]
        i += 1
        atlas_coords_.append(atlas_coords[x])

# Download atlas from internet
# Retrieve the atlas and the data
atlas = datasets.fetch_atlas_aal()
# Loading atlas image stored in 'maps'
atlas_filename = atlas['maps']
# Loading atlas data stored in 'labels'
labels = atlas['labels']

labels = brain_roi_labels_  # restrict labels to the selected ROI subset

############################################################################
# Build and display a correlation matrix
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_])[0]

plt.figure(figsize=(10, 10))
# Mask out the major diagonal
numpy.fill_diagonal(correlation_matrix, 0)
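
A hedged continuation sketch: nilearn's plot_matrix is the usual way to display the matrix computed above (the call assumes the variables defined in the snippet):

from nilearn import plotting

plotting.plot_matrix(correlation_matrix, labels=labels, colorbar=True,
                     vmax=0.8, vmin=-0.8)
plotting.show()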
Example #19
def make_correlation_matrix(path_to_fmriprep_data,
                            path_to_save_connectivity_matrices,
                            subject_name=False,
                            path_to_save_ts=False,
                            atlas='aal'):
    """
        Process the fmriprep preprocessed functional MRI time-series into 2D correlation matrix as DataFrame using Nilearn lib. 
        Takes `fmriprep/preproc` file as input, frequently with suffix "MNI152NLin2009cAsym_preproc.nii.gz".
        Saves in dedicated folder `path_to_save_connectivity_matrices`.
        Atlas: 'aal' or 'cc200'
    """
    import os

    import pandas as pd
    import numpy as np

    import nilearn
    from nilearn import datasets
    from nilearn.image import concat_imgs
    from nilearn.input_data import NiftiLabelsMasker
    from nilearn.image import high_variance_confounds
    from nilearn.connectome import ConnectivityMeasure

    tr = tr_extractor(path_to_fmriprep_data)
    if not subject_name:
        subject_name = path_to_fmriprep_data.split('/')[-1][4:11]

    if atlas == 'aal':
        dataset = datasets.fetch_atlas_aal(version='SPM12',
                                           data_dir='./datadir/',
                                           url=None,
                                           resume=True,
                                           verbose=0)
        atlas_filename = dataset.maps
        labels = dataset.labels
    elif atlas == 'cc200':
        dataset = datasets.fetch_atlas_craddock_2012(data_dir='./datadir/',
                                                     url=None,
                                                     resume=True,
                                                     verbose=0)
        atlas_filename = './datadir/craddock_2012/cc200_roi_atlas.nii.gz'
        labels = list(
            pd.read_csv(
                '../data_preprocessing/datadir/craddock_2012/CC200_ROI_labels.csv'
            )['ROI number'])
    else:
        raise ValueError('Atlas name is not recognized.')

    correlation_measure = ConnectivityMeasure(kind='correlation')

    img = concat_imgs(path_to_fmriprep_data, auto_resample=True, verbose=0)
    atlas = nilearn.image.resample_to_img(atlas_filename,
                                          img,
                                          interpolation='nearest',
                                          copy=True,
                                          order='F',
                                          clip=False)
    # filtering
    masker = NiftiLabelsMasker(labels_img=atlas,
                               standardize=True,
                               detrend=True,
                               low_pass=0.08,
                               high_pass=0.009,
                               t_r=tr,
                               memory='nilearn_cache',
                               memory_level=1,
                               verbose=0)

    confounds = high_variance_confounds(img, 1)
    time_series = masker.fit_transform(img, confounds)

    # Saves time series, for each ROI confound
    if path_to_save_ts:
        os.makedirs(path_to_save_ts, exist_ok=True)
        np.save(path_to_save_ts + '/' + subject_name, time_series)

    correlation_matrix = correlation_measure.fit_transform([time_series])[0]
    np.fill_diagonal(correlation_matrix, 1)
    df = pd.DataFrame(correlation_matrix)

    # Saves connectivity matrix
    os.makedirs(path_to_save_connectivity_matrices, exist_ok=True)
    output_path = os.path.join(path_to_save_connectivity_matrices,
                               subject_name)
    df.to_csv(output_path + '.csv', sep=',')


#   print ('TR: ', tr, ' subject:', subject_name)
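
A hedged usage sketch (the input path is a placeholder):

make_correlation_matrix(
    'sub-01_task-rest_space-MNI152NLin2009cAsym_preproc.nii.gz',
    './connectivity_matrices/', path_to_save_ts='./time_series/',
    atlas='aal')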
Example #20
def main(
    workdir,
    outdir,
    atlas,
    kernel,
    sparsity,
    affinity,
    approach,
    gradients,
    subcort,
    neurosynth,
    neurosynth_file,
    sleuth_file,
    nimare_dataset,
    roi_mask,
    term,
    topic,
):
    workdir = op.join(workdir, "tmp")
    if op.isdir(workdir):
        shutil.rmtree(workdir)
    os.makedirs(workdir)

    atlas_name = "atlas-{0}".format(atlas)
    kernel_name = "kernel-{0}".format(kernel)
    sparsity_name = "sparsity-{0}".format(sparsity)
    affinity_name = "affinity-{0}".format(affinity)
    approach_name = "approach-{0}".format(approach)
    gradients_name = "gradients-{0}".format(gradients)
    dset = None

    # handle neurosynth dataset, if called
    if neurosynth:
        if neurosynth_file is None:

            ns_data_dir = op.join(workdir, "neurosynth")
            dataset_file = op.join(ns_data_dir, "neurosynth_dataset.pkl.gz")
            # download neurosynth dataset if necessary
            if not op.isfile(dataset_file):
                neurosynth_download(ns_data_dir)

        else:
            dataset_file = neurosynth_file

        dset = Dataset.load(dataset_file)
        dataset_name = "dataset-neurosynth"

    # handle sleuth text file, if called
    if sleuth_file is not None:
        dset = convert_sleuth_to_dataset(sleuth_file, target="mni152_2mm")
        dataset_name = "dataset-{0}".format(op.basename(sleuth_file).split(".")[0])

    if nimare_dataset is not None:
        dset = Dataset.load(nimare_dataset)
        dataset_name = "dataset-{0}".format(op.basename(nimare_dataset).split(".")[0])

    if dset:
        # slice studies, if needed
        if roi_mask is not None:
            roi_ids = dset.get_studies_by_mask(roi_mask)
            print(
                "{}/{} studies report at least one coordinate in the "
                "ROI".format(len(roi_ids), len(dset.ids))
            )
            dset_sel = dset.slice(roi_ids)
            dset = dset_sel
            dataset_name = "dataset-neurosynth_mask-{0}".format(
                op.basename(roi_mask).split(".")[0]
            )

        if term is not None:
            labels = ["Neurosynth_TFIDF__{label}".format(label=label) for label in [term]]
            term_ids = dset.get_studies_by_label(labels=labels, label_threshold=0.1)
            print(
                "{}/{} studies report association "
                "with the term {}".format(len(term_ids), len(dset.ids), term)
            )
            dset_sel = dset.slice(term_ids)
            dset = dset_sel
            # img_inds = np.nonzero(dset.masker.mask_img.get_fdata())  # unused
            # vox_locs = np.unravel_index(img_inds, dset.masker.mask_img.shape)  # unused
            dataset_name = "dataset-neurosynth_term-{0}".format(term)

        if topic is not None:
            # topic[0] is the version; the remaining entries are topic names.
            # Use a distinct loop variable so the `topic` argument is not
            # shadowed inside the comprehension or the loop below.
            topics = [
                "Neurosynth_{version}__{topic}".format(version=topic[0],
                                                       topic=topic_name)
                for topic_name in topic[1:]
            ]
            topics_ids = []
            for topic_name in topics:
                topic_ids = dset.annotations.id[
                    np.where(dset.annotations[topic_name])[0]].tolist()
                topics_ids.extend(topic_ids)
                print(
                    "{}/{} studies report association "
                    "with the term {}".format(len(topic_ids), len(dset.ids),
                                              topic_name)
                )
            topics_ids_unique = np.unique(topics_ids)
            print("{} unique ids".format(len(topics_ids_unique)))
            dset_sel = dset.slice(topics_ids_unique)
            dset = dset_sel
            # img_inds = np.nonzero(dset.masker.mask_img.get_fdata())  # unused
            # vox_locs = np.unravel_index(img_inds, dset.masker.mask_img.shape)  # unused
            dataset_name = "dataset-neurosynth_topic-{0}".format("_".join(topic[1:]))

        if (
            neurosynth
            or (sleuth_file is not None)
            or (nimare_dataset is not None)
        ):
            # set kernel for MA smoothing
            if kernel == "peaks2maps":
                print("Running peak2maps")
                k = Peaks2MapsKernel(resample_to_mask=True)
            elif kernel == "alekernel":
                print("Running alekernel")
                k = ALEKernel(fwhm=15)

            if atlas is not None:
                if atlas == "harvard-oxford":
                    print("Parcellating using the Harvard Oxford Atlas")
                    # atlas_labels = atlas.labels[1:]  # unused
                    atlas_shape = atlas.maps.shape
                    atlas_affine = atlas.maps.affine
                    atlas_data = atlas.maps.get_fdata()
                elif atlas == "aal":
                    print("Parcellating using the AAL Atlas")
                    atlas = datasets.fetch_atlas_aal()
                    # atlas_labels = atlas.labels  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "craddock-2012":
                    print("Parcellating using the Craddock-2012 Atlas")
                    atlas = datasets.fetch_atlas_craddock_2012()
                elif atlas == "destrieux-2009":
                    print("Parcellating using the Destrieux-2009 Atlas")
                    atlas = datasets.fetch_atlas_destrieux_2009(lateralized=True)
                    # atlas_labels = atlas.labels[3:]  # unused
                    atlas_shape = nib.load(atlas.maps).shape
                    atlas_affine = nib.load(atlas.maps).affine
                    atlas_data = nib.load(atlas.maps).get_fdata()
                elif atlas == "msdl":
                    print("Parcellating using the MSDL Atlas")
                    atlas = datasets.fetch_atlas_msdl()
                elif atlas == "surface":
                    print("Generating surface vertices")

                if atlas != "fsaverage5" and atlas != "hcp":
                    imgs = k.transform(dset, return_type="image")

                    masker = NiftiLabelsMasker(
                        labels_img=atlas.maps, standardize=True, memory="nilearn_cache"
                    )
                    time_series = masker.fit_transform(imgs)

                else:
                    # change to array for other approach
                    imgs = k.transform(dset, return_type="image")
                    print(np.shape(imgs))

                    if atlas == "fsaverage5":
                        fsaverage = fetch_surf_fsaverage(mesh="fsaverage5")
                        pial_left = fsaverage.pial_left
                        pial_right = fsaverage.pial_right
                        medial_wall_inds_left = surface.load_surf_data(
                            "./templates/lh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_left))
                        medial_wall_inds_right = surface.load_surf_data(
                            "./templates/rh.Medial_wall.label"
                        )
                        print(np.shape(medial_wall_inds_right))
                        sulc_left = fsaverage.sulc_left
                        sulc_right = fsaverage.sulc_right

                    elif atlas == "hcp":
                        pial_left = "./templates/S1200.L.pial_MSMAll.32k_fs_LR.surf.gii"
                        pial_right = "./templates/S1200.R.pial_MSMAll.32k_fs_LR.surf.gii"
                        medial_wall_inds_left = np.where(
                            nib.load("./templates/hcp.tmp.lh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        medial_wall_inds_right = np.where(
                            nib.load("./templates/hcp.tmp.rh.dscalar.nii").get_fdata()[0] == 0
                        )[0]
                        left_verts = 32492 - len(medial_wall_inds_left)
                        sulc_left = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][0:left_verts]
                        sulc_left = np.insert(
                            sulc_left,
                            np.subtract(
                                medial_wall_inds_left, np.arange(len(medial_wall_inds_left))
                            ),
                            0,
                        )
                        sulc_right = nib.load(
                            "./templates/S1200.sulc_MSMAll.32k_fs_LR.dscalar.nii"
                        ).get_fdata()[0][left_verts:]
                        sulc_right = np.insert(
                            sulc_right,
                            np.subtract(
                                medial_wall_inds_right, np.arange(len(medial_wall_inds_right))
                            ),
                            0,
                        )

                    surf_lh = surface.vol_to_surf(
                        imgs,
                        pial_left,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surf_rh = surface.vol_to_surf(
                        imgs,
                        pial_right,
                        radius=6.0,
                        interpolation="nearest",
                        kind="ball",
                        n_samples=None,
                        mask_img=dset.masker.mask_img,
                    )
                    surfs = np.transpose(np.vstack((surf_lh, surf_rh)))
                    del surf_lh, surf_rh

                    # handle cortex first
                    coords_left = surface.load_surf_data(pial_left)[0]
                    coords_left = np.delete(coords_left, medial_wall_inds_left, axis=0)
                    coords_right = surface.load_surf_data(pial_right)[0]
                    coords_right = np.delete(coords_right, medial_wall_inds_right, axis=0)

                    print("Left Hemipshere Vertices")
                    surface_macms_lh, inds_discard_lh = build_macms(dset, surfs, coords_left)
                    print(np.shape(surface_macms_lh))
                    print(inds_discard_lh)

                    print("Right Hemipshere Vertices")
                    surface_macms_rh, inds_discard_rh = build_macms(dset, surfs, coords_right)
                    print(np.shape(surface_macms_rh))
                    print(len(inds_discard_rh))

                    lh_vertices_total = np.shape(surface_macms_lh)[0]
                    rh_vertices_total = np.shape(surface_macms_rh)[0]
                    time_series = np.transpose(np.vstack((surface_macms_lh, surface_macms_rh)))
                    print(np.shape(time_series))
                    del surface_macms_lh, surface_macms_rh

                    if subcort:
                        subcort_img = nib.load("templates/rois-subcortical_mni152_mask.nii.gz")
                        subcort_vox = np.asarray(np.where(subcort_img.get_fdata()))
                        subcort_mm = vox2mm(subcort_vox.T, subcort_img.affine)

                        print("Subcortical Voxels")
                        subcort_macm, inds_discard_subcort = build_macms(dset, surfs, subcort_mm)

                        num_subcort_vox = np.shape(subcort_macm)[0]
                        print(inds_discard_subcort)

                        time_series = np.hstack((time_series, np.asarray(subcort_macm).T))
                        print(np.shape(time_series))

                time_series = time_series.astype("float32")

                print("calculating correlation matrix")
                correlation = ConnectivityMeasure(kind="correlation")
                time_series = correlation.fit_transform([time_series])[0]
                print(np.shape(time_series))

                if affinity == "cosine":
                    time_series = calculate_affinity(time_series, 10 * sparsity)

            else:
                time_series = np.transpose(k.transform(dset, return_type="array"))

    print("Performing gradient analysis")

    gradients, statistics = embed.compute_diffusion_map(
        time_series, alpha=0.5, return_result=True, overwrite=True
    )
    pickle.dump(statistics, open(op.join(workdir, "statistics.p"), "wb"))

    # if subcortical included in gradient decomposition, remove gradient scores
    if subcort:
        subcort_grads = gradients[np.shape(gradients)[0] - num_subcort_vox :, :]
        subcort_grads = insert(subcort_grads, inds_discard_subcort)
        gradients = gradients[0 : np.shape(gradients)[0] - num_subcort_vox, :]

    # get left hemisphere gradient scores, and insert 0's where medial wall is
    gradients_lh = gradients[0:lh_vertices_total, :]
    if len(inds_discard_lh) > 0:
        gradients_lh = insert(gradients_lh, inds_discard_lh)
    gradients_lh = insert(gradients_lh, medial_wall_inds_left)

    # get right hemisphere gradient scores and insert 0's where medial wall is
    gradients_rh = gradients[-rh_vertices_total:, :]
    if len(inds_discard_rh) > 0:
        gradients_rh = insert(gradients_rh, inds_discard_rh)
    gradients_rh = insert(gradients_rh, medial_wall_inds_right)

    grad_dict = {
        "grads_lh": gradients_lh,
        "grads_rh": gradients_rh,
        "pial_left": pial_left,
        "sulc_left": sulc_left,
        "pial_right": pial_right,
        "sulc_right": sulc_right,
    }
    if subcort:
        grad_dict["subcort_grads"] = subcort_grads
    pickle.dump(grad_dict, open(op.join(workdir, "gradients.p"), "wb"))

    # map the gradient to the parcels
    for i in range(np.shape(gradients)[1]):
        if atlas is not None:
            if atlas == "fsaverage5" or atlas == "hcp":

                plot_surfaces(grad_dict, i, workdir)

                if subcort:
                    tmpimg = masking.unmask(subcort_grads[:, i], subcort_img)
                    nib.save(tmpimg, op.join(workdir, "gradient-{0}.nii.gz".format(i)))
            else:
                tmpimg = np.zeros(atlas_shape)
                for j, n in enumerate(np.unique(atlas_data)[1:]):
                    inds = atlas_data == n
                    tmpimg[inds] = gradients[j, i]
                # save once per gradient, after all parcels are filled
                nib.save(
                    nib.Nifti1Image(tmpimg, atlas_affine),
                    op.join(workdir, "gradient-{0}.nii.gz".format(i)),
                )
        else:
            tmpimg = np.zeros(np.prod(dset.masker.mask_img.shape))
            inds = np.ravel_multi_index(
                np.nonzero(dset.masker.mask_img.get_fdata()), dset.masker.mask_img.shape
            )
            tmpimg[inds] = gradients[:, i]
            nib.save(
                nib.Nifti1Image(
                    np.reshape(tmpimg, dset.masker.mask_img.shape), dset.masker.mask_img.affine
                ),
                op.join(workdir, "gradient-{0}.nii.gz".format(i)),
            )

            os.system(
                "python3 /Users/miriedel/Desktop/GitHub/surflay/make_figures.py "
                "-f {grad_image} --colormap jet".format(
                    grad_image=op.join(workdir, "gradient-{0}.nii.gz".format(i))
                )
            )

    output_dir = op.join(
        outdir,
        (
            f"{dataset_name}_{atlas_name}_{kernel_name}_{sparsity_name}_{gradients_name}_"
            f"{affinity_name}_{approach_name}"
        )
    )

    shutil.copytree(workdir, output_dir)

    shutil.rmtree(workdir)
Example #21
# python scriptdl.py --subject=992774 --out_dir="."

import numpy as np
import pandas as pd
from nilearn import image as nlimg
from nilearn import datasets as nldatasets
import nilearn.plotting as nlplt
from sys import argv
import os

if len(argv) != 3:
    print('Enter Subject ID and run number')
    os._exit(0)

from nilearn import datasets
dataset = datasets.fetch_atlas_aal()
# dataset = datasets.fetch_atlas_harvard_oxford('sub-maxprob-thr50-1mm')
atlas_filename = dataset.maps
labels = dataset.labels

print(len(labels))

from nilearn.input_data import NiftiLabelsMasker
masker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True)

suffix=""
outputsuffix=""

if argv[2] == '1':
    suffix = "/rfMRI_REST1_LR/rfMRI_REST1_LR_hp2000_clean.nii.gz"
    outputsuffix = "/rfMRI_REST1_LR/AAL"
Example #22
def fit_predict(classifier, X_train, X_test, y_train, y_test):
    print("Classifying")
    classifier.fit(X_train, y_train)
    return [y_test, classifier.predict(X_test).tolist()]
saved_pca = '/projects/delavega/clustering/dv_v6_reference_pca.pkl'
reference = pickle.load(open(saved_pca, 'rb'))  # pickles must be read in binary mode
classifier = GaussianNB()
cver = KFold(dataset.feature_table.data.shape[0], n_folds = 4)

name = 'aal'
ref = nib.load('masks/HO_ROIs/JLC.nii.gz')

from nilearn.image import resample_img
from nilearn.datasets import fetch_atlas_aal

# the AAL bunch stores its label image under `maps`; resample with
# nearest-neighbour interpolation to keep the integer labels intact
match_region = resample_img(nib.load(fetch_atlas_aal().maps), ref.affine,
                            ref.shape, interpolation='nearest')
n_regions = np.unique(match_region.get_fdata()).nonzero()[0].shape[0]

print(n_regions)

distances = Parallel(n_jobs=1)(delayed(cv_distances)(
    dataset, reference, binarize_nib(match_region), train_index) for train_index, _ in cver)

print "Getting ready to cluster"
ns_matched_regions = Parallel(n_jobs=-1)(delayed(cluster)(dataset, d, r, n_regions) for d, r in distances)

print "Classifying"
all_predictions = []
### For each fold, predict activation with topics using corresponding clustering
for fold_i, (train_index, test_index) in enumerate(cver):
    ys = (dataset.feature_table.data.values > 0.001).astype('int').T