Example #1
 def overlayMask(self, image, mask):
     plotting.plot_roi(mask,
                       image,
                       cut_coords=[0, 0, 0],
                       annotate=False,
                       draw_cross=False)
     plotting.show()
Example #2
def plot_region_overlay(roi_img, func_img, fname, cmap):
    """Overlay mask/atlas on mean functional image.

    Parameters
    ----------
    roi_img : str
        File name of atlas/mask image
    func_img : str
        File name of 4D functional image that was used in extraction.
    fname : str
        Output file name prefix; '_mask_overlay.png' is appended before saving.
    cmap : matplotlib.colors.LinearSegmentedColormap
        Colormap to use.

    Returns
    -------
    str
        Relative path of the saved overlay image (figure directory + file name),
        suitable for embedding in HTML.
    """
    # compute mean of functional image
    bg_img = mean_img(func_img)

    n_cuts = 7
    fig, axes = plt.subplots(3, 1, figsize=(15, 6))

    g = plot_roi(roi_img,
                 bg_img=bg_img,
                 display_mode='z',
                 axes=axes[0],
                 alpha=.66,
                 cut_coords=np.linspace(-50, 60, num=n_cuts),
                 cmap=cmap,
                 black_bg=True,
                 annotate=False)
    g.annotate(size=8)
    g = plot_roi(roi_img,
                 bg_img=bg_img,
                 display_mode='x',
                 axes=axes[1],
                 alpha=.66,
                 cut_coords=np.linspace(-60, 60, num=n_cuts),
                 cmap=cmap,
                 black_bg=True,
                 annotate=False)
    g.annotate(size=8)
    g = plot_roi(roi_img,
                 bg_img=bg_img,
                 display_mode='y',
                 axes=axes[2],
                 alpha=.66,
                 cut_coords=np.linspace(-90, 60, num=n_cuts),
                 cmap=cmap,
                 black_bg=True,
                 annotate=False)
    g.annotate(size=8)

    fname += '_mask_overlay.png'
    fig.savefig(fname, bbox_inches='tight')
    plt.close()
    # just get figure directory + file for html
    fig_path = os.path.basename(os.path.dirname(fname))
    return os.path.join(fig_path, os.path.basename(fname))
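A hypothetical call to the helper above; the file paths, output prefix and colormap are placeholders, not part of the original example (the 'figures' directory is assumed to exist, and the function relies on module-level imports of numpy, matplotlib, os and nilearn):

# Hypothetical usage: overlay an atlas on the mean of a 4D functional run.
import matplotlib.pyplot as plt

rel_path = plot_region_overlay('atlas.nii.gz',                  # roi_img
                               'sub-01_task-rest_bold.nii.gz',  # func_img
                               'figures/sub-01',                # fname prefix
                               plt.cm.nipy_spectral)            # cmap
print(rel_path)  # e.g. 'figures/sub-01_mask_overlay.png'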
Example #3
def genAndViewPlanarROI(planeCoord,dimension,cutCoord,cutDim,keepPortion,xCoord,yCoord,zCoord):
    
    from nilearn import plotting
    import nibabel as nib
    import numpy as np
    import pandas as pd
    
    # convertedBoundCoords, naiveDimDictionary, t1img and WMA_pyFuncs are
    # assumed to be defined earlier in the enclosing notebook.
    # refuse to plot if coord is outside of dim bound
    if np.any([np.min(convertedBoundCoords[:, naiveDimDictionary[dimension]]) > planeCoord,
               np.max(convertedBoundCoords[:, naiveDimDictionary[dimension]]) < planeCoord,
               np.min(convertedBoundCoords[:, naiveDimDictionary[cutDim]]) > cutCoord,
               np.max(convertedBoundCoords[:, naiveDimDictionary[cutDim]]) < cutCoord]):
        print('requested coordinate exceeds selected dimension\'s bounds')
    else:    
        segPlane=WMA_pyFuncs.makePlanarROI(t1img, planeCoord, dimension)
        
        #first though we need to generate a planar ROI to "cut" with
        knife_roi=WMA_pyFuncs.makePlanarROI(t1img, cutCoord , cutDim)

        cutPlaneTable=WMA_pyFuncs.findMaxMinPlaneBound(knife_roi)
        
        #refuse to plot if improper keep dimension is requested
        if np.logical_not(naiveDimDictionary[cutDim]==cutPlaneTable['dimIndex'].loc[cutPlaneTable['boundLabel']==keepPortion].to_numpy(int)):
            print('requested keep portion not consistent with cut plane')
        elif naiveDimDictionary[cutDim]==naiveDimDictionary[dimension]:
            print('initial plane and cut plane are parallel, no meaningful intersection will occur.')
        else:

            #now cut the roi_img ROI
            cut_roi=WMA_pyFuncs.sliceROIwithPlane(segPlane,knife_roi,keepPortion)
    
            get_ipython().run_line_magic('matplotlib', 'inline')
            plotting.plot_roi(roi_img=cut_roi, bg_img=t1img, cut_coords=[xCoord,yCoord,zCoord])
Example #4
def create_intersect(subject_list):

    fig = plt.figure(figsize=(20, 15))
    mask_list = []
    i = 1

    for subject in tqdm(subject_list):

        if subject < 10:
            subject_str = '0' + str(subject)
        else:
            subject_str = str(subject)

        filename_mask = "/home/brain/datasets/SherlockMerlin_ds001110/sub-" + subject_str + "/func/sub-" + subject_str + "_task-SherlockMovie_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz"
        filename_irm = "/home/brain/datasets/SherlockMerlin_ds001110/sub-" + subject_str + "/func/sub-" + subject_str + "_task-SherlockMovie_bold_space-MNI152NLin2009cAsym_preproc.nii.gz"

        meanepi = (index_img(filename_irm, 22))
        mask_list.append(filename_mask)
        ax = plt.subplot(3, 6, i)
        plotting.plot_roi(filename_mask, bg_img=meanepi, axes=ax, figure=fig)
        i += 1

    mask_inter = masking.intersect_masks(mask_list,
                                         threshold=1,
                                         connected=False)
    ax = plt.subplot(3, 6, i)
    plotting.plot_roi(mask_inter, bg_img=meanepi, axes=ax, figure=fig)
    saving_path = os.path.join(savingpath, 'mask_inter.nii.gz')
    mask_inter.to_filename(saving_path)
    return mask_inter
Example #5
def find_roi(img, roi):
    from nilearn.plotting import plot_roi
    from copy import deepcopy
    img = deepcopy(img)
    data = img.get_data()
    data[data != roi] = 0
    plot_roi(img)
Example #6
def plot_results(scores, data_path, Y_dim):

    #plot the result provided by the optimized model
    unique, counts = np.unique(scores, return_counts=True)
    maxi = max(unique)
    occurence = max(counts)
    print('\nscores max: ', maxi)
    print('scores equals to 0: ', occurence)
    print('scores above 0: ', Y_dim[1] - occurence, '\n')

    #plotting results
    filename_mask = "/home/brain/datasets/SherlockMerlin_ds001110/sub-12/func/sub-12_task-SherlockMovie_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz"
    masker = NiftiMasker(mask_img=filename_mask,
                         detrend=True,
                         standardize=True)
    masker.fit()
    meanepi = os.path.join(data_path, 'meanepi.nii.gz')
    score_img = masker.inverse_transform(scores)
    plotting.plot_roi(score_img,
                      bg_img=meanepi,
                      title="Results of the clustering",
                      cut_coords=5,
                      display_mode='z',
                      aspect=1.25)
    plt.show()
    plt.close()
Example #7
def main_plot(label, meanepi, nbc, outputpath, result_dict, subject):

    #plotting the nifti image
    score_map_img = masker.inverse_transform(label)
    plt.figure()
    plotting.plot_roi(score_map_img,
                      bg_img=meanepi,
                      title="Results of the clustering",
                      cut_coords=5,
                      display_mode='z',
                      aspect=1.25)
    img_name = 'Clustering_results.png'
    img_path = os.path.join(outputpath, img_name)
    plt.savefig(img_path)
    print('Image saved successfully')
    plt.close()

    #prepare the data for the lineplot via sns
    df = pd.DataFrame(result_dict[subject])
    df['nb_cluster'] = cluster_list
    df = df.set_index('nb_cluster')
    #plotting the line plot via sns
    sns.lineplot(data=df)
    save_name = 'lineplot_sub' + str(subject) + '.png'
    save_path = os.path.join(outputpath, save_name)
    plt.savefig(save_path)
    print('Image saved successfully')
    plt.close()
Example #8
def extract_cubic_roi(vol, aseg, roi, visualize=False):
    # Extract ROI ('roi' is the integer label of the target region in aseg)
    roi_mask = (aseg.get_data() == roi).astype(np.int)
    if np.sum(roi_mask) > 9:
        # Only proceed if the ROI contains more than 9 voxels
        roi_vol = vol.get_data() * roi_mask

        # Reduce dimensionality to ROI
        ix, iy, iz = np.where(roi_mask)
        sx, sy, sz = max(ix) - min(ix), max(iy) - min(iy), max(iz) - min(iz)
        d = max([sx, sy, sz])

        roi_sq = np.zeros([d, d, d])

        # Build a square volume from the ROI and calculate its volume in voxels
        roi_sq[:sx, :sy, :sz] = roi_vol[min(ix):max(ix),
                                        min(iy):max(iy),
                                        min(iz):max(iz)]
        voxels = np.sum(roi_mask)

        # Plot ROI if necessary
        if visualize:
            roi_nii = nb.Nifti1Image(roi_mask, affine=aseg.get_affine())
            plotting.plot_roi(roi_nii, bg_img=vol, black_bg=False)

            roi_sq_nii = nb.Nifti1Image(roi_sq, affine=aseg.get_affine())
            plotting.plot_anat(roi_sq_nii)
            plotting.show()
    else:
        print('[  WARNING  ] ROI not found, returning an empty volume')
        roi_sq = np.zeros([32, 32, 32])
        voxels = 0

    return roi_sq, voxels
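A hypothetical call for the function above, with the `roi` argument added in the signature fix; the file names are placeholders (label 17 is the FreeSurfer aseg code for the left hippocampus), and the module-level `np`, `nb` and `plotting` imports the function relies on are assumed to be present:

# Hypothetical usage: load a volume and its FreeSurfer segmentation, then
# extract a cubic patch around the left hippocampus (aseg label 17).
vol = nb.load('sub-01_T1w.nii.gz')      # placeholder path
aseg = nb.load('sub-01_aseg.nii.gz')    # placeholder path
roi_cube, n_voxels = extract_cubic_roi(vol, aseg, roi=17, visualize=True)
print('ROI volume in voxels:', n_voxels)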
Example #9
def _mask_to_svg(mask_img, bg_img):
    """Plot cuts of an mask image and creates SVG code of it.

    Parameters
    ----------
    mask_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The mask image; it could be binary mask or an atlas or ROIs
        with integer values.

    bg_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The background image that the mask will be plotted on top of.
        To turn off background image, just pass "bg_img=None".

    Returns
    -------
    mask_plot_svg : str
        SVG Image Data URL for the mask plot.

    """
    if mask_img:
        plot_roi(roi_img=mask_img,
                 bg_img=bg_img,
                 display_mode='z',
                 cmap='Set1',
                 )
        mask_plot_svg = _plot_to_svg(plt.gcf())
        # prevents sphinx-gallery & jupyter from scraping & inserting plots
        plt.close()
    else:
        mask_plot_svg = None  # HTML image tag's alt attribute is used.
    return mask_plot_svg
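The `_plot_to_svg` helper used above is not shown in this example. A minimal sketch of what such a helper might do (serialize the current figure to SVG markup; per the docstring, the real helper returns it as a data URL) could look like this:

import io

def _plot_to_svg_sketch(fig):
    """Hypothetical stand-in for _plot_to_svg: return a figure as SVG markup."""
    buf = io.BytesIO()
    fig.savefig(buf, format='svg', bbox_inches='tight')
    return buf.getvalue().decode('utf-8')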
Example #10
def load_show_experimental_scans():
    from nilearn import plotting
    from src.data import experiment

    layout = experiment.get_food_temptation_data()

    # subject data is S_ID, hi-res, exp-data, events
    subject_data = experiment.get_all_subject_data(layout)

    # take first subject, exp-data
    scans = subject_data[0][2]

    scans.orthoview()

    _show_variance(scans)

    # pick a single scan to show
    single_scan = scans.slicer[:, :, :, 0]

    _show_raw(single_scan)

    plotting.plot_stat_map(
        single_scan,
        # threshold=30,
        # display_mode="z",
        # cut_coords=1,
        # colorbar=False,
    )
    plotting.plot_img(single_scan)
    plotting.plot_epi(single_scan)
    plotting.plot_roi(single_scan)
    plotting.plot_anat(single_scan)
Example #11
def create_intersect_mask(show_list, meanepi, savingpath_percentile, layer,
                          mask_dict, intersection):

    #We create the mask of intersection for all subject for each number of subvectors & each layer
    for number in show_list:
        mask_list = mask_dict[number]
        #in the doc, threshold = 1 will make the function compute the intersection of masks
        #threshold = 0 will compute the union of all mask in mask list
        if intersection == True:
            mask_inter = masking.intersect_masks(mask_list,
                                                 threshold=1,
                                                 connected=False)
        else:
            mask_inter = masking.intersect_masks(mask_list,
                                                 threshold=0,
                                                 connected=False)

        #We save the mask
        if intersection == True:
            mask_path = os.path.join(
                savingpath_percentile, 'mask_intersection_layer_' +
                str(layer) + '_number_' + str(number) + '.nii.gz')
        else:  # compute the union of the masks
            mask_path = os.path.join(
                savingpath_percentile, 'mask_union_layer_' + str(layer) +
                'number' + str(number) + '.nii.gz')
        mask_inter.to_filename(mask_path)

        #we plot the results
        plotting.plot_roi(mask_inter, title='roi', bg_img=meanepi)
Example #12
def plot_roi(data, file_name):
    dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
    atlas_filename = dataset.maps
    print('Atlas ROIs are located at: %s' % atlas_filename)
    plotting.plot_roi(atlas_filename, title="harvard oxford altas")
    plotting.show()
    return
Example #13
def plot_top_n_nodes(degrees,
                     top_n,
                     parcellation_img,
                     bg_img,
                     n_slices=5,
                     display_modes=['x', 'y', 'z']):
    n_nodes = len(degrees)
    top_n_nodes = np.argsort(degrees)[-top_n:]

    new_data = np.zeros(parcellation_img.shape)
    parcellation_data = parcellation_img.get_fdata()
    for node in top_n_nodes:
        # In a parcellation image, parcels = nodes are represented as neighbouring
        # voxels with values corresponding to the parcel/node number
        new_data[parcellation_data == node] = node
    new_img = nib.Nifti1Image(new_data, parcellation_img.affine,
                              parcellation_img.header)

    figures_dict = {}
    for display_mode in display_modes:
        plotting.plot_roi(new_img,
                          cut_coords=n_slices,
                          display_mode=display_mode,
                          bg_img=bg_img,
                          black_bg=False)
        figures_dict[display_mode] = plt.gcf()

    return figures_dict
Example #14
def plot_brainrsa_rlts(img, threshold=None):

    if threshold is not None:

        imgarray = nib.load(img).get_data()
        affine = get_affine(img)

        sx = np.shape(imgarray)[0]
        sy = np.shape(imgarray)[1]
        sz = np.shape(imgarray)[2]

        imgarray = correct_by_threshold(imgarray, sx, sy, sz, threshold)

        img = nib.Nifti1Image(imgarray, affine)

    roi_bg = "template/ch2.nii.gz"
    print(nib.load(roi_bg))

    plotting.plot_roi(roi_img=img,
                      bg_img=roi_bg,
                      threshold=0,
                      vmin=0.1,
                      vmax=1,
                      resampling_interpolation="continuous")

    plt.show()
Example #15
def find_roi(img, roi):
    from nilearn import plotting as niplt
    from copy import deepcopy
    img = deepcopy(img)
    data = img.get_data()
    data[np.round(data) != roi] = 0
    niplt.plot_roi(img)
    def cube_mask(self,
                  x1=50,
                  x2=120,
                  y1=50,
                  y2=150,
                  z1=50,
                  z2=100,
                  apply=False,
                  plot=True):
        """Method to cut out a cube from the center of the brain.

        :param x1: {int} start range x dimension
        :param x2: {int} stop range x dimension
        :param y1: {int} start range y dimension
        :param y2: {int} stop range y dimension
        :param z1: {int} start range z dimension
        :param z2: {int} stop range z dimension
        :param apply: {bool} if ``True``, the mask is directly applied to cut down the brains in :py:attr:`data` and
            reshape them into vectors
        :param plot: {bool} whether the generated mask should be visualized in a plot. A random image is shown.
        :return: Mask in the attribute :py:attr:`mask` and cut out cube in :py:attr:`data` if ``apply=True``.
        """
        print("Computing cube mask...")
        self.mask = np.zeros(self.img.shape[:3])
        self.mask[x1:x2, y1:y2, z1:z2] = 1
        self.mask = Nifti1Image(self.mask, self.img.affine)

        if apply:
            self.apply_mask()

        if plot:
            plot_roi(
                self.mask,
                index_img(self.img, np.random.randint(0, self.img.shape[-1])))
        print("\tMask computed!")
    def map_brain(self, average_proportions, selection_method):
                
        dataset = datasets.fetch_atlas_aal()
        aal_img = nib.load(dataset["maps"])
        aal_data = aal_img.get_data()
        
        new_data = np.zeros(aal_img.shape, dtype='>i2')
        for x in range(len(aal_data)):
            print(x)
            for y in range(len(aal_data[x])):
                for z in range(len(aal_data[x][y])):

                    if str(aal_data[x][y][z]) in dataset.indices:
        
                        # get the index in indices and look which area it is in labels
                        roi = dataset.labels[dataset.indices.index(str(aal_data[x][y][z]))]
                        if roi in average_proportions.keys():
                            new_value = average_proportions[roi] * 100
                        else:
                            new_value = 0
                        new_data[x][y][z] = new_value
                
            
        aal_new = nib.Nifti1Image(new_data, aal_img.affine)
        hot = cm.get_cmap('hot_r')
        vmin = 0
        vmax = 55
        
        plotting.plot_roi(aal_new, cmap=hot, colorbar=True, vmin=vmin, vmax=vmax,
                          output_file=self.save_dir + self.analysis_method +
                          "_brain_map_" + selection_method + "_" + self.paradigm + ".png")
        plotting.show()
Example #18
    def test_002(self):
        from roistats import collect
        from nilearn import plotting as nip
        from nilearn import image
        from nilearn import datasets
        from roistats import atlases, plotting
        import random
        import numpy as np
        import pandas as pd

        atlas = datasets.load_mni152_brain_mask()
        atlas_fp = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr50-2mm')['maps']
        try:
            labels = atlases.labels('HarvardOxford-Cortical.xml')
        except Exception:
            from nilearn import datasets

            name = 'cort-maxprob-thr50-2mm'
            labels = datasets.fetch_atlas_harvard_oxford(name)['labels']

            # Store them in a dict
            labels = dict(enumerate(labels))
        print(atlas_fp)

        images = []
        for i in range(0, 50):
            d = np.random.rand(*atlas.dataobj.shape) * atlas.dataobj
            im = image.new_img_like(atlas, d)
            im = image.smooth_img(im, 5)
            _, f = tempfile.mkstemp(suffix='.nii.gz')
            im.to_filename(f)
            images.append(f)

        for each in images[:3]:
            nip.plot_roi(atlas_fp, bg_img=each)

        df = collect.roistats_from_maps(images, atlas_fp, subjects=None)
        df = df.rename(columns=labels)

        df['group'] = df.index
        df['age'] = df.apply(lambda row: random.random()*5+50, axis=1)
        df['group'] = df.apply(lambda row: row['group'] < len(df)/2, axis=1)
        df['index'] = df.index

        plotting.boxplot('Temporal Pole', df, covariates=['age'], by='group')

        _ = plotting.lmplot('Temporal Pole', 'age', df, covariates=[],
                            hue='group', palette='default')

        cov = df[['age', 'group']]
        melt = pd.melt(df, id_vars='index', value_vars=labels.values(),
                       var_name='region').set_index('index')
        melt = melt.join(cov)
        plotting.hist(melt, by='group', region_colname='region',
                      covariates=['age'])

        print('Removing images')
        import os
        for e in images:
            os.unlink(e)
Example #19
def spikes_mask(in_file, in_mask=None, out_file=None):
    """
    Utility function to calculate a mask in which to check
    for :abbr:`EM (electromagnetic)` spikes.
    """

    import os.path as op
    import nibabel as nb
    import numpy as np
    from nilearn.image import mean_img
    from nilearn.plotting import plot_roi
    from scipy import ndimage as nd

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_spmask{}'.format(fname, ext))
        out_plot = op.abspath('{}_spmask.pdf'.format(fname))

    in_4d_nii = nb.load(in_file)
    orientation = nb.aff2axcodes(in_4d_nii.affine)

    if in_mask:
        mask_data = nb.load(in_mask).get_data()
        a = np.where(mask_data != 0)
        bbox = np.max(a[0]) - np.min(a[0]), np.max(a[1]) - \
            np.min(a[1]), np.max(a[2]) - np.min(a[2])
        longest_axis = np.argmax(bbox)

        # Input here is a binarized and intersected mask data from previous section
        dil_mask = nd.binary_dilation(mask_data,
                                      iterations=int(
                                          mask_data.shape[longest_axis] / 9))

        rep = list(mask_data.shape)
        rep[longest_axis] = -1
        new_mask_2d = dil_mask.max(axis=longest_axis).reshape(rep)

        rep = [1, 1, 1]
        rep[longest_axis] = mask_data.shape[longest_axis]
        new_mask_3d = np.logical_not(np.tile(new_mask_2d, rep))
    else:
        new_mask_3d = np.zeros(in_4d_nii.shape[:3]) == 1

    if orientation[0] in ['L', 'R']:
        new_mask_3d[0:2, :, :] = True
        new_mask_3d[-3:-1, :, :] = True
    else:
        new_mask_3d[:, 0:2, :] = True
        new_mask_3d[:, -3:-1, :] = True

    mask_nii = nb.Nifti1Image(new_mask_3d.astype(np.uint8),
                              in_4d_nii.get_affine(), in_4d_nii.get_header())
    mask_nii.to_filename(out_file)

    plot_roi(mask_nii, mean_img(in_4d_nii), output_file=out_plot)
    return out_file, out_plot
Example #20
def cluster_binary_img(binary_img, mask_img, min_region_size='exhaustive'):

    # get voxel resolution in binary_img
    # NOTE: function currently assumes equal width in x,y,z direction
    voxel_sizes = binary_img.header.get_zooms()

    # if not specified by the user, cluster exhaustively, i.e. assign each and every
    # voxel to one and only one cluster
    if min_region_size == 'exhaustive':
        min_region_size_ = _get_voxel_volume(voxel_sizes) - 1
    else:
        min_region_size_ = min_region_size

    # count overall number of 1s in the binary image
    total_n_voxels = np.count_nonzero(binary_img.get_fdata())

    # extract clusters in binary image
    cluster_imgs, indices = connected_regions(
        maps_img=binary_img,
        min_region_size=min_region_size_,
        extract_type='connected_components',
        smoothing_fwhm=None,
        mask_img=mask_img)

    # Get sizes of clusters (number of voxels that have been assigned to each region)
    # As a sanity check + for user information get size of every region and
    # count overall number of voxels that have been assigned to that region
    cluster_sizes = []
    total_n_voxels_assigned = 0

    for idx, cluster_img in enumerate(iter_img(cluster_imgs)):
        cluster_img_data = cluster_img.get_fdata()
        cluster_img_size = np.count_nonzero(cluster_img_data)
        cluster_sizes.append(cluster_img_size)
        total_n_voxels_assigned += cluster_img_size

    if total_n_voxels_assigned != total_n_voxels:
        raise ValueError(
            'Number of voxels in output clustered image is different from total number of voxels in input binary image '
        )

    # Collapse the extracted cluster images to one cluster atlas image
    cluster_imgs_labeled = []

    for idx, cluster_img in enumerate(iter_img(cluster_imgs), start=1):
        cluster_img_labeled = math_img(
            f"np.where(cluster_img == 1,{idx},cluster_img)",
            cluster_img=cluster_img)
        cluster_imgs_labeled.append(cluster_img_labeled)

    cluster_img_atlas = math_img("np.sum(imgs,axis=3)",
                                 imgs=cluster_imgs_labeled)

    # plot the cluster atlas image
    plotting.plot_roi(cluster_img_atlas,
                      title='Clustered Binary Image',
                      draw_cross=False)

    return cluster_sizes, cluster_img_atlas
Example #21
def spikes_mask(in_file, in_mask=None, out_file=None):
    """
    Utility function to calculate a mask in which to check
    for :abbr:`EM (electromagnetic)` spikes.
    """

    import os.path as op
    import nibabel as nb
    import numpy as np
    from nilearn.image import mean_img
    from nilearn.plotting import plot_roi
    from scipy import ndimage as nd

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_spmask{}'.format(fname, ext))
        out_plot = op.abspath('{}_spmask.pdf'.format(fname))

    in_4d_nii = nb.load(in_file)
    orientation = nb.aff2axcodes(in_4d_nii.affine)

    if in_mask:
        mask_data = nb.load(in_mask).get_data()
        a = np.where(mask_data != 0)
        bbox = np.max(a[0]) - np.min(a[0]), np.max(a[1]) - \
            np.min(a[1]), np.max(a[2]) - np.min(a[2])
        longest_axis = np.argmax(bbox)

        # Input here is a binarized and intersected mask data from previous section
        dil_mask = nd.binary_dilation(
            mask_data, iterations=int(mask_data.shape[longest_axis] / 9))

        rep = list(mask_data.shape)
        rep[longest_axis] = -1
        new_mask_2d = dil_mask.max(axis=longest_axis).reshape(rep)

        rep = [1, 1, 1]
        rep[longest_axis] = mask_data.shape[longest_axis]
        new_mask_3d = np.logical_not(np.tile(new_mask_2d, rep))
    else:
        new_mask_3d = np.zeros(in_4d_nii.shape[:3]) == 1

    if orientation[0] in ['L', 'R']:
        new_mask_3d[0:2, :, :] = True
        new_mask_3d[-3:-1, :, :] = True
    else:
        new_mask_3d[:, 0:2, :] = True
        new_mask_3d[:, -3:-1, :] = True

    mask_nii = nb.Nifti1Image(new_mask_3d.astype(np.uint8), in_4d_nii.get_affine(),
                              in_4d_nii.get_header())
    mask_nii.to_filename(out_file)

    plot_roi(mask_nii, mean_img(in_4d_nii), output_file=out_plot)
    return out_file, out_plot
Example #22
def drawRoi(imgPath, roiPath, savePath):
    from matplotlib import pyplot as plt
    plotting.plot_roi(roiPath,
                      bg_img=imgPath,
                      title="Hippocampus",
                      draw_cross=False,
                      dim=0,
                      cmap=plt.cm.autumn,
                      output_file=savePath)
def create_coregistered_qc_image(pet_coregistered_file, t1w_file):
    pet_coregistered_img = nib.load(pet_coregistered_file)
    t1w_img = nib.load(t1w_file)
    plotting.plot_roi(pet_coregistered_img,
                      bg_img=t1w_img,
                      draw_cross=False,
                      alpha=0.3,
                      output_file=pet_coregistered_file.replace(
                          '.nii.gz', '.png'))
Example #24
def rotateAndPlotWrapper(xCoord, yCoord, zCoord):
    from nilearn import plotting
    import nibabel as nib
    import numpy as np

    get_ipython().run_line_magic('matplotlib', 'inline')
    plotting.plot_roi(roi_img=roi_img,
                      bg_img=t1img,
                      cut_coords=[xCoord, yCoord, zCoord])
Example #25
    def map_brain(self, average_proportions, selection_method, paradigm):

        self.paradigm = paradigm

        dataset = datasets.fetch_atlas_aal()
        aal_img = nib.load(dataset["maps"])
        aal_data = aal_img.get_data()
        roi_ids = []

        new_data_rank = np.zeros(aal_img.shape, dtype='>i2')
        new_data_proportion = np.zeros(aal_img.shape, dtype='>i2')
        for x in range(len(aal_data)):
            print(x)
            for y in range(len(aal_data[x])):
                for z in range(len(aal_data[x][y])):

                    if str(aal_data[x][y][z]) in dataset.indices:

                        # get the index in indices and look which area it is in labels
                        roi = dataset.labels[dataset.indices.index(
                            str(aal_data[x][y][z]))]
                        if roi in average_proportions.keys():

                            if average_proportions[roi]["accuracy"][
                                    "mean"] > 0.52:
                                if not roi in roi_ids:
                                    roi_ids.append(roi)
                                print(roi)
                                new_rank_value = average_proportions[roi][
                                    "rank"]["mean"]
                                print(new_rank_value)
                                new_proportion_value = average_proportions[
                                    roi]["percentage_of_max"]["mean"]
                            else:
                                new_rank_value = 0
                                new_proportion_value = 0
                        else:
                            new_rank_value = 0
                            new_proportion_value = 0
                        new_data_rank[x][y][z] = new_rank_value
                        new_data_proportion[x][y][z] = new_proportion_value

        print(roi_ids)
        for roi in roi_ids:
            roi_id = dataset.indices[dataset.labels.index(roi)]
            roi_map = image.math_img('img == %s' % roi_id, img=dataset.maps)
            plotting.plot_roi(roi_map, title=roi)

        aal_new = nib.Nifti1Image(new_data_rank, aal_img.affine)
        hot = cm.get_cmap('hot')
        plotting.plot_roi(aal_new,
                          cmap=hot,
                          colorbar=True,
                          output_file=self.save_dir + self.analysis_method +
                          "_brain_map_rank_" + selection_method + "_" +
                          self.paradigm + "_>.52.png")
        plotting.show()
Example #26
def plot_brainrsa_regions(img,
                          threshold=None,
                          background=get_bg_ch2(),
                          type='r'):
    """
    Plot the RSA-result regions by 3 cuts (frontal, axial & lateral)

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The voxel-count threshold used in the correction.
        If threshold=n, only similarity clusters consisting of more than n voxels
        will be visible. If it is None, no threshold correction is applied.
    background : Niimg-like object or string. Default is stuff.get_bg_ch2()
        The background image that the RSA results will be plotted on top of.
    type : string 'r' or 't'
        The type of result (r-values or t-values).
    """

    imgarray = nib.load(img).get_fdata()

    if np.isnan(imgarray).all():
        print("No Valid Results")

    else:
        if threshold is not None:

            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)

            imgarray = correct_by_threshold(imgarray, threshold)

            img = nib.Nifti1Image(imgarray, affine)

        if type == 'r':
            plotting.plot_roi(roi_img=img,
                              bg_img=background,
                              threshold=0,
                              vmin=0.1,
                              vmax=1,
                              title="Similarity",
                              resampling_interpolation="continuous")
        if type == 't':
            plotting.plot_roi(roi_img=img,
                              bg_img=background,
                              threshold=0,
                              vmin=-7,
                              vmax=7,
                              title="Similarity",
                              resampling_interpolation="continuous")

        plt.show()

    return 0
Example #27
 def get_static_svg(self):
     '''Generate static svg of atlas (cannot manipulate in d3)'''
     svg_data = []
     with make_tmp_folder() as temp_dir:
         output_file='%s/atlas.svg' %(temp_dir)
         plotting.plot_roi(self.mr, annotate=False, draw_cross=False,
                           cmap="nipy_spectral", black_bg=False,
                           output_file=output_file)
         svg_file = open(output_file,'r')
         svg_data = svg_file.readlines()
         svg_file.close()
     return svg_data[4:]
Example #28
def plotResultImageWithoutGT(path_t1, path_result, savepath, subject, output_type='save'):
    if output_type == 'save':
        plotting.plot_roi(path_result, path_t1, title=str(subject))
        plt.savefig(os.path.join(savepath, 'result_' + subject + '.png'))
        plt.close()
        logging.info('Subject ' + str(subject) + ' plotted with image ' + path_t1 + '.')
    elif output_type == 'show':
        plotting.plot_roi(path_result, path_t1, title=str(subject))
        plt.show()
        plt.close()
def plot_img_with_label(labels, images):
    """
    Plots 50 images with labels
    :param labels: list with label paths
    :param images: list with images paths
    """
    for i in range(50):
        plotting.plot_roi(labels[i], bg_img=images[i], cmap='Paired')
        # plotting.plot_img(i)
        plotting.show()
Example #30
 def get_static_svg(self):
     '''Generate static svg of atlas (cannot manipulate in d3)'''
     svg_data = []
     with make_tmp_folder() as temp_dir:
         output_file='%s/atlas.svg' %(temp_dir)
         plotting.plot_roi(self.mr, annotate=False, draw_cross=False,
                           cmap="nipy_spectral", black_bg=False,
                           output_file=output_file)
         svg_file = open(output_file,'r')
         svg_data = svg_file.readlines()
         svg_file.close()
     return svg_data[4:]
def compute_all_subjects_mask():
    """ Computes the mask of all the subjects and the sesssions
    """
    masker = MultiNiftiMasker(mask_strategy='epi', memory=CACHE_DIR,
                              memory_level=2, n_jobs=10, verbose=5)
               
    imgs = dataset.func1 + dataset.func2
    masker.fit(imgs)
    masker.mask_img_.to_filename('all_subjects.nii.gz')
    plot_roi(masker.mask_img_)
Example #32
def plot_mask_overlay(mask_file, niftii_file):
    mask = nib.load(mask_file)
    head = nib.load(niftii_file)
    overlay_image_file = output_dir + "mask_overlay.png"
    plot_roi(
        mask,
        head,
        draw_cross=False,
        title="Mask Overlays",
        output_file=overlay_image_file,
    )
Example #33
    def inspect_mask(self):
        """Method to plot the computed mask.

        :return: three middle slices with the applied mask on the mean image.
        """
        try:
            plot_roi(self.mask, self.img)
        except TypeError:
            raise LookupError(
                "\nNo mask precomputed! First compute a mask before applying this method."
            )
Example #34
def make_roi_image(nifti_file,png_img_file=None):
    """Make roi (mask) image"""
    nifti_file = str(nifti_file)
    mask_brain = plot_roi(nifti_file)
    if png_img_file:    
        mask_brain.savefig(png_img_file)
    plt.close('all')
    return mask_brain
def stripped_brain_overlay(in_file, overlay_file, out_file):
    import os.path
    import nibabel as nb
    import matplotlib as mpl
    mpl.use('Agg')
    from nilearn.plotting import plot_roi
    vmax = nb.load(in_file).get_data().reshape(-1).max()
    mask_display = plot_roi(
        in_file, overlay_file, output_file=out_file, title=out_file,
        display_mode="ortho", dim=-1, alpha=.3, vmax=vmax + 1)
    #  mask_display.bg_img(overlay_file)
    #  mask_display.title(out_file, x=0.01, y=0.99, size=15, color=None,
    #                     bgcolor=None, alpha=1)
    #  mask_display.display_mode = "yx"
    mask_display
    return os.path.abspath(out_file)
l, n = find_region_names_using_cut_coords(dmn_coords, atlas_img,
                                          labels=labels)

# where 'l' indicates new labels generated according to given atlas labels and
# coordinates
new_labels = l

# where 'n' indicates brain regions names labelled, generated according to given
# labels
region_names_involved = n

######################################################################
# Let's visualize
from nilearn.image import load_img
from nilearn import plotting
from nilearn.image import new_img_like

atlas_img = load_img(atlas_img)
affine = atlas_img.get_affine()
atlas_data = atlas_img.get_data()

for i, this_label in enumerate(new_labels):
    this_data = (atlas_data == this_label)
    this_data = this_data.astype(int)
    this_img = new_img_like(atlas_img, this_data, affine)
    plotting.plot_roi(this_img, cut_coords=dmn_coords[i],
                      title=region_names_involved[i])

plotting.show()
Example #37
### Build a mask ##############################################################
# Thresholding
log_p_values[log_p_values < 5] = 0
plot_stat_map(new_img_like(fmri_img, log_p_values),
              mean_img, title='Thresholded p-values', annotate=False,
              colorbar=False, cut_coords=cut_coords)

# Binarization and intersection with VT mask
# (intersection corresponds to an "AND conjunction")
bin_p_values = (log_p_values != 0)
mask_vt_filename = haxby_dataset.mask_vt[0]
vt = nibabel.load(mask_vt_filename).get_data().astype(bool)
bin_p_values_and_vt = np.logical_and(bin_p_values, vt)

plot_roi(new_img_like(fmri_img, bin_p_values_and_vt.astype(np.int)),
         mean_img, title='Intersection with ventral temporal mask',
         cut_coords=cut_coords)

# Dilation
from scipy import ndimage
dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt)
plot_roi(new_img_like(fmri_img, dil_bin_p_values_and_vt.astype(np.int)),
         mean_img, title='Dilated mask', cut_coords=cut_coords,
         annotate=False)

# Identification of connected components
plt.figure()
labels, n_labels = ndimage.label(dil_bin_p_values_and_vt)
first_roi_data = (labels == 1).astype(np.int)
second_roi_data = (labels == 2).astype(np.int)
fig_id = plt.subplot(2, 1, 1)
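The snippet above is cut off right after the first subplot axis is created. Following the pattern used elsewhere in this listing, a sketch of how the two connected components might then be displayed (all names are taken from the lines above):

# Sketch only: show the first two connected components on the mean
# functional image, one per subplot.
plot_roi(new_img_like(fmri_img, first_roi_data), mean_img,
         title='Connected components: first ROI', axes=fig_id)
fig_id = plt.subplot(2, 1, 2)
plot_roi(new_img_like(fmri_img, second_roi_data), mean_img,
         title='Connected components: second ROI', axes=fig_id)
plt.show()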
def plot_mask(pet_files, pet_imgs):
    for pi, pf in zip(pet_imgs, pet_files):
        mask_path = os.path.join('figures', 'mask',
                                 pf.split('/')[-1].split('.')[0]) 
        plot_roi(masker.mask_img_, pi, output_file=mask_path,
                 title=pf.split('/')[-1].split('.')[0])
func_filename = haxby_dataset.func[0]
mean_haxby = mean_img(func_filename)

from nilearn.plotting import plot_epi, show
plot_epi(mean_haxby)

##############################################################################
# Extracting a brain mask

# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(func_filename)

# Visualize it as an ROI
from nilearn.plotting import plot_roi
plot_roi(mask_img, mean_haxby)

##############################################################################
# Applying the mask to extract the corresponding time series

from nilearn.masking import apply_mask
masked_data = apply_mask(func_filename, mask_img)

# masked_data shape is (timepoints, voxels). We can plot the first 150
# timepoints from two voxels

# And now plot a few of these
import matplotlib.pyplot as plt
plt.figure(figsize=(7, 5))
plt.plot(masked_data[:150, :2])
plt.xlabel('Time [TRs]', fontsize=16)
from nilearn import datasets, plotting

data = datasets.fetch_yeo_2011_atlas()
plotting.plot_roi(data.liberal_7, title="7 clusters")
plotting.plot_roi(data.liberal_17, title="17 clusters")

pet_files = []
pet_img = []
for idx, row in data.iterrows():
    pet_file = glob.glob(os.path.join(BASE_DIR,
                                      'I' + str(row.Image_ID_y), 'wI*.nii'))
    if len(pet_file)>0:
        pet_files.append(pet_file[0])
        img = nib.load(pet_file[0])
        pet_img.append(img)

masker = NiftiMasker(mask_strategy='epi',
                     mask_args=dict(opening=1))
masker.fit(pet_files)

plot_roi(masker.mask_img_, pet_file[0])

pet_masked = masker.transform_niimgs(pet_files, n_jobs=6)
pet_masked = np.vstack(pet_masked)
nb_vox = pet_masked.shape[1]

groups = [['AD', 'Normal'], ['AD', 'MCI'], ['MCI', 'LMCI'], ['MCI', 'Normal']]

for gr in groups:
    gr1_idx = data[data.DX_Group == gr[0]].index.values
    gr2_idx = data[data.DX_Group == gr[1]].index.values
    
    gr1_f = pet_masked[gr1_idx, :]
    gr2_f = pet_masked[gr2_idx, :]
    
    t_masked, p_masked = stats.ttest_ind(gr1_f, gr2_f)
                           target_shape=shape, interpolation='nearest')
roi_masker = input_data.NiftiLabelsMasker(labels_img=atlas,
                                          mask_img=mask_filename)
roi_masker.fit(mask_filename)  # just to have it fitted

cut_coords = (30, -45, -12)

roi_score_img = roi_masker.inverse_transform(rsa[np.newaxis])
plotting.plot_stat_map(roi_score_img, title='RSA', cut_coords=cut_coords)
plt.savefig('rsa.png')
roi_score_img = roi_masker.inverse_transform(lm[np.newaxis])
plotting.plot_stat_map(roi_score_img, title='Linear model',
                       cut_coords=cut_coords)
plt.savefig('lm.png')

plotting.plot_roi(atlas, title="Harvard Oxford atlas", cut_coords=cut_coords)
# print labels

from scipy.stats import fligner
X = roi_masker.transform(func_filename)
y, session = np.loadtxt(haxby_dataset.session_target).astype('int').T
conditions = np.recfromtxt(haxby_dataset.conditions_target)['f0']
non_rest = conditions != b'rest'
conditions = conditions[non_rest]
y, session = y[non_rest], session[non_rest]
y = y[session < 4]

var_stat = np.zeros(X.shape[1])
for j, x in enumerate(X.T):
    _, var_stat[j] = fligner(
        x[y == 8], x[y == 1], x[y == 2], x[y == 3],
              title="p-values", cut_coords=(coronal, sagittal, axial))

### Build a mask ##############################################################

# Thresholding
pvalues[pvalues < 5] = 0
plot_stat_map(nibabel.Nifti1Image(pvalues, fmri_img.get_affine()), mean_img,
              title='Thresholded p-values',
              cut_coords=(coronal, sagittal, axial))

# Binarization and intersection with VT mask
bin_pvalues = (pvalues != 0)
vt = nibabel.load(haxby_files.mask_vt[0]).get_data().astype(bool)
bin_pvalues_and_vt = np.logical_and(bin_pvalues, vt)
plot_roi(nibabel.Nifti1Image(bin_pvalues_and_vt.astype(np.int), 
                             fmri_img.get_affine()), 
         mean_img, title='Intersection with ventral temporal mask',
         cut_coords=(coronal, sagittal, axial))

# Dilation
from scipy import ndimage
dil_bin_pvalues_and_vt = ndimage.binary_dilation(bin_pvalues_and_vt)
plot_roi(nibabel.Nifti1Image(dil_bin_pvalues_and_vt.astype(np.int), 
                             fmri_img.get_affine()), 
         mean_img, title='Dilated mask', cut_coords=(coronal, sagittal, axial))

# Identification of connected components
labels, n_labels = ndimage.label(dil_bin_pvalues_and_vt)
plot_roi(nibabel.Nifti1Image(labels, fmri_img.get_affine()), 
         mean_img, title='Connected components',
         cut_coords=(coronal, sagittal, axial))
plt.show()
# print basic information on the dataset
print('First functional nifti image (4D) is located at: %s' %
      miyawaki_dataset.func[0])  # 4D data

miyawaki_filename = miyawaki_dataset.func[0]
miyawaki_mean_img = image.mean_img(miyawaki_filename)

# This time, we can use the NiftiMasker without changing the default mask
# strategy, as the data has already been masked, and thus lies on a
# homogeneous background

masker = NiftiMasker()
masker.fit(miyawaki_filename)

plot_roi(masker.mask_img_, miyawaki_mean_img,
         title="Mask from already masked data")


###############################################################################
# From raw EPI data

# Load NYU resting-state dataset
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
nyu_filename = nyu_dataset.func[0]

# Restrict nyu to 100 frames to speed up computation
from nilearn.image import index_img
nyu_img = index_img(nyu_filename, slice(0, 100))

# To display the background
nyu_mean_img = image.mean_img(nyu_img)
###########################################################################
# Visualize: Brain parcellations (Ward)
# -------------------------------------
#
# First, we display the parcellations of the brain image stored in attribute
# `labels_img_`
ward_labels_img = ward.labels_img_

# ward_labels_img is now a Nifti1Image object; it can be saved to a file
# with the following code:
ward_labels_img.to_filename('ward_parcellation.nii.gz')

from nilearn import plotting
from nilearn.image import mean_img, index_img

first_plot = plotting.plot_roi(ward_labels_img, title="Ward parcellation",
                               display_mode='xz')

# Grab cut coordinates from this plot to use as common cut coordinates for all plots
cut_coords = first_plot.cut_coords
###########################################################################
# Compressed representation of Ward clustering
# --------------------------------------------
#
# Second, we illustrate the effect that the clustering has on the signal.
# We show the original data, and the approximation provided by the
# clustering by averaging the signal on each parcel.

# Grab number of voxels from attribute mask image (mask_img_).
import numpy as np
original_voxels = np.sum(ward.mask_img_.get_data())
Example #46
                                               get_anats=True)


###############################################################################
# demo the different plotting functions

# Plotting statistical maps
plotting.plot_stat_map(localizer.cmaps[3], bg_img=localizer.anats[3],
                       threshold=3, title="plot_stat_map",
                       cut_coords=(36, -27, 66))

# Plotting anatomical maps
plotting.plot_anat(haxby.anat[0], title="plot_anat")

# Plotting ROIs (here the mask)
plotting.plot_roi(haxby.mask_vt[0], bg_img=haxby.anat[0], title="plot_roi")

# Plotting EPI haxby
mean_haxby_img = image.mean_img(haxby.func[0])
plotting.plot_epi(mean_haxby_img, title="plot_epi")


###############################################################################
# demo the different display_mode

plotting.plot_stat_map(localizer.cmaps[3], display_mode='ortho',
                       cut_coords=(36, -27, 60),
                       title="display_mode='ortho', cut_coords=(36, -27, 60)")

plotting.plot_stat_map(localizer.cmaps[3], display_mode='z', cut_coords=5,
                       title="display_mode='z', cut_coords=5")
### Build a mask ##############################################################
# Thresholding
log_p_values[log_p_values < 5] = 0
plot_stat_map(nibabel.Nifti1Image(log_p_values, fmri_img.get_affine()),
              mean_img, title='Thresholded p-values', annotate=False,
              colorbar=False, cut_coords=cut_coords)

# Binarization and intersection with VT mask
# (intersection corresponds to an "AND conjunction")
bin_p_values = (log_p_values != 0)
mask_vt_filename = haxby_dataset.mask_vt[0]
vt = nibabel.load(mask_vt_filename).get_data().astype(bool)
bin_p_values_and_vt = np.logical_and(bin_p_values, vt)

plot_roi(nibabel.Nifti1Image(bin_p_values_and_vt.astype(np.int),
         fmri_img.get_affine()),
         mean_img, title='Intersection with ventral temporal mask',
         cut_coords=cut_coords)

# Dilation
from scipy import ndimage
dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt)
plot_roi(nibabel.Nifti1Image(dil_bin_p_values_and_vt.astype(np.int),
                             fmri_img.get_affine()),
         mean_img, title='Dilated mask', cut_coords=cut_coords,
         annotate=False)

# Identification of connected components
plt.figure()
labels, n_labels = ndimage.label(dil_bin_p_values_and_vt)
first_roi_data = (labels == 1).astype(np.int)
second_roi_data = (labels == 2).astype(np.int)
# Fetching multiscale group brain parcellations

# import datasets module and use `fetch_atlas_basc_multiscale_2015` function
from nilearn import datasets

parcellations = datasets.fetch_atlas_basc_multiscale_2015(version='sym')

# We show here networks of 64, 197, 444
networks_64 = parcellations['scale064']
networks_197 = parcellations['scale197']
networks_444 = parcellations['scale444']

###############################################################################
# Visualizing brain parcellations

# import plotting module and use `plot_roi` function, since the maps are in 3D
from nilearn import plotting

# The cut coordinates of each plot are selected automatically
# We manually set the colormap of our choice
plotting.plot_roi(networks_64, cmap=plotting.cm.bwr,
                  title='64 regions of brain clusters')

plotting.plot_roi(networks_197, cmap=plotting.cm.bwr,
                  title='197 regions of brain clusters')

plotting.plot_roi(networks_444, cmap=plotting.cm.bwr_r,
                  title='444 regions of brain clusters')

plotting.show()
###########################################################################
# Convert the fMRI volume's to a data matrix
# ..........................................
#
# We will use the :class:`nilearn.input_data.NiftiMasker` to extract the
# fMRI data on a mask and convert it to data series.
#
# The mask is a mask of the ventral temporal stream coming from the
# Haxby study:
mask_filename = haxby_dataset.mask_vt[0]

# Let's visualize it, using the subject's anatomical image as a
# background
from nilearn import plotting
plotting.plot_roi(mask_filename, bg_img=haxby_dataset.anat[0],
                 cmap='Paired')

###########################################################################
# Now we use the NiftiMasker.
#
# We first create a masker, giving it the options that we care
# about. Here we use standardizing of the data, as it is often important
# for decoding
from nilearn.input_data import NiftiMasker
masker = NiftiMasker(mask_img=mask_filename, standardize=True)

# We give the masker a filename and retrieve a 2D array ready
# for machine learning with scikit-learn
fmri_masked = masker.fit_transform(fmri_filename)

###########################################################################
# -*- coding: utf-8 -*-
"""
Created on Wed Apr  1 09:16:38 2015

@author: mr243268
"""
import os
from loader import load_dynacomp, set_figure_base_dir
from nilearn.plotting import plot_roi

dataset = load_dynacomp()

FIG_DIR = set_figure_base_dir('rois')

for i in range(len(dataset.subjects)):
    for k in sorted(dataset.rois[i].keys()):
        output_file = os.path.join(FIG_DIR, k)
        plot_roi(dataset.rois[i][k], title=k, output_file=output_file)
    break
mask = masker.mask_img_.get_data().astype(np.bool)
shape = mask.shape
connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
                                   n_z=shape[2], mask=mask)

# Computing the ward for the first time, this is long...
start = time.time()
ward = WardAgglomeration(n_clusters=1000, connectivity=connectivity,
                         memory='nilearn_cache')
ward.fit(pet_masked[0])
print "Ward agglomeration 1000 clusters: %.2fs" % (time.time() - start)


labels = ward.labels_ + 1
labels_img = masker.inverse_transform(labels)
first_plot = plot_roi(labels_img, pet_img[0], title="Ward parcellation",
                      display_mode='xz')
# labels_img is a Nifti1Image object, it can be saved to file with the
# following code:
labels_img.to_filename('parcellation.nii')


"""
##################################################################
# Compute the ward with more clusters, should be faster as we are using
# the caching mechanism
start = time.time()
ward = WardAgglomeration(n_clusters=2000, connectivity=connectivity,
                         memory='nilearn_cache')
ward.fit(fmri_masked)
print "Ward agglomeration 2000 clusters: %.2fs" % (time.time() - start)
"""
Example #52
 def plot_all(self):
     names = self.network_names
     for idx, rsn in enumerate(niimg.iter_img(self._img)):
         disp = niplot.plot_roi(rsn, title=names.get(idx, None))
Example #53
# voxels that have been selected in both masks. In neuroimaging jargon, this
# is called an "AND conjunction". We use already imported numpy as np
bin_p_values_and_vt = np.logical_and(bin_p_values, vt)

# Visualizing the mask intersection results using plotting function `plot_roi`,
# a function which can be used for visualizing target specific voxels.
from nilearn.plotting import plot_roi, show

# First, we create a new image from the binarized, intersected mask (second
# argument) and use that Nifti image in the visualization. The boolean values
# must be converted to the int data type at the same time, otherwise an error
# will be raised
bin_p_values_and_vt_img = new_img_like(fmri_img,
                                       bin_p_values_and_vt.astype(np.int))
# Visualization uses the computed mean of the functional images as background
plot_roi(bin_p_values_and_vt_img, mean_img, cut_coords=cut_coords,
         title='Intersection with ventral temporal mask')

##############################################################################
# **Dilation** - Thresholded functional brain images often contain scattered
# voxels across the brain. To consolidate such brain images towards more compact
# shapes, we use a `morphological dilation
# <http://en.wikipedia.org/wiki/Dilation_(morphology)>`_. This is a common step
# to be sure not to forget voxels located on the edge of a ROI. In other words,
# such operations can fill "holes" in masked voxel representations.

# We use ndimage function from scipy Python library for mask dilation
from scipy import ndimage

# Input here is a binarized and intersected mask data from previous section
dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt)
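The snippet ends right after the dilation; following the earlier 'Dilated mask' plots in this listing, the result would typically be displayed like this (a sketch reusing the names defined above):

# Sketch only: wrap the dilated boolean array in a Nifti image and overlay it
# on the mean functional image, as was done for the un-dilated mask.
dil_bin_p_values_and_vt_img = new_img_like(
    fmri_img, dil_bin_p_values_and_vt.astype(np.int))
plot_roi(dil_bin_p_values_and_vt_img, mean_img, cut_coords=cut_coords,
         title='Dilated mask')
show()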
Example #54
for train, test in cv:
    svc.fit(fmri_masked[train], target[train])
    prediction = svc.predict(fmri_masked[test])
    cv_scores.append(np.sum(prediction == target[test])
                     / float(np.size(target[test])))

print(cv_scores)

### Unmasking #################################################################

# Retrieve the SVC discriminating weights
coef_ = svc.coef_

# Reverse masking thanks to the Nifti Masker
coef_img = nifti_masker.inverse_transform(coef_)

# Save the coefficients as a Nifti image
coef_img.to_filename('haxby_svc_weights.nii')

### Visualization #############################################################
import matplotlib.pyplot as plt
from nilearn.image.image import mean_img
from nilearn.plotting import plot_roi, plot_stat_map

mean_epi = mean_img(func_filename)
plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx")

plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx")

plt.show()
"""
Basic Atlas plotting
=======================

Plot the regions of a reference atlas (here the Harvard-Oxford atlas).
"""

import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn import plotting

atlas_filename, labels = datasets.fetch_harvard_oxford('cort-maxprob-thr25-2mm')

plotting.plot_roi(atlas_filename, title="Harvard Oxford atlas")
plt.show()
# To visualize results, we need to transform the clustering's labels back
# to a neuroimaging volume. For this, we use the NiftiMasker's
# inverse_transform method.
from nilearn.plotting import plot_roi, plot_epi, show

# Unmask the labels

# Avoid 0 label
labels = ward.labels_ + 1
labels_img = nifti_masker.inverse_transform(labels)

from nilearn.image import mean_img
mean_func_img = mean_img(func_filename)


first_plot = plot_roi(labels_img, mean_func_img, title="Ward parcellation",
                      display_mode='xz')

# common cut coordinates for all plots
cut_coords = first_plot.cut_coords

##################################################################
# labels_img is a Nifti1Image object, it can be saved to file with the
# following code:
labels_img.to_filename('parcellation.nii')


##################################################################
# Second, we illustrate the effect that the clustering has on the
# signal. We show the original data, and the approximation provided by
# the clustering by averaging the signal on each parcel.
#
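The snippet stops before that illustration. A minimal sketch of how the compressed representation might be computed and shown, assuming `ward` is a fitted FeatureAgglomeration-style estimator with transform/inverse_transform, and reusing the names defined above:

# Sketch only: average the signal within each parcel, project it back to
# voxel space, and display one volume of the compressed data.
fmri_masked = nifti_masker.transform(func_filename)
fmri_reduced = ward.transform(fmri_masked)             # one mean signal per parcel
fmri_compressed = ward.inverse_transform(fmri_reduced)
compressed_img = nifti_masker.inverse_transform(fmri_compressed[0])
plot_epi(compressed_img, title='Compressed representation',
         cut_coords=cut_coords, display_mode='xz')
show()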
Example #57
    get_tmaps=True)
localizer_anat_filename = localizer_dataset.anats[1]
localizer_cmap_filename = localizer_dataset.cmaps[1]
localizer_tmap_filename = localizer_dataset.tmaps[1]

###############################################################################
# demo the different plotting functions

# Plotting statistical maps
plotting.plot_stat_map(localizer_cmap_filename, bg_img=localizer_anat_filename,
                       threshold=3, title="plot_stat_map",
                       cut_coords=(36, -27, 66))

# Plotting glass brain
plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
                          threshold=3)

# Plotting anatomical maps
plotting.plot_anat(haxby_anat_filename, title="plot_anat")

# Plotting ROIs (here the mask)
plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename,
                  title="plot_roi")

# Plotting EPI haxby
mean_haxby_img = image.mean_img(haxby_func_filename)
plotting.plot_epi(mean_haxby_img, title="plot_epi")

import matplotlib.pyplot as plt
plt.show()
"""

##############################################################################
# The original Yeo atlas
# -----------------------

# First we fetch the Yeo atlas
from nilearn import datasets

atlas_yeo_2011 = datasets.fetch_atlas_yeo_2011()
atlas_yeo = atlas_yeo_2011.thick_7

# Let's now plot it
from nilearn import plotting

plotting.plot_roi(atlas_yeo, title='Original Yeo atlas',
                  cut_coords=(8, -4, 9), colorbar=True, cmap='Paired')

##############################################################################
# The original Yeo atlas has 7 labels, as indicated in the colorbar.
# The colorbar also shows the correspondence between the color and the label.
#
# Note that these 7 labels actually correspond to networks that comprise
# several regions. We are going to split them up.

##############################################################################
# Relabeling the atlas into separated regions
# ---------------------------------------------
#
# Now we use connected_label_regions to break apart the networks
# of the Yeo atlas into separate regions
from nilearn.regions import connected_label_regions
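The call itself is cut off at this point; a sketch of how the relabeling and plotting might continue, reusing the names defined above:

# Sketch only: split the 7 networks into spatially separated, relabeled regions
region_labels = connected_label_regions(atlas_yeo)

plotting.plot_roi(region_labels, title='Relabeled Yeo atlas',
                  cut_coords=(8, -4, 9), colorbar=True, cmap='Paired')
plotting.show()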
Example #59
# rely on the 'background' masking strategy. We need to use the 'epi' one
nifti_masker = NiftiMasker(standardize=False, mask_strategy='epi',
                           memory="nilearn_cache", memory_level=2)
func_filename = nyu_dataset.func[0]
nifti_masker.fit(func_filename)
mask_img = nifti_masker.mask_img_

### Visualize the mask ########################################################
import matplotlib.pyplot as plt
from nilearn.plotting import plot_roi
from nilearn.image.image import mean_img

# calculate mean image for the background
mean_func_img = mean_img(func_filename)

plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")


### Preprocess data ###########################################################
nifti_masker.fit(func_filename)
fmri_masked = nifti_masker.transform(func_filename)

### Run an algorithm ##########################################################
from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(fmri_masked.T).T

### Reverse masking ###########################################################
components = nifti_masker.inverse_transform(components_masked)
extraction.fit()
regions_img = extraction.regions_img_

################################################################################
# Visualization
# Show region extraction results by importing image & plotting utilities
from nilearn import plotting
from nilearn.image import index_img
from nilearn.plotting import find_xyz_cut_coords

# Showing region extraction results using 4D maps visualization tool
plotting.plot_prob_atlas(regions_img, display_mode='z', cut_coords=2,
                         view_type='contours', title="Regions extracted.")

# To reduce the complexity, we choose to display all the regions
# extracted from network 3
import numpy as np

DMN_network = index_img(atlas_networks, 3)
plotting.plot_roi(DMN_network, display_mode='z', cut_coords=1,
                  title='Network 3')

regions_indices_network3 = np.where(np.array(extraction.index_) == 3)
for index in regions_indices_network3[0]:
    cur_img = index_img(extraction.regions_img_, index)
    coords = find_xyz_cut_coords(cur_img)
    plotting.plot_roi(cur_img, display_mode='z', cut_coords=coords[2:3],
                      title="Blob of network3")
            
plotting.show()