def glass_brain_rois(rois_set, color_list, output_dir, fname, view='z',
                     plot_title=None):
    """
    Plot a set of Regions-of-Interest (ROIs) in one single glass brain.
    Saves the resulting figure in a pre-specified directory.

    view = 'x': sagittal
    view = 'y': coronal
    view = 'z': axial
    view = 'l': sagittal left hemisphere only
    view = 'r': sagittal right hemisphere only
    """
    display = plotting.plot_glass_brain(None, display_mode=view,
                                        black_bg=False, alpha=1.,
                                        title=plot_title)
    for roi, color in zip(rois_set, color_list):
        display.add_overlay(roi, cmap=plotting.cm.alpha_cmap(color,
                                                             alpha_min=1.))
    if view in ['x', 'l', 'r']:
        view_tag = 'sagittal'
    elif view == 'y':
        view_tag = 'coronal'
    elif view == 'z':
        view_tag = 'axial'
    # Save figure
    display.savefig(os.path.join(output_dir,
                                 fname + '_' + view_tag + '.png'), dpi=600)
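
# Hypothetical usage sketch (the ROI file names, colors, and output directory
# are illustrative; assumes the nilearn `plotting` and `os` imports used above,
# with each ROI a NIfTI image and each color an RGB tuple for plotting.cm.alpha_cmap):
if __name__ == '__main__':
    example_rois = ['roi_visual.nii.gz', 'roi_motor.nii.gz']
    example_colors = [(1., 0., 0.), (0., 0., 1.)]
    glass_brain_rois(example_rois, example_colors,
                     output_dir='figures', fname='example_rois',
                     view='z', plot_title='Example ROIs')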
Example #2
    def plot_data(self, con_data=None, pattern=None, contrast=True):
        """
        This function plots matching contrast or beta files
        """
        if contrast:
            if pattern:
                con_data = self.load_contrasts(pattern, return_data=True)

            if con_data is None:
                raise Exception("Need either con_data (loaded from load_contrasts) or regex pattern")

            for filename, con in con_data.items():
                plotting.plot_glass_brain(con, display_mode='lyrz',
                                        colorbar=True, plot_abs=False,
                                        cmap=plotting.cm.ocean_hot, title=self.id+" "+self.contrasts[filename])

        else:
            beta_data = con_data  # fall back to any pre-loaded data
            if pattern:
                beta_data = self.load_betas(pattern, return_data=True)

            if beta_data is None:
                raise Exception("Need either beta_data (loaded from load_betas) or regex pattern")

            for filename, beta in beta_data.items():
                plotting.plot_glass_brain(beta, display_mode='lyrz',
                                        colorbar=True, plot_abs=False,
                                        cmap=plotting.cm.ocean_hot, title=self.id+" "+self.betas[filename])
def plot_stripes_in_ic(ic_img, melmix_ic_power, melmix_ic_timecourse, nifti_img, ic):
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.45, wspace=0.1)     
    
    stripe_magnitude_per_slice = sorted([(find_biggest_stripe_in_slice(ic_img[i]), i) 
                        for i in range(ic_img.shape[0])])    
    i = 1
    for stripe_magnitude, slice_index in stripe_magnitude_per_slice[:4]:
        slice_2d = ic_img[slice_index]        
        dip_sizes, col_means, peaks, valleys, median_filtered_picture = columnwise_signal_dips(slice_2d)                
        stripe_mask = get_stripe_mask(col_means, dip_sizes, valleys)*10
        dips = get_dips(dip_sizes, col_means, valleys)
        
        ax = fig.add_subplot(4, 2, i+2)
        ax.get_yaxis().set_visible(False)
        ax.imshow(median_filtered_picture)
        ax.plot(34 - dips, color='white')
        ax.plot(34-stripe_mask, color='red')        
        plt.title('ic:%s slice:%s magnitude:%s' %(
                ic+1, slice_index, abs(np.round(np.min(dip_sizes),2))))        
        i += 1
    
    ax2 = fig.add_subplot(4, 2, 7)
    ax2.plot(melmix_ic_power)
    plt.title('Powerspectrum')
    ax3 = fig.add_subplot(4, 2, 8)
    ax3.plot(melmix_ic_timecourse)
    plt.title('Timecourse')
    
    ax0 = fig.add_subplot(4,1,1)
    plot_glass_brain(nifti_img, title=ic+1, axes=ax0)
    return fig
Example #4
def test_plot_glass_brain_file_output(testdata_3d, tmpdir):  # noqa:F811
    """Smoke-test for hemispheric glass brain with file output."""
    filename = str(tmpdir.join('test.png'))
    plot_glass_brain(testdata_3d['img'],
                     output_file=filename,
                     display_mode='lzry')
    plt.close()
Example #5
def getMapping(net, tokens, thres=.05, title_=None):
    """
	compute predicted brain response from text

	INPUT:
		- net    : a pretrained network used to predict images 
		- tokens : a list of words, will take mean embedding

	"""

    wvec = getMeanVectorRepresentation(tokens)
    #wvec -= c_vec
    wvec = Variable(
        torch.from_numpy(wvec).float())  # convert to correct format
    bimage = net(wvec).data.numpy().reshape((20, 20))

    os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')
    plotting.plot_glass_brain('trial_name.nii.gz',
                              display_mode='z',
                              threshold=1,
                              title=title_)

    my_arr_mask = np.ma.masked_where(bimage < thres, bimage)

    cmap = plt.cm.YlOrRd
    cmap.set_bad(color='white')
    limits = 100  # 90
    plt.imshow(my_arr_mask,
               extent=(-1 * limits, limits, -1 * limits, limits),
               cmap=cmap,
               vmin=min(0, thres))  #, vmax=my_arr.max())
Example #6
def interp_corr(locs,
                corrs,
                width=10,
                vox_size=10,
                outfile=None,
                save_nii=None):
    nii = se.load('std', vox_size=vox_size)
    full_locs = nii.get_locs().values
    W = np.exp(_log_rbf(full_locs, locs, width=width))
    interp_corrs = np.dot(corrs, W.T)
    bo_nii = se.Brain(data=interp_corrs, locs=full_locs)
    nii_bo = _brain_to_nifti(bo_nii, nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=1,
                            vmin=0)
    #ni_plt.plot_glass_brain(nii_bo, colorbar=True, threshold=None, vmax=1, vmin=0, display_mode='lyrz')

    if save_nii:
        nii_bo.save(save_nii)

    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
def main():

    # group level analysis: input wanted subject ids
    subject_ids = [1, 2, 3, 4, 5, 6]
    searchlight_radius = 2

    if len(subject_ids) == 0:
        wr.warn('No subject IDs.')

    if searchlight_radius < 0:
        wr.warn('Searchlight radius has to be non-negative.')

    path = "../CDWAssignment_data"

    bold = nib.load(f'{path}/subj{subject_ids[0]}/bold.nii.gz')
    bold_data = bold.get_fdata()

    allRSA = np.zeros((len(subject_ids), bold_data.shape[0],
                       bold_data.shape[1], bold_data.shape[2]))

    for index in range(len(subject_ids)):

        allRSA[index, :, :, :] = get_one_RSA(subject_ids[index], path,
                                             searchlight_radius)

    # compute mean RSA across all subjects

    mean_RSA = allRSA.mean(axis=0)

    # ------------------Make image of RSA values in ROI-----------
    RSA_plotting = nib.Nifti1Image(mean_RSA, bold.affine)
    plotting.plot_glass_brain(RSA_plotting)
    plotting.show()
Example #8
def semanticRelPrediction(tokens1, tokens2, norm=False):
    """
	we add the values for tokens1 and subtract the mean for tokens2! 

	INPUT:
		- tokens1 : word tokens to add
		- tokens2 : word tokens to subtract
		- norm    : should we take mean of tokens or not
	"""
    wvec_1 = getMeanVectorRepresentation(tokens1, norm=norm)
    wvec_2 = getMeanVectorRepresentation(tokens2, norm=norm)

    wvec = Variable(
        torch.from_numpy(wvec_1 - wvec_2).float())  # convert to correct format
    bimage = net(wvec).data.numpy().reshape((20, 20))
    my_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)

    cmap = plt.cm.YlOrRd
    cmap.set_bad(color='white')
    limits = 100  # 90
    os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')
    plotting.plot_glass_brain('trial_name.nii.gz',
                              display_mode='z',
                              threshold=1)
    plt.imshow(my_arr_mask,
               extent=(-1 * limits, limits, -1 * limits, limits),
               cmap=cmap,
               vmin=0)  #, vmax=my_arr.max())
Example #9
 def _visualize(self, data, out_name):
     import numpy as np
     vmax = self.inputs.vmax
     if not isdefined(vmax):
         vmax = None
         abs_data = np.abs(data.get_fdata(dtype=np.float32))
         pctile99 = np.percentile(abs_data, 99.99)
         if abs_data.max() - pctile99 > 10:
             vmax = pctile99
     if isinstance(data, nb.Cifti2Image):
         plot_dscalar(data,
                      vmax=vmax,
                      threshold=self.inputs.threshold,
                      cmap=self.inputs.colormap,
                      output_file=out_name)
     else:
         nlp.plot_glass_brain(data,
                              colorbar=True,
                              plot_abs=False,
                              display_mode='lyrz',
                              axes=None,
                              vmax=vmax,
                              threshold=self.inputs.threshold,
                              cmap=self.inputs.colormap,
                              output_file=out_name)
Example #10
    def plot_sample_brain_data(real_sample_brain_img,
                               synthetic_sample_brain_img,
                               real_sample_correlation,
                               synthetic_sample_correlation,
                               output_file,
                               title=None):

        figure = plt.figure(figsize=(10, 10))
        figure.text(
            0.5,
            0.5,
            "[REAL] Average correlation with {0}\n  examples in Brainpedia: {1:.4f}"
            .format(title, real_sample_correlation),
            ha='center')
        figure.text(
            0.5,
            0.05,
            "[SYNTHETIC] Average correlation with {0}\n  examples in Brainpedia: {1:.4f}"
            .format(title, synthetic_sample_correlation),
            ha='center')

        real_brain_img_axes = plt.subplot(2, 1, 1)
        synthetic_brain_img_axes = plt.subplot(2, 1, 2)

        plotting.plot_glass_brain(real_sample_brain_img,
                                  threshold='auto',
                                  title="[REAL] " + title,
                                  axes=real_brain_img_axes)
        plotting.plot_glass_brain(synthetic_sample_brain_img,
                                  threshold='auto',
                                  title="[SYNTHETIC] " + title,
                                  axes=synthetic_brain_img_axes)

        figure.savefig(output_file)
        plt.close()
Example #11
 def calculate_zscore(self):
     '''
     calculates the zscore for each subject voxel based on the control mean and sd
     finds only significant voxels and saves them as "zs.nii.gz"
     '''
     self.zscores = (self.subject_data -
                     self.mean_data) / self.sd_data  # calculate zscores
     zscores = self.zscores
     zscores[np.isnan(
         zscores)] = 0  # temporarily replace NaNs with zeros
     # finds non significant values and replaces them with zeros for new variable:
     self.significant_zscores = np.where(
         np.abs(zscores) <= 1.96, np.nan, zscores)
     # creates nifti template:
     self.significant_zscores_nii = nib.Nifti1Image(
         self.significant_zscores, self.subject_img.affine)
     nib.save(self.significant_zscores_nii,
              'zs.nii.gz')  # save nifti template
     zs_nii_path = self.significant_zscores_nii
     plotting.plot_glass_brain(zs_nii_path,
                               threshold=1.96,
                               colorbar=True,
                               plot_abs=False,
                               output_file='Z_map.png',
                               vmax=5)
Example #12
def most_informative_locs_plot(df, vox_size=5, width=10, outfile=None):

    locs = compile_df_locs(df['R'])

    sub_nii = se.load('std', vox_size=vox_size)
    sub_locs = sub_nii.get_locs().values

    point_tree = spatial.cKDTree(locs)

    most_info = np.array([])

    z_df = df.copy(deep=True)
    z_df['Correlation'] = r2z(z_df['Correlation'])
    for l in sub_locs:
        most_info = np.append(
            most_info,
            z_df['Correlation'][point_tree.query_ball_point(l, width)].mean())

    bo_nii = se.Brain(data=np.atleast_2d(z2r(most_info)), locs=sub_locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, sub_nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=1,
                            vmin=0,
                            display_mode='lyrz')
    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
Example #13
def density_within_r_plot(locs, r, vox_size=4, outfile=None):

    nii = se.load('std', vox_size=vox_size)
    full_locs = nii.get_locs().values
    point_tree = spatial.cKDTree(locs)
    density_locs = np.array([])

    for l in locs:
        density_locs = np.append(
            density_locs,
            np.divide(len(point_tree.query_ball_point(l, r)),
                      np.shape(locs)[0]))

    bo_nii = se.Brain(data=np.atleast_2d(density_locs), locs=locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=.1,
                            vmin=0)

    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
Example #14
 def _visualize(self, data, out_name):
     nlp.plot_glass_brain(data,
                          colorbar=True,
                          plot_abs=False,
                          display_mode='lyrz',
                          axes=None,
                          output_file=out_name)
def plot_bw_coeffs(coeffs, affine, title, base_brightness=.7, cmap=None, output_file=None, black_bg=False):
    def isstr(s): return isinstance(s, str)

    def default_cmap():
        invert_if_black_bg = lambda v: (1 - v) if black_bg else v
        base_brightness_local = invert_if_black_bg(base_brightness)
        end_brightness = invert_if_black_bg(0)
        avg = np.average([base_brightness_local, end_brightness])
        c_range = ((0, base_brightness_local, base_brightness_local),
                   (.33, base_brightness_local, avg),
                   (.67, avg, end_brightness),
                   (1, end_brightness, end_brightness))
        c_dict = {r: c_range for r in ['red', 'green', 'blue']}
        cmap_name = 'bright_bw'
        cmap = LinearSegmentedColormap(cmap_name, c_dict)
        plt.register_cmap(cmap=cmap)
        return cmap

    cmap = plt.get_cmap(cmap) if isstr(cmap) else default_cmap() if cmap is None else cmap

    plot_glass_brain(nib.Nifti1Image(coeffs, affine=affine),
                     title=title,
                     black_bg=black_bg,
                     colorbar=True,
                     output_file=output_file,
                     cmap=cmap,
                     alpha=.15)
Example #16
def test_add_markers_using_plot_glass_brain():
    """Tests for adding markers through plot_glass_brain."""
    fig = plot_glass_brain(None)
    coords = [(-34, -39, -9)]
    fig.add_markers(coords)
    fig.close()
    # Add a single marker in right hemisphere such that no marker
    # should appear in the left hemisphere when plotting
    display = plot_glass_brain(None, display_mode='lyrz')
    display.add_markers([[20, 20, 20]])
    # Check that Axe 'l' has no marker
    assert (display.axes['l'].ax.collections[0].get_offsets().data.shape == (
        0, 2))
    # Check that all other Axes have one marker
    for d in 'ryz':
        assert (display.axes[d].ax.collections[0].get_offsets().data.shape == (
            1, 2))
    # Add two markers in left hemisphere such that no marker
    # should appear in the right hemisphere when plotting
    display = plot_glass_brain(None, display_mode='lyrz')
    display.add_markers([[-20, 20, 20], [-10, 10, 10]],
                        marker_color=['r', 'b'])
    # Check that Axe 'r' has no marker
    assert (display.axes['r'].ax.collections[0].get_offsets().data.shape == (
        0, 2))
    # Check that all other Axes have two markers
    for d in 'lyz':
        assert (display.axes[d].ax.collections[0].get_offsets().data.shape == (
            2, 2))
Example #17
def __main__():
    volume = image.index_img("E:\\Users\\Niall\\Documents\\Computer Science\\FinalYearProject\\data\\ds105_raw\\ds105\\sub001\\BOLD\\task001_run001\\bold.nii.gz", 0)
    smoothed_img = image.smooth_img(volume, fwhm=5)

    # print("Read the images");

    plotting.plot_glass_brain(volume, title='plot_glass_brain',
    black_bg=True, display_mode='xz')
    plotting.plot_glass_brain(volume, title='plot_glass_brain',
    black_bg=False, display_mode='xz')

    plt.show()

    # print("Finished");



    # generate some numbers
    t = np.linspace(1, 10, 2000)  # 2000 points between 1 and 10

    #plot the graph
    plt.plot(t, np.cos(t))
    plt.ylabel('Subject Response')
    plt.show()
Example #18
def density_by_voxel_plot(locs, r=20, vox_size=4, outfile=None, save_nii=None):

    sub_nii = se.load('std', vox_size=vox_size)
    sub_locs = sub_nii.get_locs().values

    point_tree = spatial.cKDTree(locs)
    density_locs = np.array([])

    for l in sub_locs:
        density_locs = np.append(
            density_locs,
            np.divide(len(point_tree.query_ball_point(l, r)),
                      np.shape(locs)[0]))

    bo_nii = se.Brain(data=np.atleast_2d(density_locs), locs=sub_locs)
    nii_bo = se.helpers._brain_to_nifti(bo_nii, sub_nii)
    ni_plt.plot_glass_brain(nii_bo,
                            colorbar=True,
                            threshold=None,
                            vmax=.1,
                            vmin=0,
                            display_mode='lyrz')

    if save_nii:
        nii_bo.save(save_nii)

    if outfile is not None:
        plt.savefig(outfile)
    else:
        plt.show()
Example #19
def univar_stats(Y, X, path_prefix, mask_img):
    contrasts = [1] + [0] * (X.shape[1] - 1)
    mod = mulm.MUOLS(Y, X)
    tvals, pvals, df = mod.fit().t_test(contrasts, pval=True, two_tailed=True)

    print([[thres,
            np.sum(pvals < thres),
            np.sum(pvals < thres) / pvals.size]
           for thres in 10.**np.array([-4, -3, -2])])
    # {'voxsize': 1.5, 'smoothing': 0, 'target': 'dx_num'}
    # [[0.0001, 23068, 0.058190514149063371], [0.001, 47415, 0.11960738808643315], [0.01, 96295, 0.24291033292804132]]

    mask_arr = mask_img.get_fdata() != 0  # boolean mask derived from mask_img
    tstat_arr = np.zeros(mask_arr.shape)
    pvals_arr = np.zeros(mask_arr.shape)

    pvals_arr[mask_arr] = -np.log10(pvals[0])
    tstat_arr[mask_arr] = tvals[0]

    pvals_img = nibabel.Nifti1Image(pvals_arr, affine=mask_img.affine)
    pvals_img.to_filename(path_prefix + "_log10pvals.nii.gz")

    tstat_img = nibabel.Nifti1Image(tstat_arr, affine=mask_img.affine)
    tstat_img.to_filename(path_prefix + "_tstat.nii.gz")

    threshold = 3
    fig = plt.figure(figsize=(13.33, 7.5 * 4))
    ax = fig.add_subplot(411)
    ax.set_title("-log pvalues >%.2f" % threshold)
    plotting.plot_glass_brain(pvals_img,
                              threshold=threshold,
                              figure=fig,
                              axes=ax)

    ax = fig.add_subplot(412)
    ax.set_title("T-stats T>%.2f" % threshold)
    plotting.plot_glass_brain(tstat_img,
                              threshold=threshold,
                              figure=fig,
                              axes=ax)

    ax = fig.add_subplot(413)
    ax.set_title("-log pvalues >%.2f" % threshold)
    plotting.plot_stat_map(pvals_img,
                           colorbar=True,
                           draw_cross=False,
                           threshold=threshold,
                           figure=fig,
                           axes=ax)

    ax = fig.add_subplot(414)
    ax.set_title("T-stats T>%.2f" % threshold)
    plotting.plot_stat_map(tstat_img,
                           colorbar=True,
                           draw_cross=False,
                           threshold=threshold,
                           figure=fig,
                           axes=ax)
    plt.savefig(path_prefix + "_tstat.png")

    return tstat_arr, pvals_arr
Example #20
def save_glassbrain(outputfile, inputlist, binarize=False, colormap="cold_white_hot", parcellation_file=None):
    avgdata=None
    imgshape=None

    for i in inputlist:
        imgdata=load_input(i)
        imgdata[np.isnan(imgdata)]=0
        if binarize:
            imgdata=(imgdata!=0).astype(np.float32)
        if avgdata is None:
            avgdata=imgdata
            imgshape=imgdata.shape
        else:
            if imgshape != imgdata.shape:
                return None
            avgdata+=imgdata
    
    avgdata/=len(inputlist)
    
    if parcellation_file is None:
        refimg=nib.load(inputlist[0])
    else:
        refimg=nib.load(parcellation_file)
        parcvol=refimg.get_fdata()
        avgdata=parcellation_to_volume(avgdata,parcvol)
    
    
    imgavg=nib.Nifti1Image(avgdata,affine=refimg.affine, header=refimg.header)
    plotting.plot_glass_brain(imgavg,output_file=outputfile,cmap=colormap,colorbar=True)
    
    return imgshape
def produce_figures(nii_file, template, type_of_correction, t_thresh, c_thresh, n_cuts):
    """
    Produce the output figures

    Args:
        nii_file: (str) path to the nifti file (generated at previous steps)
        template: (str) path to template used for the stat map plot
        type_of_correction: (str) Can be either FWE or FDR (used only in potential figure titles)
        t_thresh: (str) t value threshold used (used only in potential figure titles)
        c_thresh: (int) cluster minimal size used (used only in potential figure titles)
        n_cuts: (int) number of cuts in fig

    Returns:
        List of path to image files: glass brain, statmap along x, statmap along y, statmap along z
    """
    from nilearn import plotting
    import numpy as np
    from os.path import abspath

    assert type_of_correction in ['FWE', 'FDR'], 'Type of correction must be FWE or FDR'
    if not np.isnan(c_thresh):
        correction = 'Cluster'
    else:
        correction = 'Peak'

    my_title = correction + ' correction ' + type_of_correction + ' Threshold = ' + str(t_thresh)
    if not np.isnan(c_thresh):
        my_title = my_title + ' - min cluster size = ' + str(c_thresh)

    plotting.plot_glass_brain(nii_file,
                              output_file='./glass_brain.png')

    plotting.plot_stat_map(nii_file,
                           display_mode='x',
                           cut_coords=np.linspace(-70, 67, n_cuts),
                           bg_img=template,
                           colorbar=False,
                           draw_cross=True,
                           output_file='./statmap_x.png')

    plotting.plot_stat_map(nii_file,
                           display_mode='y',
                           cut_coords=np.linspace(-104, 69, n_cuts),
                           bg_img=template,
                           colorbar=False,
                           draw_cross=True,
                           output_file='./statmap_y.png')

    plotting.plot_stat_map(nii_file,
                           display_mode='z',
                           cut_coords=np.linspace(-45, 78, n_cuts),
                           bg_img=template,
                           colorbar=False,
                           draw_cross=True,
                           output_file='./statmap_z.png')

    return [abspath('./glass_brain.png'),
            abspath('./statmap_x.png'),
            abspath('./statmap_y.png'),
            abspath('./statmap_z.png')]
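
# Hypothetical usage sketch (paths and threshold values are illustrative; with
# c_thresh set to NaN the peak-level branch above is taken):
if __name__ == '__main__':
    figure_paths = produce_figures('spmT_0001_thresholded.nii',
                                   'mni152_template.nii', 'FWE',
                                   t_thresh=4.54, c_thresh=float('nan'),
                                   n_cuts=8)
    print(figure_paths)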
Example #22
def plot_activation_by_ID(identifier):
	localizer_dataset = datasets.fetch_localizer_contrasts(
		[identifier],
		 n_subjects=2,
		get_tmaps=True)
	localizer_tmap_filename = localizer_dataset.tmaps[1]
	plotting.plot_glass_brain(localizer_tmap_filename,threshold=3)
Example #23
def plot_stat_maps_grid(stat_maps, labels=None, threshold=None):
    '''Plots grid of statistical maps.
    
    Args:
        stat_maps (list): List of nibabel.nifti1.Nifti1Image first level output statistical images.
        labels (list, optional): List of titles for grid images. Defaults to integers (1, 2, 3...)
        threshold (float, optional): Threshold for plotting. If nothing is passed, image will not be
            thresholded.    
    '''
    n = len(stat_maps)
    n_rows = (n - 1) // 4 + 1

    if labels is None:
        labels = [str(i) for i in range(n)]

    fig, ax = plt.subplots(nrows=n_rows,
                           ncols=4,
                           facecolor='k',
                           figsize=(15, 4 * n_rows))

    for cidx, stat_map in enumerate(stat_maps):
        if n_rows == 1:
            axes = ax[cidx]
        else:
            axes = ax[int(cidx / 4)][int(cidx % 4)]
        plotting.plot_glass_brain(stat_map,
                                  colorbar=False,
                                  threshold=threshold,
                                  title=labels[cidx],
                                  axes=axes,
                                  plot_abs=False,
                                  black_bg=True,
                                  display_mode='z')
Example #24
def plotBrain(objIn, how='full', thr=None, **kwargs):
    """
    More complete brain plotting of a Brain_Data instance
    Args:
        obj: (Brain_Data) object to plot
        how: (str) whether to plot a glass brain 'glass', 3 view-multi-slice mni 'mni', or both 'full'
        thr: (str/float) thresholding of image. Can be string for percentage, or float for data units (see Brain_Data.threshold())
        kwargs: optionals args to nilearn plot functions (e.g. vmax)

    """
    if thr:
        obj = objIn.threshold(thr)
    else:
        obj = objIn.copy()

    views = ['x', 'y', 'z']
    coords = [range(-50, 51, 8),
              range(-80, 50, 10),
              range(-40, 71, 9)]  #[-88,-72,-58,-38,-26,8,20,34,46]
    cmap = 'RdBu_r'

    if thr is None:
        print("Plotting unthresholded image")
    elif type(thr) == str:
        print("Plotting top %s of voxels" % thr)
    elif type(thr) == float or type(thr) == int:
        print("Plotting voxels with stat value >= %s" % thr)

    if how == 'full':
        plot_glass_brain(obj.to_nifti(),
                         display_mode='lzry',
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
        for v, c in zip(views, coords):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)['brain'],
                          **kwargs)
    elif how == 'glass':
        plot_glass_brain(obj.to_nifti(),
                         display_mode='lzry',
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
    elif how == 'mni':
        for v, c in zip(views, coords):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)['brain'],
                          **kwargs)
    del obj  #save memory
    return
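
# Hypothetical usage sketch (assumes nltools is installed; the input file name
# is illustrative and is simply wrapped into a Brain_Data instance first):
if __name__ == '__main__':
    from nltools.data import Brain_Data

    example_map = Brain_Data('example_tmap.nii.gz')  # hypothetical t-map file
    plotBrain(example_map, how='glass', thr='95%')   # keep top 5% of voxels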
Example #25
    def openBold(self):
        file = self.onOpen([('NIFTI files', '*.nii.gz'), ('All files', '*')])
        print("anatomy file: " + file)

        bold = image.index_img(file, 0)

        plotting.plot_glass_brain(bold, title='glass_brain',
    black_bg=True, display_mode='ortho')
        plt.show()
Example #26
def test_plot_glass_brain(testdata_3d, tmpdir):  # noqa:F811
    """Smoke tests for plot_glass_brain with colorbar and negative values."""
    img = testdata_3d['img']
    plot_glass_brain(img, colorbar=True, resampling_interpolation='nearest')
    # test plot_glass_brain with negative values
    plot_glass_brain(img,
                     colorbar=True,
                     plot_abs=False,
                     resampling_interpolation='nearest')
def plotDifferenceInDownSample(pid, saveFig=False):
    """
	we plot the original image and the downsampled image (which is used to train the DNN)

	INPUT:
		- pid: publication ID
		- saveFig: should we save the figure
	"""

    orig_img = Get_2d_smoothed_activation(
        np.array(res[res.keys()[pid]]['MNI']), kernelSize)
    down_samp = downsample2d(orig_img, downsampleSize)

    # plot original:
    plotting.plot_glass_brain(brainMapLoc + 'trial_name.nii.gz',
                              display_mode='z',
                              threshold=1,
                              title=title_)
    my_arr_mask = np.ma.masked_where(orig_img < .001, orig_img)

    cmap = plt.cm.YlOrRd
    cmap.set_bad(color='white')
    limits = 100  # 90
    plt.imshow(my_arr_mask,
               extent=(-1 * limits, limits, -1 * limits, limits),
               cmap=cmap,
               vmin=min(0, thres))  #, vmax=my_arr.max())

    if saveFig:
        os.chdir(figureLoc)
        plt.savefig('orig_img_' + str(pid) + '.png')

    # plot downsampled:
    plotting.plot_glass_brain(brainMapLoc + 'trial_name.nii.gz',
                              display_mode='z',
                              threshold=1,
                              title=title_)
    my_arr_mask = np.ma.masked_where(down_samp < .001, down_samp)

    cmap = plt.cm.YlOrRd
    cmap.set_bad(color='white')
    limits = 100  # 90
    plt.imshow(my_arr_mask,
               extent=(-1 * limits, limits, -1 * limits, limits),
               cmap=cmap,
               vmin=min(0, thres))  #, vmax=my_arr.max())

    if saveFig:
        os.chdir(figureLoc)
        plt.savefig('downsamp_img_' + str(pid) + '.png')

        # combine both figures:
        os.system('convert +append orig_img_' + str(pid) +
                  '.png downsamp_img_' + str(pid) + '.png preproc_pid_' +
                  str(pid) + '.png')
        os.system('rm orig_img_' + str(pid) + '.png')
        os.system('rm downsamp_img_' + str(pid) + '.png')
Example #28
 def visualize(self, c=None):
     if c is not None:
         # plot_stat_map(c)
         # show()
         plotting.plot_glass_brain(c)
         show()
     else:
         # plotting.plot_stat_map('sub-320/ses-FU/anat/sub-320_ses-FU_T1w.nii.gz')
         plotting.plot_stat_map(
             'mri_data/sub-/ses-FU/anat/sub-133_ses-FU_T1w.nii.gz')
         show()
def plot_coeffs(coeffs, affine, neg_disp=.8, pos_disp=.8, **kwargs):

    def default_cmap():
        max_neg_coeff = np.abs(np.min(coeffs))
        max_pos_coeff = np.max(coeffs)
        max_coeff = np.max((max_neg_coeff, max_pos_coeff))

        dev = 0.5

        neg_dev = dev * max_neg_coeff/max_coeff
        pos_dev = dev * max_pos_coeff/max_coeff

        zero = 0.5
        max_neg = zero - neg_dev
        max_pos = zero + pos_dev

        na_clr = .5
        na_start = (0.0, na_clr, na_clr)
        na_end = (1.0, na_clr, na_clr)

        blue_red_bp_map = {
            'red': (
                na_start,
                (max_neg, na_clr, 0.0),
                (zero, 0.0, 1.0),
                (max_pos, 1.0, na_clr),
                na_end
            ),
            'blue': (
                na_start,
                (max_neg, na_clr, 0.0),
                (zero - neg_disp*neg_dev, 1.0, 1.0),
                (zero, 1.0, 0.0),
                (max_pos, 0.0, na_clr),
                na_end
            ),
            'green': (
                na_start,
                (max_neg, na_clr, 1.0),
                (zero - neg_disp*neg_dev, 1.0, 1.0),
                (zero, 0.0, 0.0),
                (zero + pos_disp*pos_dev, pos_disp, pos_disp),
                (max_pos, 1.0, na_clr),
                na_end
            )
            }

        name = 'BlueRedBiPolar'
        return LinearSegmentedColormap(name, blue_red_bp_map)

    img = nib.Nifti1Image(coeffs, affine=affine)
    kwargs['cmap'] = default_cmap()
    plot_glass_brain(img, **kwargs)
Example #30
def ComparePredictionl1Network( im_id ):
	"""
	compare predictions for a given abstract 
	INPUT:
		- im_id: abstract id (starting at 0, ending at 8096)
	"""

	# plot true activation first:
	os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')
	plotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='True')
	bimage_true = myData[im_id]['image'].reshape((20,20))
	my_arr_mask = np.ma.masked_where(bimage_true < 0.05, bimage_true)
	cmap = plt.cm.YlOrRd
	cmap.set_bad(color='white')
	limits = 100 # 90 
	plt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())
	os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')
	plt.savefig('activation_id' + str(im_id) + '_true.png')

	# plot l1 network activation next:
	os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')
	plotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='L1')
	wvec = Variable( torch.from_numpy( test_loader.dataset[im_id]['wordVector'] ).float())
	wvec = wvec.resize(1,200)
	bimage = net( wvec ).data.numpy().reshape((20,20))
	my_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)
	cmap = plt.cm.YlOrRd
	cmap.set_bad(color='white')
	limits = 100 # 90 
	plt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())
	os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')
	plt.savefig('activation_id' + str(im_id) + '_l1.png')

	# finally, the L2 network
	#os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')
	#plotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='L2')
	#wvec = Variable( torch.from_numpy( test_loader.dataset[im_id]['wordVector'] ).float())
	#wvec = wvec.resize(1,200)
	#bimage = net_l2( wvec ).data.numpy().reshape((20,20))
	#my_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)
	#cmap = plt.cm.YlOrRd
	#cmap.set_bad(color='white')
	#limits = 100 # 90 
	#plt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())
	#os.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')
	#plt.savefig('activation_id' + str(im_id) + '_l2.png')

	#os.system('convert +append activation_id'+str(im_id)+'_true.png activation_id'+str(im_id)+'_l1.png activation_id'+str(im_id)+'_l2.png res_id'+str(im_id)+'.png')
	os.system('convert +append activation_id'+str(im_id)+'_true.png activation_id'+str(im_id)+'_l1.png PredictionComparison_id'+str(im_id)+'.png')
	# remove older files
	os.system('rm *_true.png')
	os.system('rm *_l1.png')
Example #31
 def _visualize(self, data, out_name):
     vmax = self.inputs.vmax
     if not isdefined(vmax):
         vmax = None
     nlp.plot_glass_brain(data,
                          colorbar=True,
                          plot_abs=False,
                          display_mode='lyrz',
                          axes=None,
                          vmax=vmax,
                          threshold=self.inputs.threshold,
                          cmap=self.inputs.colormap,
                          output_file=out_name)
Example #32
 def glass_brains(self, data, data_type, map_index):
     data, title, threshold, output_folder = self.prepare_plots(
         data, data_type, map_index, "Glass_Brain")
     plotting.plot_glass_brain(
         stat_map_img=data,
         black_bg=True,
         plot_abs=False,
         display_mode='lzry',
         title=title,
         threshold=threshold,
         annotate=True,
         output_file=(output_folder + 'feature_' + str(map_index) + '-' +
                      self.category + '_category.png')
     )  # Plot feature map using nilearn glass brain - Original threshold = (mean_value + (std_value*2))
def do_single_subject(rootdir, subj, matrices):
    fmri_filenames = get_fmri_files_from_subject(rootdir, subj)
    fmri_runs = [masker.transform(f) for f in fmri_filenames]

    loglabel = subj
    logcsvwriter = csv.writer(open("test.log", "a+"))

    r2train, r2test = compute_crossvalidated_r2(fmri_runs, matrices, loglabel,
                                                logcsvwriter)

    r2train_img = masker.inverse_transform(r2train)
    nib.save(r2train_img, f'train_{subj:03d}.nii.gz')

    r2test_img = masker.inverse_transform(r2test)
    nib.save(r2test_img, f'test_{subj:03d}.nii.gz')

    # compute the increase in R2 due to each predictor
    # for reg in range(MATRICES[0].shape[1]):
    #     """ remove one predictor from the design matrix in test to compare it with the full model """
    #     new_design_matrices = [np.delete(mtx, reg, 1) for mtx in MATRICES]
    #     r2train_dropped, r2test_dropped = compute_crossvalidated_r2(fmri_runs, new_design_matrices, loglabel, logcsvwriter)

    #     nib.save(masker.inverse_transform(r2train_dropped),
    #              'train_dropping_{}_{:03}.nii'.format(reg,subject))

    #     nib.save(masker.inverse_transform(r2test_dropped),
    #              'test_dropping_{}_{:03}.nii'.format(reg,subject))

    #     r2train_difference = r2train - r2train_dropped
    #     nib.save(masker.inverse_transform(r2train_difference),
    #              'train_r2_increase_when_adding_{}_{:03}.nii'.format(reg,subject))

    #     r2test_difference = r2test - r2test_dropped
    #     nib.save(masker.inverse_transform(r2test_difference),
    #              'test_r2_increase_when_adding_{}_{:03}.nii'.format(reg,subject))

    display = plot_glass_brain(r2test_img,
                               display_mode='lzry',
                               threshold=0,
                               colorbar=True)
    display.savefig(f'test_{subj:03}.png')
    display.close()

    display = plot_glass_brain(r2train_img,
                               display_mode='lzry',
                               threshold=0,
                               colorbar=True)
    display.savefig(f'train_{subj:03}.png')
    display.close()
Example #34
def plot_brainrsa_glass(img, threshold=None, type='r'):
    """
    Plot the 2-D projection of the RSA-result

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting of more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    type : string 'r' or 't'
        The type of result (r-values or t-values).
    """

    imgarray = nib.load(img).get_fdata()

    if np.isnan(imgarray).all():

        print("No Valid Results")

    else:
        if threshold is not None:

            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)

        if type == 'r':
            plotting.plot_glass_brain(img,
                                      colorbar=True,
                                      title="Similarity",
                                      black_bg=True,
                                      draw_cross=True,
                                      vmax=1)
        if type == 't':
            plotting.plot_glass_brain(img,
                                      colorbar=True,
                                      title="Similarity",
                                      black_bg=True,
                                      draw_cross=True,
                                      vmax=7)

        plt.show()

    return 0
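
# Hypothetical usage sketch (the file name is illustrative; threshold is the
# minimum cluster size in voxels described in the docstring):
if __name__ == '__main__':
    plot_brainrsa_glass('rsa_r_map.nii', threshold=10, type='r')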
Example #35
def generate_glassbrain_image(image_pk):
    from neurovault.apps.statmaps.models import Image
    import neurovault
    import matplotlib as mpl
    mpl.rcParams['savefig.format'] = 'jpg'
    my_dpi = 50
    fig = plt.figure(figsize=(330.0/my_dpi, 130.0/my_dpi), dpi=my_dpi)
    
    img = Image.objects.get(pk=image_pk)    
    f = BytesIO()
    try:
        glass_brain = plot_glass_brain(img.file.path, figure=fig)
        glass_brain.savefig(f, dpi=my_dpi)
    except:
        # Glass brains that cannot be produced are given a dummy image
        this_path = os.path.abspath(os.path.dirname(__file__))
        f = open(os.path.abspath(os.path.join(this_path,
                                              "static","images","glass_brain_empty.jpg"))) 
        raise
    finally:
        plt.close('all')
        f.seek(0)
        content_file = ContentFile(f.read())
        img.thumbnail.save("glass_brain_%s.jpg" % img.pk, content_file)
        img.save()
Example #36
    def plot_reconstruction_diff(self, block, filename='', show=True, t=0,
                                 plot_abs=False, labeler=lambda b: None,
                                 zscore_bound=3, **kwargs):
        if filename == '' and t is None:
            filename = '%s-%s_ntfa_reconstruction_diff.pdf'
            filename = filename % (self.common_name(), str(block))
        elif filename == '':
            filename = '%s-%s_ntfa_reconstruction_diff_tr%d.pdf'
            filename = filename % (self.common_name(), str(block), t)

        image_slice, diff = self.reconstruction_diff(block, t=t,
                                                     zscore_bound=zscore_bound)
        plot = niplot.plot_glass_brain(
            image_slice, plot_abs=plot_abs, colorbar=True, symmetric_cbar=False,
            title=utils.title_brain_plot(block, self._blocks[block], labeler, t,
                                         'Squared Residual'),
            vmin=0, vmax=zscore_bound ** 2, **kwargs,
        )

        logging.info(
            'Reconstruction Error (Frobenius Norm): %.8e out of %.8e',
            np.linalg.norm(diff.sqrt().numpy()),
            np.linalg.norm(self.voxel_activations[block].numpy())
        )

        if filename is not None:
            plot.savefig(filename)
        if show:
            niplot.show()

        return plot
Example #37
def generate_glassbrain_image(image_pk):
    from neurovault.apps.statmaps.models import Image
    import matplotlib as mpl
    mpl.rcParams['savefig.format'] = 'jpg'
    my_dpi = 50
    fig = plt.figure(figsize=(330.0 / my_dpi, 130.0 / my_dpi), dpi=my_dpi)

    img = Image.objects.get(pk=image_pk)
    f = BytesIO()
    try:
        glass_brain = plot_glass_brain(img.file.path, figure=fig)
        glass_brain.savefig(f, dpi=my_dpi)
    except:
        # Glass brains that cannot be produced are given a dummy image
        this_path = os.path.abspath(os.path.dirname(__file__))
        f = open(
            os.path.abspath(
                os.path.join(this_path, "static", "images",
                             "glass_brain_empty.jpg")))
        raise
    finally:
        plt.close('all')
        f.seek(0)
        content_file = ContentFile(f.read())
        img.thumbnail.save("glass_brain_%s.jpg" % img.pk, content_file)
        img.save()
Example #38
def make_glassbrain_image(nifti_file,png_img_file=None):
    """Make glassbrain image, optional save image to png file (not vector)"""
    nifti_file = str(nifti_file)
    glass_brain = plot_glass_brain(nifti_file)
    if png_img_file:    
        glass_brain.savefig(png_img_file)
    plt.close('all')
    return glass_brain
Example #39
def generate_images(components_img, n_components, images_dir, glass=False):
    # Remove existing images
    if os.path.exists(images_dir):
        shutil.rmtree(images_dir)
    os.makedirs(images_dir)
    output_filenames = [osp.join(images_dir, 'IC_{}.png'.format(i))
                        for i in range(n_components)]

    for i, output_file in enumerate(output_filenames):
        plot_stat_map(nibabel.Nifti1Image(components_img.get_data()[..., i],
                                          components_img.get_affine()),
                      display_mode="z", title="IC %d" % i, cut_coords=7,
                      colorbar=False, output_file=output_file)
    if glass:
        output_filenames = [osp.join(images_dir, 'glass_IC_{}.png'.format(i))
                            for i in range(n_components)]
        for i, output_file in enumerate(output_filenames):
            plot_glass_brain(nibabel.Nifti1Image(
                components_img.get_data()[..., i],
                components_img.get_affine()),
                display_mode="ortho", title="IC %d" % i,
                output_file=output_file)
Example #40
def glassbrain_allcontrasts(path, title, mode='uncorrected',
    cluster_threshold=50):
    ''' For each SPM contrast from a Nipype workflow (`path` points to the base
    directory), generates a glass brain figure with the corresponding
    thresholded map.

    `mode` can be either 'uncorrected' (p<0.001, T>3.1, F>4.69)
                      or 'FWE' (p<0.05, T>4.54, F>8.11).
    `title` is the title displayed on the plot.'''

    nodes = [pickle.load(gzip.open(osp.join(path, e, '_node.pklz'), 'rb'))
        for e in ['modeldesign', 'estimatemodel','estimatecontrasts']]
    _, _, node = nodes

    spm_mat_file = osp.join(node.output_dir(), 'SPM.mat')
    for i in range(1, len(node.inputs.contrasts)+1):
        output_dir = osp.join(path, node._id)

        img = glob(osp.join(output_dir, 'spm*_00%02d.nii'%i))[0]
        contrast_type = osp.split(img)[-1][3]
        print(img, contrast_type)
        contrast_name = node.inputs.contrasts[i-1][0]

        thresholded_map1, threshold1 = map_threshold(img, threshold=0.001,
            cluster_threshold=cluster_threshold)
        if mode == 'uncorrected':
            threshold1 = 3.106880 if contrast_type == 'T' else 4.69
            pval_thresh = 0.001
        elif mode == 'FWE':
            threshold1 = 4.5429 if contrast_type == 'T' else 8.1101
            pval_thresh = 0.05

        plotting.plot_glass_brain(thresholded_map1, colorbar=True, black_bg=True,
            display_mode='ortho', threshold=threshold1,
            title='(%s) %s - %s>%.02f - p<%s (%s)'
            %(title, contrast_name, contrast_type, threshold1, pval_thresh,
            mode))
Example #41
def create_glassbrain_image(self, mlmodel_id):
    from nilearn.plotting import plot_glass_brain
    import pylab as plt

    model = MLModel.query.get(mlmodel_id)
    if not model:
        return

    my_dpi = 50
    fig = plt.figure(figsize=(330.0/my_dpi, 130.0/my_dpi), dpi=my_dpi)

    output_dir = model_dir(mlmodel_id)
    stat_map_img = os.path.join(output_dir, model.output_data['weightmap'])

    glass_brain = plot_glass_brain(stat_map_img, figure=fig)

    glass_brain_filename = 'glassbrain.png'
    glass_brain_path = os.path.join(output_dir, glass_brain_filename)
    glass_brain.savefig(glass_brain_path, dpi=my_dpi)

    model.output_data = dict(glassbrain=glass_brain_filename,
                             **model.output_data)
    db.session.commit()
Example #42
p001_unc = norm.isf(0.001)

############################################################################
# Prepare figure for concurrent plot of individual maps
from nilearn import plotting
import matplotlib.pyplot as plt

fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(8, 4.5))
model_and_args = zip(models, models_run_imgs, models_events, models_confounds)
for midx, (model, imgs, events, confounds) in enumerate(model_and_args):
    # fit the GLM
    model.fit(imgs, events, confounds)
    # compute the contrast of interest
    zmap = model.compute_contrast('language-string')
    plotting.plot_glass_brain(zmap, colorbar=False, threshold=p001_unc,
                              title=('sub-' + model.subject_label),
                              axes=axes[int(midx / 5), int(midx % 5)],
                              plot_abs=False, display_mode='x')
fig.suptitle('subjects z_map language network (unc p<0.001)')
plotting.show()

#########################################################################
# Second level model estimation
# -----------------------------
# We just have to provide the list of fitted FirstLevelModel objects
# to the SecondLevelModel object for estimation. We can do this because
# all subjects share a similar design matrix (same variables reflected in
# column names)
from nistats.second_level_model import SecondLevelModel
second_level_input = models
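
#########################################################################
# A minimal sketch of the estimation step described above (the contrast name
# 'language-string' comes from the first-level models in this example; the
# smoothing value is illustrative):
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input)
zmap_group = second_level_model.compute_contrast(
    first_level_contrast='language-string')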

#########################################################################
###############################################################################
# Let us now retrieve a motor task contrast map
# corresponding to a group one-sample t-test

motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]
# stat_img is just the name of the file that we downloaded
print(stat_img)

###############################################################################
# Demo glass brain plotting
# --------------------------
from nilearn import plotting

# Whole brain sagittal cuts and map is thresholded at 3
plotting.plot_glass_brain(stat_img, threshold=3)


###############################################################################
# With a colorbar
plotting.plot_glass_brain(stat_img, threshold=3, colorbar=True)


###############################################################################
# Black background, and only the (x, z) cuts
plotting.plot_glass_brain(stat_img, title='plot_glass_brain',
                          black_bg=True, display_mode='xz', threshold=3)


###############################################################################
# Plotting statistical maps
plotting.plot_stat_map(localizer.cmaps[3], bg_img=localizer.anats[3],
                       threshold=3, title="plot_stat_map",
                       cut_coords=(36, -27, 66))

# Plotting anatomical maps
plotting.plot_anat(haxby.anat[0], title="plot_anat")

# Plotting ROIs (here the mask)
plotting.plot_roi(haxby.mask_vt[0], bg_img=haxby.anat[0], title="plot_roi")

# Plotting EPI haxby
mean_haxby_img = image.mean_img(haxby.func[0])
plotting.plot_epi(mean_haxby_img, title="plot_epi")

# Plotting glass brain
plotting.plot_glass_brain(localizer.tmaps[3], title='plot_glass_brain',
                          threshold=3)

plotting.plot_glass_brain(localizer.tmaps[3], title='plot_glass_brain',
                          black_bg=True, display_mode='xz', threshold=3)

###############################################################################
# demo the different display_mode

plotting.plot_stat_map(localizer.cmaps[3], display_mode='ortho',
                       cut_coords=(36, -27, 60),
                       title="display_mode='ortho', cut_coords=(36, -27, 60)")

plotting.plot_stat_map(localizer.cmaps[3], display_mode='z', cut_coords=5,
                       title="display_mode='z', cut_coords=5")

plotting.plot_stat_map(localizer.cmaps[3], display_mode='x', cut_coords=(-36, 36),
                       title="display_mode='x', cut_coords=(-36, 36)")


###############################################################################
# Retrieve the data
from nilearn import datasets

localizer_dataset = datasets.fetch_localizer_button_task()
localizer_tmap_filename = localizer_dataset.tmaps[0]

###############################################################################
# Demo glass brain plotting.
from nilearn import plotting

# Whole brain sagittal cuts
plotting.plot_glass_brain(localizer_tmap_filename, threshold=3)


###############################################################################
# With a colorbar
plotting.plot_glass_brain(localizer_tmap_filename, threshold=3, colorbar=True)


###############################################################################
# Black background, and only the (x, z) cuts
plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
                          black_bg=True, display_mode='xz', threshold=3)


###############################################################################
# Plotting the sign of the activation
Example #46
    maskfile = "/home/edogerde/Bureau/Atlas_plt_roi"
    when =[163, 164, 101, 61, 46, 62, 47, 68]
    heure = [163, 102, 164, 61, 68, 67, 62, 44, 48]
    Journalier = [161, 162, 47, 45, 101, 102, 163, 48, 62, 166]
    composante2 = [163, 101,102, 61, 67, 68, 62, 48, 47, 46, 43, 161, 89, 90, 165,166]


    mask, affine = get_roi_mask(roi_nii, 1.)
    mask = np.zeros(mask.shape[0:3])
    labels = when
    network_name = 'network'
    for label_number in labels:
        m, _ = get_roi_mask(roi_nii, label_number)
        mask += m

    print(mask.shape)
    print(np.sum(mask))
    elo = nib.Nifti1Image(mask.astype(np.int8), affine)
    nifti_name = os.path.join(maskfile, "%s.nii" % (network_name))
    nib.save(elo, nifti_name)

    plot_name = os.path.join(maskfile, "%s.png" % (network_name))
    plotting.plot_glass_brain(nifti_name, threshold=0.1, display_mode='ortho', cmap="rainbow")
    plt.show()




""" for NewA in NEWA:
A = np.logical_or(A, NewA)"""
Example #47
###############################################################################
# Retrieve data from Internet
# ---------------------------

from nilearn import datasets

motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]

###############################################################################
# Glass brain plotting: whole brain sagittal cuts
# -----------------------------------------------

from nilearn import plotting

plotting.plot_glass_brain(stat_img, threshold=3)

###############################################################################
# Glass brain plotting: black background
# -------------------------------------
# On a black background (option "black_bg"), and with only the x and
# the z view (option "display_mode").
plotting.plot_glass_brain(
    stat_img, title='plot_glass_brain',
    black_bg=True, display_mode='xz', threshold=3)

###############################################################################
# Glass brain plotting: Hemispheric sagittal cuts
# -----------------------------------------------
plotting.plot_glass_brain(stat_img,
                          title='plot_glass_brain with display_mode="lyrz"',
                          display_mode='lyrz')
# If you want to only show positive values (or values larger than a certain
# threshold value) then you want to use thr_pos.
else:

    threshold = thr_pos

    # We need to set all the data that we don't want to see to equal thr_pos
    data[data<thr_pos] = thr_pos

#===============================================================================
# Plot away!
#===============================================================================
slicer = plotting.plot_glass_brain(img,
                                    threshold=threshold,
                                    plot_abs=False,
                                    symmetric_cbar=symmetric_cbar,
                                    vmin=lower_thresh,
                                    vmax=upper_thresh,
                                    cmap=cmap,
                                    colorbar=colorbar,
                                    display_mode=display_mode,
                                    black_bg=black_bg)

#===============================================================================
# Save the figure
#===============================================================================
output_file = os.path.join(stats_file.rsplit('.nii', 1)[0]
                           + '_GlassBrain_{}.png'.format(display_mode))

slicer.savefig(output_file, dpi=dpi)
Example #49
import glob
import numpy as np
import featurespace_fun as fsf
import matplotlib.pyplot as plt
from nilearn.masking import apply_mask
from nilearn.input_data import MultiNiftiMasker
from scipy.stats import norm
from nilearn.plotting import plot_glass_brain
from scipy.stats import ttest_1samp
import sys
from nibabel import load

mask = 'brainmask_group_template.nii.gz'

maps = [sorted(glob.glob('MaThe/maps/mni/model_depcor_{}*subj_*'.format(model))) for model in ['lda', 'speaker', 'emotions']]

valid_subjs = [mp.split('_')[-2] for mp in maps[0]]

for subj, ft_maps in zip(sorted(valid_subjs), zip(*maps)):
    # Empty glass brain onto which one contour per feature space is drawn
    display = plot_glass_brain(None, plot_abs=False, threshold=0.001)
    for ind_ft_map, color in zip(ft_maps, ['r', 'b', 'g']):
        # Contour level: 99.9th percentile of the masked group-average map
        level_thr = np.percentile(apply_mask('MaThe/avg_maps/'+ind_ft_map.split('/')[-1], mask), 99.9)
        display.add_contours(ind_ft_map, colors=[color], levels=[level_thr], alpha=0.6, linewidths=3.0)
    display.savefig('contours_tvals_subj_{}.png'.format(subj))
    plt.close()

# We can convert our morphed source estimate into a NIfTI volume using
# :meth:`morph.apply(..., output='nifti1') <mne.SourceMorph.apply>`.

# Create mri-resolution volume of results
img_fsaverage = morph.apply(stc, mri_resolution=2, output='nifti1')

###############################################################################
# Plot results
# ------------

# Load fsaverage anatomical image
t1_fsaverage = nib.load(fname_t1_fsaverage)

# Plot glass brain (change to plot_anat to display an overlaid anatomical T1)
display = plot_glass_brain(t1_fsaverage,
                           title='subject results to fsaverage',
                           draw_cross=False,
                           annotate=True)

# Add functional data as overlay
display.add_overlay(img_fsaverage, alpha=0.75)


###############################################################################
# Reading and writing SourceMorph from and to disk
# ------------------------------------------------
#
# An instance of SourceMorph can be saved by calling
# :meth:`morph.save <mne.SourceMorph.save>`.
#
# This method allows specification of a filename under which the ``morph``
# will be saved in ".h5" format. If no file extension is provided, "-morph.h5"
# will be appended to the filename.
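# A minimal sketch (the file stem 'fsaverage_vol' is hypothetical):
import mne

morph.save('fsaverage_vol')  # written as 'fsaverage_vol-morph.h5'
morph_reloaded = mne.read_source_morph('fsaverage_vol-morph.h5')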
data = fetch_localizer_contrasts(["left vs right button press"], n_subjects,
                                 get_tmaps=True)

###########################################################################
# Display subject t_maps
# ----------------------
# We plot a grid with all the subjects' t-maps thresholded at t = 2 for
# simple visualization purposes. The button-press effect is visible in
# all subjects.
from nilearn import plotting
import matplotlib.pyplot as plt
subjects = [subject_data[0] for subject_data in data['ext_vars']]
fig, axes = plt.subplots(nrows=4, ncols=4)
for cidx, tmap in enumerate(data['tmaps']):
    plotting.plot_glass_brain(tmap, colorbar=False, threshold=2.0,
                              title=subjects[cidx],
                              axes=axes[int(cidx / 4), int(cidx % 4)],
                              plot_abs=False, display_mode='z')
fig.suptitle('subjects t_map left-right button press')
plt.show()

############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
second_level_input = data['cmaps']
design_matrix = pd.DataFrame([1] * len(second_level_input),
                             columns=['intercept'])
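############################################################################
# (Sketch, not part of the original snippet: the model would then be fit and
# an intercept z-map computed, as also shown further below.)
from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel().fit(
    second_level_input, design_matrix=design_matrix)
z_map = second_level_model.compute_contrast(output_type='z_score')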

############################################################################
    columns=['vertical vs horizontal'] + subjects)

############################################################################
# plot the design_matrix
from nistats.reporting import plot_design_matrix
plot_design_matrix(design_matrix)

############################################################################
# formally specify the analysis model and fit it
from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel().fit(
    second_level_input, design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast('vertical vs horizontal',
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast and plot it
threshold = 3.1  # corresponds to p < .001, uncorrected
display = plotting.plot_glass_brain(
    z_map, threshold=threshold, colorbar=True, plot_abs=False,
    title='vertical vs horizontal checkerboard (unc p<0.001)')
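###########################################################################
# (Sketch, not in the original example: the 3.1 cutoff is simply the normal
# quantile for an uncorrected p < .001.)
from scipy.stats import norm
print(norm.isf(0.001))  # ~3.09, usually rounded to 3.1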

###########################################################################
# Unsurprisingly, we see activity in the primary visual cortex, both positive and negative.

plotting.show()
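##############################################################################
# (Sketch: the call creating ``view`` was cut from this snippet; an
# interactive viewer could be built from the z-map above, for example:)
view = plotting.view_img(z_map, threshold=threshold)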
# uncomment this to open the plot in a web browser:
# view.open_in_browser()

##############################################################################
# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell

view

###############################################################################
# Plotting statistical maps in a glass brain with function `plot_glass_brain`
# ---------------------------------------------------------------------------
#
# Now the t-map image is projected onto a glass brain representation, in
# which the glass brain is always a fixed background template
plotting.plot_glass_brain(stat_img, title='plot_glass_brain',
                          threshold=3)

###############################################################################
# Plotting anatomical images with function `plot_anat`
# -----------------------------------------------------
#
# Visualizing anatomical image of haxby dataset
plotting.plot_anat(haxby_anat_filename, title="plot_anat")

###############################################################################
# Plotting ROIs (here the mask) with function `plot_roi`
# -------------------------------------------------------
#
# Visualizing the ventral temporal region mask from the haxby dataset,
# overlaid on the subject-specific anatomical image, with cut coordinates
# positioned automatically on the region of interest (ROI)
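# (Sketch; the plot_roi call itself was cut from this snippet. A
#  self-contained version using the nilearn haxby fetcher:)
from nilearn import datasets
haxby_dataset = datasets.fetch_haxby()
plotting.plot_roi(haxby_dataset.mask_vt[0], bg_img=haxby_dataset.anat[0],
                  title="plot_roi")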
import os
from nilearn.plotting import plot_glass_brain

def gen_jpegs(nii_dir):
    """Render a glass-brain PNG next to every NIfTI file in nii_dir."""
    niis = [f for f in os.listdir(nii_dir) if f.endswith('.nii')]
    for nii in niis:
        png = os.path.splitext(nii)[0] + '.png'
        plot_glass_brain(os.path.join(nii_dir, nii),
                         output_file=os.path.join(nii_dir, png))
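# Example usage (hypothetical directory):
# gen_jpegs('/data/first_level_contrasts')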
# from a localizer experiment
tmap_filenames = datasets.fetch_localizer_button_task()["tmaps"]
print(tmap_filenames)

###############################################################################
# tmap_filenames is returned as a list. We need to take the first one
tmap_filename = tmap_filenames[0]


###############################################################################
# Demo glass brain plotting
# --------------------------
from nilearn import plotting

# Whole-brain sagittal cuts, with the map thresholded at 3
plotting.plot_glass_brain(tmap_filename, threshold=3)


###############################################################################
# With a colorbar
plotting.plot_glass_brain(tmap_filename, threshold=3, colorbar=True)


###############################################################################
# Black background, and only the (x, z) cuts
plotting.plot_glass_brain(tmap_filename, title="plot_glass_brain",
                          black_bg=True, display_mode="xz", threshold=3)


###############################################################################
# Plotting the sign of the activation by setting plot_abs to False
plotting.plot_glass_brain(tmap_filename, threshold=0, colorbar=True, plot_abs=False)
"""
Glass brain plotting in nilearn
===============================

See :ref:`plotting` for more plotting functionalities.
"""

from nilearn import datasets
from nilearn import plotting

###############################################################################
# Retrieve the data

localizer_dataset = datasets.fetch_localizer_contrasts(
    ["left vs right button press"],
    n_subjects=2,
    get_tmaps=True)
localizer_tmap_filename = localizer_dataset.tmaps[1]

###############################################################################
# demo glass brain plotting

plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
                          threshold=3)

plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
                          black_bg=True, display_mode='xz', threshold=3)

import matplotlib.pyplot as plt
plt.show()
Example #57
0
def plot_activation_by_data(localizer_data):
    plotting.plot_glass_brain(localizer_data, threshold=3)
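# Example usage, reusing the localizer t-map loaded above:
# plot_activation_by_data(localizer_tmap_filename)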
Example #58
0
localizer_dataset = datasets.fetch_localizer_contrasts(
    ["left vs right button press"],
    n_subjects=2,
    get_anats=True,
    get_tmaps=True)
localizer_anat_filename = localizer_dataset.anats[1]
localizer_cmap_filename = localizer_dataset.cmaps[1]
localizer_tmap_filename = localizer_dataset.tmaps[1]

###############################################################################
# demo the different plotting functions

# Plotting statistical maps
plotting.plot_stat_map(localizer_cmap_filename, bg_img=localizer_anat_filename,
                       threshold=3, title="plot_stat_map",
                       cut_coords=(36, -27, 66))

# Plotting glass brain
plotting.plot_glass_brain(localizer_tmap_filename, title='plot_glass_brain',
                          threshold=3)

# Plotting anatomical maps
plotting.plot_anat(haxby_anat_filename, title="plot_anat")

# Plotting ROIs (here the mask)
plotting.plot_roi(haxby_mask_filename, bg_img=haxby_anat_filename,
                  title="plot_roi")

# Plotting EPI haxby
mean_haxby_img = image.mean_img(haxby_func_filename)
plotting.plot_epi(mean_haxby_img, title="plot_epi")

import matplotlib.pyplot as plt
plt.show()
for con_files in zip(*con_images):
    # Create one sample T-Test Design
    onesamplettestdes = mem.cache(OneSampleTTestDesign)
    out_onesamplettestdes = onesamplettestdes(
        in_files=list(con_files))

    # Estimate the parameters of the model
    level2estimate = mem.cache(spm.EstimateModel)
    out_level2estimate = level2estimate(
        estimation_method={'Classical': 1},
        spm_mat_file=out_onesamplettestdes.outputs.spm_mat_file)

    # Estimate group contrast
    contrast = ['Group', 'T', ['mean'], [1]]
    level2conestimate = mem.cache(spm.EstimateContrast)
    out_level2conestimate = level2conestimate(
        group_contrast=True,
        spm_mat_file=out_level2estimate.outputs.spm_mat_file,
        beta_images=out_level2estimate.outputs.beta_images,
        residual_image=out_level2estimate.outputs.residual_image,
        contrasts=[contrast])
    t_maps.append(out_level2conestimate.outputs.spmT_images)

# Plot thresholded t-maps
from nilearn import plotting
for contrast_name, t_map in zip(contrast_names, t_maps):
    plotting.plot_glass_brain(t_map, threshold=5., title=contrast_name,
                              colorbar=True, plot_abs=False,
                              black_bg=True, display_mode='yz')
plotting.show()

############################################################################