Example No. 1
def createTFCEfMRIOverlayImages(folder, suffix, title='', vmax=8, display_mode='z',
                                slices=range(-20, 50, 10), threshold=0.94999,
                                plotToAxis=False, f=[], axes=[], colorbar=True,
                                tight_layout=False, draw_cross=False, annotate=False):


    TFCEposImg,posImg,TFCEnegImg,negImg=getFileNamesfromFolder(folder,suffix)

    bg_img='./Templates/MNI152_.5mm_masked_edged.nii.gz'
    # threshold=0.949
    pos=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEposImg,threshold=threshold),img2=posImg)
    neg=image.math_img("np.multiply(img1,img2)",
                         img1=image.threshold_img(TFCEnegImg,threshold=threshold),img2=negImg)
    fw=image.math_img("img1-img2",img1=pos,img2=neg)

    if plotToAxis:
        display=plotting.plot_stat_map(fw,display_mode=display_mode,threshold=0,
                                       cut_coords=slices,vmax=vmax,colorbar=colorbar,
                                       bg_img=bg_img,black_bg=False,title=title,dim=0,
                                       figure=f,axes=axes,draw_cross=draw_cross,
                                       annotate=annotate)
    else:
        display=plotting.plot_stat_map(fw,display_mode=display_mode,threshold=0,
        cut_coords=slices,vmax=vmax,colorbar=colorbar,bg_img=bg_img,
        black_bg=False,title=title,dim=0,annotate=annotate)

    if tight_layout:
        display.tight_layout()

    return display
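A minimal usage sketch (the results folder, suffix, and output file below are hypothetical; the function relies on the project's getFileNamesfromFolder helper and the bundled MNI template):

# Hypothetical call on a folder of TFCE outputs whose files share the given suffix.
display = createTFCEfMRIOverlayImages('./results/contrast1', '_tfce.nii.gz',
                                      title='Contrast 1', vmax=6,
                                      display_mode='z', slices=range(-20, 50, 10))
display.savefig('contrast1_overlay.png')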
Example No. 2
def stat_function_tst(conn, prefix='', OUTPUT_PATH=None, threshold=0.05):
    fc = conn.hurst

    tst = Parallel(n_jobs=3, verbose=5)(delayed(ttest_group)(group, threshold, fc)
                                    for group in groups)
    
    if OUTPUT_PATH is None:
        font = {'family' : 'normal',
            'size'   : 20}
        changefont('font', **font)
        gr = ['v', 'av', 'avn']
        for i in range(3):
            title = prefix + '_'.join(groups[i])
            try:
                img = conn.masker.inverse_transform(tst[i])
                print(title)
                plot_stat_map(img, cut_coords=(3, -63, 36))
                plt.show()

            except ValueError:
                print "problem with tst " + title
        changefont.func_defaults
            
    else:
        for i in range(3):
            title = prefix + '_'.join(groups[i])
            output_file = os.path.join(OUTPUT_PATH, title)
            try:
                img = conn.masker.inverse_transform(tst[i])
                plot_stat_map(img, cut_coords=(3, -63, 36), output_file=output_file + '.pdf')
            except ValueError:
                print "problem with tst " + title
def p_map(task, run, p_values_3d, threshold=0.05):
    """
    Generate three thresholded p-value maps.

    Parameters
    ----------
    task: int
        Task number
    run: int
        Run number
    p_values_3d: 3D array
        Array of p-values, matching the spatial shape of the functional image.
    threshold: float
        The cutoff value to determine significant voxels.

    Returns
    -------
    None. Displays the thresholded -log10 p-value map over the mean functional image.
    """
    fmri_img = image.smooth_img('../../../data/sub001/BOLD/' + 'task00' +
                                str(task) + '_run00' + str(run) +
                                '/filtered_func_data_mni.nii.gz',
                                fwhm=6)

    mean_img = image.mean_img(fmri_img)

    log_p_values = -np.log10(p_values_3d)
    log_p_values[np.isnan(log_p_values)] = 0.
    log_p_values[log_p_values > 10.] = 10.
    log_p_values[log_p_values < -np.log10(threshold)] = 0
    plot_stat_map(nib.Nifti1Image(log_p_values, fmri_img.get_affine()),
                  mean_img, title="Thresholded p-values",
                  annotate=False, colorbar=True)
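A hedged call sketch, assuming the OpenfMRI-style path used inside the function exists and that a voxelwise p-value array has been computed elsewhere (random values stand in here purely as a placeholder):

import nibabel as nib
import numpy as np

# Placeholder p-values with the spatial shape of task 1, run 1.
shape = nib.load('../../../data/sub001/BOLD/task001_run001/'
                 'filtered_func_data_mni.nii.gz').shape[:3]
p_values_3d = np.random.uniform(size=shape)
p_map(task=1, run=1, p_values_3d=p_values_3d, threshold=0.05)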
Example No. 4
 def draw_brain_map(self):
     cmap = plt.get_cmap('Accent')
     self.fig = plt.figure('brain_map')
     plot_stat_map(self.cluster_img, cut_coords=(0, 0, 0), output_file=None,
                   display_mode='ortho', colorbar=False, figure=self.fig,
                   axes=None, title=None, threshold=0.1, annotate=True,
                   draw_cross=False, black_bg='auto', symmetric_cbar="auto",
                   dim=True, vmax=None, cmap=cmap)
Example No. 5
def qc_image_data(dataset, images, plot_dir='qc'):
    # Get ready
    masker = GreyMatterNiftiMasker(memory=Memory(cachedir='nilearn_cache')).fit()
    if op.exists(plot_dir):  # Delete old plots.
        shutil.rmtree(plot_dir)

    # Dataframe to contain summary metadata for neurovault images
    if dataset == 'neurovault':
        fetch_summary = pd.DataFrame(
            columns=('Figure #', 'col_id', 'image_id', 'name',
                     'modality', 'map_type', 'analysis_level',
                     'is_thresholded', 'not_mni', 'brain_coverage',
                     'perc_bad_voxels', 'perc_voxels_outside'))

    for ii, image in enumerate(images):
        im_path = image['absolute_path']
        if im_path is None:
            continue

        ri = ii % 4  # row i
        ci = (ii // 4) % 4  # column i
        pi = ii % 16 + 1  # plot i
        fi = ii // 16  # figure i

        if ri == 0 and ci == 0:
            fh = plt.figure(figsize=(16, 10))
            print('Plot %03d of %d' % (fi + 1, np.ceil(len(images) / 16.)))
        ax = fh.add_subplot(4, 4, pi)
        title = "%s%s" % (
            '(X) ' if image['rejected'] else '', op.basename(im_path))

        if dataset == 'neurovault':
            fetch_summary.loc[ii] = [
                'fig%03d' % (fi + 1), image.get('collection_id'),
                image.get('id'), title, image.get('modality'),
                image.get('map_type'), image.get('analysis_level'),
                image.get('is_thresholded'), image.get('not_mni'),
                image.get('brain_coverage'), image.get('perc_bad_voxels'),
                image.get('perc_voxels_outside')]

        # Images may fail to be transformed, and are of different shapes,
        # so we need to transform one-by-one and keep track of failures.
        img = cast_img(im_path, dtype=np.float32)
        img = clean_img(img)
        try:
            img = masker.inverse_transform(masker.transform(img))
        except Exception as e:
            print("Failed to mask/reshape image %s: %s" % (title, e))

        plot_stat_map(img, axes=ax, black_bg=True, title=title, colorbar=False)

        if (ri == 3 and ci == 3) or ii == len(images) - 1:
            out_path = op.join(plot_dir, 'fig%03d.png' % (fi + 1))
            save_and_close(out_path)

    # Save fetch_summary
    if dataset == 'neurovault':
        fetch_summary.to_csv(op.join(plot_dir, 'fetch_summary.csv'))
Example No. 6
def plot_stat_map2(**kwargs):
    cut_coords = kwargs['cut_coords']
    row_l = kwargs['row_l']
    lines_nb = int(len(cut_coords) / row_l)
    for line in range(lines_nb):
        opt = dict(kwargs)
        opt.pop('row_l')
        opt['cut_coords'] = cut_coords[line * row_l:(line + 1) * row_l]
        plotting.plot_stat_map(**opt)
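A small usage sketch: plot_stat_map2 forwards everything except row_l to plotting.plot_stat_map, so the stat map has to be passed via its stat_map_img keyword (the file name below is hypothetical):

plot_stat_map2(stat_map_img='zmap.nii.gz', cut_coords=list(range(-20, 40, 5)),
               row_l=4, display_mode='z', threshold=2.3)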
Example No. 7
def compute_hurst_and_stat(metric='dfa', regu='off', OUTPUT_PATH = '/volatile/hubert/beamer/test_hurst/', plot=False):
    conn = Hurst_Estimator(metric=metric, mask=dataset.mask,smoothing_fwhm=0, regu=regu, n_jobs=5)
    os.write(1, b'fit\n')
    fc = conn.fit(dataset.func1)
    #conn.load_map(INPUT_PATH)
    os.write(1, b'save\n')
    #stat_function_tst(conn, metric+' '+regu+' ', OUTPUT_PATH)
    conn.save(save_path=OUTPUT_PATH)
    if plot:
        os.write(1, b'plot\n')
        a = Parallel(n_jobs=3, verbose=5)(delayed(classify_group)(group, fc)
                                        for group in groups)

        tst = Parallel(n_jobs=3, verbose=5)(delayed(ttest_group)(group, .05, fc)
                                        for group in groups)

        ost = Parallel(n_jobs=3, verbose=5)(delayed(ttest_onesample)(group, 0.05, fc)
                                            for group in ['v', 'av', 'avn'])

        mht = Parallel(n_jobs=3, verbose=5)(delayed(ttest_onesample_Hmean)(group, 0.05, fc)
                                            for group in ['v', 'av', 'avn'])

        mpt = Parallel(n_jobs=3, verbose=5)(delayed(mne_permutation_ttest)(group,0.05, fc, 1)
                                            for group in ['v', 'av', 'avn'])
        
        
        cot = Parallel(n_jobs=3, verbose=5)(delayed(ttest_onesample_coef)(np.reshape(coef['coef'], (coef['coef'].shape[0], coef['coef'].shape[-1])),
                                            0.05, fc)
                                            for coef in a)

        gr = ['v', 'av', 'avn']
        if regu=='off':
            OUTPUT_PATH = os.path.join(OUTPUT_PATH, metric)
        else:
            OUTPUT_PATH = os.path.join(OUTPUT_PATH, metric, regu)

        for i in range(3):
            title = '_'.join(groups[i])
            output_file = os.path.join(OUTPUT_PATH, title)
            img = conn.masker.inverse_transform(tst[i])
            plot_stat_map(img, cut_coords=(3, -63, 36), title=title, output_file=output_file + '.pdf')
            img = conn.masker.inverse_transform(cot[i])
            plot_stat_map(img, title='coef_map ' + title, output_file=output_file + 'coef_map.pdf')

            title = gr[i]
            output_file = os.path.join(OUTPUT_PATH, title)
            img = conn.masker.inverse_transform(ost[i])
            plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title, output_file=output_file + '.pdf')
            img = conn.masker.inverse_transform(mht[i])
            plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title, output_file=output_file + 'meanH.pdf')
            img = conn.masker.inverse_transform(mpt[i])
            plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title, output_file=output_file + 'mnepermutH.pdf')


        plt.figure()
        plt.boxplot([x['accuracy'] for x in a])
        plt.savefig(os.path.join(OUTPUT_PATH, 'boxplot.pdf'))
Example No. 8
def montage(img, thr=0, mode='coronal', rows=5, columns=6, fsz=(10, 20)):
    """
    Make a montage using nilearn for the background
    The output figure will be 5 slices wide and 6
    slices deep

    :param img: nilearn image containing the data
    :param thr: threshold for the image
    :param mode: view mode. sagittal, coronal, axial
    :param rows: number of rows in the figure
    :param columns: number of columns in the figure
    :param fsz: size of the figure
    :return fig: figure handle for saving or whatnot
    """
    # Hardwired view range
    sag_rng = [-65, 65]
    cor_rng = [-100, 65]
    axi_rng = [-71, 85]

    # Get the number of slices
    n_slices = rows * columns

    if mode == 'coronal':
        # Get the slice indices
        view_range = np.floor(np.linspace(cor_rng[0], cor_rng[1], n_slices))
        view_mode = 'y'
    if mode == 'axial':
        # Get the slice indices
        view_range = np.floor(np.linspace(axi_rng[0], axi_rng[1], n_slices))
        view_mode = 'z'
    if mode == 'sagittal':
        # Get the slice indices
        view_range = np.floor(np.linspace(sag_rng[0], sag_rng[1], n_slices))
        view_mode = 'x'

    # Prepare the figure
    fig = plt.figure(figsize=fsz)
    gs = gridspec.GridSpec(columns, 1, hspace=0, wspace=0)
    # Loop through the rows of the image
    for row_id in range(columns):
        # Create the axis to show
        ax = fig.add_subplot(gs[row_id, 0])
        # Get the slices in the column direction
        row_range = view_range[row_id*rows:(row_id+1)*rows]
        # Display the thing
        nlp.plot_stat_map(img, cut_coords=row_range,
                          display_mode=view_mode, threshold=thr,
                          axes=ax, black_bg=True)

    return fig
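A hedged usage sketch for the montage helper (the input file name is hypothetical; any statistical map in MNI space should do):

import nibabel as nib

img = nib.load('zstat1.nii.gz')  # hypothetical stat map in MNI space
fig = montage(img, thr=2.3, mode='axial', rows=5, columns=6)
fig.savefig('zstat1_axial_montage.png')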
Example No. 9
def run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):
    output_dir = join(trace_folder, 'experiment_%i' % idx)
    try:
        os.makedirs(output_dir)
    except OSError:
        pass
    dict_fact = SpcaFmri(mask=mask,
                         smoothing_fwhm=3,
                         batch_size=40,
                         shelve=not raw,
                         n_components=n_components,
                         replacement=False,
                         dict_init=fetch_atlas_smith_2009().rsn70 if
                         init else None,
                         reduction=reduction,
                         alpha=alpha,
                         random_state=0,
                         n_epochs=2,
                         l1_ratio=0.5,
                         backend='c',
                         memory=expanduser("~/nilearn_cache"), memory_level=2,
                         verbose=5,
                         n_jobs=1,
                         trace_folder=output_dir
                         )

    print('[Example] Learning maps')
    t0 = time.time()
    dict_fact.fit(func_filenames, raw=raw)
    t1 = time.time() - t0
    print('[Example] Dumping results')
    # Decomposition estimator embeds their own masker
    masker = dict_fact.masker_
    components_img = masker.inverse_transform(dict_fact.components_)
    components_img.to_filename(join(output_dir, 'components_final.nii.gz'))
    print('[Example] Run in %.2f s' % t1)
    # Show components from both methods using 4D plotting tools
    import matplotlib.pyplot as plt
    from nilearn.image import index_img
    from nilearn.plotting import plot_prob_atlas, plot_stat_map, show

    print('[Example] Displaying')
    fig, axes = plt.subplots(2, 1)
    plot_prob_atlas(components_img, view_type="filled_contours",
                    axes=axes[0])
    plot_stat_map(index_img(components_img, 0),
                  axes=axes[1],
                  colorbar=False,
                  threshold=0)
    plt.savefig(join(output_dir, 'components.pdf'))
    show()
Example No. 10
def diff_computed_hurst(metric='wavelet', regu='off', INPUT_PATH = '/volatile/hubert/beamer/test_hurst/', OUTPUT_PATH=''):
    conn = Hurst_Estimator(metric=metric, mask=dataset.mask, regu=regu, n_jobs=5)
    os.write(1, b'load\n')
    conn.load_map(INPUT_PATH)
    fc = conn.hurst
    os.write(1, b'stat\n')

    tst = ttest_group(['av', 'v'], .05, fc)
    vmean_avmean = np.mean([fc[i] for i in dataset.group_indices['v']], axis=0) - np.mean([fc[i] for i in dataset.group_indices['av']], axis=0)
    vmean_avmean[tst == 0] = 0
    
    img = conn.masker.inverse_transform(vmean_avmean)
    plot_stat_map(img)
    plt.show()
Example No. 11
def plot_contrast(first_level_model):
    """ Given a first model, specify, enstimate and plot the main contrasts"""
    design_matrix = first_level_model.design_matrices_[0]
    # Call the contrast specification within the function
    contrasts = make_localizer_contrasts(design_matrix)
    fig = plt.figure(figsize=(11, 3))
    # compute the per-contrast z-map
    for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
        ax = plt.subplot(1, len(contrasts), 1 + index)
        z_map = first_level_model.compute_contrast(
            contrast_val, output_type='z_score')
        plotting.plot_stat_map(
            z_map, display_mode='z', threshold=3.0, title=contrast_id, axes=ax,
            cut_coords=1)
Example No. 12
def make_thresholded_slices(regions, colors, display_mode='z', overplot=True, binarize=True, **kwargs):
    """ Plots on axial slices numerous images
    regions: Nibabel images
    colors: List of colors (rgb tuples)
    overplot: Overlay images?
    binarize: Binarize images or plot full stat maps
    """             

    from matplotlib.colors import LinearSegmentedColormap
    from nilearn import plotting as niplt
    
    if binarize:
        for reg in regions:
             reg.get_data()[reg.get_data().nonzero()] = 1
                                   
    for i, reg in enumerate(regions):
        reg_color = LinearSegmentedColormap.from_list('reg1', [colors[i], colors[i]])
        if i == 0:
            plot = niplt.plot_stat_map(reg, draw_cross=False, display_mode=display_mode,
                                       cmap=reg_color, alpha=0.9, colorbar=False, **kwargs)
        else:
            if overplot:
                plot.add_overlay(reg, cmap=reg_color, alpha=.72)
            else:
                niplt.plot_stat_map(reg, draw_cross=False, display_mode=display_mode,
                                    cmap=reg_color, colorbar=False, **kwargs)
    
    return plot
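A usage sketch, assuming two hypothetical ROI masks to be drawn in red and green on axial slices:

import nibabel as nib

regions = [nib.load('roi_left.nii.gz'), nib.load('roi_right.nii.gz')]
colors = [(1.0, 0.0, 0.0), (0.0, 0.8, 0.0)]
display = make_thresholded_slices(regions, colors, display_mode='z', cut_coords=5)
display.savefig('rois_axial.png')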
Example No. 13
def make_stat_image(nifti_file,png_img_file=None):
    """Make statmap image"""
    nifti_file = str(nifti_file)
    brain = plot_stat_map(nifti_file)
    if png_img_file:    
        brain.savefig(png_img_file)
    plt.close('all')
    return brain
Example No. 14
def ica_vis(subj_num):
  # Use the mean as a background
  mean_img_1 = image.mean_img(BOLD_file_1)
  mean_img_2 = image.mean_img(BOLD_file_2)
  mean_img_3 = image.mean_img(BOLD_file_3)

  plot_stat_map(image.index_img(component_img_1, 5), mean_img_1, output_file=os.path.join(data_path,'sub'+subj_num+'_BOLD','task001_run001'+'ica_1'+'.jpg'))
  plot_stat_map(image.index_img(component_img_1, 12), mean_img_1, output_file=os.path.join(data_path,'sub'+subj_num+'_BOLD','task001_run001'+'ica_2'+'.jpg'))

  plot_stat_map(image.index_img(component_img_2, 5), mean_img_2, output_file=os.path.join(data_path,'sub'+subj_num+'_BOLD','task002_run001'+'ica_1'+'.jpg'))
  plot_stat_map(image.index_img(component_img_2, 12), mean_img_2, output_file=os.path.join(data_path,'sub'+subj_num+'_BOLD','task002_run001'+'ica_2'+'.jpg'))
Example No. 15
def plot_stat_overlay(stat_img, contour_img, bg_img, **kwargs):
    """Plot over bg_img a stat_img and the countour."""
    import nilearn.plotting as niplot

    if bg_img is not None:
        kwargs['bg_img'] = bg_img

    display = niplot.plot_stat_map(stat_img, **kwargs)
    display.add_contours(contour_img, filled=True, alpha=0.6, levels=[0.5], colors='g')
    return display
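A usage sketch with hypothetical input files, overlaying a z-map on the MNI template and outlining a mask in green:

from nilearn import datasets

mni = datasets.load_mni152_template()
display = plot_stat_overlay('zmap.nii.gz', 'mask.nii.gz', bg_img=mni,
                            threshold=3.0, display_mode='z', cut_coords=5)
display.savefig('zmap_with_mask_contour.png')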
Example No. 16
def dump_comps(masker, compressor, components, threshold=2, fwhm=None,
               perc=None):
    from scipy.stats import zscore
    from nilearn.plotting import plot_stat_map
    from nilearn.image import smooth_img
    from scipy.stats import scoreatpercentile

    if isinstance(compressor, str):
        comp_name = compressor
    else:
        comp_name = compressor.__str__().split('(')[0]

    for i_c, comp in enumerate(components):
        path_mask = op.join(WRITE_DIR, '%s_%i-%i' % (comp_name,
                                                     n_comp, i_c + 1))
        nii_raw = masker.inverse_transform(comp)
        nii_raw.to_filename(path_mask + '.nii.gz')
        
        comp_z = zscore(comp)
        
        if perc is not None:
            cur_thresh = scoreatpercentile(np.abs(comp_z), per=perc)
            path_mask += '_perc%i' % perc
            print('Applying percentile %.2f (threshold: %.2f)' % (perc, cur_thresh))
        else:
            cur_thresh = threshold
            path_mask += '_thr%.2f' % cur_thresh
            print('Applying threshold: %.2f' % cur_thresh)

        nii_z = masker.inverse_transform(comp_z)
        gz_path = path_mask + '_zmap.nii.gz'
        nii_z.to_filename(gz_path)
        plot_stat_map(gz_path, bg_img='colin.nii', threshold=cur_thresh,
                      cut_coords=(0, -2, 0), draw_cross=False,
                      output_file=path_mask + 'zmap.png')
                      
        # optional: do smoothing
        if fwhm is not None:
            nii_z_fwhm = smooth_img(nii_z, fwhm=fwhm)
            plot_stat_map(nii_z_fwhm, bg_img='colin.nii', threshold=cur_thresh,
                          cut_coords=(0, -2, 0), draw_cross=False,
                          output_file=path_mask +
                          ('zmap_%imm.png' % fwhm))
Example No. 17
def plot_components(ica_image, hemi='', out_dir=None,
                    bg_img=datasets.load_mni152_template()):
    print("Plotting %s components..." % hemi)

    # Determine threshold and vmax for all the plots
    # get nonzero part of the image for proper thresholding of
    # r- or l- only component
    nonzero_img = ica_image.get_data()[np.nonzero(ica_image.get_data())]
    thr = stats.scoreatpercentile(np.abs(nonzero_img), 90)
    vmax = stats.scoreatpercentile(np.abs(nonzero_img), 99.99)
    for ci, ic_img in enumerate(iter_img(ica_image)):

        title = _title_from_terms(terms=ica_image.terms, ic_idx=ci, label=hemi)
        fh = plt.figure(figsize=(14, 6))
        plot_stat_map(ic_img, axes=fh.gca(), threshold=thr, vmax=vmax,
                      colorbar=True, title=title, black_bg=True, bg_img=bg_img)

        # Save images instead of displaying
        if out_dir is not None:
            save_and_close(out_path=op.join(
                out_dir, '%s_component_%i.png' % (hemi, ci)))
Example No. 18
def dump_comps(masker, compressor, components, threshold=2):
    from scipy.stats import zscore
    from nilearn.plotting import plot_stat_map

    if isinstance(compressor, str):
        comp_name = compressor
    else:
        comp_name = compressor.__str__().split('(')[0]

    for i_c, comp in enumerate(components):
        path_mask = op.join(WRITE_DIR, '%s_%i-%i' % (comp_name,
                                                     n_comp, i_c + 1))
        nii_raw = masker.inverse_transform(comp)
        nii_raw.to_filename(path_mask + '.nii.gz')

        nii_z = masker.inverse_transform(zscore(comp))
        gz_path = path_mask + '_zmap.nii.gz'
        nii_z.to_filename(gz_path)
        plot_stat_map(gz_path, bg_img='colin.nii', threshold=threshold,
                      cut_coords=(0, -2, 0), draw_cross=False,
                      output_file=path_mask + 'zmap.png')
Example No. 19
def main(stat_map="nii/tstat.nii.gz", template="~/NIdata/templates/medres_QBI_chr.nii.gz", black_bg=False, cut_coords=(-50,8,45)):
	template = path.expanduser(template)

	colors_plus = plt.cm.autumn(np.linspace(0., 1, 128))
	colors_minus = plt.cm.winter(np.linspace(0, 1, 128))
	colors = np.vstack((colors_minus, colors_plus[::-1]))

	mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors)

	for i in ['none', 'nearest', 'bilinear', 'bicubic', 'spline16','spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric','catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']:
		display = plotting.plot_stat_map(stat_map, bg_img=template,threshold=2.5, vmax=40, cmap=mymap, black_bg=black_bg, cut_coords=cut_coords, annotate=True, title=i+" interpolation", draw_cross=False, interpolation=i)
		plt.savefig(i, dpi=700)
Example No. 20
def generate_images(components_img, n_components, images_dir, glass=False):
    # Remove existing images
    if os.path.exists(images_dir):
        shutil.rmtree(images_dir)
    os.makedirs(images_dir)
    output_filenames = [osp.join(images_dir, 'IC_{}.png'.format(i))
                        for i in range(n_components)]

    for i, output_file in enumerate(output_filenames):
        plot_stat_map(nibabel.Nifti1Image(components_img.get_data()[..., i],
                                          components_img.get_affine()),
                      display_mode="z", title="IC %d" % i, cut_coords=7,
                      colorbar=False, output_file=output_file)
    if glass:
        output_filenames = [osp.join(images_dir, 'glass_IC_{}.png'.format(i))
                            for i in range(n_components)]
        for i, output_file in enumerate(output_filenames):
            plot_glass_brain(nibabel.Nifti1Image(
                components_img.get_data()[..., i],
                components_img.get_affine()),
                display_mode="ortho", title="IC %d" % i,
                             output_file=output_file)
Example No. 21
File: db.py Project: vsoch/brainart
def make_plot(mr, color, png_image, black_bg=False):
    """make_plot
    :param mr: a nibabel.Nifti1Image
    :param color: a matplotlib color map
    :param png_image: file name for output png
    :param black_bg: True saves image with black background (default False)
    """
    plot_stat_map(
        mr,
        display_mode="z",
        colorbar=False,
        annotate=False,
        draw_cross=False,
        cmap=color,
        cut_coords=1,
        black_bg=black_bg,
    )
    if black_bg == False:
        plt.savefig(png_image)
    else:
        plt.savefig(png_image, facecolor="k", edgecolor="k")
    plt.close()
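A usage sketch with a hypothetical input volume and output file name:

import nibabel as nib
import matplotlib.pyplot as plt

mr = nib.load('stat.nii.gz')  # hypothetical input volume
make_plot(mr, plt.get_cmap('hot'), 'stat_slice.png', black_bg=True)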
Example No. 22
def stat_ttest_function(conn, prefix='', OUTPUT_PATH=None):
    fc = conn.hurst


    ost = Parallel(n_jobs=3, verbose=5)(delayed(ttest_onesample)(group, 0.05, fc)
                                        for group in ['v', 'av', 'avn'])

    mht = Parallel(n_jobs=3, verbose=5)(delayed(ttest_onesample_Hmean)(group, 0.05, fc)
                                        for group in ['v', 'av', 'avn'])
    
    
    gr = ['v', 'av', 'avn']
    if OUTPUT_PATH is None:
        for i in range(3):

            title = prefix + gr[i]
            try:
                img = conn.masker.inverse_transform(ost[i])
                plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title)

            except ValueError:
                print("problem with ost " + title)
            try:
                img = conn.masker.inverse_transform(mht[i])
                plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title)

            except ValueError:
                print("problem with mht " + title)
            
    else:
        for i in range(3):

            title = prefix + gr[i]
            output_file = os.path.join(OUTPUT_PATH, title)
            try:
                img = conn.masker.inverse_transform(ost[i])
                #plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title, output_file=output_file + '.pdf')
                plot_stat_map(img, output_file=output_file + '.pdf')

            except ValueError:
                print("problem with ost " + title)
            try:
                img = conn.masker.inverse_transform(mht[i])
                #plot_stat_map(img, title='t-test H0: H = 0.5, p-value in -log10 scale, group: ' + title, output_file=output_file + 'meanH.pdf')
                plot_stat_map(img, output_file=output_file + 'meanH.pdf')

            except ValueError:
                print("problem with mht " + title)
Example No. 23
def _generate_thumbnail(build_dir, img, cut_coords):
    threshold = np.percentile(nb.load(img).get_data(), 97)
    display = plot_stat_map(img, threshold=threshold,
                            cut_coords=cut_coords, display_mode='z',
                            annotate=False, colorbar=False,
                            draw_cross=False, black_bg=False)

    study_id = img.split(os.path.sep)[-2]
    task_id, map_id = os.path.split(img)[-1].split('.nii.gz')[0].split('_', 1)
    fname = '%s_%s_%s.png' % (
        img.split(os.path.sep)[-2],
        os.path.split(img)[-1].split('.nii.gz')[0], cut_coords)
    fname = os.path.join(build_dir, 'thumbnails', fname)
    display.savefig(fname, dpi=200)
    return study_id, task_id, map_id, fname
Example No. 24
def plot_components_summary(ica_image, hemi='', out_dir=None,
                            bg_img=datasets.load_mni152_template()):
    print("Plotting %s components summary..." % hemi)

    n_components = ica_image.get_data().shape[3]

    # Determine threshold and vmax for all the plots
    # get nonzero part of the image for proper thresholding of
    # r- or l- only component
    nonzero_img = ica_image.get_data()[np.nonzero(ica_image.get_data())]
    thr = stats.scoreatpercentile(np.abs(nonzero_img), 90)
    vmax = stats.scoreatpercentile(np.abs(nonzero_img), 99.99)
    for ii, ic_img in enumerate(iter_img(ica_image)):

        ri = ii % 5  # row i
        ci = (ii // 5) % 5  # column i
        pi = ii % 25 + 1  # plot i
        fi = ii // 25  # figure i

        if ri == 0 and ci == 0:
            fh = plt.figure(figsize=(30, 20))
            print('Plot %03d of %d' % (fi + 1, np.ceil(n_components / 25.)))
        ax = fh.add_subplot(5, 5, pi)

        title = _title_from_terms(terms=ica_image.terms, ic_idx=ii, label=hemi)

        colorbar = ci == 4

        plot_stat_map(
            ic_img, axes=ax, threshold=thr, vmax=vmax, colorbar=colorbar,
            title=title, black_bg=True, bg_img=bg_img)

        if (ri == 4 and ci == 4) or ii == n_components - 1:
            out_path = op.join(
                out_dir, '%s_components_summary%02d.png' % (hemi, fi + 1))
            save_and_close(out_path)
Example No. 25
def plot_diff_avg(scores, threshold=0.01, coords=None, cross=False, **kwargs):
    '''plots subject scoremap using nilearn and returns display object'''
    mask = spenc_dir+'temporal_lobe_mask_grp_7T_test.nii.gz'
    background_img = '/home/data/psyinf/forrest_gump/anondata/templates/' + \
            'grpbold7Tp1/brain.nii.gz'
    scores = scores.copy()
    scores[np.abs(scores)<threshold] = 0.0
    unmasked = unmask(scores, mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=cross, **kwargs)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example No. 26
def plot_subj_ko(subj, scores, threshold=0.01, coords=None):
    '''plots subject scoremap using nilearn and returns display object'''
    subj_mask = './masks/template_mask_thick.nii.gz'
    background_img = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    scores = scores.copy()
    scores[scores<threshold] = 0.0
    unmasked = unmask(scores, subj_mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    symmetric_cbar=False,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=False)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example No. 27
def plot_subj(subj, scores, threshold=0.01, coords=None):
    '''plots subject scoremap using nilearn and returns display object'''
    subj_mask = spenc_dir+'temporal_lobe_mask_brain_subj{0:02}bold.nii.gz'.format(subj)
    background_img = '/home/data/psyinf/forrest_gump/anondata/sub{0:03}/'.format(subj)+\
            'templates/bold7Tp1/brain.nii.gz'
    scores = scores.copy()
    scores[scores<threshold] = 0.0
    unmasked = unmask(scores, subj_mask)
    unmasked = threshold_img(unmasked, 0.001)
    display = plot_stat_map(
                    unmasked, cut_coords=coords, bg_img=background_img,
                    symmetric_cbar=False,
                    title='metric per voxel', dim=-1, aspect=1.25,
                    threshold=0.001, draw_cross=False)
    fig = plt.gcf()
    fig.set_size_inches(12, 4)
    return display
Example No. 28
    def _save_plot(self, predictor):
        """ Save Plots.

        Args:
            predictor: predictor instance

        Returns:
            predicter_weightmap_montage.png: Will output a montage of axial slices of weightmap
            predicter_prediction.png: Will output a plot of prediction

        """

        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)

        if self.algorithm == 'lassopcr':
            coef = np.dot(self._pca.components_.T,self._lasso.coef_)
            coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
        elif self.algorithm == 'pcr':
            coef = np.dot(self._pca.components_.T,self._regress.coef_)
            coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
        else:
            coef_img = self.nifti_masker.inverse_transform(predictor.coef_)

        overlay_img = nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm_brain.nii.gz'))

        fig1 = plot_stat_map(coef_img, overlay_img, title=self.algorithm + " weights",
                            cut_coords=range(-40, 40, 10), display_mode='z')
        fig1.savefig(os.path.join(self.output_dir, self.algorithm + '_weightmap_axial.png'))

        if self.prediction_type == 'classification':
            if self.algorithm not in ['svm','ridgeClassifier','ridgeClassifierCV']:
                fig2 = probability_plot(self.stats_output)
                fig2.savefig(os.path.join(self.output_dir, self.algorithm + '_prob_plot.png'))
            else:
                fig2 = dist_from_hyperplane_plot(self.stats_output)
                fig2.savefig(os.path.join(self.output_dir, self.algorithm +
                            '_Distance_from_Hyperplane_xval.png'))
                if self.algorithm == 'svm' and self.predictor.probability:
                    fig3 = probability_plot(self.stats_output)
                    fig3.savefig(os.path.join(self.output_dir, self.algorithm + '_prob_plot.png'))

        elif self.prediction_type == 'prediction':
            fig2 = scatterplot(self.stats_output)
            fig2.savefig(os.path.join(self.output_dir, self.algorithm + '_scatterplot.png'))
Example No. 29
def plot_ica_components(components_img, **kwargs):
    """ Plot the components IC spatial maps in a grid."""
    import math
    from nilearn.image import iter_img
    from nilearn.plotting import plot_stat_map
    from matplotlib import pyplot as plt
    from matplotlib import gridspec

    n_ics  = len(list(iter_img(components_img)))
    n_rows = math.ceil(n_ics/2)
    fig = plt.figure(figsize=(6, 3*n_rows), facecolor='black')
    gs  = gridspec.GridSpec(n_rows, 2)

    plots = []
    for i, ic_img in enumerate(iter_img(components_img)):
        ax = plt.subplot(gs[i])
        p  = plot_stat_map(ic_img, display_mode="z", title="IC {}".format(i+1),
                           cut_coords=1, colorbar=False, figure=fig, axes=ax, **kwargs)
        plots.append(p)

    for p in plots:
        p.close()

    return fig
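A usage sketch with a hypothetical 4D components image; extra keywords such as threshold are forwarded to plot_stat_map:

fig = plot_ica_components('canica_components.nii.gz', threshold=1.0)
fig.savefig('ica_components_grid.png', facecolor='black')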
Example No. 30
# In[14]:

fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(12, 10))

topic_img_4d = neurosynth_dset_first_500.masker.inverse_transform(
    gclda_model.p_voxel_g_topic_.T)
# Plot first ten topics
topic_counter = 0
for i_row in range(5):
    for j_col in range(2):
        topic_img = image.index_img(topic_img_4d, index=topic_counter)
        display = plotting.plot_stat_map(
            topic_img,
            annotate=False,
            cmap="Reds",
            draw_cross=False,
            figure=fig,
            axes=axes[i_row, j_col],
        )
        axes[i_row, j_col].set_title(f"Topic {str(topic_counter).zfill(3)}")
        topic_counter += 1

        colorbar = display._cbar
        colorbar_ticks = colorbar.get_ticks()
        if colorbar_ticks[0] < 0:
            new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
        else:
            new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
        colorbar.set_ticks(new_ticks, update_ticks=True)

glue("figure_gclda_topics", fig, display=False)
Example No. 31
def plot_t_brain(objIn,
                 how="full",
                 thr="unc",
                 alpha=None,
                 nperm=None,
                 cut_coords=[],
                 **kwargs):
    """
    Takes a brain data object and computes a 1-sample t-test across its first axis. If a list of two objects is provided, the difference between them is computed first (i.e. a paired-samples t-test).
    Args:
        objIn:(list/Brain_Data) if list will compute difference map first
        how: (list) whether to plot a glass brain 'glass', 3 view-multi-slice mni 'mni', or both 'full'
        thr: (str) what method to use for multiple comparisons correction unc, fdr, or tfce
        alpha: (float) p-value threshold
        nperm: (int) number of permutations for tfce; default 1000
        cut_coords: (list) x,y,z coords to plot brain slice
        kwargs: optionals args to nilearn plot functions (e.g. vmax)

    """
    if thr not in ["unc", "fdr", "tfce"]:
        raise ValueError("Acceptable threshold methods are 'unc','fdr','tfce'")
    views = ["x", "y", "z"]
    if len(cut_coords) == 0:
        cut_coords = [
            range(-40, 50, 10),
            [-88, -72, -58, -38, -26, 8, 20, 34, 46],
            [-34, -22, -10, 0, 16, 34, 46, 56, 66],
        ]
    else:
        if len(cut_coords) != 3:
            raise ValueError(
                "cut_coords must be a list of coordinates like [[xs],[ys],[zs]]"
            )
    cmap = "RdBu_r"

    if isinstance(objIn, list):
        if len(objIn) == 2:
            obj = objIn[0] - objIn[1]
        else:
            raise ValueError("Contrasts should contain only 2 list items!")
    else:
        obj = objIn

    thrDict = {}
    if thr == "tfce":
        thrDict["permutation"] = thr
        if nperm is None:
            nperm = 1000
        thrDict["n_permutations"] = nperm
        print("1-sample t-test corrected using: TFCE w/ %s permutations" %
              nperm)
    else:
        if thr == "unc":
            if alpha is None:
                alpha = 0.001
            thrDict[thr] = alpha
            print("1-sample t-test uncorrected at p < %.3f " % alpha)
        elif thr == "fdr":
            if alpha is None:
                alpha = 0.05
            thrDict[thr] = alpha
            print("1-sample t-test corrected at q < %.3f " % alpha)
        else:
            thrDict = None
            print("1-sample test unthresholded")

    out = obj.ttest(threshold_dict=thrDict)
    if thrDict is not None:
        obj = out["thr_t"]
    else:
        obj = out["t"]

    if how == "full":
        plot_glass_brain(obj.to_nifti(),
                         display_mode="lzry",
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
        for v, c in zip(views, cut_coords):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)["brain"],
                          **kwargs)
    elif how == "glass":
        plot_glass_brain(obj.to_nifti(),
                         display_mode="lzry",
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
    elif how == "mni":
        for v, c in zip(views, cut_coords):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)["brain"],
                          **kwargs)
    del obj
    del out
    return
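A usage sketch, assuming nltools is available and a hypothetical 4D file containing one map per subject:

from nltools.data import Brain_Data

dat = Brain_Data('subject_betas_4d.nii.gz')  # hypothetical 4D set of subject maps
plot_t_brain(dat, how='mni', thr='fdr', alpha=0.05)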
Example No. 32
haxby_func_filename = haxby_dataset.func[0]

# one motor contrast map from NeuroVault
motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]

###############################################################################
# Plotting statistical maps with function `plot_stat_map`
# --------------------------------------------------------

from nilearn import plotting

# Visualizing t-map image on EPI template with manual
# positioning of coordinates using cut_coords given as a list
plotting.plot_stat_map(stat_img,
                       threshold=3,
                       title="plot_stat_map",
                       cut_coords=[36, -27, 66])

###############################################################################
# Making interactive visualizations with function `view_img`
# ----------------------------------------------------------
# An alternative to :func:`nilearn.plotting.plot_stat_map` is to use
# :func:`nilearn.plotting.view_img` that gives more interactive
# visualizations in a web browser. See :ref:`interactive-stat-map-plotting`
# for more details.

view = plotting.view_img(stat_img, threshold=3)
# In a Jupyter notebook, if ``view`` is the output of a cell, it will
# be displayed below the cell
view
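
###############################################################################
# Outside a notebook, the interactive view can be opened in a browser or saved
# as a standalone HTML page (the file name below is just an example).

# view.open_in_browser()
view.save_as_html('stat_map_view.html')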
Example No. 33
def plot_n_save_3plane(in_dict):
    '''
    in_dict fields:
        in_dir
        in_fn
        cut_coords
        threshold: threshold passed to plot_stat_map
        do_save (boolean)
        formats_used (a list-like object)
        area (brain area, for naming the output file)
        out_dir (output directory, used when do_save is True)
        cmap (optional): a matplotlib colormap object
        symmetric_cbar (optional): whether to treat cmap as symmetrical
        cluster_dict (optional): arguments for NiiThresholder cluster thresholding
        vmax, draw_cross (optional)
    '''
    if 'symmetric_cbar' not in in_dict.keys():
        in_dict['symmetric_cbar'] = 'auto'
    if 'cmap' not in in_dict.keys():
        in_dict['cmap'] = mymap
        if in_dict['symmetric_cbar'] == 'auto':
            in_dict['cmap'] = cm.autumn
    if 'vmax' not in in_dict.keys():
        in_dict['vmax'] = None
    if 'draw_cross' not in in_dict.keys():
        in_dict['draw_cross'] = True
    if 'cluster_dict' not in in_dict.keys():
        cluster_dict = {}
    else:
        cluster_dict = in_dict['cluster_dict']
        cluster_dict['in_nii_fn'] = opj(in_dict['in_dir'], in_dict['in_fn'])

    if cluster_dict:
        thresholder = NiiThresholder(**cluster_dict)
        used_fn = thresholder.make_thresholded_image()
    else:
        used_fn = opj(in_dict['in_dir'], in_dict['in_fn'])

    pv = plotting.plot_stat_map(
        used_fn,
        black_bg=True,
        bg_img=(os.getenv('FSLDIR') +
                '/data/standard/MNI152_T1_1mm_brain.nii.gz'),
        threshold=in_dict['threshold'],
        cut_coords=in_dict['cut_coords'],
        cmap=in_dict['cmap'],
        symmetric_cbar=in_dict['symmetric_cbar'],
        draw_cross=in_dict['draw_cross'],
        vmax=in_dict['vmax'])

    set_fig_bg_contrast(pv)

    plt.show()

    if in_dict['do_save']:
        for fmt in in_dict['formats_used']:
            pv.savefig(
                opj(
                    in_dict['out_dir'], in_dict['in_fn'].split('.')[0] + '_' +
                    in_dict['area'] + '.' + fmt))
    try:
        thresholder.clean_out_fn()
    except NameError:
        pass

    return pv
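A usage sketch; the paths, area label and coordinates below are hypothetical, and the dict mirrors the fields documented in the docstring (FSLDIR must point at an FSL installation for the background image):

in_dict = dict(in_dir='./stats', in_fn='zstat1.nii.gz', out_dir='./figures',
               area='M1', threshold=2.3, cut_coords=(0, -20, 55),
               do_save=True, formats_used=['png', 'pdf'])
pv = plot_n_save_3plane(in_dict)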
Example No. 34
def plot_spm(
        zmaps,
        roi_dict,
        bg_img=None,
        z_threshold=0,
        f=None,
        axes=None,
        # brain_mask='../Templates/mni_icbm152_nlin_asym_09c_nifti/mni_icbm152_nlin_asym_09c.nii.gz',
        roi_to_plot=('PreSMA', 'M1', 'ACC', 'rIFG', 'STR', 'GPe', 'GPi',
                     'STN'),
        cut_coords=[None, None, None, None, None, None, None, None],
        contrasts=('failed_stop - go_trial', 'successful_stop - go_trial',
                   'failed_stop - successful_stop'),
        plot_columns=(0, 1, 3, 4, 6, 7),
        empty_plots=False,
        skip_all_but_last=False,
        **kwargs):

    if f is None:
        gridspec = dict(hspace=0.0,
                        wspace=0.0,
                        width_ratios=[1, 1, 0.05, 1, 1, .05, 1, 1, .1])
        f, axes = plt.subplots(
            len(roi_to_plot), len(zmaps) + 3, gridspec_kw=gridspec
        )  # add 3 columns: 2 interspace, 1 on the right for the colorbar

    if empty_plots:
        f.set_size_inches(len(zmaps) * 4, len(roi_to_plot) * 4)
        return f, axes

    all_cut_coords = []
    all_disps = []
    for row_n, roi in enumerate(roi_to_plot):
        # for debugging
        if skip_all_but_last:
            if row_n < (len(roi_to_plot) - 1):
                continue

        # get cut coordinates based on 1 hemisphere (if applicable)
        if roi in ['STR', 'STN', 'PreSMA', 'GPe', 'GPi']:
            roi_map = roi_dict['l' + roi]
        else:
            roi_map = roi_dict[roi]
#        roi_map = make_conjunction_mask(roi_map['fn'], brain_mask)
        if roi == 'rIFG':
            ## sagittal view
            if cut_coords[row_n] is None:
                this_cut_coords = plotting.find_xyz_cut_coords(
                    roi_map['fn'])[0:1]
            else:
                this_cut_coords = cut_coords[row_n]
            display_mode = 'x'
            plot_rois = ['rIFG']  #, 'M1', 'rPreSMA']
        elif roi == 'STR':
            ## axial view
            if cut_coords[row_n] is None:
                this_cut_coords = plotting.find_xyz_cut_coords(
                    roi_map['fn'])[2:3]
            else:
                this_cut_coords = cut_coords[row_n]

            display_mode = 'z'
            plot_rois = [
                'rIFG', 'M1', 'lSTR', 'lGPe', 'lGPi', 'lSTN', 'rSTR', 'rGPe',
                'rGPi', 'rSTN'
            ]
        elif roi == 'STN':
            ## plot coronal view
            if cut_coords[row_n] is None:
                this_cut_coords = plotting.find_xyz_cut_coords(
                    roi_map['fn'])[1:2]
            else:
                this_cut_coords = cut_coords[row_n]

            display_mode = 'y'
            plot_rois = [
                'rIFG', 'M1', 'lSTR', 'lGPe', 'lGPi', 'lSTN', 'rSTR', 'rGPe',
                'rGPi', 'rSTN'
            ]

        all_cut_coords.append({display_mode: this_cut_coords[0]})

        # loop over contrasts for columns
        for col_n, map_n in zip(plot_columns, np.arange(len(zmaps))):
            zmap = zmaps[map_n]
            if skip_all_but_last:
                if col_n < (len(zmaps) - 1):
                    continue

            if row_n == (len(roi_to_plot) - 1) and col_n == (len(zmaps) - 1):
                # plot colorbar in the last plot
                cbar = False
            else:
                cbar = False

#             # do not plot in column 2 or 5
#             plot_col = col_n
#             if col_n > 1:
#                 plot_col = col_n + 1
#             if col_n > 3:
#                 plot_col = col_n + 2

            if isinstance(z_threshold, list):
                this_threshold = z_threshold[map_n]
            else:
                this_threshold = z_threshold
            ax = axes[row_n, col_n]

            #             print(cbar)
            disp = plotting.plot_stat_map(zmap,
                                          bg_img=bg_img,
                                          threshold=this_threshold,
                                          cut_coords=this_cut_coords,
                                          display_mode=display_mode,
                                          axes=ax,
                                          colorbar=cbar,
                                          **kwargs)

            # just plot *all* contours, always
            for roi_ in plot_rois:
                roi_map = roi_dict[roi_]
                #             for roi_, roi_map in roi_dict.items():
                #                 print(roi_map)
                add_contours(disp,
                             roi=roi_map['fn'],
                             thr=roi_map['threshold'],
                             color=roi_map['color'])

            # determine limits (xlim/ylim) based on first column, and apply to all others
            this_key = list([x for x in disp.axes.keys()])[0]
            # Determine new xlim/ylim based on first column
            if col_n == plot_columns[0]:
                # extract old/current limits
                cur_xlim = disp.axes[this_key].ax.get_xlim()
                cur_ylim = disp.axes[this_key].ax.get_ylim()
                if display_mode == 'x':
                    new_xlim = get_prop_limits([0, 1], cur_xlim)
                    new_ylim = get_prop_limits([0, 1], cur_ylim)
                elif display_mode == 'z' and 'STN' in roi:
                    new_xlim = get_prop_limits([.25, .75], cur_xlim)
                    new_ylim = get_prop_limits([.40, .90], cur_ylim)
                elif display_mode == 'z' and 'STR' in roi:
                    new_xlim = get_prop_limits([0, 1], cur_xlim)
                    new_ylim = get_prop_limits([0.3, 1], cur_ylim)
                elif display_mode == 'y':
                    new_xlim = get_prop_limits([.26, .74], cur_xlim)
                    new_ylim = get_prop_limits([.25, .75], cur_ylim)

            # Change axes limits
            disp.axes[this_key].ax.set_xlim(new_xlim[0], new_xlim[1])
            disp.axes[this_key].ax.set_ylim(new_ylim[0], new_ylim[1])

            all_disps.append(disp)


#             # set new xlimits if necessary (ie zoom for STN view)
#             if 'STN' in roi and display_mode == 'z':
#                 this_key = [x for x in disp.axes.keys()]
#                 this_key = this_key[0]
#                 cur_xlim = disp.axes[this_key].ax.get_xlim()
#                 cur_ylim = disp.axes[this_key].ax.get_ylim()
#                 new_xlim = get_prop_limits([.25, .75], cur_xlim)
#                 new_ylim = get_prop_limits([.40, .90], cur_ylim)
#                 disp.axes[this_key].ax.set_xlim(new_xlim[0], new_xlim[1])
#                 disp.axes[this_key].ax.set_ylim(new_ylim[0], new_ylim[1])
#             elif 'STN' in roi and display_mode == 'y':
#                 this_key = [x for x in disp.axes.keys()]
#                 this_key = this_key[0]
#                 cur_xlim = disp.axes[this_key].ax.get_xlim()
#                 cur_ylim = disp.axes[this_key].ax.get_ylim()
#                 new_xlim = get_prop_limits([.25, .75], cur_xlim)
#                 new_ylim = get_prop_limits([.25, .75], cur_ylim)
#                 disp.axes[this_key].ax.set_xlim(new_xlim[0], new_xlim[1])
#                 disp.axes[this_key].ax.set_ylim(new_ylim[0], new_ylim[1])
#             elif 'STR' in roi and display_mode == 'z':
#                 this_key = [x for x in disp.axes.keys()]
#                 this_key = this_key[0]
#                 cur_xlim = disp.axes[this_key].ax.get_xlim()
#                 cur_ylim = disp.axes[this_key].ax.get_ylim()
#                 new_xlim = get_prop_limits([0, 1], cur_xlim)
#                 new_ylim = get_prop_limits([.3, 1], cur_ylim)
#                 disp.axes[this_key].ax.set_xlim(new_xlim[0], new_xlim[1])
#                 disp.axes[this_key].ax.set_ylim(new_ylim[0], new_ylim[1])

#             all_disps.append(disp)

# add labels
    if not skip_all_but_last:
        for row_n, ax in enumerate(axes[:, 0]):
            cc = all_cut_coords[row_n]
            disp_mode = [x for x in cc.keys()][0]
            coord = cc[disp_mode]
            ax.annotate('%s = %d' % (disp_mode, int(coord)),
                        xy=(0, 0.5),
                        xytext=(-ax.yaxis.labelpad - 0.5, 0),
                        xycoords=ax.yaxis.label,
                        textcoords='offset points',
                        rotation=90,
                        ha='right',
                        va='center')

    f.set_size_inches(len(zmaps) * 4, len(roi_to_plot) * 4)

    return f, axes, all_disps
Example No. 35
def denoise(img_file,
            tsv_file,
            out_path,
            col_names=False,
            hp_filter=False,
            lp_filter=False,
            out_figure_path=False):
    nii_ext = '.nii.gz'
    FD_thr = [.5]
    sc_range = np.arange(-1, 3)
    constant = 'constant'

    # read in files
    img = load_niimg(img_file)
    # get file info
    img_name = os.path.basename(img.get_filename())
    file_base = img_name[0:img_name.find('.')]
    save_img_file = pjoin(out_path, file_base + \
                          '_NR' + nii_ext)
    data = img.get_data()
    df_orig = pandas.read_csv(tsv_file, sep='\t', na_values='n/a')
    df = copy.deepcopy(df_orig)
    Ntrs = df.as_matrix().shape[0]
    print('# of TRs: ' + str(Ntrs))
    assert (Ntrs == data.shape[len(data.shape) - 1])

    # select columns to use as nuisance regressors
    if col_names:
        df = df[col_names]
        str_append = '  [SELECTED regressors in CSV]'
    else:
        col_names = df.columns.tolist()
        str_append = '  [ALL regressors in CSV]'

    # fill in missing nuisance values with mean for that variable
    for col in df.columns:
        if sum(df[col].isnull()) > 0:
            print('Filling in ' + str(sum(df[col].isnull())) +
                  ' NaN value for ' + col)
            df[col] = df[col].fillna(np.mean(df[col]))
    print('# of Confound Regressors: ' + str(len(df.columns)) + str_append)

    # implement HP filter in regression
    TR = img.header.get_zooms()[-1]
    frame_times = np.arange(Ntrs) * TR
    if hp_filter:
        hp_filter = float(hp_filter)
        assert (hp_filter > 0)
        period_cutoff = 1. / hp_filter
        df = make_first_level_design_matrix(frame_times,
                                            period_cut=period_cutoff,
                                            add_regs=df.as_matrix(),
                                            add_reg_names=df.columns.tolist())
        # fn adds intercept into dm

        hp_cols = [col for col in df.columns if 'drift' in col]
        print('# of High-pass Filter Regressors: ' + str(len(hp_cols)))
    else:
        # add in intercept column into data frame
        df[constant] = 1
        print('No High-pass Filter Applied')

    dm = df.as_matrix()

    # prep data
    data = np.reshape(data, (-1, Ntrs))
    data_mean = np.mean(data, axis=1)
    Nvox = len(data_mean)

    # setup and run regression
    model = regression.OLSModel(dm)
    results = model.fit(data.T)
    if not hp_filter:
        results_orig_resid = copy.deepcopy(
            results.resid)  # save for rsquared computation

    # apply low-pass filter
    if lp_filter:
        # input to butterworth fn is time x voxels
        low_pass = float(lp_filter)
        Fs = 1. / TR
        if low_pass >= Fs / 2:
            raise ValueError(
                'Low-pass filter cutoff is too close to the Nyquist frequency (%s)'
                % (Fs / 2))

        temp_img_file = pjoin(out_path, file_base + \
                              '_temp' + nii_ext)
        temp_img = nb.Nifti1Image(np.reshape(
            results.resid.T + np.reshape(data_mean, (Nvox, 1)),
            img.shape).astype('float32'),
                                  img.affine,
                                  header=img.header)
        temp_img.to_filename(temp_img_file)
        results.resid = butterworth(results.resid,
                                    sampling_rate=Fs,
                                    low_pass=low_pass,
                                    high_pass=None)
        print('Low-pass Filter Applied: < ' + str(low_pass) + ' Hz')

    # add mean back into data
    clean_data = results.resid.T + np.reshape(
        data_mean, (Nvox, 1))  # add mean back into residuals

    # save out new data file
    print('Saving output file...')
    clean_data = np.reshape(clean_data, img.shape).astype('float32')
    new_img = nb.Nifti1Image(clean_data, img.affine, header=img.header)
    new_img.to_filename(save_img_file)

    ######### generate Rsquared map for confounds only
    if hp_filter:
        # first remove low-frequency information from data
        hp_cols.append(constant)
        model_first = regression.OLSModel(df[hp_cols].as_matrix())
        results_first = model_first.fit(data.T)
        results_first_resid = copy.deepcopy(results_first.resid)
        del results_first, model_first

        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(results_first_resid -
                           np.mean(results_first_resid, axis=0),
                           axis=0))

        # now regress out 'true' confounds to estimate their Rsquared
        nr_cols = [col for col in df.columns if 'drift' not in col]
        model_second = regression.OLSModel(df[nr_cols].as_matrix())
        results_second = model_second.fit(results_first_resid)

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_second.resid, axis=0))

        del results_second, model_second, results_first_resid

    elif not hp_filter:
        # compute sst - borrowed from matlab
        sst = np.square(
            np.linalg.norm(data.T - np.mean(data.T, axis=0), axis=0))

        # compute sse - borrowed from matlab
        sse = np.square(np.linalg.norm(results_orig_resid, axis=0))

        del results_orig_resid

    # compute rsquared of nuisance regressors
    zero_idx = np.logical_and(sst == 0, sse == 0)
    sse[zero_idx] = 1
    sst[zero_idx] = 1  # would be NaNs - become rsquared = 0
    rsquare = 1 - np.true_divide(sse, sst)
    rsquare[np.isnan(rsquare)] = 0

    ######### Visualizing DM & outputs
    fontsize = 12
    fontsize_title = 14
    def_img_size = 8

    if not out_figure_path:
        out_figure_path = save_img_file[0:save_img_file.find('.')] + '_figures'

    if not os.path.isdir(out_figure_path):
        os.mkdir(out_figure_path)
    png_append = '_' + img_name[0:img_name.find('.')] + '.png'
    print('Output directory: ' + out_figure_path)

    # DM corr matrix
    cm = df[df.columns[0:-1]].corr()
    curr_sz = copy.deepcopy(def_img_size)
    if cm.shape[0] > def_img_size:
        curr_sz = curr_sz + ((cm.shape[0] - curr_sz) * .3)
    mtx_scale = curr_sz * 100

    mask = np.zeros_like(cm, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    fig, ax = plt.subplots(figsize=(curr_sz, curr_sz))
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(cm,
                mask=mask,
                cmap=cmap,
                center=0,
                vmax=cm[cm < 1].max().max(),
                vmin=cm[cm < 1].min().min(),
                square=True,
                linewidths=.5,
                cbar_kws={"shrink": .6})
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(cm.columns.tolist(),
                       rotation=-30,
                       va='bottom',
                       fontsize=fontsize)
    ax.set_title('Nuisance Corr. Matrix', fontsize=fontsize_title)
    plt.tight_layout()
    file_corr_matrix = 'Corr_matrix_regressors' + png_append
    fig.savefig(pjoin(out_figure_path, file_corr_matrix))
    plt.close(fig)
    del fig, ax

    # DM of Nuisance Regressors (all)
    tr_label = 'TR (Volume #)'
    fig, ax = plt.subplots(figsize=(curr_sz - 4.1, def_img_size))
    x_scale_html = ((curr_sz - 4.1) / def_img_size) * 890
    reporting.plot_design_matrix(df, ax=ax)
    ax.set_title('Nuisance Design Matrix', fontsize=fontsize_title)
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation=60,
                       ha='right',
                       fontsize=fontsize)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=fontsize)
    ax.set_ylabel(tr_label, fontsize=fontsize)
    plt.tight_layout()
    file_design_matrix = 'Design_matrix' + png_append
    fig.savefig(pjoin(out_figure_path, file_design_matrix))
    plt.close(fig)
    del fig, ax

    # FD timeseries plot
    FD = 'FD'
    poss_names = ['FramewiseDisplacement', FD, 'framewisedisplacement', 'fd']
    fd_idx = [name in df_orig.columns for name in poss_names]
    if np.sum(fd_idx) > 0:
        FD_name = poss_names[fd_idx.index(True)]
        if sum(df_orig[FD_name].isnull()) > 0:
            df_orig[FD_name] = df_orig[FD_name].fillna(
                np.mean(df_orig[FD_name]))
        y = df_orig[FD_name].values
        Nremove = []
        sc_idx = []
        for thr_idx, thr in enumerate(FD_thr):
            idx = y >= thr
            sc_idx.append(copy.deepcopy(idx))
            for iidx in np.where(idx)[0]:
                for buffer in sc_range:
                    curr_idx = iidx + buffer
                    if 0 <= curr_idx < len(idx):
                        sc_idx[thr_idx][curr_idx] = True
            Nremove.append(np.sum(sc_idx[thr_idx]))

        Nplots = len(FD_thr)
        sns.set(font_scale=1.5)
        sns.set_style('ticks')
        fig, axes = plt.subplots(Nplots,
                                 1,
                                 figsize=(def_img_size * 1.5,
                                          def_img_size / 2),
                                 squeeze=False)
        sns.despine()
        bound = .4
        fd_mean = np.mean(y)
        for curr in np.arange(0, Nplots):
            axes[curr, 0].plot(y)
            axes[curr, 0].plot((-bound, Ntrs + bound),
                               FD_thr[curr] * np.ones((1, 2))[0],
                               '--',
                               color='black')
            axes[curr, 0].scatter(np.arange(0, Ntrs), y, s=20)

            if Nremove[curr] > 0:
                info = scipy.ndimage.measurements.label(sc_idx[curr])
                for cluster in np.arange(1, info[1] + 1):
                    temp = np.where(info[0] == cluster)[0]
                    axes[curr, 0].axvspan(temp.min() - bound,
                                          temp.max() + bound,
                                          alpha=.5,
                                          color='red')

            axes[curr, 0].set_ylabel('Framewise Disp. (' + FD + ')')
            axes[curr, 0].set_title(FD + ': ' +
                                    str(100 * Nremove[curr] / Ntrs)[0:4] +
                                    '% of scan (' + str(Nremove[curr]) +
                                    ' volumes) would be scrubbed (FD thr.= ' +
                                    str(FD_thr[curr]) + ')')
            plt.text(Ntrs + 1,
                     FD_thr[curr] - .01,
                     FD + ' = ' + str(FD_thr[curr]),
                     fontsize=fontsize)
            plt.text(Ntrs,
                     fd_mean - .01,
                     'avg = ' + str(fd_mean),
                     fontsize=fontsize)
            axes[curr, 0].set_xlim((-bound, Ntrs + 8))

        plt.tight_layout()
        axes[curr, 0].set_xlabel(tr_label)
        file_fd_plot = FD + '_timeseries' + png_append
        fig.savefig(pjoin(out_figure_path, file_fd_plot))
        plt.close(fig)
        del fig, axes
        print(FD + ' timeseries plot saved')

    else:
        print(FD + ' not found: ' + FD + ' timeseries not plotted')
        file_fd_plot = None

    # Carpet and DVARS plots - before & after nuisance regression

    # need to create mask file to input to DVARS function
    mask_file = pjoin(out_figure_path, 'mask_temp.nii.gz')
    nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
    nifti_masker.fit(img)
    nifti_masker.mask_img_.to_filename(mask_file)

    # create 2 or 3 carpet plots, depending on if LP filter is also applied
    Ncarpet = 2
    total_sz = int(16)
    carpet_scale = 840
    y_labels = ['Input (voxels)', 'Output \'cleaned\'']
    imgs = [img, new_img]
    img_files = [img_file, save_img_file]
    color = ['red', 'salmon']
    labels = ['input', 'cleaned']
    if lp_filter:
        Ncarpet = 3
        total_sz = int(20)
        carpet_scale = carpet_scale * (9 / 8)
        y_labels = ['Input', 'Clean Pre-LP', 'Clean LP']
        imgs.insert(1, temp_img)
        img_files.insert(1, temp_img_file)
        color.insert(1, 'firebrick')
        labels.insert(1, 'clean pre-LP')
        labels[-1] = 'clean LP'

    dvars = []
    print('Computing dvars...')
    for in_file in img_files:
        temp = nac.compute_dvars(in_file=in_file, in_mask=mask_file)[1]
        dvars.append(np.hstack((temp.mean(), temp)))
        del temp

    small_sz = 2
    fig = plt.figure(figsize=(def_img_size * 1.5,
                              def_img_size + ((Ncarpet - 2) * 1)))
    row_used = 0
    if np.sum(fd_idx) > 0:  # if FD data is available
        row_used = row_used + small_sz
        ax0 = plt.subplot2grid((total_sz, 1), (0, 0), rowspan=small_sz)
        ax0.plot(y)
        ax0.scatter(np.arange(0, Ntrs), y, s=10)
        curr = 0
        if Nremove[curr] > 0:
            info = scipy.ndimage.measurements.label(sc_idx[curr])
            for cluster in np.arange(1, info[1] + 1):
                temp = np.where(info[0] == cluster)[0]
                ax0.axvspan(temp.min() - bound,
                            temp.max() + bound,
                            alpha=.5,
                            color='red')
        ax0.set_ylabel(FD)

        for side in ["top", "right", "bottom"]:
            ax0.spines[side].set_color('none')
            ax0.spines[side].set_visible(False)

        ax0.set_xticks([])
        ax0.set_xlim((-.5, Ntrs - .5))
        ax0.spines["left"].set_position(('outward', 10))

    ax_d = plt.subplot2grid((total_sz, 1), (row_used, 0), rowspan=small_sz)
    for iplot in np.arange(len(dvars)):
        ax_d.plot(dvars[iplot], color=color[iplot], label=labels[iplot])
    ax_d.set_ylabel('DVARS')
    for side in ["top", "right", "bottom"]:
        ax_d.spines[side].set_color('none')
        ax_d.spines[side].set_visible(False)
    ax_d.set_xticks([])
    ax_d.set_xlim((-.5, Ntrs - .5))
    ax_d.spines["left"].set_position(('outward', 10))
    ax_d.legend(fontsize=fontsize - 2)
    row_used = row_used + small_sz

    st = 0
    carpet_each = int((total_sz - row_used) / Ncarpet)
    for idx, img_curr in enumerate(imgs):
        ax_curr = plt.subplot2grid((total_sz, 1), (row_used + st, 0),
                                   rowspan=carpet_each)
        fig = plotting.plot_carpet(img_curr, figure=fig, axes=ax_curr)
        ax_curr.set_ylabel(y_labels[idx])
        for side in ["bottom", "left"]:
            ax_curr.spines[side].set_position(('outward', 10))

        if idx < len(imgs) - 1:
            ax_curr.spines["bottom"].set_visible(False)
            ax_curr.set_xticklabels('')
            ax_curr.set_xlabel('')
            st = st + carpet_each

    file_carpet_plot = 'Carpet_plots' + png_append
    fig.savefig(pjoin(out_figure_path, file_carpet_plot))
    plt.close()
    del fig, ax0, ax_curr, ax_d, dvars
    os.remove(mask_file)
    print('Carpet/DVARS plots saved')
    if lp_filter:
        os.remove(temp_img_file)
        del temp_img

    # Display T-stat maps for nuisance regressors
    # create mean img
    img_size = (img.shape[0], img.shape[1], img.shape[2])
    mean_img = nb.Nifti1Image(np.reshape(data_mean, img_size), img.affine)
    mx = []
    for idx, col in enumerate(df.columns):
        if 'drift' not in col and constant not in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            mx.append(np.max(np.absolute([con.t.min(), con.t.max()])))
    mx = .8 * np.max(mx)
    t_png = 'Tstat_'
    file_tstat = []
    for idx, col in enumerate(df.columns):
        if 'drift' not in col and constant not in col:
            con_vector = np.zeros((1, df.shape[1]))
            con_vector[0, idx] = 1
            con = results.Tcontrast(con_vector)
            m_img = nb.Nifti1Image(np.reshape(con.t, img_size), img.affine)

            title_str = col + ' Tstat'
            fig = plotting.plot_stat_map(m_img,
                                         mean_img,
                                         threshold=3,
                                         colorbar=True,
                                         display_mode='z',
                                         vmax=mx,
                                         title=title_str,
                                         cut_coords=7)
            file_temp = t_png + col + png_append
            fig.savefig(pjoin(out_figure_path, file_temp))
            file_tstat.append({'name': col, 'file': file_temp})
            plt.close()
            del fig, file_temp
            print(title_str + ' map saved')

    # Display R-sq map for nuisance regressors
    m_img = nb.Nifti1Image(np.reshape(rsquare, img_size), img.affine)
    title_str = 'Nuisance Rsq'
    mx = .95 * rsquare.max()
    fig = plotting.plot_stat_map(m_img,
                                 mean_img,
                                 threshold=.2,
                                 colorbar=True,
                                 display_mode='z',
                                 vmax=mx,
                                 title=title_str,
                                 cut_coords=7)
    file_rsq_map = 'Rsquared' + png_append
    fig.savefig(pjoin(out_figure_path, file_rsq_map))
    plt.close()
    del fig
    print(title_str + ' map saved')

    ######### html report
    templateLoader = jinja2.FileSystemLoader(searchpath="/")
    templateEnv = jinja2.Environment(loader=templateLoader)

    templateVars = {
        "img_file": img_file,
        "save_img_file": save_img_file,
        "Ntrs": Ntrs,
        "tsv_file": tsv_file,
        "col_names": col_names,
        "hp_filter": hp_filter,
        "lp_filter": lp_filter,
        "file_design_matrix": file_design_matrix,
        "file_corr_matrix": file_corr_matrix,
        "file_fd_plot": file_fd_plot,
        "file_rsq_map": file_rsq_map,
        "file_tstat": file_tstat,
        "x_scale": x_scale_html,
        "mtx_scale": mtx_scale,
        "file_carpet_plot": file_carpet_plot,
        "carpet_scale": carpet_scale
    }

    TEMPLATE_FILE = pjoin(os.getcwd(), "report_template.html")
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(templateVars)

    html_file = pjoin(out_figure_path,
                      img_name[0:img_name.find('.')] + '.html')
    with open(html_file, "w") as f:
        f.write(outputText)

    print('')
    print('HTML report: ' + html_file)
    return new_img
Exemplo n.º 36
0
from nilearn.plotting import plot_stat_map

# Before visualizing, we transform the computed p-values to Nifti-like image
# using function `new_img_like` from nilearn.
from nilearn.image import new_img_like

# The first argument is a reference image and the second argument is the
# p-values data to convert into a new image. The output image keeps the same
# header information as the reference image.
log_p_values_img = new_img_like(fmri_img, log_p_values)

# Now we visualize the log p-values image on the functional mean image as
# background, with manually chosen coordinates and a colorbar on the right
# side of the plot (colorbar=True by default).
plot_stat_map(log_p_values_img,
              mean_img,
              title="p-values",
              cut_coords=cut_coords)

#############################################################################
# **Selecting features using f_classif**: This feature selection method is also
# available in the scikit-learn Python package, via the
# `sklearn.feature_selection.f_classif` function, which generalizes it to
# several classes. A quick sketch is shown below.
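# A minimal, self-contained sketch of such a univariate F-test; the toy arrays
# below stand in for a masked fMRI matrix (samples x voxels) and condition
# labels, they are not defined anywhere in this example.
import numpy as np
from sklearn.feature_selection import f_classif

rng = np.random.RandomState(0)
toy_fmri_masked = rng.randn(40, 500)        # 40 samples, 500 voxels
toy_target = rng.randint(0, 2, size=40)     # two conditions

F_scores, p_values = f_classif(toy_fmri_masked, toy_target)  # one score per voxel
print(F_scores.shape, p_values.shape)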

##############################################################################
# Build a mask from this statistical map (Improving the quality of the mask)
# --------------------------------------------------------------------------
# **Thresholding** - We threshold the map so that it better represents the
# voxels of interest: voxels with the lowest p-values appear as the most
# intense voxels. This is done simply by applying a threshold to the map's
# data array.
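# A short sketch (assuming `log_p_values` and `fmri_img` from the snippet
# above): zero out weakly significant voxels and turn the remainder into a
# binary mask image. The cutoff of 5 (i.e. p < 1e-5) is an arbitrary example.
import numpy as np
from nilearn.image import new_img_like

log_p_values_strong = log_p_values.copy()
log_p_values_strong[log_p_values_strong < 5] = 0
bin_p_values = (log_p_values_strong != 0)
mask_img = new_img_like(fmri_img, bin_p_values.astype(np.int8))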
Exemplo n.º 37
0
# Now we show how to visualize the retrieved datasets using the plotting
# tools from nilearn.

from nilearn import plotting

########################################
# Visualizing in - 'sagittal', 'coronal' and 'axial' with given coordinates
# -------------------------------------------------------------------------
# The first argument is the path to the filename of a contrast map. The
# optional argument `display_mode` is given the string 'ortho' to visualize
# the map in the three directions x, y, z, and the optional `cut_coords`
# argument is here a list of integers giving the coordinate of each slice
# in the order [x, y, z]. By default, the `colorbar` argument is set to True
# in plot_stat_map.
plotting.plot_stat_map(stat_img,
                       display_mode='ortho',
                       cut_coords=[36, -27, 60],
                       title="display_mode='ortho', cut_coords=[36, -27, 60]")

########################################
# Visualizing in - single view 'axial' with number of cuts=5
# -----------------------------------------------------------
# In this type of visualization, the `display_mode` argument is given the
# string 'z' for the axial direction, and passing the integer 5 (rather than
# a list) as `cut_coords` means that at most 5 slices are cut.
# The coordinates of the slices are selected automatically.
plotting.plot_stat_map(stat_img,
                       display_mode='z',
                       cut_coords=5,
                       title="display_mode='z', cut_coords=5")

########################################
Exemplo n.º 38
0
logp_thresh = -np.log(.05)

###############################################################################
# Fisher's (using functions)
# --------------------------------------------------
# Get images for analysis
files = dset.get_images(imtype='z')
files = [f for f in files if f]
z_imgs = [nib.load(f) for f in files]
z_data = apply_mask(z_imgs, mask_img)
print('{0} studies found.'.format(z_data.shape[0]))

result = fishers(z_data, mask_img)
fishers_result = unmask(result['z'], mask_img)
plot_stat_map(fishers_result,
              cut_coords=[0, 0, -8],
              draw_cross=False,
              cmap='RdBu_r')

###############################################################################
# Fisher's (using Estimators)
# --------------------------------------------------
# Here is the object-oriented approach
meta = Fishers()
meta.fit(dset)
plot_stat_map(meta.results.get_map('z'),
              cut_coords=[0, 0, -8],
              draw_cross=False,
              cmap='RdBu_r')

###############################################################################
# Stouffer's with fixed-effects inference
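# The example is truncated here. A sketch of what the estimator-based call
# might look like, mirroring the Fishers() pattern above; the `Stouffers`
# estimator and its import path are assumptions about the installed version.
from nimare.meta.ibma import Stouffers

meta = Stouffers()   # fixed-effects Stouffer's combination of the z maps
meta.fit(dset)       # `dset` as defined earlier in this example
plot_stat_map(meta.results.get_map('z'),
              cut_coords=[0, 0, -8],
              draw_cross=False,
              cmap='RdBu_r')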
Exemplo n.º 39
0
for train, test in cv:
    svc.fit(fmri_masked[train], target[train])
    prediction = svc.predict(fmri_masked[test])
    cv_scores.append(
        np.sum(prediction == target[test]) / float(np.size(target[test])))

print(cv_scores)

### Unmasking #################################################################

# Retrieve the SVC discriminating weights
coef_ = svc.coef_

# Reverse masking thanks to the Nifti Masker
coef_img = nifti_masker.inverse_transform(coef_)

# Save the coefficients as a Nifti image
coef_img.to_filename('haxby_svc_weights.nii')

### Visualization #############################################################
import pylab as plt
from nilearn.image.image import mean_img
from nilearn.plotting import plot_roi, plot_stat_map

mean_epi = mean_img(func_filename)
plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx")

plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx")

plt.show()
plotting.plot_connectome(mean_correlations,
                         coords_connectome,
                         edge_threshold='90%',
                         title=title)

################################################################################
# Plot regions extracted for only one specific network
# ----------------------------------------------------
components_img = atlas_harvard_oxford.maps
# First, we plot a network of index=1 without region extraction (left plot)
from nilearn import image

img = image.index_img(components_img, 1)
coords = plotting.find_xyz_cut_coords(img)
display = plotting.plot_stat_map(img,
                                 cut_coords=coords,
                                 colorbar=False,
                                 title='Showing one specific network')

################################################################################
# Now, we plot (right side) the same network after region extraction to show
# that connected regions are nicely separated.
# Each extracted brain region is identified by a separate color.

# For this, we take the indices of all the extracted regions related to the
# original network given as index=1.
regions_indices_of_map3 = np.where(np.array(regions_index) == 1)

display = plotting.plot_anat(cut_coords=coords,
                             title='Regions from this network')

# Add as an overlay all the regions of index 1 (see the sketch below)
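# The overlay code is truncated here. A sketch of how it could be done,
# assuming `regions_extracted_img` (the 4D image of extracted regions, e.g.
# RegionExtractor.regions_img_) is available from the omitted part of the
# example:
for index in regions_indices_of_map3[0]:
    display.add_overlay(image.index_img(regions_extracted_img, index),
                        cmap=plotting.cm.black_blue)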
Exemplo n.º 41
0
            print('dot product is done')
            # maybe save the z-image and run randomise on that?
            sbc_hb_z = np.arctanh(sbc_hb)
            print('we have z values')
            sbc_hb_img = brain_masker.inverse_transform(sbc_hb.T)
            sbc_hb_z_img = brain_masker.inverse_transform(sbc_hb_z.T)
            print('and a z-value image')

            output_png = join(sink_dir, level,
                              '{0}-{1}_sbc_hb.png'.format(s, run))
            output_nii = join(sink_dir, level,
                              '{0}-{1}_sbc_hb.nii.gz'.format(s, run))
            output_z_nii = join(sink_dir, level,
                                '{0}-{1}_sbc_z_hb.nii.gz'.format(s, run))

            rmaps.append(output_nii)
            zmaps.append(output_z_nii)
            sbc_hb_img.to_filename(output_nii)
            sbc_hb_z_img.to_filename(output_z_nii)
            plotting.plot_stat_map(sbc_hb_img,
                                   bg_img=mean_func,
                                   output_file=output_png)
        except Exception as e:
            print(e)
    try:
        avg_z_map = mean_img(zmaps)
        avg_z_map.to_filename(
            join(sink_dir, level, '{0}_mean_zmap.nii.gz'.format(s)))
    except Exception as e:
        print(e)
Exemplo n.º 42
0
# Print the results
print("Classification accuracy: %.4f / Chance level: %f" %
      (classification_accuracy, 1. / len(np.unique(conditions))))
# Classification accuracy: 0.9861 / Chance level: 0.5000


#############################################################################
# Visualize the results

# Look at the SVC's discriminating weights
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = masker.inverse_transform(coef)


# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
mean_img = image.mean_img(func_filename)

# Create the figure
from nilearn.plotting import plot_stat_map, show
plot_stat_map(weight_img, mean_img, title='SVM weights')

# Saving the results as a Nifti file may also be important
weight_img.to_filename('haxby_face_vs_house.nii')


show()
Exemplo n.º 43
0
nested_cv_scores = cross_val_score(grid, X, y, cv=5)

#NEST_SCORE = np.mean(nested_cv_scores)
print("Nested CV score: %.4f" % np.mean(nested_cv_scores))

# Here is the image
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = masker.inverse_transform(coef)

# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
mean_img = image.mean_img(dataset)
mean_img.to_filename(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/milkshake_vs_h2O/images/all_p1_mean_nimask.nii'
)

# Create the figure
from nilearn.plotting import plot_stat_map, show
display = plot_stat_map(weight_img, mean_img, title='Milkshake vs. H2O')
display.savefig(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/milkshake_vs_h2O/images/all_p1_SVM_nimask.png'
)
# Saving the results as a Nifti file may also be important
weight_img.to_filename(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/milkshake_vs_h2O/images/all_p1_SVM_nimask.nii'
)
Exemplo n.º 44
0
# Let's now retrieve a motor contrast from a Neurovault repository
motor_images = datasets.fetch_neurovault_motor_task()
print(motor_images.images)

###############################################################################
# motor_images.images is a list of filenames. We need to take the first one
tmap_filename = motor_images.images[0]

###############################################################################
# Visualizing a 3D file
# ----------------------
#
# The file contains a 3D volume, we can easily visualize it as a
# statistical map:
from nilearn import plotting
plotting.plot_stat_map(tmap_filename)

###############################################################################
# Visualizing works better with a threshold
plotting.plot_stat_map(tmap_filename, threshold=3)

###############################################################################
# Visualizing one volume in a 4D file
# -----------------------------------
#
# We can download resting-state networks from the Smith 2009 study on
# correspondence between rest and task
rsn = datasets.fetch_atlas_smith_2009()['rsn10']
print(rsn)
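# The example stops before showing the plot; a short sketch of how one volume
# of this 4D file could be displayed, using `index_img` to select it.
from nilearn import image

first_rsn = image.index_img(rsn, 0)   # rsn is 4D, this picks the first map
print(first_rsn.shape)
plotting.plot_stat_map(first_rsn, threshold=3)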

###############################################################################
Exemplo n.º 45
0
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
contrast_val = np.hstack(([-1, -1, 1, 1], np.zeros(n_columns - 4)))

#########################################################################
# Statistics for the first session
from nilearn import plotting
cut_coords = [-129, -126, 49]
contrast_id = 'DSt_minus_SSt'

fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
summary_statistics_session1 = fmri_glm.compute_contrast(contrast_val,
                                                        output_type='all')
plotting.plot_stat_map(summary_statistics_session1['z_score'],
                       bg_img=mean_img_,
                       threshold=3.0,
                       cut_coords=cut_coords,
                       title='{0}, first session'.format(contrast_id))

#########################################################################
# Statistics for the second session

fmri_glm = fmri_glm.fit(fmri_img[1], design_matrices=design_matrices[1])
summary_statistics_session2 = fmri_glm.compute_contrast(contrast_val,
                                                        output_type='all')
plotting.plot_stat_map(summary_statistics_session2['z_score'],
                       bg_img=mean_img_,
                       threshold=3.0,
                       cut_coords=cut_coords,
                       title='{0}, second session'.format(contrast_id))
Exemplo n.º 46
0
print("Nested CV score: %.4f" % np.mean(nested_cv_scores))

# In[ ]:

# Here is the image
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = masker.inverse_transform(coef)

# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
mean_img = image.mean_img(dataset)
mean_img.to_filename(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/LF_HS_vs_h2O/images/4wp2_k2_mean_nimask.nii'
)

# Create the figure
from nilearn.plotting import plot_stat_map, show
display = plot_stat_map(weight_img,
                        mean_img,
                        title='SVM weights LF_HS vs h2O 4 waves')
display.savefig(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/LF_HS_vs_h2O/images/4wp2_k2_nimask.png'
)
# Saving the results as a Nifti file may also be important
weight_img.to_filename(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/code/decoding/LF_HS_vs_h2O/images/4wp2_k2_nimask.nii'
)
Exemplo n.º 47
0
resampled_affine = resampled_stat_img.affine

template_img = load_img(template)
template_shape = template_img.shape
template_affine = template_img.affine

print("""Shape comparison:
- Original t-map image shape : {0}
- Resampled t-map image shape: {1}
- Template image shape       : {2}
""".format(original_shape, resampled_shape, template_shape))

print("""Affine comparison:
- Original t-map image affine :\n {0}
- Resampled t-map image affine:\n {1}
- Template image affine       :\n {2}
""".format(original_affine, resampled_affine, template_affine))

from nilearn import plotting

plotting.plot_stat_map(stat_img,
                       bg_img=template,
                       cut_coords=(36, -27, 66),
                       threshold=3,
                       title="t-map in original resolution")
plotting.plot_stat_map(resampled_stat_img,
                       bg_img=template,
                       cut_coords=(36, -27, 66),
                       threshold=3,
                       title="Resampled t-map")
plotting.show()
Exemplo n.º 48
0
### Fit TV-L1 #################################################################
# Here we're using the regressor object given that the task is to predict a
# continuous variable, the gain of the gamble.
from nilearn.decoding import SpaceNetRegressor
decoder = SpaceNetRegressor(
    mask=mask_img,
    penalty="tv-l1",
    eps=1e-1,  # prefer large alphas
    memory="cache")
decoder.fit(zmaps, object_category)  # fit

# Visualize TV-L1 weights
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
plot_stat_map(decoder.coef_img_,
              title="tv-l1",
              display_mode="yz",
              cut_coords=[20, -2])

### Fit Graph-Net #############################################################
decoder = SpaceNetRegressor(
    mask=mask_img,
    penalty="graph-net",
    eps=1e-1,  # prefer large alphas
    memory="cache")
decoder.fit(zmaps, object_category)  # fit

# Visualize Graph-Net weights
plot_stat_map(decoder.coef_img_,
              title="graph-net",
              display_mode="yz",
              cut_coords=[20, -2])
Exemplo n.º 49
0
axis = fig.subplots(nrows=U.shape[1] * 2, ncols=1)

for k in range(U.shape[1]):
    #k = 0
    idx = 2 * k
    map_img = map_img_l[k]

    #ax = fig.add_subplot(111)
    #ax.set_title("T-stats T>%.2f" %  tstats_thres)

    vmax = np.abs(map_arr).max()

    axis[idx].set_title("PC%i (EV:%.3f%%)" %  (k+1, explained_variance_ratio[k] * 100))
    plotting.plot_glass_brain(map_img, colorbar=True, vmax=vmax, figure=fig, axes=axis[idx])
    #pdf.savefig()
    display = plotting.plot_stat_map(map_img,
                                     colorbar=True,
                                     draw_cross=True,
                                     cmap=cmnl.cold_white_hot,
                                     figure=fig,
                                     axes=axis[idx + 1])
    plt.show()

pdf.savefig()
plt.savefig(prefix+"_components-brain-maps.png")
plt.close(fig)
pdf.close()

########################################################################################################################
# Post-processing shell commands (to be run in a terminal, not in Python):
# cd /neurospin/brainomics/2019_rundmc_wmh/analyses/201909_rundmc_wmh_pca/models/pca_enettv_0.000350_1.000_0.001
# fsl5.0-fslsplit components-brain-maps.nii.gz ./components-brain-maps_PC -t
# ~/git/scripts/brainomics/image_clusters_analysis_nilearn.py /neurospin/brainomics/2019_rundmc_wmh/analyses/201909_rundmc_wmh_pca/models/pca_enettv_0.000350_1.000_0.001/components-brain-maps_PC0000.nii.gz --atlas JHU --thresh_size 10
# ~/git/scripts/brainomics/image_clusters_analysis_nilearn.py /neurospin/brainomics/2019_rundmc_wmh/analyses/201909_rundmc_wmh_pca/models/pca_enettv_0.000350_1.000_0.001/components-brain-maps_PC0001.nii.gz --atlas JHU --thresh_size 10
# ~/git/scripts/brainomics/image_clusters_analysis_nilearn.py /neurospin/brainomics/2019_rundmc_wmh/analyses/201909_rundmc_wmh_pca/models/pca_enettv_0.000350_1.000_0.001/components-brain-maps_PC0002.nii.gz --atlas JHU --thresh_size 10

#
Exemplo n.º 50
0
def plotting_hrf_stats(v,
                       t_r,
                       hrf_ref=None,
                       stat_type='tp',
                       display_mode='ortho',
                       cut_coords=None,
                       masker=None,
                       atlas_type='havard',
                       atlas_kwargs=dict(),
                       n_scales=122,
                       plot_dir='.',
                       fname=None,
                       save_nifti=False,
                       verbose=False):
    """ Plot, and save as pdf, each stats HRF for each ROIs.

    Parameters
    ----------
    v : array, shape (n_hrf_rois, n_times_atom), the initial used HRFs
    t_r : float, Time of Repetition, fMRI acquisition parameter, the temporal
        resolution
    hrf_ref : array or None, shape (n_times_atom, ), (default=None), reference
        HRF to plot for comparison
    stat_type : str, (default='tp'), statistic to compute on each HRFs possible
        choice are ('tp', 'fwhm')
    display_mode : None or str, coords to cut the plotting, possible value are
        None to have x, y, z or 'x', 'y', 'z' for a single cut
    cut_coords : tuple or None, MNI coordinate to perform display
    masker : Nilearn-Masker like, masker class to perform the inverse Nifti
        transformation
    atlas_type : str or callable, (default='havard'), atlas type, possible
        choices are ['havard', 'basc', given-function]
    atlas_kwargs : dict, (default=dict()), additional kwargs for the atlas,
        if a function is passed.
    n_scales : int, (default=122), number of scale if atlas_type == 'basc'
    plot_dir : str, (default='.'), directory under which the pdf is saved
    fname : str, (default='v_{fwhm/tp}.pdf'), filename under which the pdf is
        saved
    save_nifti : bool, (default=False), whether or not to save the image as
        Nifti
    verbose : bool, (default=False), verbosity level
    """
    if stat_type not in ['tp', 'fwhm']:
        raise ValueError("stat_type should be in ['tp', 'fwhm'], "
                         "got {}".format(stat_type))
    if atlas_type == 'havard':
        _, atlas_rois = fetch_vascular_atlas()
    elif atlas_type == 'basc':
        n_scales_ = f"scale{int(n_scales)}"
        _, atlas_rois = fetch_atlas_basc_2015(n_scales=n_scales_)
    elif callable(atlas_type):
        _, atlas_rois = atlas_type(**atlas_kwargs)
    else:
        raise ValueError(f"atlas_type should belong to ['havard', 'basc', "
                         f"given-function], got {atlas_type}")
    hrf_rois = dict()
    rois = masker.transform(atlas_rois).astype(int).ravel()
    index = np.arange(rois.shape[-1])
    for roi_label in np.unique(rois):
        hrf_rois[roi_label] = index[roi_label == rois]
    _, roi_label_from_hrf_idx, _ = split_atlas(hrf_rois)
    raw_atlas_rois = atlas_rois.get_data()
    n_hrf_rois, n_times_atom = v.shape
    if hrf_ref is not None:
        if stat_type == 'tp':
            ref_stat = tp(t_r, hrf_ref)
        elif stat_type == 'fwhm':
            ref_stat = fwhm(t_r, hrf_ref)
    for m in range(n_hrf_rois):
        v_ = v[m, :]
        if stat_type == 'tp':
            stat_ = tp(t_r, v_)
            stat_name = 'TtP'
        elif stat_type == 'fwhm':
            stat_ = fwhm(t_r, v_)
            stat_name = 'FWHM'
        if hrf_ref is not None:
            stat_ -= ref_stat
            title = "{0}(-{0}-reference) map (s)".format(stat_name)
        else:
            title = "{} map (s)".format(stat_name)
        label = roi_label_from_hrf_idx[m]
        raw_atlas_rois[raw_atlas_rois == label] = stat_
    stats_map = image.new_img_like(atlas_rois, raw_atlas_rois)
    plotting.plot_stat_map(stats_map,
                           title=title,
                           colorbar=True,
                           display_mode=display_mode,
                           cut_coords=cut_coords,
                           symmetric_cbar=False)
    if save_nifti:
        nii_filename = os.path.join(plot_dir, "v_{}.nii".format(stat_type))
        stats_map.to_filename(nii_filename)
    if fname is None:
        fname = "v_{}.pdf".format(stat_type)
    fname = os.path.join(plot_dir, fname)
    plt.savefig(fname, dpi=150)
    if verbose:
        print("Saving plot under '{0}'".format(fname))
Exemplo n.º 51
0
from nilearn.plotting import plot_stat_map, show

# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
from nilearn.image import get_data

mean_fmri_img = image.mean_img(func_filename)

threshold = -np.log10(0.1)  # 10% corrected

vmax = min(signed_neg_log_pvals.max(),
           neg_log_pvals_bonferroni.max())

# Plot thresholded p-values map corresponding to F-scores
display = plot_stat_map(neg_log_pvals_bonferroni_unmasked, mean_fmri_img,
                        threshold=threshold, cmap=plt.cm.RdBu_r,
                        display_mode='z', cut_coords=[-1, ],
                        vmax=vmax)

neg_log_pvals_bonferroni_data = get_data(neg_log_pvals_bonferroni_unmasked)
n_detections = (neg_log_pvals_bonferroni_data > threshold).sum()
title = ('Negative $\\log_{10}$ p-values'
         '\n(Parametric two-sided F-test'
         '\n+ Bonferroni correction)'
         '\n%d detections') % n_detections

display.title(title, y=1.1)

# Plot permutation p-values map
display = plot_stat_map(signed_neg_log_pvals_unmasked, mean_fmri_img,
                        threshold=threshold, cmap=plt.cm.RdBu_r,
                        display_mode='z', cut_coords=[-1, ],
Exemplo n.º 52
0
# the following line:
canica_components_img.to_filename(
    '/home/bk/Desktop/bkrest/canica_resting_state.nii.gz')

from nilearn.plotting import plot_prob_atlas

# Plot all ICA components together
plot_prob_atlas(canica_components_img, title='All ICA components')

from nilearn.image import iter_img
from nilearn.plotting import plot_stat_map

for i, cur_img in enumerate(iter_img(canica_components_img)):
    plot_stat_map(cur_img,
                  display_mode="z",
                  title="IC %d" % i,
                  cut_coords=1,
                  colorbar=False)

from nilearn.decomposition import DictLearning

dict_learning = DictLearning(n_components=20,
                             memory="nilearn_cache",
                             memory_level=2,
                             verbose=1,
                             random_state=0,
                             n_epochs=1,
                             mask_strategy='template')

print('[Example] Fitting dictionary learning model')
dict_learning.fit(func_filenames)
Exemplo n.º 53
0
# taken from Wikipedia
query = "Aphasia is an inability to comprehend or formulate language"

response = encoder(query)
print(response.keys())

######################################################################
# The "z_map" entry of the results is a brain map showing the anatomical
# regions that are most strongly associated with the query in the neuroimaging
# literature. It is a `Nifti1Image` which can be saved, displayed, etc.
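# For instance, it can be written to disk like any Nifti1Image (the filename
# here is only an example):
response["z_map"].to_filename("aphasia_z_map.nii.gz")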

from nilearn import plotting

print(type(response["z_map"]))
plotting.plot_stat_map(response["z_map"],
                       display_mode="z",
                       title="aphasia",
                       threshold=3.1)

######################################################################
#

# Display the map on the cortical surface:
view = plotting.view_img_on_surf(response["z_map"], threshold=3.1)
view.open_in_browser()
# (in a Jupyter notebook, we can display an inline view):
view

######################################################################
#

# Or open interactive viewer:
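# (The viewer code is truncated here; a sketch using `view_img`, with the same
# threshold as above.)
view = plotting.view_img(response["z_map"], threshold=3.1)
view.open_in_browser()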
                                 design_matrix=design_matrix.iloc[:, 0:40],
                                 ax=ax[i])

    fig.savefig(f'{out_dir}{sub}/figures/{sub}_contrasts_trials.png')

    #--- trial effects estimation
    zmaps = np.zeros((89, 105, 89, n_trials))

    for i in range(n_trials):
        zmap = fmri_glm_non_smoothed.compute_contrast(np.asarray(
            trial_contrasts[f'Q{i+1:02}']),
                                                      output_type='z_score')
        zmaps[:, :, :, i] = zmap.get_fdata()
        p = plot_stat_map(zmap,
                          threshold=3,
                          display_mode='z',
                          cut_coords=8,
                          black_bg=False,
                          title='Q{:0>2d}'.format(i + 1))
        p.savefig(f'{out_dir}{sub}/figures/{sub}_Q{i+1:02}_zmap.png')

    zmap_img = nib.Nifti1Image(zmaps, zmap.affine, zmap.header)
    nib.save(zmap_img, f'{out_dir}{sub}/{sub}_{task}_Q_all_zmaps.nii.gz')

    #--- overall effect estimation
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(17, 3)
    plot_contrast_matrix(trial_contrasts['overall'],
                         design_matrix=design_matrix.iloc[:, 0:40],
                         ax=ax)
    fig.savefig(f'{out_dir}{sub}/figures/{sub}_contrast_overall.png')
Exemplo n.º 55
0
from gclda.utils import get_resource_path

###############################################################################
# Load model and initialize decoder
# ----------------------------------
model_file = join(get_resource_path(), 'models/Neurosynth2015Filtered2',
                  'model_200topics_2015Filtered2_10000iters.pklz')
model = Model.load(model_file)

###############################################################################
# Read in image to decode
# --------------------------------------
file_to_decode = '../data/faces_specificity_z.nii.gz'
img_to_decode = nib.load(file_to_decode)
fig = plotting.plot_stat_map(img_to_decode,
                             display_mode='z',
                             threshold=3.290527,
                             cut_coords=[-28, -4, 20, 50])

###############################################################################
# Decode image
# -------------
df, topic_weights = decode_continuous(model, img_to_decode)

###############################################################################
# Get associated terms
# ---------------------
df = df.sort_values(by='Weight', ascending=False)
print(df.head(10))

###############################################################################
# Plot topic weights
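# The plotting code is cut off here; a minimal sketch of a bar plot of the ten
# strongest topic weights, assuming `topic_weights` is the 1-D array returned
# by decode_continuous above.
import numpy as np
import matplotlib.pyplot as plt

order = np.argsort(topic_weights)[::-1][:10]
fig, ax = plt.subplots(figsize=(10, 4))
ax.bar(np.arange(len(order)), topic_weights[order])
ax.set_xticks(np.arange(len(order)))
ax.set_xticklabels(order)
ax.set_xlabel('Topic index')
ax.set_ylabel('Weight')
fig.savefig('topic_weight_plot.png', dpi=150)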
Exemplo n.º 56
0
def plot_brain(objIn,
               how="full",
               thr_upper=None,
               thr_lower=None,
               save=False,
               **kwargs):
    """
    More complete brain plotting of a Brain_Data instance
    Args:
        obj: (Brain_Data) object to plot
        how: (str) whether to plot a glass brain 'glass', 3 view-multi-slice mni 'mni', or both 'full'
        thr_upper: (str/float) thresholding of image. Can be string for percentage, or float for data units (see Brain_Data.threshold())
        thr_lower: (str/float) thresholding of image. Can be string for percentage, or float for data units (see Brain_Data.threshold())
        save (str): if a string file name or path is provided plots will be saved into this directory appended with the orientation they belong to
        kwargs: optionals args to nilearn plot functions (e.g. vmax)

    """
    if thr_upper or thr_lower:
        obj = objIn.threshold(upper=thr_upper, lower=thr_lower)
    else:
        obj = objIn.copy()

    views = ["x", "y", "z"]
    coords = [
        range(-50, 51, 8),
        range(-80, 50, 10),
        range(-40, 71, 9),
    ]  # [-88,-72,-58,-38,-26,8,20,34,46]
    cmap = "RdBu_r"

    if thr_upper is None and thr_lower is None:
        print("Plotting unthresholded image")
    else:
        if isinstance(thr_upper, str):
            print("Plotting top %s of voxels" % thr_upper)
        elif isinstance(thr_upper, (float, int)):
            print("Plotting voxels with stat value >= %s" % thr_upper)
        if isinstance(thr_lower, str):
            print("Plotting lower %s of voxels" % thr_lower)
        elif isinstance(thr_lower, (float, int)):
            print("Plotting voxels with stat value <= %s" % thr_lower)

    if save:
        path, filename = os.path.split(save)
        filename, extension = filename.split(".")
        glass_save = os.path.join(path, filename + "_glass." + extension)
        x_save = os.path.join(path, filename + "_x." + extension)
        y_save = os.path.join(path, filename + "_y." + extension)
        z_save = os.path.join(path, filename + "_z." + extension)
    else:
        glass_save, x_save, y_save, z_save = None, None, None, None

    saves = [x_save, y_save, z_save]

    if how == "full":
        plot_glass_brain(obj.to_nifti(),
                         display_mode="lzry",
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
        if save:
            plt.savefig(glass_save, bbox_inches="tight")
        for v, c, savefile in zip(views, coords, saves):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)["brain"],
                          **kwargs)
            if save:
                plt.savefig(savefile, bbox_inches="tight")
    elif how == "glass":
        plot_glass_brain(obj.to_nifti(),
                         display_mode="lzry",
                         colorbar=True,
                         cmap=cmap,
                         plot_abs=False,
                         **kwargs)
        if save:
            plt.savefig(glass_save, bbox_inches="tight")
    elif how == "mni":
        for v, c, savefile in zip(views, coords, saves):
            plot_stat_map(obj.to_nifti(),
                          cut_coords=c,
                          display_mode=v,
                          cmap=cmap,
                          bg_img=resolve_mni_path(MNI_Template)["brain"],
                          **kwargs)
            if save:
                plt.savefig(savefile, bbox_inches="tight")
    del obj  # save memory
    return
    masker = estimator.masker_
    # Drop output maps to a Nifti file
    components_img = masker.inverse_transform(estimator.components_)
    components_img.to_filename('%s_resting_state.nii.gz' % names[estimator])
    components_imgs.append(components_img)

###############################################################################
# Visualize the results
from nilearn.plotting import (plot_prob_atlas, find_xyz_cut_coords, show,
                              plot_stat_map)
from nilearn.image import index_img

# Selecting specific maps to display: maps were manually chosen to be similar
indices = {dict_learning: 1, canica: 31}
# We select relevant cut coordinates for displaying
cut_component = index_img(components_imgs[0], indices[dict_learning])
cut_coords = find_xyz_cut_coords(cut_component)
for estimator, components in zip(estimators, components_imgs):
    # 4D plotting
    plot_prob_atlas(components,
                    view_type="filled_contours",
                    title="%s" % names[estimator],
                    cut_coords=cut_coords,
                    colorbar=False)
    # 3D plotting
    plot_stat_map(index_img(components, indices[estimator]),
                  title="%s" % names[estimator],
                  cut_coords=cut_coords,
                  colorbar=False)
show()
Exemplo n.º 58
0
def plotting_spatial_comp(u,
                          variances,
                          masker,
                          plot_dir='.',
                          fname=None,
                          display_mode='ortho',
                          perc_voxels_to_retain=0.1,
                          bg_img=None,
                          save_nifti=False,
                          verbose=False):
    """ Plot, and save as pdf, each spatial estimated component.

    Parameters
    ----------
    u : array, shape (n_atoms, n_voxels), the spatial maps
    variances : array, shape (n_atoms, ), the ordered variance of each
        component
    masker : Nilearn-Masker like, masker class to perform the inverse Nifti
        transformation
    plot_dir : str, (default='.'), directory under which the pdf is saved
    fname : str, (default='u.pdf'), filename under which the pdf is saved
    display_mode : None or str, coords to cut the plotting, possible value are
        None to have x, y, z or 'x', 'y', 'z' for a single cut
    perc_voxels_to_retain : float, (default=0.1), percentage of voxels to
        retain when plotting the spatial maps
    bg_img : Nifti-like or None, (default=None), background image, None means
        no image
    save_nifti : bool, (default=False), whether or not to save the image as
        Nifti
    verbose : bool, (default=False), verbosity level
    """
    if display_mode in ['x', 'y', 'z']:
        cut_coords = 1
        colorbar = False
        compress_plot = True
    else:
        display_mode = 'ortho'
        cut_coords = None
        colorbar = True
        compress_plot = False

    n_atoms, n_voxels = u.shape
    img_u = []
    for k in range(1, n_atoms + 1):
        u_k = u[k - 1]
        last_retained_voxel_idx = int(perc_voxels_to_retain * n_voxels)
        th = np.sort(u_k)[-last_retained_voxel_idx]
        expl_var = variances[k - 1]
        if compress_plot:
            title = "Map-{}".format(k)
        else:
            title = "Map-{} (explained variance = {:.2e})".format(k, expl_var)
        img_u_k = masker.inverse_transform(u_k)
        img_u.append(img_u_k)
        if bg_img is not None:
            plotting.plot_stat_map(img_u_k,
                                   title=title,
                                   colorbar=colorbar,
                                   display_mode=display_mode,
                                   cut_coords=cut_coords,
                                   threshold=th,
                                   bg_img=bg_img)
        else:
            plotting.plot_stat_map(img_u_k,
                                   title=title,
                                   colorbar=colorbar,
                                   display_mode=display_mode,
                                   cut_coords=cut_coords,
                                   threshold=th)
        if save_nifti:
            nii_filename = os.path.join(plot_dir, "u_{0:03d}.nii".format(k))
            img_u_k.to_filename(nii_filename)
        plt.savefig(os.path.join(plot_dir, "u_{0:03d}.pdf".format(k)), dpi=150)
    pdf_files = os.path.join(plot_dir, 'u_*.pdf')
    if fname is None:
        fname = 'u.pdf'
    pdf_file = os.path.join(plot_dir, fname)
    if compress_plot:
        cmd_cat = ("pdfjam --suffix nup --nup 8x5 --no-landscape {} "
                   "--outfile {}".format(pdf_files, pdf_file))
        subprocess.call(cmd_cat, shell=True)
        cmd_crop = "pdfcrop {0} {0}".format(pdf_file)
        subprocess.call(cmd_crop, shell=True)
    else:
        cmd = "pdftk {} cat output {}".format(pdf_files, pdf_file)
        subprocess.call(cmd, shell=True)
    subprocess.call("rm -f {}".format(pdf_files), shell=True)
    if verbose:
        print("Saving plot under '{0}'".format(pdf_file))
    # Grab extracted components, unmasked back into a Nifti image.
    # Note: for nilearn versions older than 0.4.1, components_img_
    # is not implemented. See the Note section above for details.
    components_img = estimator.components_img_
    components_img.to_filename('%s_resting_state.nii.gz' %
                               names[estimator])
    components_imgs.append(components_img)

###############################################################################
# Visualize the results
# ----------------------
from nilearn.plotting import (plot_prob_atlas, find_xyz_cut_coords, show,
                              plot_stat_map)
from nilearn.image import index_img

# Selecting specific maps to display: maps were manually chosen to be similar
indices = {dict_learning: 25, canica: 33}
# We select relevant cut coordinates for displaying
cut_component = index_img(components_imgs[0], indices[dict_learning])
cut_coords = find_xyz_cut_coords(cut_component)
for estimator, components in zip(estimators, components_imgs):
    # 4D plotting
    plot_prob_atlas(components, view_type="filled_contours",
                    title="%s" % names[estimator],
                    cut_coords=cut_coords, colorbar=False)
    # 3D plotting
    plot_stat_map(index_img(components, indices[estimator]),
                  title="%s" % names[estimator],
                  cut_coords=cut_coords, colorbar=False)
show()
design_matrix = pd.DataFrame(np.hstack((tested_var, np.ones_like(tested_var))),
                             columns=['fluency', 'intercept'])

###########################################################################
# Fit of the second-level model
from nistats.second_level_model import SecondLevelModel
model = SecondLevelModel(smoothing_fwhm=5.0)
model.fit(contrast_map_filenames, design_matrix=design_matrix)

##########################################################################
# To estimate the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = model.compute_contrast('fluency', output_type='z_score')

###########################################################################
# We compute the fdr-corrected p = 0.05 threshold for these data
from nistats.thresholding import map_threshold
_, threshold = map_threshold(z_map, alpha=.05, height_control='fdr')

###########################################################################
# Let us plot the second-level contrast at the computed threshold
from nilearn import plotting
plotting.plot_stat_map(
    z_map,
    threshold=threshold,
    colorbar=True,
    title='Group-level association between motor activity \n'
    'and reading fluency (fdr<0.05)')

plotting.show()