def show_slices(img, coords=None, threshold=0.1, cmap=None, prefix=None,
                show_colorbar=None, formatter='%.2f'):
    if cmap is None:
        cmap = pylab.cm.hot
    data, aff = img.get_data(), img.get_affine()
    anatimg = load('/usr/share/fsl/data/standard/MNI152_T1_1mm_brain.nii.gz')
    anatdata, anataff = anatimg.get_data(), anatimg.get_affine()
    anatdata = anatdata.astype(np.float)
    anatdata[anatdata < 10.] = np.nan
    outfile = 'cluster.svg'
    if prefix:
        outfile = '_'.join((prefix, outfile))
    outfile = os.path.join('figures', outfile)
    if coords is None:
        osl = viz.plot_map(np.asarray(data), aff, threshold=threshold,
                           cmap=cmap, black_bg=False)
        osl.frame_axes.figure.savefig(outfile, transparent=True)
    else:
        for idx, coord in enumerate(coords):
            outfile = 'cluster%02d' % idx
            if prefix:
                outfile = '_'.join((prefix, outfile))
            outfile = os.path.join('figures', outfile)
            osl = viz.plot_map(np.asarray(data), aff, anat=anatdata,
                               anat_affine=anataff, threshold=threshold,
                               cmap=cmap, black_bg=False, cut_coords=coord)
            if show_colorbar:
                cb = colorbar(gca().get_images()[1],
                              cax=axes([0.4, 0.075, 0.2, 0.025]),
                              orientation='horizontal', format=formatter)
                cb.set_ticks([cb._values.min(), cb._values.max()])
                show()
            osl.frame_axes.figure.savefig(outfile + '.svg',
                                          bbox_inches='tight',
                                          transparent=True)
            osl.frame_axes.figure.savefig(outfile + '.png', dpi=600,
                                          bbox_inches='tight',
                                          transparent=True)
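# A minimal usage sketch for the function above, not part of the original
# snippet. The input file name, coordinates, and prefix are illustrative only;
# it assumes the same pylab/numpy/nibabel/nipy imports the snippet relies on,
# plus an existing 'figures/' directory and the FSL MNI template path.
from nibabel import load

zimg = load('zstat1.nii.gz')  # hypothetical statistical image
show_slices(zimg, coords=[(-52, 10, 22), (46, -20, 48)],
            threshold=2.3, prefix='zstat1', show_colorbar=True)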
def plot_map(self, niimg, title):
    data = niimg.get_data().squeeze()
    params = self.plot_map_params.copy()
    fig = pl.figure(facecolor='k', edgecolor='k')
    if 'percentile' in self.plot_map_params:
        threshold = scoreatpercentile(
            data.ravel(), self.plot_map_params['percentile'])
        params.pop('percentile')
        params['threshold'] = threshold
    # vmax = np.abs(data).max()
    vmax = np.percentile(np.abs(data), 99)
    plot_map(data, affine=niimg.get_affine(), vmin=-vmax, vmax=vmax,
             title=title, figure=fig, **params)
    fname = title.replace(' ', '_').replace('/', '_')
    pl.savefig(os.path.join(self.report_dir, '%s.png' % fname),
               **self.save_params)
    path = os.path.join(self.report_dir, '%s.nii.gz' % fname)
    nb.save(niimg, path)
    pl.close('all')
    return path
def save_image(nifti, anat, cluster_dict, out_path, f, image_threshold=2,
               texcol=1, bgcol=0, iscale=2, text=None, **kwargs):
    '''Saves a single nifti image.

    Args:
        nifti (str or nipy.core.api.image.image.Image): nifti file to visualize.
        anat (nipy.core.api.image.image.Image): anatomical nifti file.
        cluster_dict (dict): dictionary of clusters.
        f (int): index.
        image_threshold (float): threshold for `plot_map`.
        texcol (float): text color.
        bgcol (float): background color.
        iscale (float): image scale.
        text (Optional[str]): text for figure.
        **kwargs: extra keyword arguments

    '''
    if isinstance(nifti, str):
        nifti = load_image(nifti)
        feature = nifti.get_data()
    elif isinstance(nifti, nipy.core.image.image.Image):
        feature = nifti.get_data()

    font = {'size': 8}
    rc('font', **font)

    coords = cluster_dict['top_clust']['coords']
    if coords is None:
        return

    feature /= feature.std()
    imax = np.max(np.absolute(feature))
    imin = -imax
    imshow_args = dict(vmax=imax, vmin=imin, alpha=0.7)

    coords = ([-coords[0], -coords[1], coords[2]])
    plt.axis('off')
    plt.text(0.05, 0.8, text, horizontalalignment='center',
             color=(texcol, texcol, texcol))

    try:
        plot_map(feature, xyz_affine(nifti), anat=anat.get_data(),
                 anat_affine=xyz_affine(anat), threshold=image_threshold,
                 cut_coords=coords, annotate=False, cmap=cmap,
                 draw_cross=False, **imshow_args)
    except Exception as e:
        return

    plt.savefig(out_path, transparent=True, facecolor=(bgcol, bgcol, bgcol))
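# A hedged calling sketch for the function above, not part of the original
# code. The cluster_dict layout is inferred from the
# cluster_dict['top_clust']['coords'] access in the body; the file names are
# placeholders, and the module-level `cmap` and `xyz_affine` the function
# relies on are assumed to be defined as in the surrounding snippets.
from nipy import load_image

stat_img = load_image('ica_component_0.nii.gz')      # hypothetical z-map
anat_img = load_image('mni_anatomical.nii.gz')       # hypothetical anatomical
cluster_dict = {'top_clust': {'coords': (30, -24, 42)}}  # peak (x, y, z) in mm
save_image(stat_img, anat_img, cluster_dict, 'component_0.png', 0,
           image_threshold=2, text='component 0')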
def show(self, label=None, rcmap=None, **options):
    self.P = np.array(self.P)
    if label is None:
        return viz.plot_map(self.P, self.affine, **options)
    else:
        color = rcmap or "black"
        slicer = viz.plot_map(self.P == label, self.affine, **options)
        slicer.contour_map(self.mask, self.affine, levels=[0],
                           colors=(color,))
        return slicer
def save_image(nifti, anat, cluster_dict, out_path, f, image_threshold=2,
               texcol=1, bgcol=0, iscale=2, text=None, **kwargs):
    if isinstance(nifti, str):
        nifti = load_image(nifti)
        feature = nifti.get_data()
    elif isinstance(nifti, nipy.core.image.image.Image):
        feature = nifti.get_data()

    font = {"size": 8}
    rc("font", **font)

    coords = cluster_dict["top_clust"]["coords"]
    if coords is None:
        logger.warning("No cluster found for %s" % out_path)
        return

    feature /= feature.std()
    imax = np.max(np.absolute(feature))
    imin = -imax
    imshow_args = dict(vmax=imax, vmin=imin, alpha=0.7)

    coords = ([-coords[0], -coords[1], coords[2]])

    #ax = fig.add_subplot(1, 1, 1)
    plt.axis("off")
    plt.text(0.05, 0.8, text, horizontalalignment="center",
             color=(texcol, texcol, texcol))

    try:
        plot_map(feature, xyz_affine(nifti), anat=anat.get_data(),
                 anat_affine=xyz_affine(anat), threshold=image_threshold,
                 cut_coords=coords, annotate=False, cmap=cmap,
                 draw_cross=False, **imshow_args)
    except Exception as e:
        logger.exception(e)
        return

    plt.savefig(out_path, transparent=True, facecolor=(bgcol, bgcol, bgcol))
def show(self, label=None, rcmap=None, **options):
    self.label_image = np.array(self.label_image)
    if label is None:
        return viz.plot_map(self.label_image, self.affine, **options)
    else:
        color = rcmap or 'black'
        slicer = viz.plot_map(self.label_image == label, self.affine,
                              **options)
        slicer.contour_map(self.mask, self.affine, levels=[0],
                           colors=(color, ))
        return slicer
def save_montage(NIFTI, ANAT, ONAME, SGN):
    nifti = load_image(NIFTI)
    anat = load_image(ANAT)
    imax = nifti.get_data().max()
    imin = nifti.get_data().min()
    imshow_args = {'vmax': imax, 'vmin': imin}
    mcmap = cmaps[SGN + 1]
    num_features = nifti.shape[-1]
    y = max([1, int(round(sqrt(num_features / 3)))])
    x = int(ceil(num_features / y) + 1)
    font = {'size': 8}
    rc('font', **font)
    f = figure(figsize=[iscale * y, iscale * x / 3])
    subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99,
                    wspace=0.1, hspace=0)
    for i in range(0, num_features):
        data = nifti.get_data()[:, :, :, i]
        data[sign(data) == negative(SGN)] = 0
        if max(abs(data.flatten())) > thr + 0.2:
            ax = subplot(x, y, i + 1)
            max_idx = np.unravel_index(argmax(data), data.shape)
            plot_map(data, xyz_affine(nifti), anat=anat.get_data(),
                     anat_affine=xyz_affine(anat), black_bg=True,
                     threshold=thr,
                     cut_coords=coord_transform(max_idx[0], max_idx[1],
                                                max_idx[2],
                                                xyz_affine(nifti)),
                     annotate=False, axes=ax, cmap=mcmap,
                     draw_cross=False, **imshow_args)
            text(0., 0.95, str(i), transform=ax.transAxes,
                 horizontalalignment='center', color=(1, 1, 1))
    savefig(ONAME, facecolor=(0, 0, 0))
def show(self, label=None, rcmap=None, **options):
    self.A = np.array(self.A)
    if label is not None:
        color = rcmap or "black"
        slicer = viz.plot_map(self.A[..., label], self.affine, **options)
        slicer.contour_map(self.mask, self.affine, levels=[0],
                           colors=(color,))
        return slicer
    else:
        slicer = viz.plot_map(self.mask, self.affine, **options)
        for i, label in enumerate(self.labels()):
            color = rcmap(1.0 * i / self.size) if rcmap is not None \
                else pl.cm.gist_rainbow(1.0 * i / self.size)
            slicer.contour_map(self.A[..., label], self.affine,
                               levels=[0], colors=(color,))
        return slicer
def brainplot(brainmat, savepath):
    """
    Takes a matrix (e.g. from loading an image file) and plots the activation;
    the figure is saved at 'savepath'.
    """
    # savepath should end in .png
    plt.figure()
    osl = viz.plot_map(np.asarray(brainmat), imgaff, anat=anat_data,
                       anat_affine=anat_aff, threshold=0.0001,
                       black_bg=True, draw_cross=False)
    pylab.savefig(savepath)
def show(self, label=None, rcmap=None, **options):
    self.label_image = np.array(self.label_image)
    if label is not None:
        color = rcmap or 'black'
        slicer = viz.plot_map(self.label_image[..., label], self.affine,
                              **options)
        slicer.contour_map(self.mask, self.affine, levels=[0],
                           colors=(color, ))
        return slicer
    else:
        slicer = viz.plot_map(self.mask, self.affine, **options)
        for i, label in enumerate(self.labels()):
            color = rcmap(1. * i / self.size) if rcmap is not None \
                else pl.cm.gist_rainbow(1. * i / self.size)
            slicer.contour_map(
                self.label_image[..., label], self.affine,
                levels=[0], colors=(color, ))
        return slicer
def show_slices(image_in, anat_file, coordinates, thr):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import pylab as pl
    import numpy as np
    from nibabel import load
    import os
    from nipy.labs import viz

    anat = anat_file
    img = image_in
    coords = coordinates[0]
    threshold = thr
    cmap = pl.cm.jet
    prefix = None
    show_colorbar = True
    formatter = '%.2f'

    img1 = load(img)
    data, aff = img1.get_data(), img1.get_affine()
    anatimg = load(anat)  # load('/usr/share/fsl/data/standard/MNI152_T1_1mm_brain.nii.gz')
    anatdata, anataff = anatimg.get_data(), anatimg.get_affine()
    anatdata = anatdata.astype(np.float)
    anatdata[anatdata < 10.] = np.nan
    outfile1 = os.path.split(img)[1][0:-7]
    outfiles = []
    for idx, coord in enumerate(coords):
        outfile = outfile1 + 'cluster%02d' % idx
        osl = viz.plot_map(np.asarray(data), aff, anat=anatdata,
                           anat_affine=anataff, threshold=threshold,
                           cmap=cmap, black_bg=False, cut_coords=coord)
        if show_colorbar:
            cb = plt.colorbar(plt.gca().get_images()[1],
                              cax=plt.axes([0.4, 0.075, 0.2, 0.025]),
                              orientation='horizontal', format=formatter)
            cb.set_ticks([cb._values.min(), cb._values.max()])
        #osl.frame_axes.figure.savefig(outfile+'.svg', bbox_inches='tight', transparent=True)
        osl.frame_axes.figure.savefig(
            os.path.join(os.getcwd(), outfile + '.png'),
            dpi=600, bbox_inches='tight', transparent=True)
        #pl.savefig(os.path.join(os.getcwd(),outfile+'.png'), dpi=600, bbox_inches='tight', transparent=True)
        outfiles.append(os.path.join(os.getcwd(), outfile + '.png'))
    return outfiles
def plot_brain(x, affine, template, template_affine, imgfile):
    new_brain = x
    img = nb.Nifti1Image(new_brain, affine)
    nb.save(img, imgfile + ".nii.gz")
    #title = imgfile.split("/")[-1]
    #slicer = plot_map(new_brain, affine, anat=template, anat_affine=template_affine, cmap=plt.cm.jet, title=title)
    slicer = plot_map(new_brain, affine, anat=template,
                      anat_affine=template_affine, cmap=cm.cold_hot,
                      black_bg=True)  # .cm.jet
    slicer.contour_map(template, template_affine, cmap=plt.cm.binary,
                       black_bg=True)  # plt.cm.Greys
    #plt.show()
    #plt.savefig(imgfile+'.png', format='png')
    plt.savefig(imgfile + '.pdf', format='pdf', facecolor='k', edgecolor='k')
def plot_bg(cut_coords=None, title=None):
    anat, anat_affine, anat_max = anat_cache._AnatCache.get_anat()
    figure = pl.figure(figsize=(8, 2.6), facecolor='w', edgecolor='w')
    ax = pl.axes([.0, .0, .85, 1], axisbg='w')
    slicer = plot_map(anat, anat_affine, cmap=pl.cm.gray,
                      vmin=.1 * anat_max, vmax=.8 * anat_max,
                      figure=figure, cut_coords=cut_coords,
                      axes=ax, )
    slicer.annotate()
    slicer.draw_cross()
    if title:
        slicer.title(title, x=.05, y=.9)
    return slicer
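# One way the helper above might be used, following the slicer.contour_map
# pattern seen in the other snippets here. This is a sketch, not part of the
# original code: the mask file, cut coordinates, and contour level are
# placeholders.
import nibabel as nib

roi_img = nib.load('roi_mask.nii.gz')  # hypothetical binary ROI mask
slicer = plot_bg(cut_coords=(0, -52, 26), title='ROI outline')
slicer.contour_map(roi_img.get_data(), roi_img.get_affine(),
                   levels=[0.5], colors=('r',))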
def overlay_new(stat_image, background_image, threshold):
    import os.path
    import pylab as pl
    from nibabel import load
    from nipy.labs import viz
    from pylab import colorbar, gca, axes
    import numpy as np

    # Second example, with a given anatomical image slicing in the Z
    # direction
    fnames = [os.path.abspath('z_view.png'),
              os.path.abspath('x_view.png'),
              os.path.abspath('y_view.png')]
    formatter = '%.2f'

    img = load(stat_image)
    data, affine = img.get_data(), img.get_affine()
    anat_img = load(background_image)
    anat = anat_img.get_data()
    anat_affine = anat_img.get_affine()
    anat = np.ones(anat.shape) - anat

    viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine,
                 slicer='z', threshold=threshold, cmap=viz.cm._cm.hot)
    cb = colorbar(gca().get_images()[1], cax=axes([0.3, 0.00, 0.4, 0.07]),
                  orientation='horizontal', format=formatter)
    cb.set_ticks([cb._values.min(), cb._values.max()])
    pl.savefig(fnames[0], bbox_inches='tight')

    viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine,
                 slicer='x', threshold=threshold, cmap=viz.cm._cm.hot)
    cb = colorbar(gca().get_images()[1], cax=axes([0.3, -0.06, 0.4, 0.07]),
                  orientation='horizontal', format=formatter)
    cb.set_ticks([cb._values.min(), cb._values.max()])
    pl.savefig(fnames[1], bbox_inches='tight')

    viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine,
                 slicer='y', threshold=threshold, cmap=viz.cm._cm.hot)
    cb = colorbar(gca().get_images()[1], cax=axes([0.3, -0.08, 0.4, 0.07]),
                  orientation='horizontal', format=formatter)
    cb.set_ticks([cb._values.min(), cb._values.max()])
    pl.savefig(fnames[2], bbox_inches='tight')
    pl.close()
    return fnames
print('Computing contrasts...')
mean_map = multi_session_model.means[0]  # for display
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' %
          (index + 1, len(contrasts), contrast_id))
    z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    z_map, = multi_session_model.contrast(
        [contrast_val] * 2, con_id=contrast_id, output_z=True)
    save(z_map, z_image_path)

    # make a snapshot of the contrast activation
    if contrast_id == 'Effects_of_interest':
        vmax = max(-z_map.get_data().min(), z_map.get_data().max())
        vmin = -vmax
        plot_map(z_map.get_data(), z_map.get_affine(),
                 anat=mean_map.get_data(), anat_affine=mean_map.get_affine(),
                 cmap=cm.cold_hot, vmin=vmin, vmax=vmax, figure=10,
                 threshold=2.5, black_bg=True)
        plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

print("All the results were written in %s" % write_dir)
plt.show()
contrasts = make_fiac_contrasts()
write_dir = os.getcwd()
print "Computing contrasts..."
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print " Contrast % 2i out of %i: %s" % (index + 1, len(contrasts),
                                            contrast_id)
    contrast_path = op.join(write_dir, "%s_z_map.nii" % contrast_id)
    write_array = mask_array.astype(np.float)
    ffx_z_map = (results[0].contrast(contrast_val) +
                 results[1].contrast(contrast_val)).z_score()
    write_array[mask_array] = ffx_z_map
    contrast_image = Nifti1Image(write_array, affine)
    save(contrast_image, contrast_path)
    vmax = max(-write_array.min(), write_array.max())
    vmin = -vmax
    plot_map(write_array, affine,
             anat=wmean, anat_affine=affine,
             cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
             figure=10, threshold=2.5, black_bg=True)
    plt.savefig(op.join(write_dir, "%s_z_map.png" % contrast_id))
    plt.clf()

print "All the results were written in %s" % write_dir
#########################################
# Estimate the contrasts
#########################################
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' %
          (index + 1, len(contrasts), contrast_id))

    # save the z_image
    image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id,
                               output_z=True)
    save(z_map, image_path)

    # Create snapshots of the contrasts
    vmax = max(-z_map.get_data().min(), z_map.get_data().max())
    plot_map(z_map.get_data(), z_map.get_affine(),
             cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
             slicer='z', black_bg=True, threshold=2.5,
             title=contrast_id)
    plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

print("All the results were written in %s" % write_dir)
plt.show()
def montage(nifti, anat, roi_dict, thr=2, fig=None, out_file=None,
            feature_dict=None, target_stat=None, target_value=None):
    if isinstance(anat, str):
        anat = load_image(anat)
    assert nifti is not None
    assert anat is not None
    assert roi_dict is not None

    texcol = 1
    bgcol = 0
    iscale = 2
    weights = nifti.get_data()
    #weights = weights / weights.std(axis=3)
    features = weights.shape[-1]

    indices = [0]
    y = 8
    x = int(ceil(1.0 * features / y))

    font = {"size": 8}
    rc("font", **font)

    if fig is None:
        fig = plt.figure(figsize=[iscale * y, iscale * x / 2.5])
    plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99,
                        wspace=0.1, hspace=0)

    for f in xrange(features):
        roi = roi_dict.get(f, None)
        if roi is None:
            continue

        coords = roi["top_clust"]["coords"]
        assert coords is not None

        feat = weights[:, :, :, f]
        feat = feat / feat.std()
        imax = np.max(np.absolute(feat))
        imin = -imax
        imshow_args = {"vmax": imax, "vmin": imin}

        coords = ([-coords[0], -coords[1], coords[2]])

        ax = fig.add_subplot(x, y, f + 1)
        plt.axis("off")

        try:
            plot_map(feat, xyz_affine(nifti), anat=anat.get_data(),
                     anat_affine=xyz_affine(anat), threshold=thr,
                     figure=fig, axes=ax, cut_coords=coords,
                     annotate=False, cmap=cmap, draw_cross=False,
                     **imshow_args)
        except Exception as e:
            logger.exception(e)

        plt.text(0.05, 0.8, str(f), transform=ax.transAxes,
                 horizontalalignment="center",
                 color=(texcol, texcol, texcol))
        pos = [(0.05, 0.05), (0.4, 0.05), (0.8, 0.05)]
        colors = ["purple", "yellow", "green"]
        if feature_dict is not None and feature_dict.get(f, None) is not None:
            d = feature_dict[f]
            for i, key in enumerate([k for k in d if k != "real_id"]):
                plt.text(pos[i][0], pos[i][1], "%s=%.2f" % (key, d[key]),
                         transform=ax.transAxes,
                         horizontalalignment="left", color=colors[i])
                if key == target_stat:
                    assert target_value is not None
                    if d[key] >= target_value:
                        p_fancy = FancyBboxPatch((0.1, 0.1),
                                                 2.5 - .1, 1 - .1,
                                                 boxstyle="round,pad=0.1",
                                                 ec=(1., 0.5, 1.),
                                                 fc="none")
                        ax.add_patch(p_fancy)
                    elif d[key] <= -target_value:
                        p_fancy = FancyBboxPatch((0.1, 0.1),
                                                 iscale * 2.5 - .1,
                                                 iscale - .1,
                                                 boxstyle="round,pad=0.1",
                                                 ec=(0., 0.5, 0.),
                                                 fc="none")
                        ax.add_patch(p_fancy)

    # stdout.write("\rSaving montage: DONE\n")
    if out_file is not None:
        plt.savefig(out_file, transparent=True,
                    facecolor=(bgcol, bgcol, bgcol))
    else:
        plt.draw()
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Example of activation image visualization with nipy.labs visualization tools
"""
print __doc__

import os.path

import pylab as pl
from nibabel import load
from nipy.labs import viz

import get_data_light

# get the data
data_dir = get_data_light.get_it()
img = load(os.path.join(data_dir, 'spmT_0029.nii.gz'))
data = img.get_data()
affine = img.get_affine()

viz.plot_map(data, affine, cut_coords=(-52, 10, 22),
             threshold=2.0, cmap=viz.cm.cold_hot)
pl.show()
########################################
# Perform a GLM analysis on H1
########################################
fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# Estimate the contrast
z_map, = fmri_glm.contrast(reading_vs_visual, output_z=True)

# Plot the contrast
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
plot_map(z_map.get_data(), z_map.get_affine(),
         cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
         slicer='z', black_bg=True, threshold=2.5,
         title='Reading vs visual')

# Count all the clusters for |Z| > 2.5
Z = z_map.get_data()
from scipy import ndimage
cluster_map, n_clusters = ndimage.label(np.abs(Z) > 2.5)
cluster_sizes = np.bincount(cluster_map.ravel())[1:]
print "Cluster sizes:"
print np.sort(cluster_sizes)

mask = fmri_glm.mask

########################################
# Perform GLM analysis on H0 (permuted)
print 'Computing contrasts...'
for index, (contrast_id, contrast_val) in enumerate(contrasts.iteritems()):
    print ' Contrast % 2i out of %i: %s' % (
        index + 1, len(contrasts), contrast_id)
    contrast_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    write_array = mask_array.astype(np.float)
    write_array[mask_array] = results.contrast(contrast_val).z_score()
    contrast_image = Nifti1Image(write_array, affine)
    save(contrast_image, contrast_path)
    vmax = max(-write_array.min(), write_array.max())
    plot_map(write_array, affine,
             cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
             anat=None, figure=10, threshold=2.5)
    plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))
    plt.clf()

#########################################
# End
#########################################
print "All the results were written in %s" % write_dir

# make a simple 2D plot
plot_map(write_array, affine,
import matplotlib.pyplot as plt  # needed for plt.show() below
from nibabel import load
from nipy.labs import viz

# activation image to look at
img = load('spmT_0029.nii.gz')
data = img.get_data()
affine = img.get_affine()

# visualize the activation on top of MNI template
viz.plot_map(data, affine, cut_coords=(-52, 10, 22),
             threshold=3.0, cmap=viz.cm.cold_hot)
plt.show()
#######################################
# Data and analysis parameters
#######################################
input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz')
if not path.exists(input_image):
    get_second_level_dataset()

brain_map = load(input_image)
vmin, vmax = brain_map.get_data().min(), brain_map.get_data().max()

# make a simple 2D plot
plot_map(brain_map.get_data(), brain_map.get_affine(),
         cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
         anat=None, figure=10, threshold=3)

# More plots using 3D
if True:  # replace with False to skip this
    plot_map(brain_map.get_data(), brain_map.get_affine(),
             cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
             anat=None, figure=11, threshold=3, do3d=True)
def generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        mask,
        design_matrices=None,
        subject_id=None,
        anat=None,
        anat_affine=None,
        threshold=2.3,
        cluster_th=0,
        cmap=viz.cm.cold_hot,
        start_time=None,
        user_script_name=None,
        progress_logger=None,
        shutdown_all_reloaders=True,
        **glm_kwargs):
    """Generates a report summarizing the statistical methods and results

    Parameters
    ----------
    stats_report_filename: string:
        html file to which output (generated html) will be written

    contrasts: dict of arrays
        contrasts we are interested in; same number of contrasts as zmaps;
        same keys

    zmaps: dict of image objects or strings (image filenames)
        zmaps for contrasts we are interested in; one per contrast id

    mask: 'nifti image object'
        brain mask for ROI

    design_matrix: list of 'DesignMatrix', `numpy.ndarray` objects or of
    strings (.png, .npz, etc.) for filenames
        design matrices for the experimental conditions

    contrasts: dict of arrays
        dictionary of contrasts of interest; the keys are the contrast ids,
        the values are contrast values (lists)

    z_maps: dict of 3D image objects or strings (image filenames)
        dict with same keys as 'contrasts'; the values are paths of z-maps
        for the respective contrasts

    anat: 3D array (optional)
        brain image to serve bg unto which activation maps will be plotted;
        passed to viz.plot_map API

    anat_affine: 2D array (optional)
        affine data for the anat

    threshold: float (optional)
        threshold to be applied to activation maps voxel-wise

    cluster_th: int (optional)
        minimal voxel count for clusters declared as 'activated'

    cmap: cmap object (default viz.cm.cold_hot)
        color-map to use in plotting activation maps

    start_time: string (optional)
        start time for the stats analysis (useful for the generated
        report page)

    user_script_name: string (optional, default None)
        existing filename, path to user script used in doing the analysis

    progress_logger: ProgressLogger object (optional)
        handle for logging progress

    shutdown_all_reloaders: bool (optional, default True)
        if True, all pages connected to the stats report page will be
        prevented from reloading after the stats report page has been
        completely generated

    **glm_kwargs:
        kwargs used to specify the control parameters used to specify the
        experimental paradigm and the GLM

    """
    # prepare for stats reporting
    if progress_logger is None:
        progress_logger = base_reporter.ProgressReport()

    output_dir = os.path.dirname(stats_report_filename)

    # copy css and js stuff to output dir
    base_reporter.copy_web_conf_files(output_dir)

    # initialize gallery of design matrices
    design_thumbs = base_reporter.ResultsGallery(
        loader_filename=os.path.join(output_dir, "design.html"))

    # initialize gallery of activation maps
    activation_thumbs = base_reporter.ResultsGallery(
        loader_filename=os.path.join(output_dir, "activation.html"))

    # get caller module handle from stack-frame
    if user_script_name is None:
        user_script_name = sys.argv[0]
    user_source_code = base_reporter.get_module_source_code(
        user_script_name)

    methods = """
    GLM and Statistical Inference have been done using the <i>%s</i> script, \
powered by <a href="%s">nipy</a>. Statistic images have been thresholded at \
Z>%s voxel-level.
    """ % (user_script_name, base_reporter.NIPY_URL, threshold)

    # report the control parameters used in the paradigm and analysis
    design_params = ""
    if len(glm_kwargs):
        design_params += ("The following control parameters were used for "
                          " specifying the experimental paradigm and fitting the "
                          "GLM:<br/><ul>")
        design_params += base_reporter.dict_to_html_ul(glm_kwargs)

    if start_time is None:
        start_time = time.ctime()

    report_title = "GLM and Statistical Inference"
    if not subject_id is None:
        report_title += " for subject %s" % subject_id

    level1_html_markup = base_reporter.get_subject_report_stats_html_template(
        ).substitute(
        title=report_title,
        start_time=start_time,
        subject_id=subject_id,

        # insert source code stub
        source_script_name=user_script_name,
        source_code=user_source_code,

        design_params=design_params,
        methods=methods,
        cmap=cmap.name)

    with open(stats_report_filename, 'w') as fd:
        fd.write(str(level1_html_markup))
        fd.close()

    progress_logger.log("<b>Level 1 statistics</b><br/><br/>")

    # create design matrix thumbs
    if not design_matrices is None:
        for design_matrix, j in zip(design_matrices,
                                    xrange(len(design_matrices))):
            # sanitize design_matrix type
            if isinstance(design_matrix, basestring):
                if not isinstance(design_matrix, DesignMatrix):
                    if design_matrix.endswith('.npz'):
                        npz = np.load(design_matrix)
                        design_matrix = DesignMatrix(npz['X'],
                                                     npz['conditions'],
                                                     )
                    else:
                        # XXX handle case of .png, jpeg design matrix image
                        raise TypeError(
                            "Unsupported design matrix type '%'" % type(
                                design_matrix))
            elif isinstance(design_matrix, np.ndarray) or isinstance(
                    design_matrix, list):
                X = np.array(design_matrix)
                assert len(X.shape) == 2
                conditions = ['%i' % i for i in xrange(X.shape[-1])]
                design_matrix = DesignMatrix(X, conditions)
            # else:
            #     raise TypeError(
            #         "Unsupported design matrix type '%s'" % type(
            #             design_matrix))

            # plot design_matrix proper
            ax = design_matrix.show(rescale=True)
            ax.set_position([.05, .25, .9, .65])
            dmat_outfile = os.path.join(output_dir,
                                        'design_matrix_%i.png' % (j + 1),
                                        )
            pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)

            thumb = base_reporter.Thumbnail()
            thumb.a = base_reporter.a(href=os.path.basename(dmat_outfile))
            thumb.img = base_reporter.img(src=os.path.basename(dmat_outfile),
                                          height="500px",
                                          )
            thumb.description = "Design Matrix"
            thumb.description += " %s" % (j + 1) if len(
                design_matrices) > 1 else ""

            # commit activation thumbnail into gallery
            design_thumbs.commit_thumbnails(thumb)

    # make colorbar (place-holder, will be overridden, once we've figured out
    # the correct end points) for activations
    colorbar_outfile = os.path.join(output_dir,
                                    'activation_colorbar.png')
    base_reporter.make_standalone_colorbar(
        cmap, threshold, 8., colorbar_outfile)

    # create activation thumbs
    _vmax = 0
    _vmin = threshold
    for j in xrange(len(contrasts)):
        contrast_id = contrasts.keys()[j]
        contrast_val = contrasts[contrast_id]
        z_map = z_maps[contrast_id]

        # compute cut_coords for viz.plot_map(..) API
        # XXX review computation of cut_coords, vmin, and vmax; not clean!!!
        if isinstance(z_map, basestring):
            z_map = nibabel.load(z_map)
        pos_data = z_map.get_data() * (np.abs(z_map.get_data()) > 0)
        n_axials = 12
        delta_z_axis = 3
        z_axis_max = np.unravel_index(
            pos_data.argmax(), z_map.shape)[2]
        z_axis_min = np.unravel_index(
            np.argmax(-pos_data), z_map.shape)[2]
        z_axis_min, z_axis_max = (min(z_axis_min, z_axis_max),
                                  max(z_axis_max, z_axis_min))
        z_axis_min = min(z_axis_min,
                         z_axis_max - delta_z_axis * n_axials)
        cut_coords = np.linspace(z_axis_min, z_axis_max, n_axials)

        # compute vmin and vmax
        vmax = pos_data.max()
        vmin = -vmax

        # vmax = max(- z_map.get_data().min(), z_map.get_data().max())
        # vmin = - vmax
        # # update colorbar endpoints
        _vmax = max(_vmax, vmax)

        # plot activation proper
        viz.plot_map(pos_data, z_map.get_affine(),
                     cmap=cmap,
                     anat=anat,
                     anat_affine=anat_affine,
                     vmin=vmin,
                     vmax=vmax,
                     threshold=threshold,
                     slicer='z',
                     cut_coords=cut_coords,
                     black_bg=True,
                     )

        # store activation plot
        z_map_plot = os.path.join(output_dir,
                                  "%s_z_map.png" % contrast_id)
        pl.savefig(z_map_plot, dpi=200, bbox_inches='tight',
                   facecolor="k",
                   edgecolor="k")

        stats_table = os.path.join(output_dir,
                                   "%s_stats_table.html" % contrast_id)

        # create thumbnail for activation
        thumbnail = base_reporter.Thumbnail()
        thumbnail.a = base_reporter.a(href=os.path.basename(stats_table))
        thumbnail.img = base_reporter.img(
            src=os.path.basename(z_map_plot), height="200px",)
        thumbnail.description = "%s contrast: %s" % (contrast_id,
                                                     contrast_val)
        activation_thumbs.commit_thumbnails(thumbnail)

        # generate level 1 stats table
        title = "Level 1 stats for %s contrast" % contrast_id
        generate_level1_stats_table(
            z_map, mask,
            stats_table,
            cluster_th=cluster_th,
            z_threshold=threshold,
            title=title,
            )

    # make colorbar for activations
    base_reporter.make_standalone_colorbar(
        cmap, _vmin, _vmax, colorbar_outfile)

    # we're done, shut down re-loaders
    progress_logger.log('<hr/>')

    # prevent stats report page from reloading henceforth
    progress_logger.finish(stats_report_filename)

    # prevent any related page from reloading
    if shutdown_all_reloaders:
        progress_logger.finish_dir(output_dir)

    # return generated html
    with open(stats_report_filename, 'r') as fd:
        stats_report = fd.read()
        fd.close()

    return stats_report
# GLM fitting
print('Starting fit...')
glms = []
for x, y in zip(X, Y):
    glm = GeneralLinearModel(x)
    data, mean = data_scaling(y.get_data()[mask_array].T)
    glm.fit(data, 'ar1')
    glms.append(glm)

# Compute the required contrast
print('Computing test contrast image...')
nregressors = X[0].shape[1]  ## should check that all design matrices have the same
c = np.zeros(nregressors)
c[0:4] = cvect
z_vals = (glms[0].contrast(c) + glms[1].contrast(c)).z_score()

# Show Zmap image
z_map = mask_array.astype(np.float)
z_map[mask_array] = z_vals
mean_map = mask_array.astype(np.float)
mean_map[mask_array] = mean
plot_map(z_map, affine,
         anat=mean_map, anat_affine=affine,
         cmap=cm.cold_hot,
         threshold=2.5,
         black_bg=True)
plt.show()
print ' Contrast % 2i out of %i: %s' % (index + 1, len(contrasts),
                                        contrast_id)
contrast_path = op.join(swd, '%s_z_map.nii' % contrast_id)
write_array = mask_array.astype(np.float)
z_values = st.norm.isf(st.t.sf(output[contrast_id]['t'], result.df_resid))
write_array[mask_array] = z_values
contrast_image = Nifti1Image(write_array, fmri_image.get_affine())
save(contrast_image, contrast_path)
affine = fmri_image.get_affine()
vmax = max(-write_array.min(), write_array.max())
plot_map(write_array, affine,
         cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
         anat=None, figure=10, threshold=2.5)
pylab.savefig(op.join(swd, '%s_z_map.png' % contrast_id))
pylab.clf()

#########################################
# End
#########################################
print "All the results were written in %s" % swd

plot_map(write_array, affine,
parser.error("Directory Path should be added as argument") fmri_files = [f for f in listdir(args.path) if isfile(join(args.path, f))] mask_file = mask_generator.make_mask(args.path, "./Group_Mask") design_files = design_matrix_generator.make_design(args.path) multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') # Compute the required contrast print('Computing test contrast image...') n_regressors = [np.load(f)['arr_0'].shape[1] for f in design_files] con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors] z_map, = multi_session_model.contrast(con) # Show Z-map image mean_map = multi_session_model.means[0] print(mean_map) plot_map(z_map.get_data(), z_map.get_affine(), anat=mean_map.get_data(), anat_affine=mean_map.get_affine(), cmap=cm.cold_hot, threshold=2.5, black_bg=True) plt.savefig('1st_analiz.png')
def montage(nifti, anat, roi_dict, thr=2, fig=None, out_file=None,
            order=None, stats=dict()):
    '''Saves a montage of nifti images.

    Args:
        nifti (list or nipy.core.api.image.image.Image): 4d nifti or list of
            3D niftis.
        anat (nipy.core.api.image.image.Image): anatomical nifti image.
        roi_dict (dict): dictionary of cluster dictionaries.
        out_file (str): output file path.
        order (list): List of integers. Order of montage.
        stats (Optional[dict]): extra statistics to print on montage as text.

    '''
    if isinstance(anat, str):
        anat = load_image(anat)
    assert nifti is not None
    assert anat is not None
    assert roi_dict is not None

    texcol = 0
    bgcol = 1
    iscale = 2
    if isinstance(nifti, list):
        weights = np.array([n.get_data() for n in nifti]).astype('float32')
        weights = weights.transpose(1, 2, 3, 0)
        nifti = nifti[0]
    else:
        weights = nifti.get_data()
    #weights = weights / weights.std(axis=3)
    features = weights.shape[-1]

    indices = [0]
    y = 8
    x = int(ceil(1.0 * features / y))

    font = {'size': 8}
    rc('font', **font)

    if fig is None:
        fig = plt.figure(figsize=[iscale * y, (1.5 * iscale) * x / 2.5])
    plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.99,
                        wspace=0.05, hspace=0.5)

    if order is None:
        order = range(features)

    for i, f in enumerate(order):
        roi = roi_dict.get(f, None)
        if roi is None:
            continue

        if 'top_clust' in roi.keys():
            coords = roi['top_clust']['coords']
        else:
            coords = (0., 0., 0.)
        assert coords is not None

        feat = weights[:, :, :, f]
        feat = feat / feat.std()
        imax = np.max(np.absolute(feat))
        imin = -imax
        imshow_args = {'vmax': imax, 'vmin': imin}

        coords = ([-coords[0], -coords[1], coords[2]])

        ax = fig.add_subplot(x, y, i + 1)
        #plt.axis('off')

        try:
            plot_map(feat, xyz_affine(nifti), anat=anat.get_data(),
                     anat_affine=xyz_affine(anat), threshold=thr,
                     figure=fig, axes=ax, cut_coords=coords,
                     annotate=False, cmap=cmap, draw_cross=False,
                     **imshow_args)
        except Exception as e:
            print e
            pass

        plt.text(0.05, 0.8, str(f), transform=ax.transAxes,
                 horizontalalignment='center',
                 color=(texcol, texcol, texcol))
        for j, r in enumerate(roi['top_clust']['rois']):
            plt.text(0.05, -0.15 * (.5 + j), r[:35],
                     transform=ax.transAxes,
                     horizontalalignment='left',
                     color=(0, 0, 0))
        pos = [(0.05, 0.05), (0.4, 0.05), (0.8, 0.05)]
        colors = ['purple', 'blue', 'green']
        for i, (k, vs) in enumerate(stats.iteritems()):
            v = vs[f]
            plt.text(pos[i][0], pos[i][1], '%s=%.2f' % (k, v),
                     transform=ax.transAxes,
                     horizontalalignment='left', color=colors[i])

    if out_file is not None:
        plt.savefig(out_file, transparent=True,
                    facecolor=(bgcol, bgcol, bgcol))
    else:
        plt.draw()
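# A hedged sketch of calling the montage function above, not part of the
# original code. The roi_dict layout (per-feature 'top_clust' with 'coords'
# and 'rois') is inferred from the accesses in the function body; all paths
# are placeholders, and the module-level `cmap` and `xyz_affine` used
# throughout these snippets are assumed to be defined.
from nipy import load_image

ica_img = load_image('ica_components_4d.nii.gz')   # hypothetical 4D image
anat_img = load_image('mni_anatomical.nii.gz')     # hypothetical anatomical
roi_dict = {
    0: {'top_clust': {'coords': (30, -24, 42),
                      'rois': ['Precentral Gyrus']}},
    1: {'top_clust': {'coords': (-2, -78, 6),
                      'rois': ['Occipital Pole']}},
}
montage(ica_img, anat_img, roi_dict, thr=2, out_file='montage.png')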
if not path.exists(write_dir):
    mkdir(write_dir)

print 'Computing contrasts...'
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print ' Contrast % 2i out of %i: %s' % (
        index + 1, len(contrasts), contrast_id)
    contrast_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    write_array = mask_array.astype(np.float)
    ffx_z_map = (results[0].contrast(contrast_val) +
                 results[1].contrast(contrast_val)).z_score()
    write_array[mask_array] = ffx_z_map
    contrast_image = Nifti1Image(write_array, affine)
    save(contrast_image, contrast_path)
    vmax = max(-write_array.min(), write_array.max())
    vmin = -vmax
    plot_map(write_array, affine,
             anat=wmean, anat_affine=affine,
             cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
             figure=10, threshold=2.5, black_bg=True)
    plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

print "All the results were written in %s" % write_dir
plt.show()
        z_maps[contrast_id] = map_path
        if map_type == 'effects':
            effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask


if __name__ == "__main__":
    mem = Memory(os.path.join(output_dir, "cache"))
    first_level_glms = map(mem.cache(do_subject_glm), subject_dirs)

    # plot stats (per subject)
    import matplotlib.pyplot as plt
    import nipy.labs.viz as viz
    all_masks = []
    all_effects_maps = []
    for (subject_id, anat, effects_maps, z_maps, contrasts,
         mask) in first_level_glms:
        all_masks.append(mask)
        anat_img = nibabel.load(anat)
        z_map = nibabel.load(z_maps.values()[0])
        all_effects_maps.append(effects_maps)
        for contrast_id, z_map in z_maps.iteritems():
            z_map = nibabel.load(z_map)
            viz.plot_map(z_map.get_data(), z_map.get_affine(),
                         anat=anat_img.get_data(),
                         anat_affine=anat_img.get_affine(),
                         slicer='ortho',
                         title="%s: %s" % (subject_id, contrast_id),
                         black_bg=True, cmap=viz.cm.cold_hot,
                         threshold=2.3)
            plt.savefig("%s_%s.png" % (subject_id, contrast_id))
# Input files
fmri_files = [example_data.get_filename('fiac', 'fiac0', run)
              for run in ['run1.nii.gz', 'run2.nii.gz']]
design_files = [example_data.get_filename('fiac', 'fiac0', run)
                for run in ['run1_design.npz', 'run2_design.npz']]
mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz')

# Load all the data
multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file)

# GLM fitting
multi_session_model.fit(do_scaling=True, model='ar1')

# Compute the required contrast
print('Computing test contrast image...')
n_regressors = [np.load(f)['X'].shape[1] for f in design_files]
con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors]
z_map, = multi_session_model.contrast(con)

# Show Z-map image
mean_map = multi_session_model.means[0]
plot_map(z_map.get_data(), z_map.get_affine(),
         anat=mean_map.get_data(), anat_affine=mean_map.get_affine(),
         cmap=cm.cold_hot,
         threshold=2.5,
         black_bg=True)
plt.show()
def plot_cv_tc(epi_data, session_ids, subject_id,
               do_plot=True,
               write_image=True, mask=True, bg_image=False,
               plot_diff=True,
               _output_dir=None,
               cv_tc_plot_outfile=None):
    """ Compute coefficient of variation of the data and plot it

    Parameters
    ----------
    epi_data: list of strings, input fMRI 4D images
    session_ids: list of strings of the same length as epi_data,
                 session indexes (for figures)
    subject_id: string, id of the subject (for figures)
    do_plot: bool, optional,
             should we plot the resulting time course
    write_image: bool, optional,
                 should we write the cv image
    mask: bool or string, optional,
          (string) path of a mask or (bool) should we mask the data
    bg_image: bool or string, optional,
              (string) path of a background image for display or (bool)
              should we compute such an image as the mean across inputs.
              if no, an MNI template is used (works for normalized data)
    """
    if _output_dir is None:
        if not cv_tc_plot_outfile is None:
            _output_dir = os.path.dirname(cv_tc_plot_outfile)
        else:
            _output_dir = tempfile.mkdtemp()

    cv_tc_ = []
    if isinstance(mask, basestring):
        mask_array = nibabel.load(mask).get_data() > 0
    elif mask == True:
        mask_array = compute_mask_files(epi_data[0])
    else:
        mask_array = None
    for (session_id, fmri_file) in zip(session_ids, epi_data):
        nim = do_3Dto4D_merge(fmri_file, output_dir=_output_dir)
        affine = nim.get_affine()
        if len(nim.shape) == 4:
            # get the data
            data = nim.get_data()
        else:
            raise TypeError("Expecting 4D image!")
            pass

        # compute the CV for the session
        cache_dir = os.path.join(_output_dir, "CV")
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        mem = joblib.Memory(cachedir=cache_dir, verbose=5)
        cv = mem.cache(compute_cv)(data, mask_array=mask_array)

        if write_image:
            # write an image
            nibabel.save(nibabel.Nifti1Image(cv, affine),
                         os.path.join(_output_dir,
                                      'cv_%s.nii' % session_id))
            if bg_image == False:
                try:
                    viz.plot_map(
                        cv, affine, threshold=.01, cmap=viz.cm.cold_hot)
                except IndexError:
                    print traceback.format_exc()
            else:
                if isinstance(bg_image, basestring):
                    _tmp = nibabel.load(bg_image)
                    anat, anat_affine = (
                        _tmp.get_data(), _tmp.get_affine())
                else:
                    anat, anat_affine = data.mean(-1), affine
                try:
                    viz.plot_map(
                        cv, affine, threshold=.01, cmap=viz.cm.cold_hot,
                        anat=anat, anat_affine=anat_affine)
                except IndexError:
                    print traceback.format_exc()

        # compute the time course of cv
        cv_tc_sess = np.median(
            np.sqrt((data[mask_array > 0].T /
                     data[mask_array > 0].mean(-1) - 1) ** 2), 1)

        cv_tc_.append(cv_tc_sess)
    cv_tc = np.concatenate(cv_tc_)

    if do_plot:
        # plot the time course of cv for different subjects
        pl.figure()
        pl.plot(cv_tc, label=subject_id)
        pl.legend()
        pl.xlabel('time(scans)')
        pl.ylabel('Median coefficient of variation')
        pl.axis('tight')

        if not cv_tc_plot_outfile is None:
            pl.savefig(cv_tc_plot_outfile,
                       bbox_inches="tight", dpi=200)

    return cv_tc
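# A usage sketch for the coefficient-of-variation helper above, not part of
# the original code. The session file names, mask path, and output path are
# illustrative; the function's own dependencies (nibabel, joblib, viz, and the
# compute_cv / do_3Dto4D_merge helpers) are assumed to be importable.
cv_tc = plot_cv_tc(['sub01_run1_preproc.nii.gz',
                    'sub01_run2_preproc.nii.gz'],
                   session_ids=['run1', 'run2'],
                   subject_id='sub01',
                   mask='sub01_brain_mask.nii.gz',
                   cv_tc_plot_outfile='sub01_cv_tc.png')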
def make_parcels(X, grp_mask, contrasts, affine, subjects, write_dir='/tmp/',
                 method='ward', n_clusters=500, do_ttest=False,
                 do_ftest=False, do_csv=False, write_mean=False):
    # Define the structure A of the data. Pixels connected to their neighbors.
    n_voxels, n_contrasts, n_subjects = X.shape
    if len(contrasts) != n_contrasts:
        raise ValueError('Incorrect Number of contrasts provided')

    # Define a spatial model
    shape = grp_mask.shape
    connectivity = grid_to_graph(shape[0], shape[1], shape[2],
                                 grp_mask).tocsr()

    # concatenate the data spatially
    Xv = np.reshape(X, (n_voxels, n_contrasts * n_subjects))
    X_ = PCA(n_components=100).fit_transform(Xv)

    if method == 'spectral':
        i, j = connectivity.nonzero()
        sigma = np.sum((Xv[i] - Xv[j]) ** 2, 1).mean()
        connectivity.data = np.exp(-np.sum(
            (Xv[i] - Xv[j]) ** 2, 1) / (2 * sigma))
        connectivity = connectivity.copy() + dia_matrix(
            (1.e-3 * np.ones(n_voxels), [0]),
            shape=(n_voxels, n_voxels)).tocsr()

    # Compute clustering
    print "Compute structured hierarchical clustering..."
    if method == 'ward':
        ward = Ward(n_clusters=n_clusters, connectivity=connectivity).fit(X_)
        labels = ward.labels_
    elif method == 'spectral':
        labels = spectral_clustering(connectivity, n_clusters=n_clusters,
                                     eigen_solver='arpack', n_init=5)
    elif method in ['k-means', 'kmeans']:
        _, labels, _ = k_means(X_, n_clusters=n_clusters, n_init=5,
                               precompute_distances=False, max_iter=30)
    else:
        xyz = np.array(np.where(grp_mask)).T
        _, labels, _ = k_means(xyz, n_clusters=n_clusters, n_init=1,
                               precompute_distances=False, max_iter=10)

    wlabel = grp_mask.astype(np.int16) - 1
    wlabel[wlabel == 0] = labels
    save(Nifti1Image(wlabel, affine),
         path.join(write_dir, 'parcel_%s_%d.nii' % (method, n_clusters)))

    ll, bic = 0, 0
    for c, contrast in enumerate(contrasts):
        mu_map = np.zeros_like(wlabel).astype(np.float)
        s1_map = np.zeros_like(wlabel).astype(np.float)
        s2_map = np.zeros_like(wlabel).astype(np.float)
        ll_, mu_, sigma1_, sigma2_, bic_ = parameter_map(X[:, c], labels,
                                                         null=False)
        ll += ll_.sum()
        bic += bic_.sum()
        if write_mean:
            mu_map[grp_mask == 1] = mu_[labels]
            s1_map[grp_mask == 1] = sigma1_[labels]
            s2_map[grp_mask == 1] = sigma2_[labels]
            save(Nifti1Image(mu_map, affine),
                 path.join(write_dir, 'mu_%s.nii' % contrast))
            save(Nifti1Image(s1_map, affine),
                 path.join(write_dir, 's1_%s.nii' % contrast))
            save(Nifti1Image(s2_map, affine),
                 path.join(write_dir, 's2_%s.nii' % contrast))

    # Get the signals per parcel
    mean_X = np.empty((n_clusters, n_contrasts, n_subjects), np.float)
    for k in range(n_clusters):
        mean_X[k] = X[labels == k].mean(0).reshape(n_subjects, n_contrasts).T

    if do_ttest:
        # create one-sample t-tests images
        wlabel[grp_mask == 1] = labels
        active = np.array(np.maximum(0, wlabel.astype(np.float)))
        for c, contrast in enumerate(contrasts):
            t_test = mean_X[:, c].mean(1) / mean_X[:, c].std(1) *\
                np.sqrt(n_subjects)
            active[grp_mask == 1] = t_test[(labels).astype(np.int16)]
            viz.plot_map(active, affine, threshold=4.0,
                         cmap=viz.cm.cold_hot, vmin=-20., vmax=20)

    if do_ftest:
        # pseudo F-test
        F_test = n_subjects * (mean_X.mean(2) ** 2 /
                               mean_X.var(2)).sum(1) / 3.
        active[grp_mask == 1] = F_test[(labels).astype(np.int16)]
        viz.plot_map(active, affine, threshold=4.0, cmap=viz.cm.cold_hot,
                     vmin=-20., vmax=20)
        save(Nifti1Image(active, affine), path.join(write_dir, 'F_RFX.nii'))

    if do_csv:
        # write parcel signals as csv file
        hash_ = hashlib.sha224(wlabel).hexdigest()
        for c, contrast in enumerate(contrasts):
            wpath = path.join(write_dir,
                              'contrast_%s_%s.csv' % (contrast, hash_))
            fid = open(wpath, 'wb')
            writer = csv.writer(fid, delimiter=' ')
            writer.writerow(subjects)
            pdata = mean_X[:, c]
            # write pdata
            for row in pdata:
                writer.writerow(row)
            fid.close()

    return ll, bic
########################################
# Output beta and variance images
########################################
beta_hat = fmri_glm.glms[0].get_beta()  # Least-squares estimates of the beta
variance_hat = fmri_glm.glms[0].get_mse()  # Estimates of the variance
mask = fmri_glm.mask.get_data() > 0

# output beta images
beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
beta_map[mask] = beta_hat.T
beta_image = Nifti1Image(beta_map, fmri_glm.affine)
beta_image.get_header()['descrip'] = (
    'Parameter estimates of the localizer dataset')
save(beta_image, path.join(write_dir, 'beta.nii'))
print "Beta image written in %s" % write_dir

variance_map = mask.astype(np.float)
variance_map[mask] = variance_hat

# Create a snapshot of the variance image
vmax = np.log(variance_hat.max())
plot_map(np.log(variance_map + .1),
         fmri_glm.affine,
         cmap=cm.hot_black_bone,
         vmin=np.log(0.1),
         vmax=vmax,
         anat=None,
         threshold=.1, alpha=.9)
plt.show()
# concatenate the individual images
first_level_image = concat_images(betas)

# set the model
design_matrix = np.ones(len(betas))[:, np.newaxis]  # only the intercept
grp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask)

# GLM fitting using ordinary least_squares
grp_model.fit(do_scaling=False, model='ols')

# specify and estimate the contrast
contrast_val = np.array(([[1]]))  # the only possible contrast !
z_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True)

# write the results
save(z_map, path.join(write_dir, 'one_sample_z_map.nii'))

# look at the result
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
vmin = -vmax
plot_map(z_map.get_data(), z_map.get_affine(),
         cmap=cm.cold_hot,
         vmin=vmin,
         vmax=vmax,
         threshold=3.,
         black_bg=True)
plt.savefig(path.join(write_dir, '%s_z_map.png' % 'one_sample'))
plt.show()

print("Wrote all the results in directory %s" % write_dir)
def generate_ica_report(
        stats_report_filename,
        ica_maps,
        mask=None,
        report_title='ICA Report',
        methods_text='ICA',
        anat=None,
        anat_affine=None,
        threshold=2.,
        cluster_th=0,
        cmap=viz.cm.cold_hot,
        start_time=None,
        user_script_name=None,
        progress_logger=None,
        shutdown_all_reloaders=True,
        **glm_kwargs):
    """Generates a report summarizing the statistical methods and results.

    Parameters
    ----------
    stats_report_filename: string
        html file to which the generated report will be written

    ica_maps: dict of 3D image objects or strings (image filenames)
        the ICA component maps to be reported; the keys are component ids

    mask: nifti image object (optional)
        brain mask for ROI

    report_title: string (optional, default 'ICA Report')
        title of the generated report page

    methods_text: string (optional, default 'ICA')
        description of the methods, inserted into the report

    anat: 3D array (optional)
        brain image onto which activation maps will be plotted;
        passed to the viz.plot_map API

    anat_affine: 2D array (optional)
        affine data for the anat

    threshold: float (optional)
        threshold to be applied to activation maps voxel-wise

    cluster_th: int (optional)
        minimal voxel count for clusters declared as 'activated'

    cmap: cmap object (default viz.cm.cold_hot)
        color-map to use in plotting activation maps

    start_time: string (optional)
        start time for the stats analysis (useful for the generated
        report page)

    user_script_name: string (optional, default None)
        existing filename, path to user script used in doing the analysis

    progress_logger: ProgressLogger object (optional)
        handle for logging progress

    shutdown_all_reloaders: bool (optional, default True)
        if True, all pages connected to the stats report page will be
        prevented from reloading after the stats report page has been
        completely generated

    **glm_kwargs:
        kwargs used to specify the control parameters of the experimental
        paradigm and the GLM
    """
    # prepare for stats reporting
    if progress_logger is None:
        progress_logger = base_reporter.ProgressReport()

    output_dir = os.path.dirname(stats_report_filename)

    # copy css and js stuff to output dir
    base_reporter.copy_web_conf_files(output_dir)

    # initialize gallery of activation maps
    activation_thumbs = base_reporter.ResultsGallery(
        loader_filename=os.path.join(output_dir, "activation.html"))

    # get caller module handle from stack-frame
    if user_script_name is None:
        user_script_name = sys.argv[0]
    user_source_code = base_reporter.get_module_source_code(user_script_name)

    if start_time is None:
        start_time = time.ctime()

    ica_html_markup = base_reporter.get_ica_html_template().substitute(
        title=report_title,
        start_time=start_time,
        # insert source code stub
        source_script_name=user_script_name,
        source_code=user_source_code,
        methods=methods_text,
        cmap=cmap.name)

    with open(stats_report_filename, 'w') as fd:
        fd.write(str(ica_html_markup))

    progress_logger.log("<b>ICA</b><br/><br/>")

    # make colorbar (place-holder, will be overridden once we've figured
    # out the correct end points) for activations
    colorbar_outfile = os.path.join(output_dir, 'activation_colorbar.png')
    base_reporter.make_standalone_colorbar(
        cmap, threshold, 8., colorbar_outfile)

    # generate thumbs for the gallery
    _vmax = 0
    _vmin = threshold
    for ica_map_id, ica_map in ica_maps.iteritems():
        # load the map
        if isinstance(ica_map, basestring):
            ica_map = nibabel.load(ica_map)

        # compute cut_coords for the viz.plot_map(..) API
        cut_coords = base_reporter.get_cut_coords(
            ica_map.get_data(), n_axials=12, delta_z_axis=3)

        # compute vmin and vmax
        vmin, vmax = base_reporter.compute_vmin_vmax(ica_map.get_data())

        # update colorbar endpoints
        _vmax = max(_vmax, vmax)
        _vmin = min(_vmin, vmin)

        # plot activation proper
        viz.plot_map(ica_map.get_data(), ica_map.get_affine(),
                     cmap=cmap, anat=anat, anat_affine=anat_affine,
                     vmin=vmin, vmax=vmax, threshold=threshold,
                     slicer='z', cut_coords=cut_coords, black_bg=True)

        # store activation plot
        ica_map_plot = os.path.join(output_dir,
                                    "%s_ica_map.png" % ica_map_id)
        pl.savefig(ica_map_plot, dpi=200, bbox_inches='tight',
                   facecolor="k", edgecolor="k")

        stats_table = ica_map_plot  # os.path.join(output_dir,
                                    #     "%s_stats_table.html" % ica_map_id)

        # create thumbnail for activation
        thumbnail = base_reporter.Thumbnail()
        thumbnail.a = base_reporter.a(href=os.path.basename(stats_table))
        thumbnail.img = base_reporter.img(
            src=os.path.basename(ica_map_plot), height="200px")
        thumbnail.description = "Component: %s" % ica_map_id

        activation_thumbs.commit_thumbnails(thumbnail)

    # make colorbar for activations
    base_reporter.make_standalone_colorbar(
        cmap, _vmin, _vmax, colorbar_outfile)

    # we're done; shut down re-loaders
    progress_logger.log('<hr/>')

    # prevent stats report page from reloading henceforth
    progress_logger.finish(stats_report_filename)

    # prevent any related page from reloading
    if shutdown_all_reloaders:
        progress_logger.finish_dir(output_dir)

    # return generated html
    with open(stats_report_filename, 'r') as fd:
        stats_report = fd.read()

    return stats_report
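# A minimal usage sketch (not from the original source): assuming a
# pypreprocess-style `base_reporter` module is importable and the component
# maps exist on disk, the report could be generated roughly as below.
# The file names and the contents of `ica_maps` are hypothetical.
ica_maps = {0: 'ica_component_00.nii.gz', 1: 'ica_component_01.nii.gz'}
report_html = generate_ica_report(
    'ica_report/report_stats.html',
    ica_maps,
    report_title='ICA Report: demo subject',
    threshold=2.,
    cmap=viz.cm.cold_hot)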
import os

import matplotlib.pyplot as plt
from nibabel import load

from nipy.labs import viz
from nipy.utils import example_data

# Local import
from get_data_light import get_second_level_dataset

# get the data
data_dir = get_second_level_dataset()

# First example, with an anatomical template
img = load(os.path.join(data_dir, 'spmT_0029.nii.gz'))
data = img.get_data()
affine = img.get_affine()

viz.plot_map(data, affine, cut_coords=(-52, 10, 22),
             threshold=2.0, cmap=viz.cm.cold_hot)
plt.savefig('ortho_view.png')

# Second example, with a given anatomical image, slicing in the Z direction
try:
    anat_img = load(example_data.get_filename('neurospin', 'sulcal2000',
                                              'nobias_anubis.nii.gz'))
    anat = anat_img.get_data()
    anat_affine = anat_img.get_affine()
except OSError as e:
    # File does not exist: the data package is not installed
    print(e)
    anat = None
    anat_affine = None

viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine,
             slicer='z', threshold=2.0, cmap=viz.cm.cold_hot)
def show_slices(data, affine,
                coords=None,
                cmap=None,
                show_colorbar=None,
                showCross=False,
                cluster_thr=0,
                annotate=True,  # TODO (KW): document
                template='../scripts/templates/MNI152_T1_1mm_brain.nii.gz',  # TODO (KW): document
                cluster_extent=0,  # assumed parameter: minimum cluster extent, used only in the file name
                output_folder='figures',  # assumed parameter: directory the figures are written to
                dpiRes=300,
                suffix='png',
                show_title=False):

    # Prepare background image
    anatimg = nb.load(template)
    anatdata, anataff = anatimg.get_data(), anatimg.affine
    anatdata = anatdata.astype(np.float)
    anatdata[anatdata < 10.] = np.nan

    # Create an output figure for each peak coordinate
    # (so a different figure for each cluster)
    for idx, coord in enumerate(coords):

        # Name the output file to include the cluster id,
        # the cluster threshold and the minimum cluster extent
        outfile = 'Cluster_{}_thr{:04.2f}_minext{:03.0f}'.format(
            idx, cluster_thr, cluster_extent)

        # If the show_title argument has been set to True then put the file
        # name and the peak coordinate in the title of the figure
        if show_title:
            title = '{} {}'.format(outfile, coord)
        else:
            title = ''

        # Plot three orthogonal views of the cluster sliced through the
        # peak coordinate
        osl = viz.plot_map(np.asarray(data), affine,
                           anat=anatdata, anat_affine=anataff,
                           threshold=cluster_thr, cmap=cmap,
                           annotate=annotate, black_bg=False,
                           cut_coords=coord, draw_cross=showCross,
                           slicer='ortho', title=title)

        # If the show_colorbar option is true then add a color bar
        # to the figure
        if show_colorbar:
            cbarLocation = [-0.1, 0.2, 0.015, 0.6]
            im = plt.gca().get_images()[1]
            cb = plt.colorbar(im, cax=plt.axes(cbarLocation),
                              orientation='horizontal', format='%.2f')
            cb.set_ticks([cb._values.min(), cb._values.max()])

        # Save the figure
        osl.frame_axes.figure.savefig(
            os.path.join(output_folder, '{}.{}'.format(outfile, suffix)),
            dpi=dpiRes, bbox_inches='tight', transparent=True)

        # Done, close the plot
        plt.close()
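# A minimal usage sketch for the function above. The statistical map
# 'zstat1.nii.gz' and the template path baked into the default arguments are
# hypothetical; `show_slices` is the function defined just above.
import nibabel as nb
from nipy.labs import viz

img = nb.load('zstat1.nii.gz')
show_slices(img.get_data(), img.affine,
            coords=[(-52, 10, 22)],
            cmap=viz.cm.cold_hot,
            cluster_thr=2.3,
            show_colorbar=True,
            show_title=True)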
# Perform a GLM analysis on H1
########################################
fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# Estimate the contrast
z_map, = fmri_glm.contrast(reading_vs_visual, output_z=True)

# Plot the contrast
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
plot_map(z_map.get_data(), z_map.get_affine(),
         cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
         slicer='z', black_bg=True, threshold=2.5,
         title='Reading vs visual')

# Count all the clusters for |Z| > 2.5
Z = z_map.get_data()
from scipy import ndimage
cluster_map, n_clusters = ndimage.label(np.abs(Z) > 2.5)
cluster_sizes = np.bincount(cluster_map.ravel())[1:]
print("Cluster sizes:")
print(np.sort(cluster_sizes))

mask = fmri_glm.mask
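# Building on the cluster labelling above, a small hypothetical extension:
# zero out clusters smaller than a minimum extent before plotting, one way to
# mimic a `cluster_th`-style minimum-size filter. The 10-voxel cutoff is an
# arbitrary example value.
min_cluster_size = 10
thresholded = np.abs(Z) > 2.5
cluster_map, n_clusters = ndimage.label(thresholded)
cluster_sizes = np.bincount(cluster_map.ravel())
# keep only voxels belonging to clusters of at least `min_cluster_size` voxels
keep = cluster_sizes[cluster_map] >= min_cluster_size
Z_filtered = np.where(thresholded & keep, Z, 0)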
# Estimate the contrasts
#########################################
contrast_id = 'left_right_motor_min'
z_map, effects_map = fmri_glm.contrast(
    np.vstack((contrasts['left'], contrasts['right'])),
    contrast_type='tmin-conjunction', output_z=True, output_effects=True)
z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
save(z_map, z_image_path)

contrast_path = path.join(write_dir, '%s_con.nii' % contrast_id)
save(effects_map, contrast_path)
# note that the effects_map is two-dimensional:
# these dimensions correspond to 'left' and 'right'

# Create snapshots of the contrasts
vmax = max(-z_map.get_data().min(), z_map.get_data().max())
plot_map(z_map.get_data(), fmri_glm.affine,
         cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
         anat=None, figure=10, threshold=2.5)
plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))
plt.show()

print('All the results were written in %s' % write_dir)
# Note: fancier visualizations of the results are shown
# in the viz3d example
mkdir(write_dir)

print("Computing contrasts...")
mean_map = multi_session_model.means[0]  # for display
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(" Contrast % 2i out of %i: %s" % (
        index + 1, len(contrasts), contrast_id))
    z_image_path = path.join(write_dir, "%s_z_map.nii" % contrast_id)
    z_map, = multi_session_model.contrast(
        [contrast_val] * 2, con_id=contrast_id, output_z=True)
    save(z_map, z_image_path)

    # make a snapshot of the contrast activation
    if contrast_id == "Effects_of_interest":
        vmax = max(-z_map.get_data().min(), z_map.get_data().max())
        vmin = -vmax
        plot_map(z_map.get_data(), z_map.get_affine(),
                 anat=mean_map.get_data(),
                 anat_affine=mean_map.get_affine(),
                 cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
                 figure=10, threshold=2.5, black_bg=True)
        plt.savefig(path.join(write_dir, "%s_z_map.png" % contrast_id))

print("All the results were written in %s" % write_dir)
plt.show()
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' % (
        index + 1, len(contrasts), contrast_id))

    # save the z_image
    image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id,
                               output_z=True)
    save(z_map, image_path)

    # Create snapshots of the contrasts
    vmax = max(-z_map.get_data().min(), z_map.get_data().max())
    if index > 0:
        plt.clf()
    plot_map(z_map.get_data(), z_map.get_affine(),
             cmap=cm.cold_hot, vmin=-vmax, vmax=vmax,
             anat=None, cut_coords=None, slicer='z',
             black_bg=True,  # looks much better this way
             figure=10, threshold=2.5)
    plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

print("All the results were written in %s" % write_dir)
plt.show()
#######################################
# Data and analysis parameters
#######################################
input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz')
if not path.exists(input_image):
    get_second_level_dataset()

brain_map = load(input_image)
vmin, vmax = brain_map.get_data().min(), brain_map.get_data().max()

# make a simple 2D plot
plot_map(brain_map.get_data(), brain_map.get_affine(),
         cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
         anat=None, figure=10, threshold=3)

# More plots using 3D
if True:  # replace with False to skip this
    plot_map(brain_map.get_data(), brain_map.get_affine(),
             cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
             anat=None, figure=11, threshold=3, do3d=True)

    from nipy.labs import viz3d
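# Hypothetical continuation (not part of the snippet above): the same map
# could also be rendered as a true 3D scene with nipy's viz3d helper, which
# relies on Mayavi. This is only a sketch; the keyword arguments mirror the
# 2D plot_map call and Mayavi may not be installed.
try:
    from nipy.labs import viz3d
    viz3d.plot_map_3d(brain_map.get_data(), brain_map.get_affine(),
                      cmap=cm.cold_hot, vmin=vmin, vmax=vmax,
                      anat=None, threshold=3)
except ImportError:
    # Mayavi is not available; skip the 3D rendering
    pass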
########################################
# Output beta and variance images
########################################
beta_hat = fmri_glm.glms[0].get_beta()  # least-squares estimates of the betas
variance_hat = fmri_glm.glms[0].get_mse()  # estimates of the variance
mask = fmri_glm.mask.get_data() > 0

# output beta images
# (`dim`, the number of regressors, is assumed to be defined earlier
# in the script)
beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
beta_map[mask] = beta_hat.T
beta_image = Nifti1Image(beta_map, fmri_glm.affine)
beta_image.get_header()['descrip'] = (
    'Parameter estimates of the localizer dataset')
save(beta_image, path.join(write_dir, 'beta.nii'))
print("Beta image written in %s" % write_dir)

variance_map = mask.astype(np.float)
variance_map[mask] = variance_hat

# Create a snapshot of the variance image
vmax = np.log(variance_hat.max())
plot_map(np.log(variance_map + .1), fmri_glm.affine,
         cmap=cm.hot_black_bone,
         vmin=np.log(0.1), vmax=vmax,
         anat=None, threshold=.1, alpha=.9)
plt.show()
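# A small follow-up sketch (not in the original snippet): the variance map
# computed above can be written to disk the same way as the beta image,
# reusing the Nifti1Image / save calls already used for the betas.
variance_image = Nifti1Image(variance_map, fmri_glm.affine)
variance_image.get_header()['descrip'] = (
    'Residual variance estimates of the localizer dataset')
save(variance_image, path.join(write_dir, 'variance.nii'))
print("Variance image written in %s" % write_dir)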