def window_show_test(bundles, mask_roi, anat, interactive=True, outpath=None):
    """Render streamlines with an ROI contour and optional anatomy slices.

    :param bundles: streamlines rendered as streamtubes
    :param mask_roi: binary ROI volume rendered as a yellow contour surface
    :param anat: anatomical volume shown as two slicer planes (skipped if falsy)
    :param interactive: if True, open an interactive window
    :param outpath: if given, record a PNG snapshot to this path
    :return: None
    """
    # BUG FIX: the original passed the undefined name `candidate_sl` to
    # cmap.line_colors; the colors come from `bundles` itself.
    candidate_streamlines_actor = actor.streamtube(
        bundles, cmap.line_colors(bundles))
    ROI_actor = actor.contour_from_roi(mask_roi, color=(1., 1., 0.),
                                       opacity=0.5)

    ren = window.Renderer()
    ren.add(candidate_streamlines_actor)
    ren.add(ROI_actor)

    if anat:
        vol_actor = actor.slicer(anat)
        vol_actor.display(x=40)
        vol_actor2 = vol_actor.copy()
        vol_actor2.display(z=35)
        # BUG FIX: only add the anatomy actors when `anat` was supplied;
        # the original added them unconditionally, raising NameError
        # whenever `anat` was falsy.
        ren.add(vol_actor)
        ren.add(vol_actor2)

    if outpath is not None:
        window.record(ren, n_frames=1, out_path=outpath, size=(800, 800))
    if interactive:
        window.show(ren)
def show_cc_parts_weighted(streamlines_g, streamlines_b, streamlines_s,
                           g_mean, b_mean, s_mean, folder_name,
                           lut_cmap, bar):
    """Render genu/body/splenium CC streamlines colored by segment means.

    :param streamlines_g: genu streamlines
    :param streamlines_b: body streamlines
    :param streamlines_s: splenium streamlines
    :param g_mean: scalar broadcast to every genu streamline for coloring
    :param b_mean: scalar broadcast to every body streamline
    :param s_mean: scalar broadcast to every splenium streamline
    :param folder_name: folder where 'cc_parts.png' is written
    :param lut_cmap: lookup colormap shared by the three line actors
    :param bar: scalar-bar actor added to the scene
    """
    # IMPROVEMENT: len() instead of calling __len__() directly.
    mean_g_vec = [g_mean] * len(streamlines_g)
    mean_b_vec = [b_mean] * len(streamlines_b)
    mean_s_vec = [s_mean] * len(streamlines_s)

    genu_actor = actor.line(streamlines_g, mean_g_vec, linewidth=0.5,
                            lookup_colormap=lut_cmap)
    body_actor = actor.line(streamlines_b, mean_b_vec, linewidth=0.5,
                            lookup_colormap=lut_cmap)
    splenium_actor = actor.line(streamlines_s, mean_s_vec, linewidth=0.5,
                                lookup_colormap=lut_cmap)

    r = window.Renderer()
    r.add(genu_actor)
    r.add(body_actor)
    r.add(splenium_actor)
    r.add(bar)
    window.show(r)
    # BUG FIX: raw string — '\c' was an invalid escape sequence
    # (DeprecationWarning in modern CPython); bytes are unchanged.
    save_as = folder_name + r'\cc_parts.png'
    r.set_camera(r.camera_info())
    window.record(r, out_path=save_as, size=(800, 800))
def show_bundles(bundles, colors=None, show=True, fname=None, fa=False,
                 str_tube=False):
    """Render bundles as lines (or one streamtube set), optionally over FA.

    :param bundles: list of bundles (or one streamline set when str_tube)
    :param colors: per-bundle colors; may be None (actor picks defaults)
    :param show: if True, open an interactive window
    :param fname: if given, record a PNG snapshot to this path
    :param fa: if True, overlay a hard-coded FA volume slice
    :param str_tube: if True, render all bundles as one streamtube actor
    """
    ren = window.Renderer()
    ren.SetBackground(1., 1, 1)
    if str_tube:
        bundle_actor = actor.streamtube(bundles, colors, linewidth=0.5)
        ren.add(bundle_actor)
    else:
        for (i, bundle) in enumerate(bundles):
            # BUG FIX: `colors` defaults to None but was indexed
            # unconditionally, raising TypeError.
            color = colors[i] if colors is not None else None
            lines_actor = actor.line(bundle, color, linewidth=2.5)
            ren.add(lines_actor)
    if fa:
        # NOTE(review): hard-coded local path — consider a parameter.
        # Renamed data variable so it no longer shadows the `fa` flag.
        fa_data, affine_fa = load_nifti(
            '/Users/alex/code/Wenlin/data/wenlin_results/bmfaN54900.nii.gz')
        fa_actor = actor.slicer(fa_data, affine_fa)
        ren.add(fa_actor)
    if show:
        window.show(ren)
    if fname is not None:
        sleep(1)
        window.record(ren, n_frames=1, out_path=fname, size=(900, 900))
def showsls(sls, values, outpath, show=False):
    """Render streamlines colored by `values`, with a scalar bar.

    :param sls: streamlines to display
    :param values: scalar values used for coloring; the colormap is scaled
        between values.min() and the 50th percentile
    :param outpath: if truthy, record a PNG snapshot to this path
    :param show: if True, open an interactive window (via legacy fvtk.show)
    """
    # IMPROVEMENT: removed unused in-function imports
    # (fetch_bundles_2_subjects, read_bundles_2_subjects,
    # transform_streamlines, length).
    from dipy.viz import window, actor, fvtk

    renderer = window.Renderer()
    hue = [0.5, 1]  # white to purple to red
    saturation = [0.0, 1.0]  # black to white

    lut_cmap = actor.colormap_lookup_table(
        scale_range=(values.min(), np.percentile(values, 50)),
        hue_range=hue,
        saturation_range=saturation)

    stream_actor5 = actor.line(sls, values, linewidth=0.1,
                               lookup_colormap=lut_cmap)
    renderer.add(stream_actor5)
    bar3 = actor.scalar_bar(lut_cmap)
    renderer.add(bar3)

    if outpath:
        window.record(renderer, out_path=outpath, size=(600, 600))
    if show:
        fvtk.show(renderer)
def plot_response(response_src, out_png=False):
    """Plot a single-fiber response function on a sphere under Xvfb.

    :param response_src: path to a text file of SH response coefficients
        (if the file has multiple rows, the second row is used)
    :param out_png: if truthy, record a PNG to this path; otherwise show
        an interactive window and print the final camera settings
    """
    # start virtual display
    print("starting Xvfb")
    vdisplay = Xvfb()
    vdisplay.start()

    # BUG FIX: the original loaded from the undefined name `src_txt`;
    # the path is the `response_src` parameter.
    response_src = np.loadtxt(response_src)
    if len(response_src.shape) > 1:
        response_src = response_src[1]
    sphere = get_sphere('symmetric724')
    sh_resp = AxSymShResponse(0, response_src)
    sig_resp = sh_resp.on_sphere(sphere)
    sig_resp = sig_resp[None, None, None, :]

    ren = window.Renderer()
    sphere_actor = actor.odf_slicer(sig_resp, sphere=sphere,
                                    colormap='blues')
    ren.add(sphere_actor)
    my_camera = ren.camera()
    my_camera.SetPosition(1.62, -9.19, 4.01)
    my_camera.SetFocalPoint(0.01, -0.46, -0.19)
    my_camera.SetViewUp(0.24, 0.46, 0.86)

    # IMPROVEMENT: truthiness test instead of `!= False`.
    if out_png:
        window.record(ren, out_path=out_png, magnification=10,
                      size=(60, 60))
    else:
        window.show(ren, reset_camera=False)
        print('Camera Settings')
        print('Position: ', '(%.2f, %.2f, %.2f)' % my_camera.GetPosition())
        print('Focal Point: ',
              '(%.2f, %.2f, %.2f)' % my_camera.GetFocalPoint())
        print('View Up: ', '(%.2f, %.2f, %.2f)' % my_camera.GetViewUp())
    vdisplay.stop()
def calculate_fodf(gtab, images, name, sphere=default_sphere, radius=10,
                   fa_threshold=0.7):
    """Fit a CSD model to the data, render its ODF field and save a PNG.

    :param gtab: gradient table for the diffusion data
    :param images: diffusion-weighted image data to fit
    :param name: tag used in the output file name
    :param sphere: sphere used for ODF evaluation and rendering
    :param radius: ROI radius passed to auto_response
    :param fa_threshold: FA threshold passed to auto_response
    :return: the fitted CSD model object
    """
    response, ratio = auto_response(gtab, images, roi_radius=radius,
                                    fa_thr=fa_threshold)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(images)
    odf_field = csd_fit.odf(sphere)

    # Build the scene: one ODF glyph per voxel, plasma-colored.
    glyph_actor = actor.odf_slicer(odf_field, sphere=sphere, scale=0.9,
                                   norm=False, colormap='plasma')
    scene = window.Scene()
    scene.add(glyph_actor)

    print('Saving illustration as csd_odfs_{}.png'.format(name))
    window.record(scene, out_path='results/csd_odfs_{}.png'.format(name),
                  size=(600, 600))
    return csd_fit
def tractography(brain, affine_brain, labels, diff, affine_diff, gtab, img):
    """Deterministic whole-brain tractography; renders and saves the result.

    Segments the brain, fits a CSA ODF model on white matter, tracks
    streamlines with LocalTracking, optionally renders them with fury,
    and writes the tractogram to 'tractogram.trk'.

    :param brain: anatomical volume handed to segmentation()
    :param affine_brain: affine of `brain`
    :param labels: NOTE(review): unused — immediately overwritten below
    :param diff: diffusion-weighted data
    :param affine_diff: affine of `diff`
    :param gtab: gradient table for the diffusion data
    :param img: reference image for the StatefulTractogram
    :return: the tracked Streamlines object
    """
    # Tractography reconstruction based on EuDX determinist algorithm
    labels = segmentation(brain, affine_brain, diff, affine_diff)
    # Label value 3 is treated as white matter — TODO confirm this matches
    # the segmentation() label convention.
    white_matter = (labels == 3)
    csa_model = CsaOdfModel(gtab, sh_order=2)
    csa_peaks = peaks_from_model(csa_model, diff, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)
    # Stop tracking where generalized FA drops below 0.25.
    stopping_criterion = dipy.tracking.stopping_criterion.ThresholdStoppingCriterion(
        csa_peaks.gfa, .25)
    seeds = utils.seeds_from_mask(white_matter, affine_diff, density=1)
    streamlines_generator = LocalTracking(csa_peaks, stopping_criterion,
                                          seeds, affine=affine_diff,
                                          step_size=.5)
    streamlines = Streamlines(streamlines_generator)
    if has_fury:
        color = colormap.line_colors(streamlines)
        streamlines_actor = actor.line(streamlines, color)
        r = window.Renderer()
        r.add(streamlines_actor)
        window.record(r, out_path='tractogram.png', size=(800, 800))
        window.show(r)
    sft = StatefulTractogram(streamlines, img, Space.RASMM)
    # NOTE(review): recent dipy save_trk signatures take (sft, filename);
    # the extra `streamlines` positional looks like the legacy API —
    # verify against the dipy version in use.
    save_trk(sft, "tractogram.trk", streamlines)
    return streamlines
def visualize(evals, evecs, viz_scale=0.5, fname='tensor_ellipsoids.png',
              size=(1000, 1000)):
    """Render DTI tensor ellipsoids colored by FA; show a window, save a PNG.

    :param evals: eigenvalue volume from a DTI fit
    :param evecs: eigenvector volume from a DTI fit
    :param viz_scale: glyph scale for the tensor slicer
    :param fname: output PNG path
    :param size: snapshot size — BUG FIX: this parameter was previously
        ignored (the record size was hard-coded to (1000, 1000))
    """
    # Do vizualisation
    interactive = True
    ren = window.Scene()

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric642')

    # Calculate the colors. See dipy documentation.
    from dipy.reconst.dti import fractional_anisotropy, color_fa
    FA = fractional_anisotropy(evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)
    RGB = color_fa(FA, evecs)

    # Keep a single slice (k .. k+1) of the color volume.
    k = 0
    cfa = RGB[:, :, k:k + 1]
    # Normalizing like this increases the contrast, but this will make the
    # contrast different across plots
    # cfa /= cfa.max()

    ren.add(actor.tensor_slicer(evals, evecs, sphere=sphere,
                                scalar_colors=cfa, scale=viz_scale,
                                norm=False))
    if interactive:
        window.show(ren)
    window.record(ren, n_frames=1, out_path=fname, size=size)
def visualize(fibers, outf=None):
    """Display fiber streamlines with DiPy's viz tools.

    :param fibers: fiber streamlines in a list as returned by DiPy
    :param outf: optional output path; if given, a PNG snapshot is saved
        after the interactive window is shown
    """
    scene = window.Renderer()
    fiber_actor = actor.line(fibers)

    # Default camera; TODO: expose position/focal_point/view_up as args.
    scene.set_camera()
    scene.add(fiber_actor)

    # TODO: allow window size as an argument.
    window.show(scene, size=(600, 600), reset_camera=False)

    if outf is not None:
        window.record(scene, out_path=outf, size=(600, 600))
def afficher_tenseurs(fa_, evec, eva):
    """Render tensor ellipsoids colored by FA and save them to 'tensor.png'.

    :param fa_: fractional anisotropy volume used for coloring
    :param evec: eigenvector volume
    :param eva: eigenvalue volume
    """
    rgb = dti.color_fa(fa_, evec)
    scene = window.Renderer()
    tensor_actor = actor.tensor_slicer(eva, evec, scalar_colors=rgb,
                                       sphere=dpd.default_sphere, scale=0.5)
    scene.add(tensor_actor)
    window.record(scene, out_path='tensor.png', size=(1200, 1200))
def print_peaks(sh_signal, mask=None):
    """Render ODFs from one slice of an SH-coefficient volume; save a PNG.

    :param sh_signal: 4-D array of spherical-harmonic coefficients
        (last axis = SH coefficients) — assumed; TODO confirm shape
    :param mask: unused by this function
    """
    if has_fury:
        # Keep a single axial slice (z index 50) for rendering.
        data_small = sh_signal[:, :, 50:51]
        ren = window.Renderer()
        sh_order = order_from_ncoef(data_small.shape[-1])
        theta = default_sphere.theta
        phi = default_sphere.phi
        # Basis type is read from the module-level SIGNAL_PARAMETERS config.
        sh_params = SIGNAL_PARAMETERS['processing_params']['sh_params']
        basis_type = sh_params['basis_type']
        sph_harm_basis = sph_harm_lookup.get(basis_type)
        sampling_matrix, m, n = sph_harm_basis(sh_order, theta, phi)
        # Project the SH coefficients onto the sphere's directions.
        odfs = np.dot(data_small, sampling_matrix.T)
        # Clip negative lobes to zero; keep each voxel's own max as bound.
        odfs = np.clip(odfs, 0, np.max(odfs, -1)[..., None])
        odfs_actor = actor.odf_slicer(odfs, sphere=default_sphere,
                                      colormap='plasma', scale=0.4)
        odfs_actor.display(z=0)
        ren.add(odfs_actor)
        print('Saving illustration as csa_odfs.png')
        window.record(ren, n_frames=1, out_path='csa_odfs.png',
                      size=(600, 600))
        window.show(ren)
def show_tract(segmented_tract, color_positive, segmented_tract_negative,
               color_negative, out_path):
    """Visualize positive/negative segmented tracts and save a snapshot.

    :param segmented_tract: streamlines shown with `color_positive`
    :param color_positive: color for the main tract
    :param segmented_tract_negative: streamlines shown nearly transparent
    :param color_negative: color for the negative tract
    :param out_path: path for the recorded PNG snapshot
    """
    voxel_dims = np.array([1.25, 1.25, 1.25])
    inv_affine = np.linalg.inv(
        utils.affine_for_trackvis(voxel_size=voxel_dims))
    positive_native = transform_streamlines(segmented_tract, inv_affine)
    negative_native = transform_streamlines(segmented_tract_negative,
                                            inv_affine)

    scene = window.Renderer()
    positive_actor = actor.line(positive_native, colors=color_positive,
                                linewidth=0.1)
    negative_actor = actor.line(negative_native, colors=color_negative,
                                opacity=0.01, linewidth=0.1)
    scene.set_camera(position=(408.85, -26.23, 92.12),
                     focal_point=(0.42, -14.03, 0.82),
                     view_up=(-0.09, 0.85, 0.51))
    for viz_obj in (positive_actor, negative_actor, actor.scalar_bar()):
        scene.add(viz_obj)

    window.show(scene, size=(1920, 1039), reset_camera=False)
    scene.camera_info()
    # Take a snapshot of the window and save it.
    window.record(scene, out_path=out_path, size=(1920, 1039))
def show_weighted_tractography(folder_name, vec_vols, s_list, bundle_short,
                               direction, downsamp=1):
    """Render streamlines colored by per-streamline weights; save a PNG.

    :param folder_name: root folder; output path is
        '<folder_name>\\streamlines\\ax_fa_corr_<bundle_short>_<direction>.png'
    :param vec_vols: per-streamline scalar weights — assumed to be a
        Python list (append is used below); TODO confirm at call sites
    :param s_list: streamlines (sliceable)
    :param bundle_short: short bundle name embedded in the file name
    :param direction: direction tag embedded in the file name
    :param downsamp: keep every `downsamp`-th streamline/weight
    """
    s_img = rf'{folder_name}\streamlines\ax_fa_corr_{bundle_short}_{direction}.png'
    if downsamp != 1:
        vec_vols = vec_vols[::downsamp]
        s_list = s_list[::downsamp]
    # Temporarily append sentinel extremes (+1/-1) so the seismic colormap
    # is scaled symmetrically, then drop the two sentinel entries again.
    vec_vols.append(1)
    vec_vols.append(-1)
    cmap = create_colormap(np.asarray(vec_vols), name='seismic')
    vec_vols = vec_vols[:-2]
    cmap = cmap[:-2]
    print(min(vec_vols), max(vec_vols))
    w_actor = actor.line(s_list, cmap, linewidth=1.2)
    r = window.Scene()
    r.add(w_actor)
    window.show(r)
    # Re-apply the interactively chosen camera before recording.
    r.set_camera(r.camera_info())
    window.record(r, out_path=s_img, size=(800, 800))
def visualize_roi(roi, affine_or_mapping=None, static_img=None,
                  roi_affine=None, static_affine=None, reg_template=None,
                  ren=None, color=None, inline=True, interact=False):
    """
    Render a region of interest into a VTK viz as a volume.

    :param roi: ndarray, path, or nibabel image of the ROI
    :param affine_or_mapping: ndarray affine, or a mapping (path / image /
        mapping object) used to transform the ROI; None to skip transform
    :param static_img: static image required when transforming
    :param roi_affine: ROI affine (affine path only)
    :param static_affine: static affine (affine path only)
    :param reg_template: registration template (mapping path only)
    :param ren: existing renderer to add to; a new one is made if None
    :param color: contour color
    :param inline: if True, record a PNG and display it inline
    :param interact: if True, open an interactive window
    :raises ValueError: when required companion inputs are missing
    :return: result of _inline_interact(ren, inline, interact)
    """
    if not isinstance(roi, np.ndarray):
        if isinstance(roi, str):
            roi = nib.load(roi).get_data()
        else:
            roi = roi.get_data()
    # NOTE(review): nibabel's get_data() is deprecated in favor of
    # get_fdata(); left unchanged here to preserve dtype behavior.

    if affine_or_mapping is not None:
        if isinstance(affine_or_mapping, np.ndarray):
            # This is an affine:
            if (static_img is None or roi_affine is None
                    or static_affine is None):
                # BUG FIX: the original passed several comma-separated
                # strings to ValueError, producing a tuple-style message.
                raise ValueError(
                    "If using an affine to transform an ROI, "
                    "need to also specify all of the following "
                    "inputs: `static_img`, `roi_affine`, "
                    "`static_affine`")
            roi = reg.resample(roi, static_img, roi_affine, static_affine)
        else:
            # Assume it is a mapping:
            if (isinstance(affine_or_mapping, str)
                    or isinstance(affine_or_mapping, nib.Nifti1Image)):
                if reg_template is None or static_img is None:
                    # BUG FIX: same tuple-message defect as above.
                    raise ValueError(
                        "If using a mapping to transform an ROI, need to "
                        "also specify all of the following inputs: "
                        "`reg_template`, `static_img`")
                affine_or_mapping = reg.read_mapping(affine_or_mapping,
                                                     static_img,
                                                     reg_template)

            roi = auv.patch_up_roi(
                affine_or_mapping.transform_inverse(
                    roi, interpolation='nearest')).astype(bool)

    if ren is None:
        ren = window.Renderer()

    roi_actor = actor.contour_from_roi(roi, color=color)
    ren.add(roi_actor)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        window.record(ren, out_path=fname)
        display.display_png(display.Image(fname))

    return _inline_interact(ren, inline, interact)
def plot_tracts(classes, bundle_segmentations, affine, out_dir, brain_mask=None):
    '''
    Render three bundle segmentations in a tiled layout; save 'preview.png'.

    By default this does not work on a remote server connection (ssh -X)
    because -X does not support OpenGL. On the remote Server you can do
    'export DISPLAY=":0"' (you should set the value you get if you do
    'echo $DISPLAY' if you login locally on the remote server). Then all
    graphics will get rendered locally and not via -X.
    (important: graphical session needs to be running on remote server
    (e.g. via login locally))
    (important: login needed, not just stay at login screen)

    :param classes: bundle-class set passed to exp_utils.get_bundle_names
    :param bundle_segmentations: 4-D array; last axis indexes bundles
    :param affine: affine passed through to plot_mask
    :param out_dir: directory where 'preview.png' is written
    :param brain_mask: optional mask forwarded to plot_mask
    '''
    from dipy.viz import window
    from tractseg.libs import vtk_utils

    SMOOTHING = 10
    WINDOW_SIZE = (800, 800)
    # Bundles rendered, one per row.
    bundles = ["CST_right", "CA", "IFO_right"]

    renderer = window.Renderer()
    renderer.projection('parallel')

    rows = len(bundles)
    X, Y, Z = bundle_segmentations.shape[:3]
    for j, bundle in enumerate(bundles):
        i = 0  # only one method (single column)
        # Index of this bundle in the class list (skipping background at 0).
        bundle_idx = exp_utils.get_bundle_names(classes)[1:].index(bundle)
        mask_data = bundle_segmentations[:, :, :, bundle_idx]

        # Orientation per bundle; default is axial.
        if bundle == "CST_right":
            orientation = "axial"
        elif bundle == "CA":
            orientation = "axial"
        elif bundle == "IFO_right":
            orientation = "sagittal"
        else:
            orientation = "axial"

        # bigger: more border
        if orientation == "axial":
            border_y = -100  # -60
        else:
            border_y = -100

        x_current = X * i  # column (width)
        # row (height) (starts from bottom?)
        y_current = rows * (Y * 2 + border_y) - (Y * 2 + border_y) * j

        plot_mask(renderer, mask_data, affine, x_current, y_current,
                  orientation=orientation, smoothing=SMOOTHING,
                  brain_mask=brain_mask)

        # Bundle label placed to the left of each row.
        text_offset_top = -50  # 60
        text_offset_side = -100  # -30
        position = (0 - int(X) + text_offset_side,
                    y_current + text_offset_top, 50)
        text_actor = vtk_utils.label(text=bundle, pos=position,
                                     scale=(6, 6, 6), color=(1, 1, 1))
        renderer.add(text_actor)

    renderer.reset_camera()
    window.record(renderer, out_path=join(out_dir, "preview.png"),
                  size=(WINDOW_SIZE[0], WINDOW_SIZE[1]),
                  reset_camera=False, magnification=2)
def genren_AGG(sls, sls2=None, niidata=None, roi1=None, roi2=None, roi3=None,
               aff=np.eye(4), putpath='test.png', showme=False,
               showaxes=False):
    """Compose streamlines, optional ROIs and slices; record to `putpath`.

    :param sls: primary streamlines (default coloring)
    :param sls2: optional secondary streamlines, drawn white
    :param niidata: optional volume shown via a slicer actor
    :param roi1: optional ROI drawn yellow
    :param roi2: optional ROI drawn red
    :param roi3: optional ROI drawn blue
    :param aff: affine applied to ROIs and the slicer
    :param putpath: output PNG path (returned)
    :param showme: if True, also open an interactive window
    :param showaxes: if True, add an axes actor
    :return: `putpath`
    """
    scene = window.Renderer()
    scene.set_camera(position=(-606.93, -153.23, 28.70),
                     focal_point=(2.78, 11.06, 15.66),
                     view_up=(0, 0, 1))
    scene.add(actor.line(sls))

    if sls2 is not None:
        scene.add(actor.line(sls2, colors=(1, 1, 1)))

    # Each optional ROI gets its own fixed color.
    for roi, rgb in ((roi1, (1., 1., 0.)),
                     (roi2, (1., 0., 0.)),
                     (roi3, (0., 0., 1.))):
        if roi is not None:
            scene.add(actor.contour_from_roi(roi, affine=aff, color=rgb,
                                             opacity=0.5))

    if niidata is not None:
        scene.add(actor.slicer(niidata, affine=aff))
    if showaxes:
        scene.add(actor.axes())
    if showme:
        window.show(scene, size=(500, 500), reset_camera=False)

    window.record(scene, out_path=putpath, size=(500, 500))
    del scene
    return putpath
def test_renderer(self):
    """Smoke test: record an empty Renderer to self.out_file under Xvfb."""
    virtual_display = Xvfb()
    virtual_display.start()
    scene = window.Renderer()
    window.record(scene, n_frames=1, out_path=self.out_file,
                  size=(600, 600))
    self.assertTrue(os.path.exists(self.out_file))
    virtual_display.stop()
def bundle_coherence(streamlines, affine, k, t1_data=None, interactive=False):
    """Compute FBC measures and render fibers before/after RFBC thresholding.

    Saves two snapshots, 'OR_before.png' and 'OR_after.png'.

    :param streamlines: input streamlines
    :param affine: affine used to position the optional T1 slices
    :param k: enhancement kernel handed to FBCMeasures
    :param t1_data: optional T1 volume shown as axial + sagittal slices
    :param interactive: if True, also open interactive windows
    :return: the kernel `k`, unchanged
    """
    # Compute lookup table
    # Apply FBC measures
    from dipy.tracking.fbcmeasures import FBCMeasures
    fbc = FBCMeasures(streamlines, k)

    # Calculate LFBC for original fibers
    fbc_sl_orig, clrs_orig, rfbc_orig = \
        fbc.get_points_rfbc_thresholded(0, emphasis=0.01)

    # Apply a threshold on the RFBC to remove spurious fibers
    fbc_sl_thres, clrs_thres, rfbc_thres = \
        fbc.get_points_rfbc_thresholded(0.125, emphasis=0.01)

    # Visualize the results
    from dipy.viz import window, actor

    # Create renderer
    ren = window.Renderer()

    # Original lines colored by LFBC
    lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
    ren.add(lineactor)

    # Horizontal (axial) slice of T1 data
    if t1_data is not None:
        vol_actor1 = actor.slicer(t1_data, affine=affine)
        vol_actor1.display(z=20)
        ren.add(vol_actor1)

        # Vertical (sagittal) slice of T1 data
        vol_actor2 = actor.slicer(t1_data, affine=affine)
        vol_actor2.display(x=35)
        ren.add(vol_actor2)

    # Show original fibers
    ren.set_camera(position=(-264, 285, 155),
                   focal_point=(0, -14, 9),
                   view_up=(0, 0, 1))
    window.record(ren, n_frames=1, out_path='OR_before.png', size=(900, 900))
    if interactive:
        window.show(ren)

    # Show thresholded fibers (swap the line actor, keep the slices)
    ren.rm(lineactor)
    ren.add(actor.line(fbc_sl_thres, clrs_thres, linewidth=0.2))
    window.record(ren, n_frames=1, out_path='OR_after.png', size=(900, 900))
    if interactive:
        window.show(ren)
    return k
def test_renderer(self):
    """Smoke test: record an empty Renderer into a temp dir under Xvfb."""
    virtual_display = Xvfb()
    virtual_display.start()
    scene = window.Renderer()
    with tempfile.TemporaryDirectory() as tmp_dir:
        png_path = os.path.join(tmp_dir, 'test.png')
        window.record(scene, n_frames=1, out_path=png_path,
                      size=(600, 600))
        self.assertTrue(os.path.exists(png_path))
    virtual_display.stop()
def create_gif(figure, file_name, n_frames=60, zoom=1, z_offset=0.5,
               size=(600, 600), rotate_forward=True):
    """
    Convert a Fury Scene object into a gif

    Parameters
    ----------
    figure: Fury Scene object
        Scene to be converted to a gif

    file_name: str
        File to save gif to.

    n_frames: int, optional
        Number of frames in gif.
        Will be evenly distributed throughout the rotation.
        Default: 60

    zoom: int, optional
        How much to magnify the figure in the fig.
        Default: 1

    z_offset: float, optional
        NOTE(review): currently unused by this function — confirm whether
        it should shift the camera before recording. Default: 0.5

    size: tuple, optional
        Size of the gif.
        Default: (600, 600)

    rotate_forward: bool, optional
        Whether to rotate the figure forward before converting to a gif.
        Generally necessary for fury scenes.
        Default: True
    """
    if rotate_forward:
        figure = scene_rotate_forward(figure)

    # Record numbered PNG frames into the temp dir, then stitch them.
    tdir = tempfile.gettempdir()
    window.record(figure, az_ang=360.0 / n_frames, n_frames=n_frames,
                  path_numbering=True, out_path=tdir + '/tgif',
                  magnification=zoom, size=size)

    vut.gif_from_pngs(tdir, file_name, n_frames,
                      png_fname="tgif", add_zeros=True)
def visualize_fibs(fibs, fibfile, atlasfile, outdir, opacity, num_samples):
    """
    Takes fiber streamlines and visualizes them using DiPy

    Required Arguments:
        - fibs: fiber streamlines (already loaded)
        - fibfile: Path to fiber file (used only to name the output PNG)
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - opacity: Opacity of overlayed brain
        - num_samples: number of fibers to randomly sample from fibfile
    """
    try:
        import vtk
        print("VTK found - beginning fiber QA.")
    except ImportError:
        print("!! VTK not found; skipping fiber QA.")
        return

    # loading the fibers
    fibs = threshold_fibers(fibs)

    # make sure if fiber streamlines
    # have no fibers, no error occurs
    if len(fibs) == 0:
        return

    # randomly sample num_samples fibers from given fibers
    resampled_fibs = random_sample(fibs, num_samples)

    # load atlas file
    atlas_volume = load_atlas(atlasfile, opacity)

    # Initialize renderer
    renderer = window.Renderer()
    renderer.SetBackground(1.0, 1.0, 1.0)

    # BUG FIX: render the sampled subset; the original computed
    # `resampled_fibs` but then rendered the full `fibs`, so the
    # num_samples parameter had no effect.
    stream_actor = actor.line(resampled_fibs)

    # Set camera orientation properties
    # TODO: allow this as an argument
    renderer.set_camera()  # args are: position=(), focal_point=(), view_up=()

    # Add streamlines and atlas overlay to viz session
    renderer.add(stream_actor)
    renderer.add(atlas_volume)

    # Output filename derived from the input fiber file name.
    fname = os.path.split(fibfile)[1].split('.')[0] + '.png'
    window.record(renderer, out_path=outdir + fname, size=(600, 600))
def _inline_interact(ren, inline, interact):
    """
    Helper function to reuse across viz functions:
    optionally show a scene interactively and/or display it inline.
    """
    if interact:
        window.show(ren)

    if inline:
        snapshot = op.join(tempfile.gettempdir(), "fig.png")
        window.record(ren, out_path=snapshot, size=(1200, 1200))
        display.display_png(display.Image(snapshot))

    return ren
def show_both_bundles(bundles, colors=None, show=True, fname=None):
    """Render bundles as rotated streamtubes; show OR save (not both).

    :param bundles: iterable of bundles
    :param colors: per-bundle colors, indexed in parallel with `bundles`
    :param show: if True, open an interactive window (snapshot is skipped)
    :param fname: snapshot path, used only when `show` is False
    """
    scene = window.Scene()
    scene.SetBackground(1., 1, 1)
    for idx, bundle in enumerate(bundles):
        tube = actor.streamtube(bundle, colors[idx], linewidth=0.3)
        tube.RotateX(-90)
        tube.RotateZ(90)
        scene.add(tube)
    if show:
        window.show(scene)
    elif fname is not None:
        window.record(scene, out_path=fname, size=(900, 900))
def show_template_bundles(bundles, static, show=True, fname=None):
    """Overlay bundles (orange streamtubes) on a static template slice.

    :param bundles: streamlines rendered as streamtubes
    :param static: template volume shown via a slicer actor
    :param show: if True, open an interactive window
    :param fname: if given, record a PNG snapshot to this path
    """
    scene = window.Scene()
    scene.add(actor.slicer(static))
    scene.add(actor.streamtube(bundles, window.colors.orange,
                               linewidth=0.3))
    if show:
        window.show(scene)
    if fname is not None:
        window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
def show_fascicles_wholebrain(s_list, vec_vols, folder_name, mask_type,
                              downsamp=1, scale=(3, 6), hue=(0.25, -0.05),
                              saturation=(0.1, 1.0)):
    """Render weighted whole-brain fascicles as streamtubes; save a PNG.

    :param s_list: streamlines
    :param vec_vols: per-streamline scalar weights
    :param folder_name: root folder; image is written to
        '<folder_name>\\streamlines\\fascicles_AxCaliber_weighted_3d_<mask_type>.png'
    :param mask_type: tag embedded in the output file name
    :param downsamp: keep every `downsamp`-th streamline/weight
    :param scale: colormap scale range
    :param hue: colormap hue range ([0.25, -0.05] ~ "hot")
    :param saturation: colormap saturation range

    IMPROVEMENT: tuple defaults replace the original mutable list defaults;
    unused local (`weighted`) and dead commented-out code removed.
    """
    s_img = (folder_name + r'\streamlines'
             + r'\fascicles_AxCaliber_weighted_3d_' + mask_type + '.png')

    if downsamp != 1:
        vec_vols = vec_vols[::downsamp]
        s_list = s_list[::downsamp]

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    bar = actor.scalar_bar(lut_cmap)
    w_actor = actor.streamtube(s_list, vec_vols, linewidth=0.6,
                               lookup_colormap=lut_cmap)

    r = window.Renderer()
    r.add(w_actor)
    r.add(bar)
    window.show(r)
    # Re-apply the interactively chosen camera before recording.
    r.set_camera(r.camera_info())
    window.record(r, out_path=s_img, size=(800, 800))
def visualize(atlasfile, outdir, intensityfile):
    """
    Renders an intensity-colored atlas from several camera positions,
    saving one PNG per view into `outdir`.

    Required Arguments:
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - intensityfile: CSV of intensities/signs parsed by parse_csv
    """
    intensities, signs = parse_csv(intensityfile)

    # load atlas file
    atlas_volume = load_atlas(atlasfile, intensities, signs)

    # Camera positions: axis-aligned and diagonal views.
    # NOTE(review): (0, 1, 0) appears twice (entries 3 and 5) while
    # (0, -1, 0) appears once — possibly a typo; confirm intended views.
    faces = [
        (1, 0, 0),
        (-1, 0, 0),
        (0, 1, 0),
        (0, -1, 0),
        (0, 1, 0),
        (0, 0, 1),
        (0, 0, -1),
        (1, 1, 0),
        (1, 0, 1),
        (0, 1, 1),
        (-1, -1, 0),
        (-1, 0, -1),
        (0, -1, -1),
    ]
    for i in range(len(faces)):
        # Initialize renderer
        renderer = window.Renderer()
        renderer.background((1, 1, 1))

        # Set camera orientation properties
        # TODO: allow this as an argument
        renderer.set_camera(
            position=faces[i]
        )  # args are: position=(), focal_point=(), view_up=()

        # Add streamlines to viz session
        renderer.add(atlas_volume)

        # One output file per camera position.
        fname = os.path.split(atlasfile)[1].split('.')[0] + str(i) + '.png'
        window.record(renderer, out_path=outdir + fname, size=(600, 600))
    print('done')
def show_tract(segmented_tract, color):
    """Visualization of the segmented tract; snapshot saved as
    'bundle2.1.png'.

    :param segmented_tract: streamlines to display
    :param color: unused (kept for interface compatibility)
    """
    trackvis_affine = utils.affine_for_trackvis(
        voxel_size=np.array([1.25, 1.25, 1.25]))
    native_bundle = transform_streamlines(segmented_tract,
                                          np.linalg.inv(trackvis_affine))

    scene = window.Renderer()
    scene.add(actor.line(native_bundle, linewidth=0.1))
    scene.add(actor.scalar_bar())

    window.show(scene, size=(600, 600), reset_camera=False)
    # Take a snapshot of the window and save it.
    window.record(scene, out_path='bundle2.1.png', size=(600, 600))
def show_both_bundles(bundles, colors=None, show=True, fname=None):
    """Render bundles as rotated streamtubes on a white background.

    :param bundles: iterable of bundles
    :param colors: per-bundle colors, indexed in parallel with `bundles`
    :param show: if True, open an interactive window
    :param fname: if given, record a PNG snapshot (after a 1 s pause)
    """
    scene = window.Renderer()
    scene.SetBackground(1., 1, 1)
    for idx, bundle in enumerate(bundles):
        tube = actor.streamtube(bundle, colors[idx], linewidth=0.3)
        tube.RotateX(-90)
        tube.RotateZ(90)
        scene.add(tube)
    if show:
        window.show(scene)
    if fname is not None:
        sleep(1)
        window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
def viewclusters(clusters, streamlines, outpath=None, interactive=False):
    """Render QuickBundles-style clusters, one color per cluster centroid.

    :param clusters: cluster object with `.centroids` and per-cluster
        `.indices` (e.g. a dipy ClusterMap)
    :param streamlines: the full streamline set being clustered
    :param outpath: if given, save a PNG snapshot to this path
    :param interactive: if True, open an interactive window
    """
    colormap = actor.create_colormap(np.ravel(clusters.centroids))
    # Default every streamline to white, then paint cluster members.
    colormap_full = np.ones((len(streamlines), 3))
    for cluster, color in zip(clusters, colormap):
        colormap_full[cluster.indices] = color

    scene = window.Scene()
    scene.SetBackground(1, 1, 1)
    scene.add(actor.streamtube(streamlines, colormap_full))
    # BUG FIX: only record when an output path was provided — the original
    # called window.record(out_path=None) unconditionally, contradicting
    # its own comment ("If outpath given, will save ...").
    if outpath is not None:
        window.record(scene, out_path=outpath, size=(600, 600))

    # Enables/disables interactive visualization
    if interactive:
        window.show(scene)
def visualize(atlasfile, outdir, intensityfile):
    """
    Renders an intensity-colored atlas from several camera positions,
    saving one PNG per view into `outdir`.

    Required Arguments:
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - intensityfile: CSV of intensities/signs parsed by parse_csv
    """
    intensities, signs = parse_csv(intensityfile)

    # load atlas file
    atlas_volume = load_atlas(atlasfile, intensities, signs)

    # Camera positions: axis-aligned and diagonal views.
    # NOTE(review): (0, 1, 0) appears twice (entries 3 and 5) while
    # (0, -1, 0) appears once — possibly a typo; confirm intended views.
    faces = [( 1, 0, 0),
             (-1, 0, 0),
             ( 0, 1, 0),
             ( 0,-1, 0),
             ( 0, 1, 0),
             ( 0, 0, 1),
             ( 0, 0,-1),
             ( 1, 1, 0),
             ( 1, 0, 1),
             ( 0, 1, 1),
             (-1,-1, 0),
             (-1, 0,-1),
             ( 0,-1,-1),
             ]
    for i in range(len(faces)):
        # Initialize renderer
        renderer = window.Renderer()
        renderer.background((1, 1, 1))

        # Set camera orientation properties
        # TODO: allow this as an argument
        renderer.set_camera(position=faces[i])  # args are: position=(), focal_point=(), view_up=()

        # Add streamlines to viz session
        renderer.add(atlas_volume)

        # One output file per camera position.
        fname = os.path.split(atlasfile)[1].split('.')[0] + str(i) + '.png'
        window.record(renderer, out_path=outdir + fname, size=(600, 600))
    print('done')
def visualize_streamline(darray, score, save_able=False,
                         save_name='default.png', control_par=1,
                         hue=[0.5, 1]):
    """Render streamlines colored by a per-streamline score.

    :param darray: array of streamline point data; zero rows are stripped
        per streamline via zero_remove
    :param score: per-streamline scalar values used for coloring
    :param save_able: if True, record a PNG instead of showing a window
    :param save_name: output PNG path when saving
    :param control_par: divisor applied to the colormap's upper bound
    :param hue: hue range for the lookup colormap
    """
    streamlines_evl = Streamlines()
    for row in range(np.shape(darray)[0]):
        cleaned = zero_remove(darray[row])
        streamlines_evl.append(cleaned)

    mse_nor = score

    # Visualize the streamlines, colored by cci
    ren = window.Scene()
    lut_cmap = actor.colormap_lookup_table(
        scale_range=(min(mse_nor), max(mse_nor) / control_par),
        hue_range=hue,
        saturation_range=[0.0, 1.0])
    ren.add(actor.scalar_bar(lut_cmap))
    ren.add(actor.line(streamlines_evl, mse_nor, linewidth=0.1,
                       lookup_colormap=lut_cmap))

    if save_able:
        window.record(ren, n_frames=1, out_path=save_name, size=(800, 800))
    else:
        window.show(ren)
def show_tracts(hue, saturation, scale, streamlines, mean_vol_per_tract,
                folder_name, fig_type):
    """Render streamtubes weighted by mean_vol_per_tract; save a PNG.

    :param hue: hue range for the lookup colormap
    :param saturation: saturation range for the lookup colormap
    :param scale: scale range for the lookup colormap
    :param streamlines: streamlines rendered as streamtubes
    :param mean_vol_per_tract: per-streamline scalar weights
    :param folder_name: root folder for the output image
    :param fig_type: suffix embedded in the output file name
    """
    from dipy.viz import window, actor
    lut = actor.colormap_lookup_table(hue_range=hue,
                                      saturation_range=saturation,
                                      scale_range=scale)
    tube_actor = actor.streamtube(streamlines, mean_vol_per_tract,
                                  linewidth=0.5, lookup_colormap=lut)
    scene = window.Scene()
    scene.add(tube_actor)
    scene.add(actor.scalar_bar(lut))
    out_png = f'{folder_name}{os.sep}streamlines{os.sep}mean_pasi_weighted{fig_type}.png'
    window.show(scene)
    # Re-apply the interactively chosen camera before recording.
    scene.set_camera(scene.camera_info())
    window.record(scene, out_path=out_png, size=(800, 800))
def show_bundles(bundles, colors=None, size=(1080, 600), show=False,
                 fname=None):
    """Render bundles as lines on a white background; optionally show/save.

    :param bundles: list of bundles
    :param colors: per-bundle colors; may be None (actor picks defaults)
    :param size: window / snapshot size
    :param show: if True, open an interactive window
    :param fname: if given, record a PNG snapshot to this path
    """
    ren = window.Renderer()
    ren.background((1., 1, 1))
    for (i, bundle) in enumerate(bundles):
        # BUG FIX: `colors` defaults to None but was indexed
        # unconditionally.
        color = colors[i] if colors is not None else None
        lines = actor.line(bundle, color, linewidth=1.5)
        ren.add(lines)
    ren.reset_clipping_range()
    ren.reset_camera()
    # BUG FIX: the `show` flag was ignored (its guard was commented out),
    # so an interactive window opened even with the default show=False.
    if show:
        window.show(ren, size=size, reset_camera=True)
    if fname is not None:
        window.record(ren, n_frames=1, out_path=fname, size=size)
def plot_tracts(bundle_segmentations, out_dir):
    '''
    Render smoothed contours of selected bundle segmentations and save a
    front and a top preview PNG into ``out_dir``.

    By default this does not work on a remote server connection (ssh -X)
    because -X does not support OpenGL. On the remote Server you can do
    'export DISPLAY=":0"' (you should set the value you get if you do
    'echo $DISPLAY' if you login locally on the remote server). Then all
    graphics will get rendered locally and not via -X.
    '''
    from dipy.viz import window
    from tractseg.libs.VtkUtils import VtkUtils

    ren = window.Renderer()
    SMOOTHING = 10

    # (channel index in the 4th axis, RGB color) per bundle:
    # CST 15/16 blue, CA 5 red, FX 23/24 green, ICP 25/26 yellow
    bundle_colors = [
        (15, (0., 0., 1.)),   # CST
        (16, (0., 0., 1.)),   # CST
        (5,  (1., 0., 0.)),   # CA
        (23, (0., 1., 0.)),   # FX
        (24, (0., 1., 0.)),   # FX
        (25, (1., 1., 0.)),   # ICP
        (26, (1., 1., 0.)),   # ICP
    ]
    for idx, color in bundle_colors:
        ren.add(VtkUtils.contour_smooth(bundle_segmentations[:, :, :, idx],
                                        colors=[color], levels=[1],
                                        opacities=[1.], smoothing=SMOOTHING))

    # First View (Front)
    ren.set_camera(position=(72.47, 343.04, 18.99),
                   focal_point=(71.01, 90.47, 56.05),
                   view_up=(0.03, 0.14, 0.99))
    # window.show(ren, size=(1000, 1000), reset_camera=False)
    window.record(ren, out_path=join(out_dir, "preview_front.png"),
                  size=(600, 600))

    # Second View (Top)
    ren.set_camera(position=(69.76, 144.06, 278.23),
                   focal_point=(71.01, 90.47, 56.05),
                   view_up=(0.01, -0.97, 0.23))
    window.record(ren, out_path=join(out_dir, "preview_top.png"),
                  size=(600, 600))
# Streamlines colored by their cluster-confidence (cci) values via the LUT.
stream_actor = actor.line(long_streamlines, cci, linewidth=0.1,
                          lookup_colormap=lut_cmap)
ren.add(stream_actor)

"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(ren)

# A still frame is recorded regardless of interactivity.
window.record(ren, n_frames=1, out_path='cci_streamlines.png',
              size=(800, 800))

"""
.. figure:: cci_streamlines.png
   :align: center

   Cluster Confidence Index of corpus callosum dataset.

If you think of each streamline as a sample of a potential pathway through a
complex landscape of white matter anatomy probed via water diffusion,
intuitively we have more confidence that pathways represented by many samples
(streamlines) reflect a more stable representation of the underlying
phenomenon we are trying to model (anatomical landscape) than do lone samples.

The CCI provides a voting system where by each streamline (within a set
Once all elements have been initialised, they have to be added to the show
manager in the following manner.
"""

current_size = (600, 600)
show_manager = window.ShowManager(size=current_size, title="DIPY UI Example")

# Every actor and UI element is added to the show manager's renderer.
show_manager.ren.add(cube_actor_1)
show_manager.ren.add(cube_actor_2)
show_manager.ren.add(panel)
show_manager.ren.add(text)
show_manager.ren.add(line_slider)
show_manager.ren.add(ring_slider)
show_manager.ren.add(listbox)
show_manager.ren.add(img)
show_manager.ren.reset_camera()
show_manager.ren.reset_clipping_range()
show_manager.ren.azimuth(30)

# Uncomment this to start the visualisation
# show_manager.start()

# Record a static screenshot of the assembled UI scene.
window.record(show_manager.ren, size=current_size, out_path="viz_ui.png")

"""
.. figure:: viz_ui.png
   :align: center

   **User interface example**.
"""
clusters = qb.cluster(streamlines)

"""
We will now visualize the clustering result.
"""

# Color each streamline according to the cluster they belong to.
# NOTE(review): the colormap is keyed on the ravelled centroid values here,
# unlike the other examples which use np.arange(len(clusters)).
colormap = actor.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='fornix_clusters_arclength.png', size=(600, 600))

# Enables/disables interactive visualization
interactive = False
if interactive:
    window.show(ren)

"""
.. figure:: fornix_clusters_arclength.png
   :align: center

   Showing the different clusters obtained by using the arc length.

Extending `Metric`
==================
metric = CosineMetric(feature)
qb = QuickBundles(threshold=0.1, metric=metric)
clusters = qb.cluster(streamlines)

# Color each streamline according to the cluster they belong to.
colormap = actor.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
# NOTE(review): ren was just created, so this clear() looks redundant —
# kept as-is to preserve behavior.
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='cosine_metric.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: cosine_metric.png
   :align: center

   Showing the streamlines colored according to their orientation.

.. include:: ../links_names.inc

References
----------

.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
# Extract feature of every streamline. centers = np.asarray(list(map(feature.extract, streamlines))) # Color each center of mass according to the cluster they belong to. colormap = actor.create_colormap(np.arange(len(clusters))) colormap_full = np.ones((len(streamlines), 3)) for cluster, color in zip(clusters, colormap): colormap_full[cluster.indices] = color # Visualization ren = window.Renderer() window.clear(ren) ren.SetBackground(0, 0, 0) ren.add(actor.streamtube(streamlines, window.colors.white, opacity=0.05)) ren.add(actor.point(centers[:, 0, :], colormap_full, point_radius=0.2)) window.record(ren, n_frames=1, out_path='center_of_mass_feature.png', size=(600, 600)) if interactive: window.show(ren) """ .. figure:: center_of_mass_feature.png :align: center Showing the center of mass of each streamline and colored according to the QuickBundles results. .. _clustering-examples-MidpointFeature: Midpoint Feature ================ **What:** Instances of `MidpointFeature` extract the middle point of a
                              right_top_pos=(200, 35))

"""
Position the camera.
"""

renderer.zoom(0.7)
renderer.roll(10.)
renderer.reset_clipping_range()

"""
Uncomment the following lines to start the interaction.
"""

# show_manager.initialize()
# show_manager.render()
# show_manager.start()

# Record the static scene; interaction lines above stay commented out.
window.record(renderer, out_path='mini_ui.png', size=(800, 800),
              reset_camera=False)

del show_manager

"""
.. figure:: mini_ui.png
   :align: center

   **A minimalistic user interface**.
"""
""" Fitting the model to this small volume of data, we calculate the ODF of this model on the sphere, and plot it. """ sf_fit = sf_model.fit(data_small) sf_odf = sf_fit.odf(sphere) fodf_spheres = actor.odf_slicer(sf_odf, sphere=sphere, scale=1.3, colormap='plasma') ren = window.Renderer() ren.add(fodf_spheres) print('Saving illustration as sf_odfs.png') window.record(ren, out_path='sf_odfs.png', size=(1000, 1000)) if interactive: window.show(ren) """ We can extract the peaks from the ODF, and plot these as well """ sf_peaks = dpp.peaks_from_model(sf_model, data_small, sphere, relative_peak_threshold=.5, min_separation_angle=25, return_sh=False)
renderer.add(slice_actor2)

renderer.reset_camera()
renderer.zoom(1.4)

"""
In order to interact with the data you will need to uncomment the line below.
"""

# window.show(renderer, size=(600, 600), reset_camera=False)

"""
Otherwise, you can save a screenshot using the following command.
"""

window.record(renderer, out_path='slices.png', size=(600, 600),
              reset_camera=False)

"""
.. figure:: slices.png
   :align: center

   Simple slice viewer.

Render slices from FA with your colormap
========================================

It is also possible to set the colormap of your preference. Here we are
loading an FA image and showing it in a non-standard way using an HSV
colormap.
"""

# Path under the user's home ~/.dipy data directory (call continues below).
fname_fa = os.path.join(os.path.expanduser('~'), '.dipy',
print("new surface colors") print(ut_vtk.get_polydata_colors(cube_polydata)) """ Visualize surfaces """ # get vtkActor cube_actor = ut_vtk.get_actor_from_polydata(cube_polydata) # renderer and scene renderer = window.Renderer() renderer.add(cube_actor) renderer.set_camera(position=(10, 5, 7), focal_point=(0.5, 0.5, 0.5)) renderer.zoom(3) # display # window.show(renderer, size=(600, 600), reset_camera=False) window.record(renderer, out_path='cube.png', size=(600, 600)) """ .. figure:: cube.png :align: center An example of a simple surface visualized with DIPY. .. include:: ../links_names.inc """
# Streamlines are moved into T1 space (inverse affine) before rendering.
streamlines_actor = actor.streamtube(
    list(move_streamlines(plot_streamlines, inv(t1_aff))),
    cmap.line_colors(streamlines), linewidth=0.1)

# Two copies of the T1 slicer: one slice at x=40, one at z=35.
vol_actor = actor.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(vol_actor)
ren.add(vol_actor2)

window.record(ren, out_path='sfm_streamlines.png', size=(800, 800))
if interactive:
    window.show(ren)

"""
.. figure:: sfm_streamlines.png
   :align: center

   **Sparse Fascicle Model tracks**

Finally, we can save these streamlines to a 'trk' file, for use in other
software, or for further analysis.
"""

from dipy.io.trackvis import save_trk
save_trk("sfm_detr.trk", streamlines, affine, labels.shape)
""" odf = f_fit.odf(sphere) print('fODF.shape (%d, %d, %d, %d)' % odf.shape) """ Display a part of the fODFs """ odf_actor = actor.odf_slicer(odf[16:36, :, 30:45], sphere=sphere, colormap='plasma', scale=0.6) odf_actor.display(y=0) odf_actor.RotateX(-90) ren = window.Renderer() ren.add(odf_actor) window.record(ren, out_path='fODFs.png', size=(600, 600), magnification=4) """ .. figure:: fODFs.png :align: center **Fiber Orientation Distribution Functions, in a small ROI of the brain**. References ---------- .. [Anderson2005] Anderson A. W., "Measurement of Fiber Orientation Distributions Using High Angular Resolution Diffusion Imaging", Magnetic Resonance in Medicine, 2005. .. [Kaden2016] Kaden E. et al., "Quantitative Mapping of the Per-Axon Diffusion
fit_wls = dti_wls.fit(data)

fa1 = fit_wls.fa
evals1 = fit_wls.evals
evecs1 = fit_wls.evecs

# RGB glyph colors derived from FA and the eigenvectors.
cfa1 = dti.color_fa(fa1, evecs1)
sphere = dpd.get_sphere('symmetric724')

"""
We visualize the ODFs in the ROI using ``dipy.viz`` module:
"""

ren = window.Renderer()
ren.add(actor.tensor_slicer(evals1, evecs1, scalar_colors=cfa1, sphere=sphere,
                            scale=0.3))
print('Saving illustration as tensor_ellipsoids_wls.png')
window.record(ren, out_path='tensor_ellipsoids_wls.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: tensor_ellipsoids_wls.png
   :align: center

   Tensor Ellipsoids.
"""

window.clear(ren)

"""
Next, we corrupt the data with some noise. To simulate a subject that moves
intermittently, we will replace a few of the images with a very low signal
    # NOTE(review): appears to re-place the slider when the window size
    # changes — confirm against the enclosing callback definition.
    if size != obj.GetSize():
        slider.place(ren)
        size = obj.GetSize()

show_m.initialize()

"""
Finally, please uncomment the following 3 lines so that you can interact with
the available 3D and 2D objects.
"""

# show_m.add_window_callback(win_callback)
# show_m.render()
# show_m.start()

ren.zoom(1.5)
ren.reset_clipping_range()

window.record(ren, out_path='bundles_and_a_slice.png', size=(1200, 900),
              reset_camera=False)

"""
.. figure:: bundles_and_a_slice.png
   :align: center

   **A few bundles with interactive slicing**.
"""

del show_m
===============================================

This is the default option when you are using ``line`` or ``streamtube``.
"""

renderer = window.Renderer()
stream_actor = actor.line(bundle_native)

# Camera values were obtained via renderer.camera_info() (see prose below).
renderer.set_camera(position=(-176.42, 118.52, 128.20),
                    focal_point=(113.30, 128.31, 76.56),
                    view_up=(0.18, 0.00, 0.98))

renderer.add(stream_actor)

# Uncomment the line below to show to display the window
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path="bundle1.png", size=(600, 600))

"""
.. figure:: bundle1.png
   :align: center

   **One orientation color for every streamline**.

You may wonder how we knew how to set the camera. This is very easy. You
just need to run ``window.show`` once see how you want to see the object and
then close the window and call the ``camera_info`` method which prints the
position, focal point and view up vectors of the camera.
"""

renderer.camera_info()
# Generate streamlines object streamlines = Streamlines(streamlines_generator) # Prepare the display objects. color = line_colors(streamlines) if window.have_vtk: streamlines_actor = actor.line(streamlines, line_colors(streamlines)) # Create the 3D display. r = window.Renderer() r.add(streamlines_actor) # Save still images for this static example. Or for interactivity use window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800)) if interactive: window.show(r) """ .. figure:: deterministic.png :align: center **Corpus Callosum Deterministic** We've created a deterministic set of streamlines, so called because if you repeat the fiber tracking (keeping all the inputs the same) you will get exactly the same set of streamlines. We can save the streamlines as a Trackvis file so it can be loaded into other software for visualization or further analysis. """
# use itertools to avoid global variables counter = itertools.count() def timer_callback(obj, event): cnt = next(counter) tb.message = "Let's count up to 100 and exit :" + str(cnt) showm.ren.azimuth(0.05 * cnt) sphere_actor.GetProperty().SetOpacity(cnt/100.) showm.render() if cnt == 100: showm.exit() renderer.add(tb) # Run every 200 milliseconds showm.add_timer_callback(True, 200, timer_callback) showm.start() window.record(showm.ren, size=(900, 768), out_path="viz_timer.png") """ .. figure:: viz_timer.png :align: center **Showing 100 spheres of random radii and opacity levels**. """
odf = multi_tensor_odf(sphere.vertices, mevals, angles, fractions)

from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()

# odf[None, None, None, :] turns the 1D ODF into the (1, 1, 1, N) volume
# shape that odf_slicer expects.
odf_actor = actor.odf_slicer(odf[None, None, None, :], sphere=sphere,
                             colormap='plasma')
odf_actor.RotateX(90)

ren.add(odf_actor)

print('Saving illustration as multi_tensor_simulation')
window.record(ren, out_path='multi_tensor_simulation.png', size=(300, 300))
if interactive:
    window.show(ren)

"""
.. figure:: multi_tensor_simulation.png
   :align: center

   Simulating a MultiTensor ODF.

.. include:: ../links_names.inc
"""
       [ 64.02451324,  88.43942261,  75.0697403 ]], dtype=float32)

`clusters` has also attributes like `centroids` (cluster representatives), and
methods like `add`, `remove`, and `clear` to modify the clustering result.

Lets first show the initial dataset.
"""

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()
ren.SetBackground(1, 1, 1)
# All streamlines in plain white before any clustering colors are applied.
ren.add(actor.streamtube(streamlines, window.colors.white))
window.record(ren, out_path='fornix_initial.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: fornix_initial.png
   :align: center

   Initial Fornix dataset.

Show the centroids of the fornix after clustering (with random colors):
"""

colormap = actor.create_colormap(np.arange(len(clusters)))

window.clear(ren)
Example #1: Bootstrap direction getter with CSD Model """ from dipy.direction import BootDirectionGetter from dipy.tracking.streamline import Streamlines from dipy.data import small_sphere boot_dg_csd = BootDirectionGetter.from_data(data, csd_model, max_angle=30., sphere=small_sphere) boot_streamline_generator = LocalTracking(boot_dg_csd, classifier, seeds, affine, step_size=.5) streamlines = Streamlines(boot_streamline_generator) renderer.clear() renderer.add(actor.line(streamlines, line_colors(streamlines))) window.record(renderer, out_path='bootstrap_dg_CSD.png', size=(600, 600)) """ .. figure:: bootstrap_dg_CSD.png :align: center **Corpus Callosum Bootstrap Probabilistic Direction Getter** We have created a bootstrapped probabilistic set of streamlines. If you repeat the fiber tracking (keeping all inputs the same) you will NOT get exactly the same set of streamlines. We can save the streamlines as a Trackvis file so it can be loaded into other software for visualization or further analysis. """ save_trk("bootstrap_dg_CSD.trk", streamlines, affine, labels.shape)
Every streamline will be coloured according to its orientation
"""

from dipy.viz.colormap import line_colors

"""
`actor.line` creates a streamline actor for streamline visualization
and `ren.add` adds this actor to the scene
"""

# NOTE(review): the prose above mentions ``actor.line`` but the code uses
# actor.streamtube — kept as-is.
ren.add(actor.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')

ren.SetBackground(1, 1, 1)
window.record(ren, out_path='tensor_tracks.png', size=(600, 600))
# Enables/disables interactive visualization
interactive = False
if interactive:
    window.show(ren)

"""
.. figure:: tensor_tracks.png
   :align: center

   Deterministic streamlines with EuDX on a Tensor Field.

References
----------

.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography",
                                    surface_color, surface_opacity)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(seedroi_actor)

"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(ren)

# A still frame of the scene is recorded regardless of interactivity.
window.record(ren, n_frames=1, out_path='plm_roi_sls.png',
              size=(800, 800))

"""
.. figure:: plm_roi_sls.png
   :align: center

   **A top view of corpus callosum streamlines with the blue transparent
   ROI in the center**.
"""

"""
Now we calculate the Path Length Map using the corpus callosum streamline
bundle and corpus callosum ROI.

NOTE: the mask used to seed the tracking does not have to be the Path
# use itertools to avoid global variables counter = itertools.count() def timer_callback(obj, event): cnt = next(counter) tb.message = "Let's count up to 100 and exit :" + str(cnt) renderer.azimuth(0.05 * cnt) sphere_actor.GetProperty().SetOpacity(cnt/100.) showm.render() if cnt == 100: showm.exit() renderer.add(tb) # Run every 200 milliseconds showm.add_timer_callback(True, 200, timer_callback) showm.start() window.record(renderer, size=(900, 768), out_path="viz_timer.png") """ .. figure:: viz_timer.png :align: center **Showing 100 spheres of random radii and opacity levels**. """
# Two copies of the T1 slicer: one slice at x=40, one at z=35.
vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)

# Save figures
window.record(r, n_frames=1, out_path='corpuscallosum_axial.png',
              size=(800, 800))
if interactive:
    window.show(r)
# Re-aim the camera along the x-axis for the second (sagittal-named) shot.
r.set_camera(position=[-1, 0, 0], focal_point=[0, 0, 0], view_up=[0, 0, 1])
window.record(r, n_frames=1, out_path='corpuscallosum_sagittal.png',
              size=(800, 800))
if interactive:
    window.show(r)

"""
.. figure:: corpuscallosum_axial.png
   :align: center

   **Corpus Callosum Axial**

.. include:: ../links_names.inc