def test_parallel_projection():
    ren = window.Renderer()
    axes = actor.axes()
    ren.add(axes)

    axes2 = actor.axes()
    axes2.SetPosition((2, 0, 0))
    ren.add(axes2)

    # Put the camera at an angle so that it can show the difference
    # between perspective and parallel projection.
    ren.set_camera((1.5, 1.5, 1.5))
    ren.GetActiveCamera().Zoom(2)

    # window.show(ren, reset_camera=True)
    ren.reset_camera()
    arr = window.snapshot(ren)

    ren.projection('parallel')
    # window.show(ren, reset_camera=False)
    arr2 = window.snapshot(ren)

    # With parallel projection the two axes have the same apparent size and
    # therefore occupy more pixels than in perspective projection, where the
    # farther axes appear smaller.
    npt.assert_equal(np.sum(arr2 > 0) > np.sum(arr > 0), True)
def test_dots(interactive=False):
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])

    dots_actor = actor.dots(points, color=(0, 255, 0))

    renderer = window.Renderer()
    renderer.add(dots_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    extent = renderer.GetActors().GetLastActor().GetBounds()
    npt.assert_equal(extent, (0.0, 1.0, 0.0, 1.0, 0.0, 0.0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.objects, 3)

    # Test one point.
    points = np.array([0, 0, 0])
    dot_actor = actor.dots(points, color=(0, 0, 255))

    renderer.clear()
    renderer.add(dot_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 0, 255))
    npt.assert_equal(report.objects, 1)
def test_rectangle_2d():
    window_size = (700, 700)
    show_manager = window.ShowManager(size=window_size)

    rect = ui.Rectangle2D(size=(100, 50))
    rect.set_position((50, 80))
    npt.assert_equal(rect.position, (50, 80))

    rect.color = (1, 0.5, 0)
    npt.assert_equal(rect.color, (1, 0.5, 0))

    rect.opacity = 0.5
    npt.assert_equal(rect.opacity, 0.5)

    # Check that the rectangle is drawn at the right place.
    show_manager.ren.add(rect)
    # Uncomment this to start the visualisation.
    # show_manager.start()

    colors = [rect.color]
    arr = window.snapshot(show_manager.ren, size=window_size, offscreen=True)
    report = window.analyze_snapshot(arr, colors=colors)
    assert report.objects == 1
    assert report.colors_found

    # Test visibility off.
    rect.set_visibility(False)
    arr = window.snapshot(show_manager.ren, size=window_size, offscreen=True)
    report = window.analyze_snapshot(arr)
    assert report.objects == 0
def test_ui_disk_2d():
    window_size = (700, 700)
    show_manager = window.ShowManager(size=window_size)

    disk = ui.Disk2D(outer_radius=20, inner_radius=5)
    disk.position = (50, 80)
    npt.assert_equal(disk.position, (50, 80))

    disk.color = (1, 0.5, 0)
    npt.assert_equal(disk.color, (1, 0.5, 0))

    disk.opacity = 0.5
    npt.assert_equal(disk.opacity, 0.5)

    # Check that the disk is drawn at the right place.
    show_manager.ren.add(disk)
    # Uncomment this to start the visualisation.
    # show_manager.start()

    colors = [disk.color]
    arr = window.snapshot(show_manager.ren, size=window_size, offscreen=True)
    report = window.analyze_snapshot(arr, colors=colors)
    assert report.objects == 1
    assert report.colors_found

    # Test visibility off.
    disk.set_visibility(False)
    arr = window.snapshot(show_manager.ren, size=window_size, offscreen=True)
    report = window.analyze_snapshot(arr)
    assert report.objects == 0
def gen_sagittal_views(show=False):
    from dipy.viz import window
    from dipy.viz.clustering import show_clusters

    streamlines = get_data()

    thresholds = [40, 30, 25]  # , 20, 15]
    qbx_class = QuickBundlesX(thresholds)
    print("Clustering {} streamlines ({})...".format(len(streamlines),
                                                     thresholds))
    qbx = qbx_class.cluster(streamlines)

    clusters = qbx.get_clusters(len(thresholds))
    clusters.refdata = streamlines
    print("Displaying {} clusters...".format(len(clusters)))

    tree = qbx.get_tree_cluster_map()
    tree.refdata = streamlines
    color_tree(tree)

    # # TMP
    # clusters = tree.get_clusters(len(thresholds))
    # clusters.refdata = streamlines
    # ren = show_clusters(clusters, show=True)
    # # window.snapshot(ren, fname="sagittal_{}".format(thresholds[-1]),
    # #                 size=(1200, 1200))
    # return

    for level in range(1, len(thresholds) + 1):
        print(level, thresholds[level - 1])
        clusters = tree.get_clusters(level)
        clusters.refdata = streamlines
        ren = show_clusters(clusters, show=show)
        ren.reset_camera_tight()
        window.snapshot(ren,
                        fname="sagittal_{}".format(thresholds[level - 1]),
                        size=(1200, 1200))
def test_renderer():
    ren = window.Renderer()

    # Background color for renderer: (1, 0.5, 0).
    # 0.001 is added here to remove numerical errors when moving from
    # float to int values.
    bg_float = (1, 0.501, 0)

    # That will come in the image in the 0-255 uint scale.
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
    # window.show(ren)
    arr = window.snapshot(ren)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    ren.add(axes)
    # window.show(ren)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm(axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    window.add(ren, axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm_all()
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.renderer(bg_float)
    ren2.background((0, 0, 0.))
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 3)

    window.rm(ren2, axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 0)
def visualize_roi(roi, affine_or_mapping=None, static_img=None,
                  roi_affine=None, static_affine=None, reg_template=None,
                  scene=None, color=np.array([1, 0, 0]), opacity=1.0,
                  inline=False, interact=False):
    """
    Render a region of interest into a VTK viz as a volume.
    """
    if not isinstance(roi, np.ndarray):
        if isinstance(roi, str):
            roi = nib.load(roi).get_fdata()
        else:
            roi = roi.get_fdata()

    if affine_or_mapping is not None:
        if isinstance(affine_or_mapping, np.ndarray):
            # This is an affine:
            if (static_img is None or roi_affine is None
                    or static_affine is None):
                raise ValueError(
                    "If using an affine to transform an ROI, need to also "
                    "specify all of the following inputs: `static_img`, "
                    "`roi_affine`, `static_affine`")
            roi = reg.resample(roi, static_img, roi_affine, static_affine)
        else:
            # Assume it is a mapping:
            if (isinstance(affine_or_mapping, str)
                    or isinstance(affine_or_mapping, nib.Nifti1Image)):
                if reg_template is None or static_img is None:
                    raise ValueError(
                        "If using a mapping to transform an ROI, need to "
                        "also specify all of the following inputs: "
                        "`reg_template`, `static_img`")
                affine_or_mapping = reg.read_mapping(affine_or_mapping,
                                                     static_img,
                                                     reg_template)

            roi = auv.patch_up_roi(
                affine_or_mapping.transform_inverse(
                    roi, interpolation='nearest')).astype(bool)

    if scene is None:
        scene = window.Scene()

    roi_actor = actor.contour_from_roi(roi, color=color, opacity=opacity)
    scene.add(roi_actor)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        window.snapshot(scene, fname=fname)
        display.display_png(display.Image(fname))

    return _inline_interact(scene, inline, interact)
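# A minimal usage sketch for visualize_roi on an in-memory mask; the array
# contents below are placeholders, not data from the source.
import numpy as np

roi_mask = np.zeros((20, 20, 20))
roi_mask[8:12, 8:12, 8:12] = 1  # a small hypothetical cubic ROI
scene = visualize_roi(roi_mask, color=np.array([0, 1, 0]), opacity=0.8)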
def test_order_transparent():
    renderer = window.Renderer()

    lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
             np.array([[-1, 1, 0.], [1, 1, 0.]])]
    colors = np.array([[1., 0., 0.], [0., .5, 0.]])
    stream_actor = actor.streamtube(lines, colors,
                                    linewidth=0.3, opacity=0.5)

    renderer.add(stream_actor)
    renderer.reset_camera()

    # Green in front.
    renderer.elevation(90)
    renderer.camera().OrthogonalizeViewUp()
    renderer.reset_clipping_range()
    renderer.reset_camera()

    not_xvfb = os.environ.get("TEST_WITH_XVFB", False)

    if not_xvfb:
        arr = window.snapshot(renderer, fname='green_front.png',
                              offscreen=True, order_transparent=False)
    else:
        arr = window.snapshot(renderer, fname='green_front.png',
                              offscreen=False, order_transparent=False)

    # Therefore the green component must have a higher value (in RGB terms).
    npt.assert_equal(arr[150, 150][1] > arr[150, 150][0], True)

    # Red in front.
    renderer.elevation(-180)
    renderer.camera().OrthogonalizeViewUp()
    renderer.reset_clipping_range()

    if not_xvfb:
        arr = window.snapshot(renderer, fname='red_front.png',
                              offscreen=True, order_transparent=True)
    else:
        arr = window.snapshot(renderer, fname='red_front.png',
                              offscreen=False, order_transparent=True)

    # Therefore the red component must have a higher value (in RGB terms).
    npt.assert_equal(arr[150, 150][0] > arr[150, 150][1], True)
def _inline_interact(scene, inline, interact):
    """
    Helper function to reuse across viz functions.
    """
    if interact:
        window.show(scene)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        window.snapshot(scene, fname=fname, size=(1200, 1200))
        display.display_png(display.Image(fname))

    return scene
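# A sketch of how a viz function can delegate its display behavior to
# _inline_interact; the scene contents are a placeholder.
scene = window.Scene()
scene.add(actor.axes())
scene = _inline_interact(scene, inline=False, interact=False)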
def test_text_widget():
    interactive = False

    renderer = window.Renderer()
    axes = actor.axes()
    window.add(renderer, axes)
    renderer.ResetCamera()

    show_manager = window.ShowManager(renderer, size=(900, 900))

    if interactive:
        show_manager.initialize()
        show_manager.render()

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')

    def button_callback(obj, event):
        print('Button Pressed')

    button = widget.button(show_manager.iren, show_manager.ren,
                           button_callback, button_png,
                           (.8, 1.2), (100, 100))

    global rulez
    rulez = True

    def text_callback(obj, event):
        global rulez
        print('Text selected')
        if rulez:
            obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
            rulez = False
        else:
            obj.GetTextActor().SetInput("Diffusion Imaging in Python")
            rulez = True
        show_manager.render()

    text = widget.text(show_manager.iren, show_manager.ren,
                       text_callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.),
                       right_top_pos=(0.4, 0.05),
                       opacity=1.,
                       border=False)

    if not interactive:
        button.Off()
        text.Off()

    if interactive:
        show_manager.render()
        show_manager.start()

    arr = window.snapshot(renderer, size=(900, 900))
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
def take_snapshot(bundles, interact_with=False):
    # Note: relies on `ren` and `resolution` being defined at module level.
    if interact_with:
        showm = window.ShowManager(ren, size=resolution, reset_camera=False)
        showm.start()
        ren.camera_info()

    snapshot_fname = "_".join(bundles) + ".png"
    print("Saving {}".format(snapshot_fname))
    window.snapshot(ren, fname=snapshot_fname, size=resolution,
                    offscreen=True, order_transparent=False)
    return snapshot_fname
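# Hypothetical module-level setup assumed by take_snapshot; the bundle
# names are placeholders.
ren = window.Renderer()
resolution = (1200, 1200)
fname = take_snapshot(["CST_L", "CST_R"])  # writes CST_L_CST_R.png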
def test_streamtube_and_line_actors():
    renderer = window.renderer()

    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])

    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    window.add(renderer, c)

    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    window.add(renderer, c)

    # Create streamtubes of the same lines and shift them a bit.
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # As before, but with splines.
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])
def gen_qbx_tree_progress():
    from dipy.viz import window
    from dipy.viz.clustering import show_clusters_graph_progress

    streamlines = get_data()

    thresholds = [40, 30, 25]  # , 20, 15]
    qbx_class = QuickBundlesX(thresholds)
    print("Clustering {} streamlines ({})...".format(len(streamlines),
                                                     thresholds))
    qbx = qbx_class.cluster(streamlines)

    print("Displaying clusters graph...")
    tree = qbx.get_tree_cluster_map()
    tree.refdata = streamlines
    color_tree(tree, min_level=0)

    # max_indices = [100, 500, 1000, 3000, len(streamlines)]
    max_indices = [100, 250, 500, 750, 1000, 2000, 3000, 5000,
                   len(streamlines)]
    # max_indices = np.arange(10, len(streamlines), 100)
    for i, ren in enumerate(show_clusters_graph_progress(tree, max_indices,
                                                         show=False)):
        ren.reset_camera_tight()
        window.snapshot(ren,
                        fname="tree_{}_part_{}".format(
                            "-".join(map(str, thresholds)), i),
                        size=(1200, 1200))
def test_timer():
    """Test adding a timer and exiting the window and app from inside
    the timer callback.
    """
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 50], [300, 0, 0, 100]])
    xyzr2 = np.array([[0, 200, 0, 30], [100, 200, 0, 50],
                      [300, 200, 0, 100]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.45]])

    renderer = window.Renderer()
    global sphere_actor, tb, cnt

    sphere_actor = actor.sphere(centers=xyzr[:, :3],
                                colors=colors[:],
                                radii=xyzr[:, 3])

    sphere = get_sphere('repulsion724')

    sphere_actor2 = actor.sphere(centers=xyzr2[:, :3],
                                 colors=colors[:],
                                 radii=xyzr2[:, 3],
                                 vertices=sphere.vertices,
                                 faces=sphere.faces.astype('i8'))

    renderer.add(sphere_actor)
    renderer.add(sphere_actor2)

    tb = ui.TextBlock2D()
    cnt = 0

    global showm
    showm = window.ShowManager(renderer,
                               size=(1024, 768),
                               reset_camera=False,
                               order_transparent=True)

    showm.initialize()

    def timer_callback(obj, event):
        global cnt, sphere_actor, showm, tb
        cnt += 1
        tb.message = "Let's count to 10 and exit: " + str(cnt)
        showm.render()
        if cnt > 9:
            showm.exit()

    renderer.add(tb)

    # Run every 200 milliseconds.
    showm.add_timer_callback(True, 200, timer_callback)
    showm.start()

    arr = window.snapshot(renderer)
    npt.assert_(np.sum(arr) > 0)
def test_spheres(interactive=False):
    xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
    colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])

    renderer = window.Renderer()
    sphere_actor = actor.sphere(centers=xyzr[:, :3],
                                colors=colors[:],
                                radii=xyzr[:, 3])
    renderer.add(sphere_actor)

    if interactive:
        window.show(renderer, order_transparent=True)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 3)
def test_points(interactive=False):
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    points_actor = actor.point(points, colors)

    renderer = window.Renderer()
    renderer.add(points_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=colors)
    npt.assert_equal(report.objects, 3)
def test_bundle_maps():
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # Add lower values.
    metric[100, :, :] = 100 * 0.5

    # Create a nice orange-red colormap.
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)
    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1,
                            lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # Try other input options for colors.
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
def test_contour_from_roi():
    # Render volume.
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization.
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # Test on real streamlines using the tracking example.
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from dipy.viz.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.get_affine()

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1],
                                  affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                           [0, 1, 1], 0.5)

    # Create the 3D display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # Assert that the seed ROI rendering is not far away from the
    # streamlines (affine error).
    npt.assert_equal(report3.objects, report4.objects)
def test_button_and_slider_widgets():
    recording = False
    filename = "test_button_and_slider_widgets.log.gz"
    recording_filename = pjoin(DATA_DIR, filename)
    renderer = window.Renderer()

    # Create some minimalistic streamlines.
    lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
             np.array([[-1, 1, 0.], [1, 1, 0.]])]
    colors = np.array([[1., 0., 0.], [0.3, 0.7, 0.]])
    stream_actor = actor.streamtube(lines, colors)

    states = {'camera_button_count': 0,
              'plus_button_count': 0,
              'minus_button_count': 0,
              'slider_moved_count': 0}

    renderer.add(stream_actor)

    # The show manager allows to break the rendering process in steps so
    # that the widgets can be added properly.
    show_manager = window.ShowManager(renderer, size=(800, 800))

    if recording:
        show_manager.initialize()
        show_manager.render()

    def button_callback(obj, event):
        print('Camera pressed')
        states['camera_button_count'] += 1

    def button_plus_callback(obj, event):
        print('+ pressed')
        states['plus_button_count'] += 1

    def button_minus_callback(obj, event):
        print('- pressed')
        states['minus_button_count'] += 1

    fetch_viz_icons()
    button_png = read_viz_icons(fname='camera.png')

    button = widget.button(show_manager.iren,
                           show_manager.ren,
                           button_callback,
                           button_png, (.98, 1.), (80, 50))

    button_png_plus = read_viz_icons(fname='plus.png')
    button_plus = widget.button(show_manager.iren,
                                show_manager.ren,
                                button_plus_callback,
                                button_png_plus, (.98, .9), (120, 50))

    button_png_minus = read_viz_icons(fname='minus.png')
    button_minus = widget.button(show_manager.iren,
                                 show_manager.ren,
                                 button_minus_callback,
                                 button_png_minus, (.98, .9), (50, 50))

    def print_status(obj, event):
        rep = obj.GetRepresentation()
        stream_actor.SetPosition((rep.GetValue(), 0, 0))
        states['slider_moved_count'] += 1

    slider = widget.slider(show_manager.iren, show_manager.ren,
                           callback=print_status,
                           min_value=-1,
                           max_value=1,
                           value=0.,
                           label="X",
                           right_normalized_pos=(.98, 0.6),
                           size=(120, 0), label_format="%0.2lf")

    # This callback is used to update the buttons/sliders' position so
    # they can stay on the right side of the window when the window is
    # being resized.

    global size
    size = renderer.GetSize()

    if recording:
        show_manager.record_events_to_file(recording_filename)
        print(states)
    else:
        show_manager.play_events_from_file(recording_filename)
        npt.assert_equal(states["camera_button_count"], 7)
        npt.assert_equal(states["plus_button_count"], 3)
        npt.assert_equal(states["minus_button_count"], 4)
        npt.assert_equal(states["slider_moved_count"], 116)

    if not recording:
        button.Off()
        slider.Off()
        # Uncomment below to test the slider and button with analyze:
        # button.place(renderer)
        # slider.place(renderer)

        arr = window.snapshot(renderer, size=(800, 800))
        report = window.analyze_snapshot(arr)
        # import pylab as plt
        # plt.imshow(report.labels, origin='lower')
        # plt.show()
        npt.assert_equal(report.objects, 4)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
def test_text_block_2d_justification():
    window_size = (700, 700)
    show_manager = window.ShowManager(size=window_size)

    # To help visualize the text positions.
    grid_size = (500, 500)
    bottom, middle, top = 50, 300, 550
    left, center, right = 50, 300, 550
    line_color = (1, 0, 0)

    grid_top = (center, top), (grid_size[0], 1)
    grid_bottom = (center, bottom), (grid_size[0], 1)
    grid_left = (left, middle), (1, grid_size[1])
    grid_right = (right, middle), (1, grid_size[1])
    grid_middle = (center, middle), (grid_size[0], 1)
    grid_center = (center, middle), (1, grid_size[1])

    grid_specs = [grid_top, grid_bottom, grid_left,
                  grid_right, grid_middle, grid_center]
    for spec in grid_specs:
        line = ui.Rectangle2D(size=spec[1], color=line_color)
        line.center = spec[0]
        show_manager.ren.add(line)

    font_size = 60
    bg_color = (1, 1, 1)
    # One text block per (position, color, justification) combination.
    text_specs = [((left, top), (1, 0, 0), "left", "top"),
                  ((center, top), (0, 1, 0), "center", "top"),
                  ((right, top), (0, 0, 1), "right", "top"),
                  ((left, middle), (1, 1, 0), "left", "middle"),
                  ((center, middle), (0, 1, 1), "center", "middle"),
                  ((right, middle), (1, 0, 1), "right", "middle"),
                  ((left, bottom), (0.5, 0, 1), "left", "bottom"),
                  ((center, bottom), (1, 0.5, 0), "center", "bottom"),
                  ((right, bottom), (0, 1, 0.5), "right", "bottom")]
    texts = [ui.TextBlock2D("HH", position=position, font_size=font_size,
                            color=color, bg_color=bg_color,
                            justification=justification,
                            vertical_justification=vjustification)
             for position, color, justification, vjustification
             in text_specs]
    show_manager.ren.add(*texts)

    # Uncomment this to start the visualisation.
    # show_manager.start()

    arr = window.snapshot(show_manager.ren, size=window_size,
                          offscreen=True)
    if vtk.vtkVersion.GetVTKVersion() == "6.0.0":
        expected = np.load(pjoin(DATA_DIR, "test_ui_text_block.npz"))
        npt.assert_array_almost_equal(arr, expected["arr_0"])
def test_slicer():
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    window.add(renderer, slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Copy pixels into a numpy array directly.
    arr = window.snapshot(renderer, 'test_slicer.png')
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)

    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)
    print(report)

    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can also cut a smaller part of the image directly.
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()

    window.add(renderer, slicer)

    # Save pixels to a png file, not a numpy array.
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        arr = window.snapshot(renderer, fname)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)

    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut2 = slicer_lut.copy()
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
def test_button_and_slider_widgets():
    interactive = False
    renderer = window.Renderer()

    # Create some minimalistic streamlines.
    lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
             np.array([[-1, 1, 0.], [1, 1, 0.]])]
    colors = np.array([[1., 0., 0.], [0.3, 0.7, 0.]])
    stream_actor = actor.streamtube(lines, colors)

    renderer.add(stream_actor)

    # The show manager allows to break the rendering process in steps so
    # that the widgets can be added properly.
    show_manager = window.ShowManager(renderer, size=(800, 800))

    if interactive:
        show_manager.initialize()
        show_manager.render()

    def button_callback(obj, event):
        print('Camera pressed')

    def button_plus_callback(obj, event):
        print('+ pressed')

    def button_minus_callback(obj, event):
        print('- pressed')

    fetch_viz_icons()
    button_png = read_viz_icons(fname='camera.png')

    button = widget.button(show_manager.iren,
                           show_manager.ren,
                           button_callback,
                           button_png, (.98, 1.), (80, 50))

    button_png_plus = read_viz_icons(fname='plus.png')
    button_plus = widget.button(show_manager.iren,
                                show_manager.ren,
                                button_plus_callback,
                                button_png_plus, (.98, .9), (120, 50))

    button_png_minus = read_viz_icons(fname='minus.png')
    button_minus = widget.button(show_manager.iren,
                                 show_manager.ren,
                                 button_minus_callback,
                                 button_png_minus, (.98, .9), (50, 50))

    def print_status(obj, event):
        rep = obj.GetRepresentation()
        stream_actor.SetPosition((rep.GetValue(), 0, 0))

    slider = widget.slider(show_manager.iren, show_manager.ren,
                           callback=print_status,
                           min_value=-1,
                           max_value=1,
                           value=0.,
                           label="X",
                           right_normalized_pos=(.98, 0.6),
                           size=(120, 0), label_format="%0.2lf")

    # This callback is used to update the buttons/sliders' position so
    # they can stay on the right side of the window when the window is
    # being resized.

    global size
    size = renderer.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():
            button.place(renderer)
            button_plus.place(renderer)
            button_minus.place(renderer)
            slider.place(renderer)
            size = obj.GetSize()

    if interactive:
        # show_manager.add_window_callback(win_callback)
        # You can also register any callback in a vtk way like this:
        # show_manager.window.AddObserver(vtk.vtkCommand.ModifiedEvent,
        #                                 win_callback)
        show_manager.render()
        show_manager.start()

    if not interactive:
        button.Off()
        slider.Off()
        # Uncomment below to test the slider and button with analyze:
        # button.place(renderer)
        # slider.place(renderer)

        arr = window.snapshot(renderer, size=(800, 800))
        report = window.analyze_snapshot(arr)
        npt.assert_equal(report.objects, 2)
        # imshow(report.labels, origin='lower')

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
def test_slicer():
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    renderer.add(slicer)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Copy pixels into a numpy array directly.
    arr = window.snapshot(renderer, 'test_slicer.png', offscreen=True)
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)

    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can also cut a smaller part of the image directly.
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()

    renderer.add(slicer)

    # Save pixels to a png file, not a numpy array.
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        window.snapshot(renderer, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)

    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    renderer.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    renderer.clear()

    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])

    from dipy.align.reslice import reslice

    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))

    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    # window.show(renderer, reset_camera=False)
    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
def test_odf_slicer(interactive=False):
    sphere = get_sphere('symmetric362')

    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='jet')

    fa = np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)

    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)

    # With mask equal to zero everything should be black.
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)
    os.remove(fname)
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    output_names = ['axial_superior', 'axial_inferior',
                    'coronal_posterior', 'coronal_anterior',
                    'sagittal_left', 'sagittal_right']
    output_paths = [
        os.path.join(os.path.dirname(args.output),
                     '{}_' + os.path.basename(args.output)).format(name)
        for name in output_names]

    assert_inputs_exist(parser, [args.bundle, args.map])
    assert_outputs_exists(parser, args, output_paths)

    assignment = np.load(args.map)['arr_0']
    lut = actor.colormap_lookup_table(scale_range=(np.min(assignment),
                                                   np.max(assignment)),
                                      hue_range=(0.1, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(1., 1.))

    tubes = actor.line(nib.streamlines.load(args.bundle).streamlines,
                       assignment, lookup_colormap=lut)
    scalar_bar = actor.scalar_bar(lut)

    ren = window.Renderer()
    ren.add(tubes)
    ren.add(scalar_bar)

    window.snapshot(ren, output_paths[0])

    ren.pitch(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[1])

    ren.pitch(90)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[2])

    ren.pitch(180)
    ren.set_camera(view_up=(0, 0, 1))
    ren.reset_camera()
    window.snapshot(ren, output_paths[3])

    ren.yaw(90)
    ren.reset_camera()
    window.snapshot(ren, output_paths[4])

    ren.yaw(180)
    ren.reset_camera()
    window.snapshot(ren, output_paths[5])
def test_active_camera():
    renderer = window.Renderer()
    renderer.add(actor.axes(scale=(1, 1, 1)))

    renderer.reset_camera()
    renderer.reset_clipping_range()

    direction = renderer.camera_direction()
    position, focal_point, view_up = renderer.get_camera()

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)
    position, focal_point, view_up = renderer.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    renderer.zoom(1.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_array_almost_equal(position, new_position)

    renderer.zoom(1)

    # Rotate around the focal point.
    renderer.azimuth(90)
    position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # Rotate around the camera's center.
    renderer.yaw(90)
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    renderer.yaw(-90)
    renderer.elevation(90)
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # Vertical rotation of the camera around the focal point.
    renderer.pitch(10)
    renderer.pitch(-10)

    # Rotate around the direction of projection.
    renderer.roll(90)

    # Inverted normalized distance from the focal point along the
    # direction of the camera.
    position, _, _ = renderer.get_camera()
    renderer.dolly(0.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])
def weighting_streamlines(out_folder_name, streamlines, bvec_file,
                          weight_by="1.5_2_AxPasi5", hue=[0.0, 1.0],
                          saturation=[0.0, 1.0], scale=[2, 7],
                          fig_type=""):
    """
    weight_by = '1.5_2_AxPasi5'
    hue = [0.0, 1.0]
    saturation = [0.0, 1.0]
    scale = [3, 6]
    """
    from dipy.viz import window, actor
    from dipy.tracking.streamline import values_from_volume

    weight_by_data, affine = load_weight_by_img(bvec_file, weight_by)
    stream = list(streamlines)
    vol_per_tract = values_from_volume(weight_by_data, stream,
                                       affine=affine)

    pfr_data = load_weight_by_img(bvec_file, "1.5_2_AxFr5")[0]
    pfr_per_tract = values_from_volume(pfr_data, stream, affine=affine)

    # Leave a chosen quantile out of the calculation of the mean value
    # per tract:
    vol_vec = weight_by_data.flatten()
    q = np.quantile(vol_vec[vol_vec > 0], 0.95)
    mean_vol_per_tract = []
    for s, pfr in zip(vol_per_tract, pfr_per_tract):
        s = np.asanyarray(s)
        pfr = np.asanyarray(pfr)
        # Keep values below the outlier quantile that also have pfr > 0.5.
        # (The original used `[s < q] and [pfr > 0.5]`, which silently
        # discards the quantile mask; an elementwise & is the stated intent.)
        mask = (s < q) & (pfr > 0.5)
        mean_vol_per_tract.append(np.nanmean(s[mask]))

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    streamlines_actor = actor.line(streamlines, mean_vol_per_tract,
                                   linewidth=1, lookup_colormap=lut_cmap)
    bar = actor.scalar_bar(lut_cmap)

    r = window.Renderer()
    r.add(streamlines_actor)
    r.add(bar)

    mean_pasi_weighted_img = \
        f"{out_folder_name}/mean_pasi_weighted{fig_type}.png"
    # window.show(r)
    # r.set_camera(r.camera_info())
    r.set_camera(position=(-389.00, 225.24, 62.02),
                 focal_point=(1.78, -3.27, -12.65),
                 view_up=(0.00, -0.31, 0.95))
    # window.record(r, out_path=mean_pasi_weighted_img, size=(800, 800))
    window.snapshot(r, fname=mean_pasi_weighted_img, size=(800, 800))
    return r
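# Hypothetical invocation of weighting_streamlines; the output folder,
# tractogram, and bvec paths are placeholders for subject-specific files
# that load_weight_by_img must be able to resolve.
import nibabel as nib

streamlines = nib.streamlines.load('tracts.trk').streamlines
r = weighting_streamlines('out_dir', streamlines, 'subject.bvec',
                          weight_by='1.5_2_AxPasi5', scale=[2, 7])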
def Render_3D(file_name, view, camera_pos, focal_point, view_up, color):
    # Load VTK.
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy
    from dipy.viz import window, actor
    import os

    # Read the surface from file.
    if file_name[-3:] == 'vtk':
        reader = vtk.vtkPolyDataReader()
    elif file_name[-3:] == 'ply':
        reader = vtk.vtkPLYReader()
    elif file_name[-3:] == 'stl':
        reader = vtk.vtkSTLReader()
    reader.SetFileName(file_name)

    objectMapper = vtk.vtkPolyDataMapper()
    objectMapper.SetInputConnection(reader.GetOutputPort())
    objectMapper.ScalarVisibilityOff()

    objectActor = vtk.vtkActor()
    objectActor.SetMapper(objectMapper)
    # objectActor.GetProperty().SetColor(0.5, 0.5, 0.5)  # grey
    # objectActor.GetProperty().SetColor(.24, .70, .44)  # mediumseagreen
    # objectActor.GetProperty().SetColor(0.498039, 1, 0.831373)  # springgreen
    objectActor.GetProperty().SetColor(color[0], color[1], color[2])

    # Plain-VTK alternative kept for reference:
    # ren = vtk.vtkRenderer()
    # ren.AddActor(objectActor)
    # ren.SetBackground(0.1, 0.1, 0.1)
    # renWin = vtk.vtkRenderWindow()
    # renWin.AddRenderer(ren)
    # renWin.SetWindowName("surface")
    # renWin.SetSize(500, 500)
    # iren = vtk.vtkRenderWindowInteractor()
    # iren.SetRenderWindow(renWin)
    # style = vtk.vtkInteractorStyleSwitch()
    # style.SetCurrentStyleToTrackballCamera()
    # iren.SetInteractorStyle(style)
    # iren.Initialize()
    # iren.Start()

    renderer = window.Renderer()
    renderer.clear()
    # renderer.set_camera(position=camera_pos[d],
    #                     focal_point=focal_point[d],
    #                     view_up=view_up[d])
    renderer.set_camera(position=camera_pos,
                        focal_point=focal_point,
                        view_up=view_up)
    renderer.add(objectActor)

    show_m = window.ShowManager(renderer, size=(800, 800))
    # show_m.initialize()
    # show_m.render()
    # show_m.start()
    # renderer.camera_info()

    fname = os.path.basename(file_name)
    fname = 'images/' + fname[0:-4] + '_' + view + '.png'
    window.snapshot(renderer, fname=fname, size=(800, 800),
                    offscreen=True, order_transparent=False)
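# Hypothetical call to Render_3D; the surface file, camera parameters,
# and color below are placeholders.
Render_3D('surfaces/CST_left.vtk', view='sagittal',
          camera_pos=(-400.0, 0.0, 0.0), focal_point=(0.0, 0.0, 0.0),
          view_up=(0.0, 0.0, 1.0), color=(0.9, 0.4, 0.2))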
def Render_All(folder, view, camera_pos, focal_point, view_up, norm):
    # Load VTK.
    import vtk
    from vtk.util.numpy_support import vtk_to_numpy
    from dipy.viz import window, actor
    import os
    import glob
    from matplotlib import cm

    # TractSeg surfaces to ignore.
    ignorefiles = ['CC.vtk', 'CA.vtk', 'FX_left.vtk', 'FX_right.vtk']

    renderer = window.Renderer()
    renderer.clear()
    # renderer.set_camera(position=camera_pos[d],
    #                     focal_point=focal_point[d],
    #                     view_up=view_up[d])
    renderer.set_camera(position=camera_pos,
                        focal_point=focal_point,
                        view_up=view_up)

    count = 1
    # Add files.
    # for file_name in glob.glob(
    #         '/Users/lindseykitchell/Downloads/surfaces/' + "/*.vtk"):
    for file_name in glob.glob(folder + "/*.vtk"):
        color = list(cm.rainbow(norm(count)))[0:3]
        if os.path.basename(file_name) in ignorefiles:
            count += 1
        else:
            # Read the surface from file.
            if file_name[-3:] == 'vtk':
                reader = vtk.vtkPolyDataReader()
            elif file_name[-3:] == 'ply':
                reader = vtk.vtkPLYReader()
            elif file_name[-3:] == 'stl':
                reader = vtk.vtkSTLReader()
            reader.SetFileName(file_name)

            objectMapper = vtk.vtkPolyDataMapper()
            objectMapper.SetInputConnection(reader.GetOutputPort())
            objectMapper.ScalarVisibilityOff()

            objectActor = vtk.vtkActor()
            objectActor.SetMapper(objectMapper)
            # objectActor.GetProperty().SetColor(0.5, 0.5, 0.5)  # grey
            objectActor.GetProperty().SetColor(color[0], color[1], color[2])

            renderer.add(objectActor)
            count += 1

    show_m = window.ShowManager(renderer, size=(800, 800))
    # show_m.initialize()
    # show_m.render()
    # show_m.start()
    # renderer.camera_info()

    fname = 'all_surfaces'
    fname = 'images/' + fname + '_' + view + '.png'
    window.snapshot(renderer, fname=fname, size=(800, 800),
                    offscreen=True, order_transparent=False)
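# Render_All expects a matplotlib Normalize instance so that
# cm.rainbow(norm(count)) maps each surface index to a color.
# A usage sketch with placeholder values; vmin/vmax should span the
# expected number of surfaces in the folder.
from matplotlib.colors import Normalize

norm = Normalize(vmin=1, vmax=72)
Render_All('surfaces', view='axial',
           camera_pos=(0.0, 0.0, 500.0), focal_point=(0.0, 0.0, 0.0),
           view_up=(0.0, 1.0, 0.0), norm=norm)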
def plotTrk(trkFile, target, anatFile, roi=None,
            xSlice=None, ySlice=None, zSlice=None,
            xRot=None, yRot=None, zRot=None):

    anatImage = nibabel.load(anatFile)
    trkImage = [s[0] for s in nibabel.trackvis.read(
        trkFile, points_space='rasmm')[0]]

    ren = window.Renderer()
    trkActor = actor.line(trkImage,
                          dipy.viz.colormap.line_colors(trkImage))

    if xSlice is not None:
        anatActorSliceX = actor.slicer(anatImage.get_data(),
                                       anatImage.affine)
        anatActorSliceX.display(xSlice, None, None)
        # Apply rotation.
        anatActorSliceX.RotateX(xRot)
        anatActorSliceX.RotateY(yRot)
        anatActorSliceX.RotateZ(zRot)
        ren.add(anatActorSliceX)

    if ySlice is not None:
        anatActorSliceY = actor.slicer(anatImage.get_data(),
                                       anatImage.affine)
        anatActorSliceY.display(None, ySlice, None)
        # Apply rotation.
        anatActorSliceY.RotateX(xRot)
        anatActorSliceY.RotateY(yRot)
        anatActorSliceY.RotateZ(zRot)
        ren.add(anatActorSliceY)

    if zSlice is not None:
        anatActorSliceZ = actor.slicer(anatImage.get_data(),
                                       anatImage.affine)
        anatActorSliceZ.display(None, None, zSlice)
        # Apply rotation.
        anatActorSliceZ.RotateX(xRot)
        anatActorSliceZ.RotateY(yRot)
        anatActorSliceZ.RotateZ(zRot)
        ren.add(anatActorSliceZ)

    trkActor.RotateX(xRot)
    trkActor.RotateY(yRot)
    trkActor.RotateZ(zRot)

    ren.add(trkActor)

    # Not in dipy 0.11.0; wait until the next version.
    # Already fixed here: https://github.com/nipy/dipy/pull/1163
    # if roi is not None:
    #     roiImage = nibabel.load(roi)
    #     roiActor = dipy.viz.fvtk.contour(
    #         roiImage.get_data(), affine=anatomicalImage.affine,
    #         levels=[1], colors=[(1., 1., 0.)], opacities=[1.])
    #     roiActor.RotateX(xRot)
    #     roiActor.RotateY(yRot)
    #     roiActor.RotateZ(zRot)
    #     ren.add(roiActor)

    ren.set_camera(position=(0, 0, 1),
                   focal_point=(0, 0, 0),
                   view_up=(0, 1, 0))  # , verbose=False)

    # window.record(ren, out_path=target, size=(1200, 1200), n_frames=1)
    window.snapshot(ren, fname=target, size=(1200, 1200), offscreen=True)
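# Hypothetical call to plotTrk; file names, the slice index, and the
# rotation angles are placeholders. Rotations must be numeric since they
# are applied unconditionally to the streamline actor.
plotTrk('bundle.trk', target='bundle.png', anatFile='t1.nii.gz',
        xSlice=90, ySlice=None, zSlice=None, xRot=0, yRot=0, zRot=0)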
        if d == 0:
            slice_actor.display(z=int(slice_view[0]))
        elif d == 2:
            slice_actor.display(y=int(slice_view[2]))
        else:
            slice_actor.display(x=int(slice_view[1]))

        renderer.add(slice_actor)

        # show_m = window.ShowManager(renderer, size=(800, 700))
        # show_m.initialize()
        # show_m.render()
        # show_m.start()
        # renderer.camera_info()  # get location of camera

        window.snapshot(renderer,
                        fname='images/' + imagename + '_' + views[d] + '.png',
                        size=(800, 800),
                        offscreen=True, order_transparent=False)

for d in range(len(camera_pos)):  # directions: axial, sagittal, coronal
    renderer = window.Renderer()

    for z in range(len(all_bundles)):
        stream_actor = actor.streamtube(all_bundles[z],
                                        colors=all_colors[z],
                                        linewidth=.5)
        renderer.set_camera(position=camera_pos[d],
                            focal_point=focal_point[d],
                            view_up=view_up[d])
        renderer.add(stream_actor)

    slice_actor = actor.slicer(data, affine, value_range)
    if d == 0: