def test_text_widget():

    interactive = False

    renderer = window.Renderer()
    axes = actor.axes()
    window.add(renderer, axes)
    renderer.ResetCamera()

    show_manager = window.ShowManager(renderer, size=(900, 900))

    if interactive:
        show_manager.initialize()
        show_manager.render()

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')

    def button_callback(obj, event):
        print('Button Pressed')

    button = widget.button(show_manager.iren,
                           show_manager.ren,
                           button_callback,
                           button_png, (.8, 1.2), (100, 100))

    global rulez
    rulez = True

    def text_callback(obj, event):

        global rulez
        print('Text selected')
        if rulez:
            obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
            rulez = False
        else:
            obj.GetTextActor().SetInput("Diffusion Imaging in Python")
            rulez = True
        show_manager.render()

    text = widget.text(show_manager.iren,
                       show_manager.ren,
                       text_callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.),
                       right_top_pos=(0.4, 0.05),
                       opacity=1.,
                       border=False)

    if not interactive:
        button.Off()
        text.Off()

    if interactive:
        show_manager.render()
        show_manager.start()

    arr = window.snapshot(renderer, size=(900, 900))
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
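
# A minimal interactive sketch (not part of the test suite) of the same
# button/text widget setup, for running by hand.  The helper name below is
# hypothetical and it only uses window/widget/actor calls already exercised in
# test_text_widget.  It opens a render window, so it is neither collected by
# the test runner nor called automatically.
def _show_text_widget_example():
    renderer = window.Renderer()
    renderer.add(actor.axes())
    show_manager = window.ShowManager(renderer, size=(600, 600))
    show_manager.initialize()

    def callback(obj, event):
        # echo widget interactions to the console
        print('widget event received')

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')
    button = widget.button(show_manager.iren, show_manager.ren, callback,
                           button_png, (.8, 1.2), (100, 100))
    text = widget.text(show_manager.iren, show_manager.ren, callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.), right_top_pos=(0.4, 0.05),
                       opacity=1., border=False)
    show_manager.render()
    show_manager.start()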
def test_renderer():

    ren = window.Renderer()

    # background color for renderer (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
    # window.show(ren)
    arr = window.snapshot(ren)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    ren.add(axes)
    # window.show(ren)

    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm(axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    window.add(ren, axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm_all()
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.renderer(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 3)

    window.rm(ren2, axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 0)
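
# Minimal sketch (illustration only, hypothetical helper name): the off-screen
# snapshot/analysis workflow exercised by test_renderer, usable outside a test
# to check what actually ended up in a scene.
def _renderer_report_example():
    ren = window.Renderer()
    ren.background((0., 0., 0.))
    ren.add(actor.axes())
    arr = window.snapshot(ren)                        # off-screen render to an ndarray
    report = window.analyze_snapshot(arr, find_objects=True)
    print(report.objects)                             # number of connected objects found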
def test_streamtube_and_line_actors():

    renderer = window.renderer()

    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])

    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    window.add(renderer, c)

    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    window.add(renderer, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # as before with splines
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])
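
# Minimal sketch (illustration only, hypothetical helper name): the same
# two-streamline bundle drawn once as polylines and once as tubes, using only
# the actor/window calls exercised in test_streamtube_and_line_actors.
def _line_vs_streamtube_example():
    lines = [np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]]),
             np.array([[0.5, 0, 0], [1.5, 1, 1], [2.5, 2, 2.]])]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    ren = window.renderer()
    ren.add(actor.line(lines, colors, linewidth=3))       # thin polylines
    tubes = actor.streamtube(lines, colors, linewidth=.1)
    tubes.SetPosition(2, 0, 0)                            # shift tubes so both are visible
    ren.add(tubes)
    return window.snapshot(ren)                           # off-screen render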
def test_slicer():

    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    window.add(renderer, slicer)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy pixels in numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png')

    # diagnostic output
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)
    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)
    print(report)

    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()

    window.add(renderer, slicer)

    # save pixels in png file not a numpy array
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        arr = window.snapshot(renderer, fname)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)

    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut2 = slicer_lut.copy()
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
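
# Minimal sketch (illustration only, hypothetical helper name): slice a random
# volume with the lookup table used above and write the off-screen render to a
# temporary png, using only calls already exercised in test_slicer.
def _slicer_example():
    data = 255 * np.random.rand(50, 50, 50)
    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    slicer = actor.slicer(data, np.eye(4), lookup_colormap=lut)
    slicer.display(None, None, 25)                    # show the axial slice at z=25
    ren = window.renderer()
    ren.add(slicer)
    ren.reset_camera()
    ren.reset_clipping_range()
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        return window.snapshot(ren, fname)            # ndarray back, png on disk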
def test_bundle_maps():

    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()

    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
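
# Minimal sketch (illustration only, hypothetical helper name): color a bundle
# by per-point scalar values with the orange-red lookup table from
# test_bundle_maps and add a matching scalar bar.
def _bundle_map_example():
    bundle, _ = center_streamlines(fornix_streamlines())
    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)          # one scalar per streamline point
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))
    ren = window.renderer()
    ren.add(actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut))
    ren.add(actor.scalar_bar(lut, ' '))
    return window.snapshot(ren)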