import os
from tempfile import mkstemp

import numpy as np
import numpy.testing as npt

from dipy.data import fetch_viz_icons, get_sphere, read_viz_icons
from dipy.viz import actor, widget, window


def test_parallel_projection():

    ren = window.Renderer()
    axes = actor.axes()
    ren.add(axes)

    axes2 = actor.axes()
    axes2.SetPosition((2, 0, 0))
    ren.add(axes2)

    # Put the camera at an angle so that it can show the difference
    # between perspective and parallel projection
    ren.set_camera((1.5, 1.5, 1.5))
    ren.GetActiveCamera().Zoom(2)

    # window.show(ren, reset_camera=True)
    ren.reset_camera()
    arr = window.snapshot(ren)

    ren.projection('parallel')
    # window.show(ren, reset_camera=False)
    arr2 = window.snapshot(ren)

    # With parallel projection the two axes have the same size and therefore
    # occupy more pixels than with perspective projection, where the farther
    # axes appear smaller.
    npt.assert_equal(np.sum(arr2 > 0) > np.sum(arr > 0), True)
def test_peak_slicer(interactive=False):

    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
    peak_dirs = np.zeros((11, 11, 11, 3, 3))
    peak_values = np.random.rand(11, 11, 11, 3)
    peak_dirs[:, :, :] = _peak_dirs

    renderer = window.Renderer()
    peak_actor = actor.peak_slicer(peak_dirs)
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for k in range(11):
        peak_actor.display_extent(0, 10, 0, 10, k, k)

    for j in range(11):
        peak_actor.display_extent(0, 10, j, j, 0, 10)

    for i in range(11):
        peak_actor.display(i, None, None)

    renderer.rm_all()
    peak_actor = actor.peak_slicer(
        peak_dirs,
        peak_values,
        mask=None,
        affine=np.diag([3, 2, 1, 1]),
        colors=None,
        opacity=1,
        linewidth=3,
        lod=True,
        lod_points=10 ** 4,
        lod_points_size=3)

    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    ex = ['vtkLODActor', 'vtkOpenGLActor', 'vtkOpenGLActor', 'vtkOpenGLActor']
    npt.assert_equal(report.actors_classnames, ex)
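# In practice, peak directions and values usually come from dipy's peak
# extraction rather than being built by hand as above. A sketch, assuming the
# standard peaks_from_model workflow (`model`, `data` and `mask` are
# placeholders for a fitted reconstruction model and its inputs):
#
#     from dipy.direction import peaks_from_model
#
#     pam = peaks_from_model(model, data, sphere,
#                            relative_peak_threshold=.5,
#                            min_separation_angle=25, mask=mask)
#     peak_actor = actor.peak_slicer(pam.peak_dirs, pam.peak_values)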
def test_text_widget():

    interactive = False

    renderer = window.Renderer()
    axes = actor.axes()
    window.add(renderer, axes)
    renderer.ResetCamera()

    show_manager = window.ShowManager(renderer, size=(900, 900))

    if interactive:
        show_manager.initialize()
        show_manager.render()

    fetch_viz_icons()
    button_png = read_viz_icons(fname='home3.png')

    def button_callback(obj, event):
        print('Button Pressed')

    button = widget.button(show_manager.iren,
                           show_manager.ren,
                           button_callback,
                           button_png, (.8, 1.2), (100, 100))

    global rulez
    rulez = True

    def text_callback(obj, event):
        global rulez
        print('Text selected')
        if rulez:
            obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
            rulez = False
        else:
            obj.GetTextActor().SetInput("Diffusion Imaging in Python")
            rulez = True
        show_manager.render()

    text = widget.text(show_manager.iren,
                       show_manager.ren,
                       text_callback,
                       message="Diffusion Imaging in Python",
                       left_down_pos=(0., 0.),
                       right_top_pos=(0.4, 0.05),
                       opacity=1.,
                       border=False)

    if not interactive:
        button.Off()
        text.Off()

    if interactive:
        show_manager.render()
        show_manager.start()

    arr = window.snapshot(renderer, size=(900, 900))
    report = window.analyze_snapshot(arr)
    npt.assert_equal(report.objects, 3)
def test_renderer():

    ren = window.Renderer()

    # Background color for the renderer: (1, 0.5, 0). The extra 0.001 keeps
    # the float values off exact rounding boundaries when they are converted
    # to int.
    bg_float = (1, 0.501, 0)

    # The same color as it will appear in the snapshot, on the 0-255 uint8
    # scale.
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
    # window.show(ren)
    arr = window.snapshot(ren)

    report = window.analyze_snapshot(arr,
                                     bg_color=bg_color,
                                     colors=[bg_color, (0, 127, 0)])
    npt.assert_equal(report.objects, 0)
    npt.assert_equal(report.colors_found, [True, False])

    axes = actor.axes()
    ren.add(axes)
    # window.show(ren)

    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm(axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    window.add(ren, axes)
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 1)

    ren.rm_all()
    arr = window.snapshot(ren)
    report = window.analyze_snapshot(arr, bg_color)
    npt.assert_equal(report.objects, 0)

    ren2 = window.renderer(bg_float)
    ren2.background((0, 0, 0.))

    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.bg_color, (0, 0, 0))

    ren2.add(axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 3)

    window.rm(ren2, axes)
    report = window.analyze_renderer(ren2)
    npt.assert_equal(report.actors, 0)
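# The 0.501 above is easy to misread: NumPy rounds half to even, so a channel
# value landing exactly on a .5 boundary does not reliably round up, and
# float error can push it either way. Nudging the float off the boundary
# makes the uint8 conversion deterministic. A quick illustration (a sketch,
# not collected as a test):


def _example_background_rounding():
    assert np.round(126.5) == 126  # half rounds to even: down here
    assert np.round(127.5) == 128  # ... and up here
    assert np.round(255 * 0.501) == 128  # off the boundary: unambiguous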
def genren_AGG(sls, sls2=None, niidata=None, roi1=None, roi2=None, roi3=None,
               aff=np.eye(4), putpath='test.png', showme=False,
               showaxes=False):
    """Render streamlines (plus optional ROI contours and an image slice)
    with a fixed camera and record the scene to `putpath`."""
    renderer = window.Renderer()
    renderer.set_camera(position=(-606.93, -153.23, 28.70),
                        focal_point=(2.78, 11.06, 15.66),
                        view_up=(0, 0, 1))

    stream_actor = actor.line(sls)
    renderer.add(stream_actor)

    if sls2 is not None:
        stream_actor2 = actor.line(sls2, colors=(1, 1, 1))
        renderer.add(stream_actor2)

    if roi1 is not None:
        contour_actor1 = actor.contour_from_roi(roi1, affine=aff,
                                                color=(1., 1., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor1)

    if roi2 is not None:
        contour_actor2 = actor.contour_from_roi(roi2, affine=aff,
                                                color=(1., 0., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor2)

    if roi3 is not None:
        contour_actor3 = actor.contour_from_roi(roi3, affine=aff,
                                                color=(0., 0., 1.),
                                                opacity=0.5)
        renderer.add(contour_actor3)

    if niidata is not None:
        slice_actor = actor.slicer(niidata, affine=aff)
        renderer.add(slice_actor)

    if showaxes:
        axes = actor.axes()
        renderer.add(axes)

    if showme:
        window.show(renderer, size=(500, 500), reset_camera=False)

    window.record(renderer, out_path=putpath, size=(500, 500))
    # renderer.camera_info()
    del renderer
    return putpath
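# Hypothetical usage of genren_AGG -- the streamlines below are random and
# `bundle_snapshot.png` is an illustrative output path, not a file used by
# the test suite:
#
#     sls = [10 * np.random.rand(20, 3).astype('f4') for _ in range(50)]
#     genren_AGG(sls, putpath='bundle_snapshot.png', showaxes=True)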
def test_active_camera():
    renderer = window.Renderer()
    renderer.add(actor.axes(scale=(1, 1, 1)))

    renderer.reset_camera()
    renderer.reset_clipping_range()

    direction = renderer.camera_direction()
    position, focal_point, view_up = renderer.get_camera()

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)
    position, focal_point, view_up = renderer.get_camera()
    npt.assert_almost_equal(np.dot(direction, position), -1)

    renderer.zoom(1.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_array_almost_equal(position, new_position)

    renderer.zoom(1)

    # rotate around the focal point
    renderer.azimuth(90)
    position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position, (1.0, 0.0, 0))

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    # rotate around the camera's own center
    renderer.yaw(90)
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=[(0, 0, 0)])
    npt.assert_equal(report.colors_found, [True])

    renderer.yaw(-90)
    renderer.elevation(90)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, colors=(0, 255, 0))
    npt.assert_equal(report.colors_found, [True])

    renderer.set_camera((0., 0., 1.), (0., 0., 0), view_up)

    # vertical rotation of the camera around the focal point
    renderer.pitch(10)
    renderer.pitch(-10)

    # rotate around the direction of projection
    renderer.roll(90)

    # inverted normalized distance from focal point along the direction
    # of the camera
    position, _, _ = renderer.get_camera()
    renderer.dolly(0.5)
    new_position, _, _ = renderer.get_camera()
    npt.assert_almost_equal(position[2], 0.5 * new_position[2])
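# The rotation calls in test_active_camera differ in their pivot: azimuth()
# orbits the camera around the focal point (the position moves), while yaw()
# swings the view direction about the camera itself (the position stays
# fixed). A minimal sketch of that distinction with the same Renderer API
# (a helper, not collected as a test):


def _example_camera_pivots():
    ren = window.Renderer()
    ren.set_camera((0., 0., 1.), (0., 0., 0.), (0., 1., 0.))

    before, _, _ = ren.get_camera()
    ren.yaw(90)  # rotates the view about the camera position
    after, _, _ = ren.get_camera()
    assert np.allclose(before, after)

    ren.azimuth(90)  # orbits the camera around the focal point
    moved, _, _ = ren.get_camera()
    assert not np.allclose(after, moved)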
def test_odf_slicer(interactive=False):

    sphere = get_sphere('symmetric362')

    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)
    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='jet')
    fa = np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))

    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)

    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)

    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)
    os.remove(fname)
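# Assuming this module follows the numpy.testing convention used across
# dipy's test suite:

if __name__ == "__main__":
    npt.run_module_suite()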